[yt-svn] commit/yt: 32 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Tue Jun 28 08:30:09 PDT 2016


32 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/cb28ea909ec8/
Changeset:   cb28ea909ec8
Branch:      yt
User:        charlie_watson
Date:        2016-06-13 20:09:15+00:00
Summary:     added first four def funcs to Node class
Affected #:  1 file

diff -r 023bffb03ceb52b06b0e821ea481c660006502d3 -r cb28ea909ec8c88a405a0543e83d9c4355b8205d yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -92,17 +92,34 @@
     def __dealloc__(self):
         if self.split != NULL: free(self.split)
 
-def get_left_edge(Node node):
-    le = np.empty(3, dtype='float64')
-    for i in range(3):
-        le[i] = node.left_edge[i]
-    return le
+# Begin input of converted methods
 
-def get_right_edge(Node node):
-    re = np.empty(3, dtype='float64')
-    for i in range(3):
-        re[i] = node.right_edge[i]
-    return re
+    def get_left_edge(self):
+        le = np.empty(3, dtype='float64')
+        for i in range(3):
+            le[i] = self.left_edge[i]
+        return le
+
+    def get_right_edge(self):
+        re = np.empty(3, dtype='float64')
+        for i in range(3):
+            re[i] = self.right_edge[i]
+        return re
+
+    def set_dirty(self, bint state):
+        for node in depth_traverse(self):
+            node.dirty = state
+
+    def kd_traverse(self, viewpoint=None):
+        if viewpoint is None:
+            for node in depth_traverse(self):
+                if _kd_is_leaf(node) == 1 and node.grid != -1:
+                    yield node
+        else:
+            for node in viewpoint_traverse(self, viewpoint):
+                if _kd_is_leaf(node) == 1 and node.grid != -1:
+                    yield node
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -133,19 +150,7 @@
     else:
         return 0
 
-def set_dirty(Node trunk, bint state):
-    for node in depth_traverse(trunk):
-        node.dirty = state
 
-def kd_traverse(Node trunk, viewpoint=None):
-    if viewpoint is None:
-        for node in depth_traverse(trunk):
-            if _kd_is_leaf(node) == 1 and node.grid != -1:
-                yield node
-    else:
-        for node in viewpoint_traverse(trunk, viewpoint):
-            if _kd_is_leaf(node) == 1 and node.grid != -1:
-                yield node
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -827,4 +832,3 @@
     """
     assert(point_in_node(node, point))
     return _find_node(node, point)
-


https://bitbucket.org/yt_analysis/yt/commits/211223c2660a/
Changeset:   211223c2660a
Branch:      yt
User:        charlie_watson
Date:        2016-06-13 21:23:32+00:00
Summary:     all methods starting with Node node are now self
Affected #:  1 file

diff -r cb28ea909ec8c88a405a0543e83d9c4355b8205d -r 211223c2660aed63b7462ca793a9fd3aff867172 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -120,6 +120,688 @@
                 if _kd_is_leaf(node) == 1 and node.grid != -1:
                     yield node
 
+    def add_pygrid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size):
+
+        """
+        The entire purpose of this function is to move everything from ndarrays
+        to internal C pointers.
+        """
+        cdef np.float64_t[:] pgles = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        cdef np.float64_t[:] pgres = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        cdef int j
+        for j in range(3):
+            pgles[j] = gle[j]
+            pgres[j] = gre[j]
+
+        add_grid(self, pgles, pgres, gid, rank, size)
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef add_grid(self,
+                           np.float64_t[:] gle,
+                           np.float64_t[:] gre,
+                           int gid,
+                           int rank,
+                           int size):
+
+            if not should_i_build(self, rank, size):
+                return
+
+            if _kd_is_leaf(self) == 1:
+                insert_grid(self, gle, gre, gid, rank, size)
+            else:
+                less_id = gle[self.split.dim] < self.split.pos
+                if less_id:
+                    add_grid(self.left, gle, gre,
+                             gid, rank, size)
+
+                greater_id = gre[self.split.dim] > self.split.pos
+                if greater_id:
+                    add_grid(self.right, gle, gre,
+                             gid, rank, size)
+            return
+
+
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef insert_grid(self,
+                        np.float64_t[:] gle,
+                        np.float64_t[:] gre,
+                        int grid_id,
+                        int rank,
+                        int size):
+            if not should_i_build(self, rank, size):
+                return
+
+            # If we should continue to split based on parallelism, do so!
+            if should_i_split(self, rank, size):
+                geo_split(self, gle, gre, grid_id, rank, size)
+                return
+
+            cdef int contained = 1
+            for i in range(3):
+                if gle[i] > self.left_edge[i] or\
+                   gre[i] < self.right_edge[i]:
+                    contained *= 0
+
+            if contained == 1:
+                self.grid = grid_id
+                assert(self.grid != -1)
+                return
+
+            # Split the grid
+            cdef int check = split_grid(self, gle, gre, grid_id, rank, size)
+            # If check is -1, then we have found a place where there are no choices.
+            # Exit out and set the node to None.
+            if check == -1:
+                self.grid = -1
+            return
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cpdef add_grids(self,
+                            int ngrids,
+                            np.float64_t[:,:] gles,
+                            np.float64_t[:,:] gres,
+                            np.int64_t[:] gids,
+                            int rank,
+                            int size):
+            cdef int i, j, nless, ngreater, index
+            cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+            cdef np.int64_t[:] l_ids, g_ids
+            if not should_i_build(self, rank, size):
+                return
+
+            if _kd_is_leaf(self) == 1:
+                insert_grids(self, ngrids, gles, gres, gids, rank, size)
+                return
+
+            less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+            greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+            nless = 0
+            ngreater = 0
+            for i in range(ngrids):
+                if gles[i, self.split.dim] < self.split.pos:
+                    less_ids[nless] = i
+                    nless += 1
+
+                if gres[i, self.split.dim] > self.split.pos:
+                    greater_ids[ngreater] = i
+                    ngreater += 1
+
+            #print 'nless: %i' % nless
+            #print 'ngreater: %i' % ngreater
+
+            if nless > 0:
+                less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+                less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+                l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+                for i in range(nless):
+                    index = less_ids[i]
+                    l_ids[i] = gids[index]
+                    for j in range(3):
+                        less_gles[i,j] = gles[index,j]
+                        less_gres[i,j] = gres[index,j]
+
+                add_grids(self.left, nless, less_gles, less_gres,
+                          l_ids, rank, size)
+
+            if ngreater > 0:
+                greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+                greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+                g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+                for i in range(ngreater):
+                    index = greater_ids[i]
+                    g_ids[i] = gids[index]
+                    for j in range(3):
+                        greater_gles[i,j] = gles[index,j]
+                        greater_gres[i,j] = gres[index,j]
+
+                add_grids(self.right, ngreater, greater_gles, greater_gres,
+                          g_ids, rank, size)
+
+            return
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef int should_i_split(self, int rank, int size):
+            if self.node_id < size and self.node_id > 0:
+                return 1
+            return 0
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef void insert_grids(self,
+                               int ngrids,
+                               np.float64_t[:,:] gles,
+                               np.float64_t[:,:] gres,
+                               np.int64_t[:] gids,
+                               int rank,
+                               int size):
+
+            if not should_i_build(self, rank, size) or ngrids == 0:
+                return
+            cdef int contained = 1
+            cdef int check
+
+            if ngrids == 1:
+                # If we should continue to split based on parallelism, do so!
+                if should_i_split(self, rank, size):
+                    geo_split(self, gles[0,:], gres[0,:], gids[0], rank, size)
+                    return
+
+                for i in range(3):
+                    contained *= gles[0,i] <= self.left_edge[i]
+                    contained *= gres[0,i] >= self.right_edge[i]
+
+                if contained == 1:
+                    # print 'Node fully contained, setting to grid: %i' % gids[0]
+                    self.grid = gids[0]
+                    assert(self.grid != -1)
+                    return
+
+            # Split the grids
+            check = split_grids(self, ngrids, gles, gres, gids, rank, size)
+            # If check is -1, then we have found a place where there are no choices.
+            # Exit out and set the node to None.
+            if check == -1:
+                self.grid = -1
+            return
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef split_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size):
+
+            cdef int j
+            cdef np.uint8_t[:] less_ids, greater_ids
+            data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
+            for j in range(3):
+                data[0,0,j] = gle[j]
+                data[0,1,j] = gre[j]
+
+            less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+            greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+
+            best_dim, split_pos, nless, ngreater = \
+                kdtree_get_choices(1, data, self.left_edge, self.right_edge,
+                                  less_ids, greater_ids)
+
+            # If best_dim is -1, then we have found a place where there are no choices.
+            # Exit out and set the node to None.
+            if best_dim == -1:
+                return -1
+
+
+            split = <Split *> malloc(sizeof(Split))
+            split.dim = best_dim
+            split.pos = split_pos
+
+            # Create a Split
+            divide(self, split)
+
+            # Populate Left Node
+            #print 'Inserting left node', self.left_edge, self.right_edge
+            if nless == 1:
+                insert_grid(self.left, gle, gre,
+                             gid, rank, size)
+
+            # Populate Right Node
+            #print 'Inserting right node', self.left_edge, self.right_edge
+            if ngreater == 1:
+                insert_grid(self.right, gle, gre,
+                             gid, rank, size)
+
+            return 0
+
+        @cython.boundscheck(False)
+        @cython.wraparound(False)
+        @cython.cdivision(True)
+        cdef kdtree_get_choices(int n_grids,
+                                np.float64_t[:,:,:] data,
+                                np.float64_t[:] l_corner,
+                                np.float64_t[:] r_corner,
+                                np.uint8_t[:] less_ids,
+                                np.uint8_t[:] greater_ids,
+                               ):
+            cdef int i, j, k, dim, n_unique, best_dim, my_split
+            cdef np.float64_t split
+            cdef np.float64_t[:,:] uniquedims
+            cdef np.float64_t[:] uniques
+            uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
+            my_max = 0
+            my_split = 0
+            best_dim = -1
+            for dim in range(3):
+                n_unique = 0
+                uniques = uniquedims[dim]
+                for i in range(n_grids):
+                    # Check for disqualification
+                    for j in range(2):
+                        # print "Checking against", i,j,dim,data[i,j,dim]
+                        if not (l_corner[dim] < data[i][j][dim] and
+                                data[i][j][dim] < r_corner[dim]):
+                            # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                            continue
+                        skipit = 0
+                        # Add our left ...
+                        for k in range(n_unique):
+                            if uniques[k] == data[i][j][dim]:
+                                skipit = 1
+                                # print "Identified", uniques[k], data[i,j,dim], n_unique
+                                break
+                        if skipit == 0:
+                            uniques[n_unique] = data[i][j][dim]
+                            n_unique += 1
+                if n_unique > my_max:
+                    best_dim = dim
+                    my_max = n_unique
+                    my_split = (n_unique-1)/2
+            if best_dim == -1:
+                return -1, 0, 0, 0
+            # I recognize how lame this is.
+            cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+            for i in range(my_max):
+                # print "Setting tarr: ", i, uniquedims[best_dim][i]
+                tarr[i] = uniquedims[best_dim][i]
+            tarr.sort()
+            split = tarr[my_split]
+            cdef int nless=0, ngreater=0
+            for i in range(n_grids):
+                if data[i][0][best_dim] < split:
+                    less_ids[i] = 1
+                    nless += 1
+                else:
+                    less_ids[i] = 0
+                if data[i][1][best_dim] > split:
+                    greater_ids[i] = 1
+                    ngreater += 1
+                else:
+                    greater_ids[i] = 0
+
+            # Return out unique values
+            return best_dim, split, nless, ngreater
+
+        #@cython.boundscheck(False)
+        #@cython.wraparound(False)
+        #@cython.cdivision(True)
+        cdef int split_grids(self,
+                               int ngrids,
+                               np.float64_t[:,:] gles,
+                               np.float64_t[:,:] gres,
+                               np.int64_t[:] gids,
+                               int rank,
+                               int size):
+            # Find a Split
+            cdef int i, j, index
+            cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+            cdef np.int64_t[:] l_ids, g_ids
+            if ngrids == 0: return 0
+
+            data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
+
+            for i in range(ngrids):
+                for j in range(3):
+                    data[i,0,j] = gles[i,j]
+                    data[i,1,j] = gres[i,j]
+
+            less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+            greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+
+            best_dim, split_pos, nless, ngreater = \
+                kdtree_get_choices(ngrids, data, self.left_edge, self.right_edge,
+                                  less_ids, greater_ids)
+
+
+            # If best_dim is -1, then we have found a place where there are no choices.
+            # Exit out and set the node to None.
+            if best_dim == -1:
+                print 'Failed to split grids.'
+                return -1
+
+            split = <Split *> malloc(sizeof(Split))
+            split.dim = best_dim
+            split.pos = split_pos
+
+            # Create a Split
+            divide(self, split)
+
+            less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+            greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+            nless = 0
+            ngreater = 0
+            for i in range(ngrids):
+                if less_ids[i] == 1:
+                    less_index[nless] = i
+                    nless += 1
+
+                if greater_ids[i] == 1:
+                    greater_index[ngreater] = i
+                    ngreater += 1
+
+            if nless > 0:
+                less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+                less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+                l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+                for i in range(nless):
+                    index = less_index[i]
+                    l_ids[i] = gids[index]
+                    for j in range(3):
+                        less_gles[i,j] = gles[index,j]
+                        less_gres[i,j] = gres[index,j]
+
+                # Populate Left Node
+                #print 'Inserting left node', self.left_edge, self.right_edge
+                insert_grids(self.left, nless, less_gles, less_gres,
+                             l_ids, rank, size)
+
+            if ngreater > 0:
+                greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+                greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+                g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+                for i in range(ngreater):
+                    index = greater_index[i]
+                    g_ids[i] = gids[index]
+                    for j in range(3):
+                        greater_gles[i,j] = gles[index,j]
+                        greater_gres[i,j] = gres[index,j]
+
+                # Populate Right Node
+                #print 'Inserting right node', self.left_edge, self.right_edge
+                insert_grids(self.right, ngreater, greater_gles, greater_gres,
+                             g_ids, rank, size)
+
+            return 0
+
+        cdef geo_split(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int grid_id,
+                       int rank,
+                       int size):
+            cdef int big_dim = 0
+            cdef int i
+            cdef np.float64_t v, my_max = 0.0
+
+            for i in range(3):
+                v = gre[i] - gle[i]
+                if v > my_max:
+                    my_max = v
+                    big_dim = i
+
+            new_pos = (gre[big_dim] + gle[big_dim])/2.
+
+            lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+            lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+            rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+            rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+
+            for j in range(3):
+                lnew_gle[j] = gle[j]
+                lnew_gre[j] = gre[j]
+                rnew_gle[j] = gle[j]
+                rnew_gre[j] = gre[j]
+
+            split = <Split *> malloc(sizeof(Split))
+            split.dim = big_dim
+            split.pos = new_pos
+
+            # Create a Split
+            divide(self, split)
+
+            #lnew_gre[big_dim] = new_pos
+            # Populate Left Node
+            #print 'Inserting left node', self.left_edge, self.right_edge
+            insert_grid(self.left, lnew_gle, lnew_gre,
+                    grid_id, rank, size)
+
+            #rnew_gle[big_dim] = new_pos
+            # Populate Right Node
+            #print 'Inserting right node', self.left_edge, self.right_edge
+            insert_grid(self.right, rnew_gle, rnew_gre,
+                    grid_id, rank, size)
+            return
+
+        cdef void divide(self, Split * split):
+            # Create a Split
+            self.split = split
+
+            cdef np.float64_t[:] le = np.empty(3, dtype='float64')
+            cdef np.float64_t[:] re = np.empty(3, dtype='float64')
+
+            cdef int i
+            for i in range(3):
+                le[i] = self.left_edge[i]
+                re[i] = self.right_edge[i]
+            re[split.dim] = split.pos
+
+            self.left = Node(self, None, None,
+                             le, re, self.grid,
+                             _lchild_id(self.node_id))
+
+            re[split.dim] = self.right_edge[split.dim]
+            le[split.dim] = split.pos
+            self.right = Node(self, None, None,
+                              le, re, self.grid,
+                              _rchild_id(self.node_id))
+
+            return
+        #
+        def kd_sum_volume(self):
+            cdef np.float64_t vol = 1.0
+            if (self.left is None) and (self.right is None):
+                if self.grid == -1:
+                    return 0.0
+                for i in range(3):
+                    vol *= self.right_edge[i] - self.left_edge[i]
+                return vol
+            else:
+                return kd_sum_volume(self.left) + kd_sum_volume(self.right)
+
+        def kd_node_check(self):
+            assert (self.left is None) == (self.right is None)
+            if (self.left is None) and (self.right is None):
+                if self.grid != -1:
+                    return np.prod(self.right_edge - self.left_edge)
+                else: return 0.0
+            else:
+                return kd_node_check(self.left)+kd_node_check(self.right)
+
+        def kd_is_leaf(self):
+            cdef int has_l_child = self.left == None
+            cdef int has_r_child = self.right == None
+            assert has_l_child == has_r_child
+            return has_l_child
+
+        cdef int _kd_is_leaf(self):
+            if self.left is None or self.right is None:
+                return 1
+            return 0
+
+        def step_depth(Node current, Node previous):
+            '''
+            Takes a single step in the depth-first traversal
+            '''
+            if _kd_is_leaf(current) == 1: # At a leaf, move back up
+                previous = current
+                current = current.parent
+
+            elif current.parent is previous: # Moving down, go left first
+                previous = current
+                if current.left is not None:
+                    current = current.left
+                elif current.right is not None:
+                    current = current.right
+                else:
+                    current = current.parent
+
+            elif current.left is previous: # Moving up from left, go right
+                previous = current
+                if current.right is not None:
+                    current = current.right
+                else:
+                    current = current.parent
+
+            elif current.right is previous: # Moving up from right child, move up
+                previous = current
+                current = current.parent
+
+            return current, previous
+
+        def depth_traverse(Node trunk, max_node=None):
+            '''
+            Yields a depth-first traversal of the kd tree always going to
+            the left child before the right.
+            '''
+            current = trunk
+            previous = None
+            if max_node is None:
+                max_node = np.inf
+            while current is not None:
+                yield current
+                current, previous = step_depth(current, previous)
+                if current is None: break
+                if current.node_id >= max_node:
+                    current = current.parent
+                    previous = current.right
+
+        def depth_first_touch(Node tree, max_node=None):
+            '''
+            Yields a depth-first traversal of the kd tree always going to
+            the left child before the right.
+            '''
+            current = tree
+            previous = None
+            if max_node is None:
+                max_node = np.inf
+            while current is not None:
+                if previous is None or previous.parent != current:
+                    yield current
+                current, previous = step_depth(current, previous)
+                if current is None: break
+                if current.node_id >= max_node:
+                    current = current.parent
+                    previous = current.right
+
+        def breadth_traverse(Node tree):
+            '''
+            Yields a breadth-first traversal of the kd tree always going to
+            the left child before the right.
+            '''
+            current = tree
+            previous = None
+            while current is not None:
+                yield current
+                current, previous = step_depth(current, previous)
+
+
+        def viewpoint_traverse(Node tree, viewpoint):
+            '''
+            Yields a viewpoint dependent traversal of the kd-tree.  Starts
+            with nodes furthest away from viewpoint.
+            '''
+
+            current = tree
+            previous = None
+            while current is not None:
+                yield current
+                current, previous = step_viewpoint(current, previous, viewpoint)
+
+        def step_viewpoint(Node current,
+                           Node previous,
+                           viewpoint):
+            '''
+            Takes a single step in the viewpoint based traversal.  Always
+            goes to the node furthest away from viewpoint first.
+            '''
+            if _kd_is_leaf(current) == 1: # At a leaf, move back up
+                previous = current
+                current = current.parent
+            elif current.split.dim is None: # This is a dead node
+                previous = current
+                current = current.parent
+
+            elif current.parent is previous: # Moving down
+                previous = current
+                if viewpoint[current.split.dim] <= current.split.pos:
+                    if current.right is not None:
+                        current = current.right
+                    else:
+                        previous = current.right
+                else:
+                    if current.left is not None:
+                        current = current.left
+                    else:
+                        previous = current.left
+
+            elif current.right is previous: # Moving up from right
+                previous = current
+                if viewpoint[current.split.dim] <= current.split.pos:
+                    if current.left is not None:
+                        current = current.left
+                    else:
+                        current = current.parent
+                else:
+                    current = current.parent
+
+            elif current.left is previous: # Moving up from left child
+                previous = current
+                if viewpoint[current.split.dim] > current.split.pos:
+                    if current.right is not None:
+                        current = current.right
+                    else:
+                        current = current.parent
+                else:
+                    current = current.parent
+
+            return current, previous
+
+        cdef int point_in_node(self,
+                               np.float64_t[:] point):
+            cdef int i
+            cdef int inside = 1
+            for i in range(3):
+                inside *= self.left_edge[i] <= point[i]
+                inside *= self.right_edge[i] > point[i]
+            return inside
+
+        cdef Node _find_node(self, np.float64_t[:] point):
+            while _kd_is_leaf(self) == 0:
+                if point[self.split.dim] < self.split.pos:
+                    self = self.left
+                else:
+                    self = self.right
+            return self
+
+        def find_node(self,
+                      np.float64_t[:] point):
+            """
+            Find the AMRKDTree node enclosing a position
+            """
+            assert(point_in_node(self, point))
+            return _find_node(self, point)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -142,693 +824,10 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef int should_i_build(Node node, int rank, int size):
-    if (node.node_id < size) or (node.node_id >= 2*size):
+cdef int should_i_build(self, int rank, int size):
+    if (self.node_id < size) or (self.node_id >= 2*size):
         return 1
-    elif node.node_id - size == rank:
+    elif self.node_id - size == rank:
         return 1
     else:
         return 0
-
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef add_grid(Node node,
-                   np.float64_t[:] gle,
-                   np.float64_t[:] gre,
-                   int gid,
-                   int rank,
-                   int size):
-
-    if not should_i_build(node, rank, size):
-        return
-
-    if _kd_is_leaf(node) == 1:
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-    return
-
-def add_pygrid(Node node,
-                   np.float64_t[:] gle,
-                   np.float64_t[:] gre,
-                   int gid,
-                   int rank,
-                   int size):
-
-    """
-    The entire purpose of this function is to move everything from ndarrays
-    to internal C pointers.
-    """
-    cdef np.float64_t[:] pgles = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    cdef np.float64_t[:] pgres = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    cdef int j
-    for j in range(3):
-        pgles[j] = gle[j]
-        pgres[j] = gre[j]
-
-    add_grid(node, pgles, pgres, gid, rank, size)
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef insert_grid(Node node,
-                np.float64_t[:] gle,
-                np.float64_t[:] gre,
-                int grid_id,
-                int rank,
-                int size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    cdef int contained = 1
-    for i in range(3):
-        if gle[i] > node.left_edge[i] or\
-           gre[i] < node.right_edge[i]:
-            contained *= 0
-
-    if contained == 1:
-        node.grid = grid_id
-        assert(node.grid != -1)
-        return
-
-    # Split the grid
-    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = -1
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cpdef add_grids(Node node,
-                    int ngrids,
-                    np.float64_t[:,:] gles,
-                    np.float64_t[:,:] gres,
-                    np.int64_t[:] gids,
-                    int rank,
-                    int size):
-    cdef int i, j, nless, ngreater, index
-    cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-    cdef np.int64_t[:] l_ids, g_ids
-    if not should_i_build(node, rank, size):
-        return
-
-    if _kd_is_leaf(node) == 1:
-        insert_grids(node, ngrids, gles, gres, gids, rank, size)
-        return
-
-    less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-    greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-
-    nless = 0
-    ngreater = 0
-    for i in range(ngrids):
-        if gles[i, node.split.dim] < node.split.pos:
-            less_ids[nless] = i
-            nless += 1
-
-        if gres[i, node.split.dim] > node.split.pos:
-            greater_ids[ngreater] = i
-            ngreater += 1
-
-    #print 'nless: %i' % nless
-    #print 'ngreater: %i' % ngreater
-
-    if nless > 0:
-        less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
-
-        for i in range(nless):
-            index = less_ids[i]
-            l_ids[i] = gids[index]
-            for j in range(3):
-                less_gles[i,j] = gles[index,j]
-                less_gres[i,j] = gres[index,j]
-
-        add_grids(node.left, nless, less_gles, less_gres,
-                  l_ids, rank, size)
-
-    if ngreater > 0:
-        greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
-
-        for i in range(ngreater):
-            index = greater_ids[i]
-            g_ids[i] = gids[index]
-            for j in range(3):
-                greater_gles[i,j] = gles[index,j]
-                greater_gres[i,j] = gres[index,j]
-
-        add_grids(node.right, ngreater, greater_gles, greater_gres,
-                  g_ids, rank, size)
-
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef int should_i_split(Node node, int rank, int size):
-    if node.node_id < size and node.node_id > 0:
-        return 1
-    return 0
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void insert_grids(Node node,
-                       int ngrids,
-                       np.float64_t[:,:] gles,
-                       np.float64_t[:,:] gres,
-                       np.int64_t[:] gids,
-                       int rank,
-                       int size):
-
-    if not should_i_build(node, rank, size) or ngrids == 0:
-        return
-    cdef int contained = 1
-    cdef int check
-
-    if ngrids == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles[0,:], gres[0,:], gids[0], rank, size)
-            return
-
-        for i in range(3):
-            contained *= gles[0,i] <= node.left_edge[i]
-            contained *= gres[0,i] >= node.right_edge[i]
-
-        if contained == 1:
-            # print 'Node fully contained, setting to grid: %i' % gids[0]
-            node.grid = gids[0]
-            assert(node.grid != -1)
-            return
-
-    # Split the grids
-    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = -1
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef split_grid(Node node,
-               np.float64_t[:] gle,
-               np.float64_t[:] gre,
-               int gid,
-               int rank,
-               int size):
-
-    cdef int j
-    cdef np.uint8_t[:] less_ids, greater_ids
-    data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
-    for j in range(3):
-        data[0,0,j] = gle[j]
-        data[0,1,j] = gre[j]
-
-    less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
-    greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
-
-    best_dim, split_pos, nless, ngreater = \
-        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
-                          less_ids, greater_ids)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if nless == 1:
-        insert_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if ngreater == 1:
-        insert_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-    return 0
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef kdtree_get_choices(int n_grids,
-                        np.float64_t[:,:,:] data,
-                        np.float64_t[:] l_corner,
-                        np.float64_t[:] r_corner,
-                        np.uint8_t[:] less_ids,
-                        np.uint8_t[:] greater_ids,
-                       ):
-    cdef int i, j, k, dim, n_unique, best_dim, my_split
-    cdef np.float64_t split
-    cdef np.float64_t[:,:] uniquedims
-    cdef np.float64_t[:] uniques
-    uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
-    my_max = 0
-    my_split = 0
-    best_dim = -1
-    for dim in range(3):
-        n_unique = 0
-        uniques = uniquedims[dim]
-        for i in range(n_grids):
-            # Check for disqualification
-            for j in range(2):
-                # print "Checking against", i,j,dim,data[i,j,dim]
-                if not (l_corner[dim] < data[i][j][dim] and
-                        data[i][j][dim] < r_corner[dim]):
-                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
-                    continue
-                skipit = 0
-                # Add our left ...
-                for k in range(n_unique):
-                    if uniques[k] == data[i][j][dim]:
-                        skipit = 1
-                        # print "Identified", uniques[k], data[i,j,dim], n_unique
-                        break
-                if skipit == 0:
-                    uniques[n_unique] = data[i][j][dim]
-                    n_unique += 1
-        if n_unique > my_max:
-            best_dim = dim
-            my_max = n_unique
-            my_split = (n_unique-1)/2
-    if best_dim == -1:
-        return -1, 0, 0, 0
-    # I recognize how lame this is.
-    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
-    for i in range(my_max):
-        # print "Setting tarr: ", i, uniquedims[best_dim][i]
-        tarr[i] = uniquedims[best_dim][i]
-    tarr.sort()
-    split = tarr[my_split]
-    cdef int nless=0, ngreater=0
-    for i in range(n_grids):
-        if data[i][0][best_dim] < split:
-            less_ids[i] = 1
-            nless += 1
-        else:
-            less_ids[i] = 0
-        if data[i][1][best_dim] > split:
-            greater_ids[i] = 1
-            ngreater += 1
-        else:
-            greater_ids[i] = 0
-
-    # Return out unique values
-    return best_dim, split, nless, ngreater
-
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-#@cython.cdivision(True)
-cdef int split_grids(Node node,
-                       int ngrids,
-                       np.float64_t[:,:] gles,
-                       np.float64_t[:,:] gres,
-                       np.int64_t[:] gids,
-                       int rank,
-                       int size):
-    # Find a Split
-    cdef int i, j, index
-    cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-    cdef np.int64_t[:] l_ids, g_ids
-    if ngrids == 0: return 0
-
-    data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
-
-    for i in range(ngrids):
-        for j in range(3):
-            data[i,0,j] = gles[i,j]
-            data[i,1,j] = gres[i,j]
-
-    less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
-    greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
-
-    best_dim, split_pos, nless, ngreater = \
-        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
-                          less_ids, greater_ids)
-
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        print 'Failed to split grids.'
-        return -1
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-    greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-
-    nless = 0
-    ngreater = 0
-    for i in range(ngrids):
-        if less_ids[i] == 1:
-            less_index[nless] = i
-            nless += 1
-
-        if greater_ids[i] == 1:
-            greater_index[ngreater] = i
-            ngreater += 1
-
-    if nless > 0:
-        less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
-
-        for i in range(nless):
-            index = less_index[i]
-            l_ids[i] = gids[index]
-            for j in range(3):
-                less_gles[i,j] = gles[index,j]
-                less_gres[i,j] = gres[index,j]
-
-        # Populate Left Node
-        #print 'Inserting left node', node.left_edge, node.right_edge
-        insert_grids(node.left, nless, less_gles, less_gres,
-                     l_ids, rank, size)
-
-    if ngreater > 0:
-        greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
-
-        for i in range(ngreater):
-            index = greater_index[i]
-            g_ids[i] = gids[index]
-            for j in range(3):
-                greater_gles[i,j] = gles[index,j]
-                greater_gres[i,j] = gres[index,j]
-
-        # Populate Right Node
-        #print 'Inserting right node', node.left_edge, node.right_edge
-        insert_grids(node.right, ngreater, greater_gles, greater_gres,
-                     g_ids, rank, size)
-
-    return 0
-
-cdef geo_split(Node node,
-               np.float64_t[:] gle,
-               np.float64_t[:] gre,
-               int grid_id,
-               int rank,
-               int size):
-    cdef int big_dim = 0
-    cdef int i
-    cdef np.float64_t v, my_max = 0.0
-
-    for i in range(3):
-        v = gre[i] - gle[i]
-        if v > my_max:
-            my_max = v
-            big_dim = i
-
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-
-    lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-
-    for j in range(3):
-        lnew_gle[j] = gle[j]
-        lnew_gre[j] = gre[j]
-        rnew_gle[j] = gle[j]
-        rnew_gre[j] = gre[j]
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = big_dim
-    split.pos = new_pos
-
-    # Create a Split
-    divide(node, split)
-
-    #lnew_gre[big_dim] = new_pos
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, lnew_gle, lnew_gre,
-            grid_id, rank, size)
-
-    #rnew_gle[big_dim] = new_pos
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, rnew_gle, rnew_gre,
-            grid_id, rank, size)
-    return
-
-cdef void divide(Node node, Split * split):
-    # Create a Split
-    node.split = split
-
-    cdef np.float64_t[:] le = np.empty(3, dtype='float64')
-    cdef np.float64_t[:] re = np.empty(3, dtype='float64')
-
-    cdef int i
-    for i in range(3):
-        le[i] = node.left_edge[i]
-        re[i] = node.right_edge[i]
-    re[split.dim] = split.pos
-
-    node.left = Node(node, None, None,
-                     le, re, node.grid,
-                     _lchild_id(node.node_id))
-
-    re[split.dim] = node.right_edge[split.dim]
-    le[split.dim] = split.pos
-    node.right = Node(node, None, None,
-                      le, re, node.grid,
-                      _rchild_id(node.node_id))
-
-    return
-#
-def kd_sum_volume(Node node):
-    cdef np.float64_t vol = 1.0
-    if (node.left is None) and (node.right is None):
-        if node.grid == -1:
-            return 0.0
-        for i in range(3):
-            vol *= node.right_edge[i] - node.left_edge[i]
-        return vol
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_node_check(Node node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid != -1:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(Node node):
-    cdef int has_l_child = node.left == None
-    cdef int has_r_child = node.right == None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-cdef int _kd_is_leaf(Node node):
-    if node.left is None or node.right is None:
-        return 1
-    return 0
-
-def step_depth(Node current, Node previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if _kd_is_leaf(current) == 1: # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(Node trunk, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.node_id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(Node tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.node_id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(Node tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(Node tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(Node current,
-                   Node previous,
-                   viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if _kd_is_leaf(current) == 1: # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
-
-cdef int point_in_node(Node node,
-                       np.float64_t[:] point):
-    cdef int i
-    cdef int inside = 1
-    for i in range(3):
-        inside *= node.left_edge[i] <= point[i]
-        inside *= node.right_edge[i] > point[i]
-    return inside
-
-cdef Node _find_node(Node node, np.float64_t[:] point):
-    while _kd_is_leaf(node) == 0:
-        if point[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-
-def find_node(Node node,
-              np.float64_t[:] point):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(point_in_node(node, point))
-    return _find_node(node, point)


https://bitbucket.org/yt_analysis/yt/commits/86c3b0df3e4b/
Changeset:   86c3b0df3e4b
Branch:      yt
User:        charlie_watson
Date:        2016-06-13 22:34:02+00:00
Summary:     set_dirty, get_left_edge, get_right_edge, kd_traverse calls all updated
Affected #:  3 files

diff -r 211223c2660aed63b7462ca793a9fd3aff867172 -r 86c3b0df3e4b3ab004b1c3c0fa04d13022b80191 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -114,8 +114,8 @@
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            nle = self.ds.arr(get_left_edge(node), input_units="code_length")
-            nre = self.ds.arr(get_right_edge(node), input_units="code_length")
+            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
+            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -139,8 +139,8 @@
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            nle = self.ds.arr(get_left_edge(node), input_units="code_length")
-            nre = self.ds.arr(get_right_edge(node), input_units="code_length")
+            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
+            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -186,7 +186,7 @@
         regenerate_data = self.fields is None or \
                           len(self.fields) != len(new_fields) or \
                           self.fields != new_fields or force
-        set_dirty(self.tree.trunk, regenerate_data)
+        self.tree.trunk.set_dirty(regenerate_data)
         self.fields = new_fields
 
         if self.log_fields is not None and not regenerate_data:
@@ -214,18 +214,18 @@
         self.set_fields(fields, log_fields, no_ghost)
 
     def traverse(self, viewpoint=None):
-        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+        for node in self.tree.trunk.kd_traverse(viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
     def slice_traverse(self, viewpoint = None):
         if not hasattr(self.ds.index, "grid"):
             raise NotImplementedError
-        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+        for node in self.tree.trunk.kd_traverse(viewpoint=viewpoint):
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge.in_units("code_length").ndarray_view()
-            nle = get_left_edge(node)
-            nre = get_right_edge(node)
+            nle = node.get_left_edge()
+            nre = node.get_right_edge()
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -290,8 +290,8 @@
         grid = self.ds.index.grids[node.grid - self._id_offset]
         dds = grid.dds.ndarray_view()
         gle = grid.LeftEdge.ndarray_view()
-        nle = get_left_edge(node)
-        nre = get_right_edge(node)
+        nle = node.get_left_edge()
+        nre = node.get_right_edge()
         li = np.rint((nle-gle)/dds).astype('int32')
         ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')

diff -r 211223c2660aed63b7462ca793a9fd3aff867172 -r 86c3b0df3e4b3ab004b1c3c0fa04d13022b80191 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -67,12 +67,6 @@
         else:
             return np.nan
 
-    def get_left_edge(self):
-        return get_left_edge(self)
-
-    def get_right_edge(self):
-        return get_right_edge(self)
-
     def set_left_edge(self, np.float64_t[:] left_edge):
         cdef int i
         for i in range(3):

diff -r 211223c2660aed63b7462ca793a9fd3aff867172 -r 86c3b0df3e4b3ab004b1c3c0fa04d13022b80191 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -52,8 +52,8 @@
         grid = ds.index.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        nle = get_left_edge(node)
-        nre = get_right_edge(node)
+        nle = node.get_left_edge()
+        nre = node.get_right_edge()
         li = np.rint((nle-gle)/dds).astype('int32')
         ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')


https://bitbucket.org/yt_analysis/yt/commits/53486916e626/
Changeset:   53486916e626
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:13:48+00:00
Summary:     changed method calls add_grid insert_grid add_grids should_i_split
Affected #:  2 files

diff -r 86c3b0df3e4b3ab004b1c3c0fa04d13022b80191 -r 53486916e626c79d275830058c5aee825b3ecdc0 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -25,15 +25,10 @@
     scatter_image
 from yt.utilities.lib.amr_kdtools import \
     Node, \
-    add_grids, \
     find_node, \
     kd_is_leaf, \
-    set_dirty, \
     depth_traverse, \
     depth_first_touch, \
-    kd_traverse, \
-    get_left_edge, \
-    get_right_edge, \
     kd_sum_volume, \
     kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -95,7 +90,7 @@
         gles = np.array([g.LeftEdge for g in grids])
         gres = np.array([g.RightEdge for g in grids])
         gids = np.array([g.id for g in grids], dtype="int64")
-        add_grids(self.trunk, gids.size, gles, gres, gids,
+        self.trunk.add_grids(gids.size, gles, gres, gids,
                     self.comm_rank, self.comm_size)
         del gles, gres, gids, grids
 

diff -r 86c3b0df3e4b3ab004b1c3c0fa04d13022b80191 -r 53486916e626c79d275830058c5aee825b3ecdc0 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -132,670 +132,670 @@
             pgles[j] = gle[j]
             pgres[j] = gre[j]
 
-        add_grid(self, pgles, pgres, gid, rank, size)
+        self.add_grid(pgles, pgres, gid, rank, size)
 
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef add_grid(self,
-                           np.float64_t[:] gle,
-                           np.float64_t[:] gre,
-                           int gid,
-                           int rank,
-                           int size):
-
-            if not should_i_build(self, rank, size):
-                return
-
-            if _kd_is_leaf(self) == 1:
-                insert_grid(self, gle, gre, gid, rank, size)
-            else:
-                less_id = gle[self.split.dim] < self.split.pos
-                if less_id:
-                    add_grid(self.left, gle, gre,
-                             gid, rank, size)
-
-                greater_id = gre[self.split.dim] > self.split.pos
-                if greater_id:
-                    add_grid(self.right, gle, gre,
-                             gid, rank, size)
-            return
-
-
-
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef insert_grid(self,
-                        np.float64_t[:] gle,
-                        np.float64_t[:] gre,
-                        int grid_id,
-                        int rank,
-                        int size):
-            if not should_i_build(self, rank, size):
-                return
-
-            # If we should continue to split based on parallelism, do so!
-            if should_i_split(self, rank, size):
-                geo_split(self, gle, gre, grid_id, rank, size)
-                return
-
-            cdef int contained = 1
-            for i in range(3):
-                if gle[i] > self.left_edge[i] or\
-                   gre[i] < self.right_edge[i]:
-                    contained *= 0
-
-            if contained == 1:
-                self.grid = grid_id
-                assert(self.grid != -1)
-                return
-
-            # Split the grid
-            cdef int check = split_grid(self, gle, gre, grid_id, rank, size)
-            # If check is -1, then we have found a place where there are no choices.
-            # Exit out and set the node to None.
-            if check == -1:
-                self.grid = -1
-            return
-
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cpdef add_grids(self,
-                            int ngrids,
-                            np.float64_t[:,:] gles,
-                            np.float64_t[:,:] gres,
-                            np.int64_t[:] gids,
-                            int rank,
-                            int size):
-            cdef int i, j, nless, ngreater, index
-            cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-            cdef np.int64_t[:] l_ids, g_ids
-            if not should_i_build(self, rank, size):
-                return
-
-            if _kd_is_leaf(self) == 1:
-                insert_grids(self, ngrids, gles, gres, gids, rank, size)
-                return
-
-            less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-            greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-
-            nless = 0
-            ngreater = 0
-            for i in range(ngrids):
-                if gles[i, self.split.dim] < self.split.pos:
-                    less_ids[nless] = i
-                    nless += 1
-
-                if gres[i, self.split.dim] > self.split.pos:
-                    greater_ids[ngreater] = i
-                    ngreater += 1
-
-            #print 'nless: %i' % nless
-            #print 'ngreater: %i' % ngreater
-
-            if nless > 0:
-                less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-                less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-                l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
-
-                for i in range(nless):
-                    index = less_ids[i]
-                    l_ids[i] = gids[index]
-                    for j in range(3):
-                        less_gles[i,j] = gles[index,j]
-                        less_gres[i,j] = gres[index,j]
-
-                add_grids(self.left, nless, less_gles, less_gres,
-                          l_ids, rank, size)
-
-            if ngreater > 0:
-                greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-                greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-                g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
-
-                for i in range(ngreater):
-                    index = greater_ids[i]
-                    g_ids[i] = gids[index]
-                    for j in range(3):
-                        greater_gles[i,j] = gles[index,j]
-                        greater_gres[i,j] = gres[index,j]
-
-                add_grids(self.right, ngreater, greater_gles, greater_gres,
-                          g_ids, rank, size)
-
-            return
-
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef int should_i_split(self, int rank, int size):
-            if self.node_id < size and self.node_id > 0:
-                return 1
-            return 0
-
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef void insert_grids(self,
-                               int ngrids,
-                               np.float64_t[:,:] gles,
-                               np.float64_t[:,:] gres,
-                               np.int64_t[:] gids,
-                               int rank,
-                               int size):
-
-            if not should_i_build(self, rank, size) or ngrids == 0:
-                return
-            cdef int contained = 1
-            cdef int check
-
-            if ngrids == 1:
-                # If we should continue to split based on parallelism, do so!
-                if should_i_split(self, rank, size):
-                    geo_split(self, gles[0,:], gres[0,:], gids[0], rank, size)
-                    return
-
-                for i in range(3):
-                    contained *= gles[0,i] <= self.left_edge[i]
-                    contained *= gres[0,i] >= self.right_edge[i]
-
-                if contained == 1:
-                    # print 'Node fully contained, setting to grid: %i' % gids[0]
-                    self.grid = gids[0]
-                    assert(self.grid != -1)
-                    return
-
-            # Split the grids
-            check = split_grids(self, ngrids, gles, gres, gids, rank, size)
-            # If check is -1, then we have found a place where there are no choices.
-            # Exit out and set the node to None.
-            if check == -1:
-                self.grid = -1
-            return
-
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef split_grid(self,
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef add_grid(self,
                        np.float64_t[:] gle,
                        np.float64_t[:] gre,
                        int gid,
                        int rank,
                        int size):
 
-            cdef int j
-            cdef np.uint8_t[:] less_ids, greater_ids
-            data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
+        if not should_i_build(self, rank, size):
+            return
+
+        if _kd_is_leaf(self) == 1:
+            self.insert_grid(gle, gre, gid, rank, size)
+        else:
+            less_id = gle[self.split.dim] < self.split.pos
+            if less_id:
+                self.left.add_grid(gle, gre,
+                         gid, rank, size)
+
+            greater_id = gre[self.split.dim] > self.split.pos
+            if greater_id:
+                self.right.add_grid(gle, gre,
+                         gid, rank, size)
+        return
+
+
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef insert_grid(self,
+                    np.float64_t[:] gle,
+                    np.float64_t[:] gre,
+                    int grid_id,
+                    int rank,
+                    int size):
+        if not should_i_build(self, rank, size):
+            return
+
+        # If we should continue to split based on parallelism, do so!
+        if self.should_i_split(rank, size):
+            geo_split(self, gle, gre, grid_id, rank, size)
+            return
+
+        cdef int contained = 1
+        for i in range(3):
+            if gle[i] > self.left_edge[i] or\
+               gre[i] < self.right_edge[i]:
+                contained *= 0
+
+        if contained == 1:
+            self.grid = grid_id
+            assert(self.grid != -1)
+            return
+
+        # Split the grid
+        cdef int check = split_grid(self, gle, gre, grid_id, rank, size)
+        # If check is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if check == -1:
+            self.grid = -1
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cpdef add_grids(self,
+                        int ngrids,
+                        np.float64_t[:,:] gles,
+                        np.float64_t[:,:] gres,
+                        np.int64_t[:] gids,
+                        int rank,
+                        int size):
+        cdef int i, j, nless, ngreater, index
+        cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+        cdef np.int64_t[:] l_ids, g_ids
+        if not should_i_build(self, rank, size):
+            return
+
+        if _kd_is_leaf(self) == 1:
+            insert_grids(self, ngrids, gles, gres, gids, rank, size)
+            return
+
+        less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+        greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+        nless = 0
+        ngreater = 0
+        for i in range(ngrids):
+            if gles[i, self.split.dim] < self.split.pos:
+                less_ids[nless] = i
+                nless += 1
+
+            if gres[i, self.split.dim] > self.split.pos:
+                greater_ids[ngreater] = i
+                ngreater += 1
+
+        #print 'nless: %i' % nless
+        #print 'ngreater: %i' % ngreater
+
+        if nless > 0:
+            less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+            for i in range(nless):
+                index = less_ids[i]
+                l_ids[i] = gids[index]
+                for j in range(3):
+                    less_gles[i,j] = gles[index,j]
+                    less_gres[i,j] = gres[index,j]
+
+            self.left.add_grids(nless, less_gles, less_gres,
+                      l_ids, rank, size)
+
+        if ngreater > 0:
+            greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+            for i in range(ngreater):
+                index = greater_ids[i]
+                g_ids[i] = gids[index]
+                for j in range(3):
+                    greater_gles[i,j] = gles[index,j]
+                    greater_gres[i,j] = gres[index,j]
+
+            self.right.add_grids(ngreater, greater_gles, greater_gres,
+                      g_ids, rank, size)
+
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int should_i_split(self, int rank, int size):
+        if self.node_id < size and self.node_id > 0:
+            return 1
+        return 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void insert_grids(self,
+                           int ngrids,
+                           np.float64_t[:,:] gles,
+                           np.float64_t[:,:] gres,
+                           np.int64_t[:] gids,
+                           int rank,
+                           int size):
+
+        if not should_i_build(self, rank, size) or ngrids == 0:
+            return
+        cdef int contained = 1
+        cdef int check
+
+        if ngrids == 1:
+            # If we should continue to split based on parallelism, do so!
+            if self.should_i_split(rank, size):
+                geo_split(self, gles[0,:], gres[0,:], gids[0], rank, size)
+                return
+
+            for i in range(3):
+                contained *= gles[0,i] <= self.left_edge[i]
+                contained *= gres[0,i] >= self.right_edge[i]
+
+            if contained == 1:
+                # print 'Node fully contained, setting to grid: %i' % gids[0]
+                self.grid = gids[0]
+                assert(self.grid != -1)
+                return
+
+        # Split the grids
+        check = split_grids(self, ngrids, gles, gres, gids, rank, size)
+        # If check is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if check == -1:
+            self.grid = -1
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef split_grid(self,
+                   np.float64_t[:] gle,
+                   np.float64_t[:] gre,
+                   int gid,
+                   int rank,
+                   int size):
+
+        cdef int j
+        cdef np.uint8_t[:] less_ids, greater_ids
+        data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
+        for j in range(3):
+            data[0,0,j] = gle[j]
+            data[0,1,j] = gre[j]
+
+        less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+        greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+
+        best_dim, split_pos, nless, ngreater = \
+            kdtree_get_choices(1, data, self.left_edge, self.right_edge,
+                              less_ids, greater_ids)
+
+        # If best_dim is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if best_dim == -1:
+            return -1
+
+
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = best_dim
+        split.pos = split_pos
+
+        # Create a Split
+        divide(self, split)
+
+        # Populate Left Node
+        #print 'Inserting left node', self.left_edge, self.right_edge
+        if nless == 1:
+            self.left.insert_grid(gle, gre,
+                         gid, rank, size)
+
+        # Populate Right Node
+        #print 'Inserting right node', self.left_edge, self.right_edge
+        if ngreater == 1:
+            self.right.insert_grid(gle, gre,
+                         gid, rank, size)
+
+        return 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef kdtree_get_choices(int n_grids,
+                            np.float64_t[:,:,:] data,
+                            np.float64_t[:] l_corner,
+                            np.float64_t[:] r_corner,
+                            np.uint8_t[:] less_ids,
+                            np.uint8_t[:] greater_ids,
+                           ):
+        cdef int i, j, k, dim, n_unique, best_dim, my_split
+        cdef np.float64_t split
+        cdef np.float64_t[:,:] uniquedims
+        cdef np.float64_t[:] uniques
+        uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
+        my_max = 0
+        my_split = 0
+        best_dim = -1
+        for dim in range(3):
+            n_unique = 0
+            uniques = uniquedims[dim]
+            for i in range(n_grids):
+                # Check for disqualification
+                for j in range(2):
+                    # print "Checking against", i,j,dim,data[i,j,dim]
+                    if not (l_corner[dim] < data[i][j][dim] and
+                            data[i][j][dim] < r_corner[dim]):
+                        # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                        continue
+                    skipit = 0
+                    # Add our left ...
+                    for k in range(n_unique):
+                        if uniques[k] == data[i][j][dim]:
+                            skipit = 1
+                            # print "Identified", uniques[k], data[i,j,dim], n_unique
+                            break
+                    if skipit == 0:
+                        uniques[n_unique] = data[i][j][dim]
+                        n_unique += 1
+            if n_unique > my_max:
+                best_dim = dim
+                my_max = n_unique
+                my_split = (n_unique-1)/2
+        if best_dim == -1:
+            return -1, 0, 0, 0
+        # I recognize how lame this is.
+        cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+        for i in range(my_max):
+            # print "Setting tarr: ", i, uniquedims[best_dim][i]
+            tarr[i] = uniquedims[best_dim][i]
+        tarr.sort()
+        split = tarr[my_split]
+        cdef int nless=0, ngreater=0
+        for i in range(n_grids):
+            if data[i][0][best_dim] < split:
+                less_ids[i] = 1
+                nless += 1
+            else:
+                less_ids[i] = 0
+            if data[i][1][best_dim] > split:
+                greater_ids[i] = 1
+                ngreater += 1
+            else:
+                greater_ids[i] = 0
+
+        # Return out unique values
+        return best_dim, split, nless, ngreater
+
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    #@cython.cdivision(True)
+    cdef int split_grids(self,
+                           int ngrids,
+                           np.float64_t[:,:] gles,
+                           np.float64_t[:,:] gres,
+                           np.int64_t[:] gids,
+                           int rank,
+                           int size):
+        # Find a Split
+        cdef int i, j, index
+        cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+        cdef np.int64_t[:] l_ids, g_ids
+        if ngrids == 0: return 0
+
+        data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
+
+        for i in range(ngrids):
             for j in range(3):
-                data[0,0,j] = gle[j]
-                data[0,1,j] = gre[j]
+                data[i,0,j] = gles[i,j]
+                data[i,1,j] = gres[i,j]
 
-            less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
-            greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+        less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+        greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
 
-            best_dim, split_pos, nless, ngreater = \
-                kdtree_get_choices(1, data, self.left_edge, self.right_edge,
-                                  less_ids, greater_ids)
+        best_dim, split_pos, nless, ngreater = \
+            kdtree_get_choices(ngrids, data, self.left_edge, self.right_edge,
+                              less_ids, greater_ids)
 
-            # If best_dim is -1, then we have found a place where there are no choices.
-            # Exit out and set the node to None.
-            if best_dim == -1:
-                return -1
 
+        # If best_dim is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if best_dim == -1:
+            print 'Failed to split grids.'
+            return -1
 
-            split = <Split *> malloc(sizeof(Split))
-            split.dim = best_dim
-            split.pos = split_pos
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = best_dim
+        split.pos = split_pos
 
-            # Create a Split
-            divide(self, split)
+        # Create a Split
+        divide(self, split)
+
+        less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+        greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+        nless = 0
+        ngreater = 0
+        for i in range(ngrids):
+            if less_ids[i] == 1:
+                less_index[nless] = i
+                nless += 1
+
+            if greater_ids[i] == 1:
+                greater_index[ngreater] = i
+                ngreater += 1
+
+        if nless > 0:
+            less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+            for i in range(nless):
+                index = less_index[i]
+                l_ids[i] = gids[index]
+                for j in range(3):
+                    less_gles[i,j] = gles[index,j]
+                    less_gres[i,j] = gres[index,j]
 
             # Populate Left Node
             #print 'Inserting left node', self.left_edge, self.right_edge
-            if nless == 1:
-                insert_grid(self.left, gle, gre,
-                             gid, rank, size)
+            insert_grids(self.left, nless, less_gles, less_gres,
+                         l_ids, rank, size)
+
+        if ngreater > 0:
+            greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+            for i in range(ngreater):
+                index = greater_index[i]
+                g_ids[i] = gids[index]
+                for j in range(3):
+                    greater_gles[i,j] = gles[index,j]
+                    greater_gres[i,j] = gres[index,j]
 
             # Populate Right Node
             #print 'Inserting right node', self.left_edge, self.right_edge
-            if ngreater == 1:
-                insert_grid(self.right, gle, gre,
-                             gid, rank, size)
+            insert_grids(self.right, ngreater, greater_gles, greater_gres,
+                         g_ids, rank, size)
 
-            return 0
+        return 0
 
-        @cython.boundscheck(False)
-        @cython.wraparound(False)
-        @cython.cdivision(True)
-        cdef kdtree_get_choices(int n_grids,
-                                np.float64_t[:,:,:] data,
-                                np.float64_t[:] l_corner,
-                                np.float64_t[:] r_corner,
-                                np.uint8_t[:] less_ids,
-                                np.uint8_t[:] greater_ids,
-                               ):
-            cdef int i, j, k, dim, n_unique, best_dim, my_split
-            cdef np.float64_t split
-            cdef np.float64_t[:,:] uniquedims
-            cdef np.float64_t[:] uniques
-            uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
-            my_max = 0
-            my_split = 0
-            best_dim = -1
-            for dim in range(3):
-                n_unique = 0
-                uniques = uniquedims[dim]
-                for i in range(n_grids):
-                    # Check for disqualification
-                    for j in range(2):
-                        # print "Checking against", i,j,dim,data[i,j,dim]
-                        if not (l_corner[dim] < data[i][j][dim] and
-                                data[i][j][dim] < r_corner[dim]):
-                            # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
-                            continue
-                        skipit = 0
-                        # Add our left ...
-                        for k in range(n_unique):
-                            if uniques[k] == data[i][j][dim]:
-                                skipit = 1
-                                # print "Identified", uniques[k], data[i,j,dim], n_unique
-                                break
-                        if skipit == 0:
-                            uniques[n_unique] = data[i][j][dim]
-                            n_unique += 1
-                if n_unique > my_max:
-                    best_dim = dim
-                    my_max = n_unique
-                    my_split = (n_unique-1)/2
-            if best_dim == -1:
-                return -1, 0, 0, 0
-            # I recognize how lame this is.
-            cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
-            for i in range(my_max):
-                # print "Setting tarr: ", i, uniquedims[best_dim][i]
-                tarr[i] = uniquedims[best_dim][i]
-            tarr.sort()
-            split = tarr[my_split]
-            cdef int nless=0, ngreater=0
-            for i in range(n_grids):
-                if data[i][0][best_dim] < split:
-                    less_ids[i] = 1
-                    nless += 1
-                else:
-                    less_ids[i] = 0
-                if data[i][1][best_dim] > split:
-                    greater_ids[i] = 1
-                    ngreater += 1
-                else:
-                    greater_ids[i] = 0
+    cdef geo_split(self,
+                   np.float64_t[:] gle,
+                   np.float64_t[:] gre,
+                   int grid_id,
+                   int rank,
+                   int size):
+        cdef int big_dim = 0
+        cdef int i
+        cdef np.float64_t v, my_max = 0.0
 
-            # Return out unique values
-            return best_dim, split, nless, ngreater
+        for i in range(3):
+            v = gre[i] - gle[i]
+            if v > my_max:
+                my_max = v
+                big_dim = i
 
-        #@cython.boundscheck(False)
-        #@cython.wraparound(False)
-        #@cython.cdivision(True)
-        cdef int split_grids(self,
-                               int ngrids,
-                               np.float64_t[:,:] gles,
-                               np.float64_t[:,:] gres,
-                               np.int64_t[:] gids,
-                               int rank,
-                               int size):
-            # Find a Split
-            cdef int i, j, index
-            cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-            cdef np.int64_t[:] l_ids, g_ids
-            if ngrids == 0: return 0
+        new_pos = (gre[big_dim] + gle[big_dim])/2.
 
-            data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
+        lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
 
-            for i in range(ngrids):
-                for j in range(3):
-                    data[i,0,j] = gles[i,j]
-                    data[i,1,j] = gres[i,j]
+        for j in range(3):
+            lnew_gle[j] = gle[j]
+            lnew_gre[j] = gre[j]
+            rnew_gle[j] = gle[j]
+            rnew_gre[j] = gre[j]
 
-            less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
-            greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = big_dim
+        split.pos = new_pos
 
-            best_dim, split_pos, nless, ngreater = \
-                kdtree_get_choices(ngrids, data, self.left_edge, self.right_edge,
-                                  less_ids, greater_ids)
+        # Create a Split
+        divide(self, split)
 
+        #lnew_gre[big_dim] = new_pos
+        # Populate Left Node
+        #print 'Inserting left node', self.left_edge, self.right_edge
+        self.left.insert_grid(lnew_gle, lnew_gre,
+                grid_id, rank, size)
 
-            # If best_dim is -1, then we have found a place where there are no choices.
-            # Exit out and set the node to None.
-            if best_dim == -1:
-                print 'Failed to split grids.'
-                return -1
+        #rnew_gle[big_dim] = new_pos
+        # Populate Right Node
+        #print 'Inserting right node', self.left_edge, self.right_edge
+        self.right.insert_grid(rnew_gle, rnew_gre,
+                grid_id, rank, size)
+        return
 
-            split = <Split *> malloc(sizeof(Split))
-            split.dim = best_dim
-            split.pos = split_pos
+    cdef void divide(self, Split * split):
+        # Create a Split
+        self.split = split
 
-            # Create a Split
-            divide(self, split)
+        cdef np.float64_t[:] le = np.empty(3, dtype='float64')
+        cdef np.float64_t[:] re = np.empty(3, dtype='float64')
 
-            less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-            greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+        cdef int i
+        for i in range(3):
+            le[i] = self.left_edge[i]
+            re[i] = self.right_edge[i]
+        re[split.dim] = split.pos
 
-            nless = 0
-            ngreater = 0
-            for i in range(ngrids):
-                if less_ids[i] == 1:
-                    less_index[nless] = i
-                    nless += 1
+        self.left = Node(self, None, None,
+                         le, re, self.grid,
+                         _lchild_id(self.node_id))
 
-                if greater_ids[i] == 1:
-                    greater_index[ngreater] = i
-                    ngreater += 1
+        re[split.dim] = self.right_edge[split.dim]
+        le[split.dim] = split.pos
+        self.right = Node(self, None, None,
+                          le, re, self.grid,
+                          _rchild_id(self.node_id))
 
-            if nless > 0:
-                less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-                less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-                l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+        return
+    #
+    def kd_sum_volume(self):
+        cdef np.float64_t vol = 1.0
+        if (self.left is None) and (self.right is None):
+            if self.grid == -1:
+                return 0.0
+            for i in range(3):
+                vol *= self.right_edge[i] - self.left_edge[i]
+            return vol
+        else:
+            return kd_sum_volume(self.left) + kd_sum_volume(self.right)
 
-                for i in range(nless):
-                    index = less_index[i]
-                    l_ids[i] = gids[index]
-                    for j in range(3):
-                        less_gles[i,j] = gles[index,j]
-                        less_gres[i,j] = gres[index,j]
+    def kd_node_check(self):
+        assert (self.left is None) == (self.right is None)
+        if (self.left is None) and (self.right is None):
+            if self.grid != -1:
+                return np.prod(self.right_edge - self.left_edge)
+            else: return 0.0
+        else:
+            return kd_node_check(self.left)+kd_node_check(self.right)
 
-                # Populate Left Node
-                #print 'Inserting left node', self.left_edge, self.right_edge
-                insert_grids(self.left, nless, less_gles, less_gres,
-                             l_ids, rank, size)
+    def kd_is_leaf(self):
+        cdef int has_l_child = self.left == None
+        cdef int has_r_child = self.right == None
+        assert has_l_child == has_r_child
+        return has_l_child
 
-            if ngreater > 0:
-                greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-                greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-                g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+    cdef int _kd_is_leaf(self):
+        if self.left is None or self.right is None:
+            return 1
+        return 0
 
-                for i in range(ngreater):
-                    index = greater_index[i]
-                    g_ids[i] = gids[index]
-                    for j in range(3):
-                        greater_gles[i,j] = gles[index,j]
-                        greater_gres[i,j] = gres[index,j]
+    def step_depth(Node current, Node previous):
+        '''
+        Takes a single step in the depth-first traversal
+        '''
+        if _kd_is_leaf(current) == 1: # At a leaf, move back up
+            previous = current
+            current = current.parent
 
-                # Populate Right Node
-                #print 'Inserting right node', self.left_edge, self.right_edge
-                insert_grids(self.right, ngreater, greater_gles, greater_gres,
-                             g_ids, rank, size)
-
-            return 0
-
-        cdef geo_split(self,
-                       np.float64_t[:] gle,
-                       np.float64_t[:] gre,
-                       int grid_id,
-                       int rank,
-                       int size):
-            cdef int big_dim = 0
-            cdef int i
-            cdef np.float64_t v, my_max = 0.0
-
-            for i in range(3):
-                v = gre[i] - gle[i]
-                if v > my_max:
-                    my_max = v
-                    big_dim = i
-
-            new_pos = (gre[big_dim] + gle[big_dim])/2.
-
-            lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-            lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-            rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-            rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-
-            for j in range(3):
-                lnew_gle[j] = gle[j]
-                lnew_gre[j] = gre[j]
-                rnew_gle[j] = gle[j]
-                rnew_gre[j] = gre[j]
-
-            split = <Split *> malloc(sizeof(Split))
-            split.dim = big_dim
-            split.pos = new_pos
-
-            # Create a Split
-            divide(self, split)
-
-            #lnew_gre[big_dim] = new_pos
-            # Populate Left Node
-            #print 'Inserting left node', self.left_edge, self.right_edge
-            insert_grid(self.left, lnew_gle, lnew_gre,
-                    grid_id, rank, size)
-
-            #rnew_gle[big_dim] = new_pos
-            # Populate Right Node
-            #print 'Inserting right node', self.left_edge, self.right_edge
-            insert_grid(self.right, rnew_gle, rnew_gre,
-                    grid_id, rank, size)
-            return
-
-        cdef void divide(self, Split * split):
-            # Create a Split
-            self.split = split
-
-            cdef np.float64_t[:] le = np.empty(3, dtype='float64')
-            cdef np.float64_t[:] re = np.empty(3, dtype='float64')
-
-            cdef int i
-            for i in range(3):
-                le[i] = self.left_edge[i]
-                re[i] = self.right_edge[i]
-            re[split.dim] = split.pos
-
-            self.left = Node(self, None, None,
-                             le, re, self.grid,
-                             _lchild_id(self.node_id))
-
-            re[split.dim] = self.right_edge[split.dim]
-            le[split.dim] = split.pos
-            self.right = Node(self, None, None,
-                              le, re, self.grid,
-                              _rchild_id(self.node_id))
-
-            return
-        #
-        def kd_sum_volume(self):
-            cdef np.float64_t vol = 1.0
-            if (self.left is None) and (self.right is None):
-                if self.grid == -1:
-                    return 0.0
-                for i in range(3):
-                    vol *= self.right_edge[i] - self.left_edge[i]
-                return vol
+        elif current.parent is previous: # Moving down, go left first
+            previous = current
+            if current.left is not None:
+                current = current.left
+            elif current.right is not None:
+                current = current.right
             else:
-                return kd_sum_volume(self.left) + kd_sum_volume(self.right)
-
-        def kd_node_check(self):
-            assert (self.left is None) == (self.right is None)
-            if (self.left is None) and (self.right is None):
-                if self.grid != -1:
-                    return np.prod(self.right_edge - self.left_edge)
-                else: return 0.0
-            else:
-                return kd_node_check(self.left)+kd_node_check(self.right)
-
-        def kd_is_leaf(self):
-            cdef int has_l_child = self.left == None
-            cdef int has_r_child = self.right == None
-            assert has_l_child == has_r_child
-            return has_l_child
-
-        cdef int _kd_is_leaf(self):
-            if self.left is None or self.right is None:
-                return 1
-            return 0
-
-        def step_depth(Node current, Node previous):
-            '''
-            Takes a single step in the depth-first traversal
-            '''
-            if _kd_is_leaf(current) == 1: # At a leaf, move back up
-                previous = current
                 current = current.parent
 
-            elif current.parent is previous: # Moving down, go left first
-                previous = current
+        elif current.left is previous: # Moving up from left, go right
+            previous = current
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+
+        elif current.right is previous: # Moving up from right child, move up
+            previous = current
+            current = current.parent
+
+        return current, previous
+
+    def depth_traverse(Node trunk, max_node=None):
+        '''
+        Yields a depth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = trunk
+        previous = None
+        if max_node is None:
+            max_node = np.inf
+        while current is not None:
+            yield current
+            current, previous = step_depth(current, previous)
+            if current is None: break
+            if current.node_id >= max_node:
+                current = current.parent
+                previous = current.right
+
+    def depth_first_touch(Node tree, max_node=None):
+        '''
+        Yields a depth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = tree
+        previous = None
+        if max_node is None:
+            max_node = np.inf
+        while current is not None:
+            if previous is None or previous.parent != current:
+                yield current
+            current, previous = step_depth(current, previous)
+            if current is None: break
+            if current.node_id >= max_node:
+                current = current.parent
+                previous = current.right
+
+    def breadth_traverse(Node tree):
+        '''
+        Yields a breadth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = tree
+        previous = None
+        while current is not None:
+            yield current
+            current, previous = step_depth(current, previous)
+
+
+    def viewpoint_traverse(Node tree, viewpoint):
+        '''
+        Yields a viewpoint dependent traversal of the kd-tree.  Starts
+        with nodes furthest away from viewpoint.
+        '''
+
+        current = tree
+        previous = None
+        while current is not None:
+            yield current
+            current, previous = step_viewpoint(current, previous, viewpoint)
+
+    def step_viewpoint(Node current,
+                       Node previous,
+                       viewpoint):
+        '''
+        Takes a single step in the viewpoint based traversal.  Always
+        goes to the node furthest away from viewpoint first.
+        '''
+        if _kd_is_leaf(current) == 1: # At a leaf, move back up
+            previous = current
+            current = current.parent
+        elif current.split.dim is None: # This is a dead node
+            previous = current
+            current = current.parent
+
+        elif current.parent is previous: # Moving down
+            previous = current
+            if viewpoint[current.split.dim] <= current.split.pos:
+                if current.right is not None:
+                    current = current.right
+                else:
+                    previous = current.right
+            else:
                 if current.left is not None:
                     current = current.left
-                elif current.right is not None:
-                    current = current.right
+                else:
+                    previous = current.left
+
+        elif current.right is previous: # Moving up from right
+            previous = current
+            if viewpoint[current.split.dim] <= current.split.pos:
+                if current.left is not None:
+                    current = current.left
                 else:
                     current = current.parent
+            else:
+                current = current.parent
 
-            elif current.left is previous: # Moving up from left, go right
-                previous = current
+        elif current.left is previous: # Moving up from left child
+            previous = current
+            if viewpoint[current.split.dim] > current.split.pos:
                 if current.right is not None:
                     current = current.right
                 else:
                     current = current.parent
-
-            elif current.right is previous: # Moving up from right child, move up
-                previous = current
+            else:
                 current = current.parent
 
-            return current, previous
+        return current, previous
 
-        def depth_traverse(Node trunk, max_node=None):
-            '''
-            Yields a depth-first traversal of the kd tree always going to
-            the left child before the right.
-            '''
-            current = trunk
-            previous = None
-            if max_node is None:
-                max_node = np.inf
-            while current is not None:
-                yield current
-                current, previous = step_depth(current, previous)
-                if current is None: break
-                if current.node_id >= max_node:
-                    current = current.parent
-                    previous = current.right
+    cdef int point_in_node(self,
+                           np.float64_t[:] point):
+        cdef int i
+        cdef int inside = 1
+        for i in range(3):
+            inside *= self.left_edge[i] <= point[i]
+            inside *= self.right_edge[i] > point[i]
+        return inside
 
-        def depth_first_touch(Node tree, max_node=None):
-            '''
-            Yields a depth-first traversal of the kd tree always going to
-            the left child before the right.
-            '''
-            current = tree
-            previous = None
-            if max_node is None:
-                max_node = np.inf
-            while current is not None:
-                if previous is None or previous.parent != current:
-                    yield current
-                current, previous = step_depth(current, previous)
-                if current is None: break
-                if current.node_id >= max_node:
-                    current = current.parent
-                    previous = current.right
+    cdef Node _find_node(self, np.float64_t[:] point):
+        while _kd_is_leaf(self) == 0:
+            if point[self.split.dim] < self.split.pos:
+                self = self.left
+            else:
+                self = self.right
+        return self
 
-        def breadth_traverse(Node tree):
-            '''
-            Yields a breadth-first traversal of the kd tree always going to
-            the left child before the right.
-            '''
-            current = tree
-            previous = None
-            while current is not None:
-                yield current
-                current, previous = step_depth(current, previous)
-
-
-        def viewpoint_traverse(Node tree, viewpoint):
-            '''
-            Yields a viewpoint dependent traversal of the kd-tree.  Starts
-            with nodes furthest away from viewpoint.
-            '''
-
-            current = tree
-            previous = None
-            while current is not None:
-                yield current
-                current, previous = step_viewpoint(current, previous, viewpoint)
-
-        def step_viewpoint(Node current,
-                           Node previous,
-                           viewpoint):
-            '''
-            Takes a single step in the viewpoint based traversal.  Always
-            goes to the node furthest away from viewpoint first.
-            '''
-            if _kd_is_leaf(current) == 1: # At a leaf, move back up
-                previous = current
-                current = current.parent
-            elif current.split.dim is None: # This is a dead node
-                previous = current
-                current = current.parent
-
-            elif current.parent is previous: # Moving down
-                previous = current
-                if viewpoint[current.split.dim] <= current.split.pos:
-                    if current.right is not None:
-                        current = current.right
-                    else:
-                        previous = current.right
-                else:
-                    if current.left is not None:
-                        current = current.left
-                    else:
-                        previous = current.left
-
-            elif current.right is previous: # Moving up from right
-                previous = current
-                if viewpoint[current.split.dim] <= current.split.pos:
-                    if current.left is not None:
-                        current = current.left
-                    else:
-                        current = current.parent
-                else:
-                    current = current.parent
-
-            elif current.left is previous: # Moving up from left child
-                previous = current
-                if viewpoint[current.split.dim] > current.split.pos:
-                    if current.right is not None:
-                        current = current.right
-                    else:
-                        current = current.parent
-                else:
-                    current = current.parent
-
-            return current, previous
-
-        cdef int point_in_node(self,
-                               np.float64_t[:] point):
-            cdef int i
-            cdef int inside = 1
-            for i in range(3):
-                inside *= self.left_edge[i] <= point[i]
-                inside *= self.right_edge[i] > point[i]
-            return inside
-
-        cdef Node _find_node(self, np.float64_t[:] point):
-            while _kd_is_leaf(self) == 0:
-                if point[self.split.dim] < self.split.pos:
-                    self = self.left
-                else:
-                    self = self.right
-            return self
-
-        def find_node(self,
-                      np.float64_t[:] point):
-            """
-            Find the AMRKDTree node enclosing a position
-            """
-            assert(point_in_node(self, point))
-            return _find_node(self, point)
+    def find_node(self,
+                  np.float64_t[:] point):
+        """
+        Find the AMRKDTree node enclosing a position
+        """
+        assert(point_in_node(self, point))
+        return _find_node(self, point)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/3f8766659c4e/
Changeset:   3f8766659c4e
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:17:38+00:00
Summary:     updated calls to insert_grids
Affected #:  1 file

diff -r 53486916e626c79d275830058c5aee825b3ecdc0 -r 3f8766659c4ed26baa7f165872c25bc84a5ef16a yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -216,7 +216,7 @@
             return
 
         if _kd_is_leaf(self) == 1:
-            insert_grids(self, ngrids, gles, gres, gids, rank, size)
+            self.insert_grids(ngrids, gles, gres, gids, rank, size)
             return
 
         less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
@@ -507,7 +507,7 @@
 
             # Populate Left Node
             #print 'Inserting left node', self.left_edge, self.right_edge
-            insert_grids(self.left, nless, less_gles, less_gres,
+            self.left.insert_grids(nless, less_gles, less_gres,
                          l_ids, rank, size)
 
         if ngreater > 0:
@@ -524,7 +524,7 @@
 
             # Populate Right Node
             #print 'Inserting right node', self.left_edge, self.right_edge
-            insert_grids(self.right, ngreater, greater_gles, greater_gres,
+            self.right.insert_grids(ngreater, greater_gles, greater_gres,
                          g_ids, rank, size)
 
         return 0


https://bitbucket.org/yt_analysis/yt/commits/ec69848f220c/
Changeset:   ec69848f220c
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:20:15+00:00
Summary:     changed calls to split_grid and split_grids
Affected #:  1 file

diff -r 3f8766659c4ed26baa7f165872c25bc84a5ef16a -r ec69848f220c66b84a1734b7c3ea5b4bfc3d28f3 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -192,7 +192,7 @@
             return
 
         # Split the grid
-        cdef int check = split_grid(self, gle, gre, grid_id, rank, size)
+        cdef int check = self.split_grid(gle, gre, grid_id, rank, size)
         # If check is -1, then we have found a place where there are no choices.
         # Exit out and set the node to None.
         if check == -1:
@@ -309,7 +309,7 @@
                 return
 
         # Split the grids
-        check = split_grids(self, ngrids, gles, gres, gids, rank, size)
+        check = self.split_grids(ngrids, gles, gres, gids, rank, size)
         # If check is -1, then we have found a place where there are no choices.
         # Exit out and set the node to None.
         if check == -1:


https://bitbucket.org/yt_analysis/yt/commits/85570f4dd38d/
Changeset:   85570f4dd38d
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:25:59+00:00
Summary:     moved kdtree_get_choices out of Node class methods bc does not contain Node instance
Affected #:  1 file

diff -r ec69848f220c66b84a1734b7c3ea5b4bfc3d28f3 -r 85570f4dd38d675ad0cc07c4ec22bba0b76dc8da yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -367,74 +367,6 @@
 
         return 0
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef kdtree_get_choices(int n_grids,
-                            np.float64_t[:,:,:] data,
-                            np.float64_t[:] l_corner,
-                            np.float64_t[:] r_corner,
-                            np.uint8_t[:] less_ids,
-                            np.uint8_t[:] greater_ids,
-                           ):
-        cdef int i, j, k, dim, n_unique, best_dim, my_split
-        cdef np.float64_t split
-        cdef np.float64_t[:,:] uniquedims
-        cdef np.float64_t[:] uniques
-        uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
-        my_max = 0
-        my_split = 0
-        best_dim = -1
-        for dim in range(3):
-            n_unique = 0
-            uniques = uniquedims[dim]
-            for i in range(n_grids):
-                # Check for disqualification
-                for j in range(2):
-                    # print "Checking against", i,j,dim,data[i,j,dim]
-                    if not (l_corner[dim] < data[i][j][dim] and
-                            data[i][j][dim] < r_corner[dim]):
-                        # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
-                        continue
-                    skipit = 0
-                    # Add our left ...
-                    for k in range(n_unique):
-                        if uniques[k] == data[i][j][dim]:
-                            skipit = 1
-                            # print "Identified", uniques[k], data[i,j,dim], n_unique
-                            break
-                    if skipit == 0:
-                        uniques[n_unique] = data[i][j][dim]
-                        n_unique += 1
-            if n_unique > my_max:
-                best_dim = dim
-                my_max = n_unique
-                my_split = (n_unique-1)/2
-        if best_dim == -1:
-            return -1, 0, 0, 0
-        # I recognize how lame this is.
-        cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
-        for i in range(my_max):
-            # print "Setting tarr: ", i, uniquedims[best_dim][i]
-            tarr[i] = uniquedims[best_dim][i]
-        tarr.sort()
-        split = tarr[my_split]
-        cdef int nless=0, ngreater=0
-        for i in range(n_grids):
-            if data[i][0][best_dim] < split:
-                less_ids[i] = 1
-                nless += 1
-            else:
-                less_ids[i] = 0
-            if data[i][1][best_dim] > split:
-                greater_ids[i] = 1
-                ngreater += 1
-            else:
-                greater_ids[i] = 0
-
-        # Return out unique values
-        return best_dim, split, nless, ngreater
-
     #@cython.boundscheck(False)
     #@cython.wraparound(False)
     #@cython.cdivision(True)
@@ -825,3 +757,71 @@
         return 1
     else:
         return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids,
+                        np.float64_t[:,:,:] data,
+                        np.float64_t[:] l_corner,
+                        np.float64_t[:] r_corner,
+                        np.uint8_t[:] less_ids,
+                        np.uint8_t[:] greater_ids,
+                       ):
+    cdef int i, j, k, dim, n_unique, best_dim, my_split
+    cdef np.float64_t split
+    cdef np.float64_t[:,:] uniquedims
+    cdef np.float64_t[:] uniques
+    uniquedims = cvarray(format="d", shape=(3, 2*n_grids), itemsize=sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i][j][dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i][j][dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    if best_dim == -1:
+        return -1, 0, 0, 0
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef int nless=0, ngreater=0
+    for i in range(n_grids):
+        if data[i][0][best_dim] < split:
+            less_ids[i] = 1
+            nless += 1
+        else:
+            less_ids[i] = 0
+        if data[i][1][best_dim] > split:
+            greater_ids[i] = 1
+            ngreater += 1
+        else:
+            greater_ids[i] = 0
+
+    # Return out unique values
+    return best_dim, split, nless, ngreater


https://bitbucket.org/yt_analysis/yt/commits/b9e24723a287/
Changeset:   b9e24723a287
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:27:47+00:00
Summary:     changed calls to geo_split
Affected #:  1 file

diff -r 85570f4dd38d675ad0cc07c4ec22bba0b76dc8da -r b9e24723a287cfd16c5b49da24e3c2045ba3770a yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -177,7 +177,7 @@
 
         # If we should continue to split based on parallelism, do so!
         if self.should_i_split(rank, size):
-            geo_split(self, gle, gre, grid_id, rank, size)
+            self.geo_split(gle, gre, grid_id, rank, size)
             return
 
         cdef int contained = 1
@@ -295,7 +295,7 @@
         if ngrids == 1:
             # If we should continue to split based on parallelism, do so!
             if self.should_i_split(rank, size):
-                geo_split(self, gles[0,:], gres[0,:], gids[0], rank, size)
+                self.geo_split(gles[0,:], gres[0,:], gids[0], rank, size)
                 return
 
             for i in range(3):


https://bitbucket.org/yt_analysis/yt/commits/41225032d5c2/
Changeset:   41225032d5c2
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:29:38+00:00
Summary:     changed calls to divide, specific to amr_kdtools.pyx
Affected #:  1 file

diff -r b9e24723a287cfd16c5b49da24e3c2045ba3770a -r 41225032d5c2f3d779e916a75c6962ac2c65a022 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -351,7 +351,7 @@
         split.pos = split_pos
 
         # Create a Split
-        divide(self, split)
+        self.divide(split)
 
         # Populate Left Node
         #print 'Inserting left node', self.left_edge, self.right_edge
@@ -409,7 +409,7 @@
         split.pos = split_pos
 
         # Create a Split
-        divide(self, split)
+        self.divide(split)
 
         less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
         greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
@@ -495,7 +495,7 @@
         split.pos = new_pos
 
         # Create a Split
-        divide(self, split)
+        self.divide(split)
 
         #lnew_gre[big_dim] = new_pos
         # Populate Left Node


https://bitbucket.org/yt_analysis/yt/commits/950efd64f38a/
Changeset:   950efd64f38a
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 01:33:17+00:00
Summary:     changed calls to kd_sum_volume
Affected #:  2 files

diff -r 41225032d5c2f3d779e916a75c6962ac2c65a022 -r 950efd64f38ac1f20bd3a6f68559a40f5049e29a yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,6 @@
     kd_is_leaf, \
     depth_traverse, \
     depth_first_touch, \
-    kd_sum_volume, \
     kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
@@ -120,7 +119,7 @@
             # print grid, dims, li, ri
 
         # Calculate the Volume
-        vol = kd_sum_volume(self.trunk)
+        vol = self.trunk.kd_sum_volume()
         mylog.debug('AMRKDTree volume = %e' % vol)
         kd_node_check(self.trunk)
 
@@ -557,11 +556,11 @@
             if splitdims[i] != -1:
                 n.create_split(splitdims[i], splitposs[i])
 
-        mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
+        mylog.info('AMRKDTree rebuilt, Final Volume: %e' % self.tree.trunk.kd_sum_volume())
         return self.tree.trunk
 
     def count_volume(self):
-        return kd_sum_volume(self.tree.trunk)
+        return self.tree.trunk.kd_sum_volume()
 
     def count_cells(self):
         return self.tree.sum_cells()
@@ -576,6 +575,6 @@
     hv = AMRKDTree(ds)
     t2 = time()
 
-    print(kd_sum_volume(hv.tree.trunk))
+    print(hv.tree.trunk.kd_sum_volume())
     print(kd_node_check(hv.tree.trunk))
     print('Time: %e seconds' % (t2-t1))

diff -r 41225032d5c2f3d779e916a75c6962ac2c65a022 -r 950efd64f38ac1f20bd3a6f68559a40f5049e29a yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -544,7 +544,7 @@
                 vol *= self.right_edge[i] - self.left_edge[i]
             return vol
         else:
-            return kd_sum_volume(self.left) + kd_sum_volume(self.right)
+            return self.left.kd_sum_volume() + self.right.kd_sum_volume()
 
     def kd_node_check(self):
         assert (self.left is None) == (self.right is None)


https://bitbucket.org/yt_analysis/yt/commits/3d72e92b2069/
Changeset:   3d72e92b2069
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:10:55+00:00
Summary:     changed calls to kd_node_check
Affected #:  2 files

diff -r 950efd64f38ac1f20bd3a6f68559a40f5049e29a -r 3d72e92b206923ccb02bb6aeb2c8677987e87e75 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,6 @@
     kd_is_leaf, \
     depth_traverse, \
     depth_first_touch, \
-    kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -121,7 +120,7 @@
         # Calculate the Volume
         vol = self.trunk.kd_sum_volume()
         mylog.debug('AMRKDTree volume = %e' % vol)
-        kd_node_check(self.trunk)
+        self.trunk.kd_node_check()
 
     def sum_cells(self, all_cells=False):
         cells = 0
@@ -576,5 +575,5 @@
     t2 = time()
 
     print(hv.tree.trunk.kd_sum_volume())
-    print(kd_node_check(hv.tree.trunk))
+    print(hv.tree.trunk.kd_node_check())
     print('Time: %e seconds' % (t2-t1))

diff -r 950efd64f38ac1f20bd3a6f68559a40f5049e29a -r 3d72e92b206923ccb02bb6aeb2c8677987e87e75 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -553,7 +553,7 @@
                 return np.prod(self.right_edge - self.left_edge)
             else: return 0.0
         else:
-            return kd_node_check(self.left)+kd_node_check(self.right)
+            return self.left.kd_node_check()+self.right.kd_node_check()
 
     def kd_is_leaf(self):
         cdef int has_l_child = self.left == None


https://bitbucket.org/yt_analysis/yt/commits/faf465b5d119/
Changeset:   faf465b5d119
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:14:33+00:00
Summary:     changed calls to kd_is_leaf
Affected #:  1 file

diff -r 3d72e92b206923ccb02bb6aeb2c8677987e87e75 -r faf465b5d119467b34d5d16f3f6af929aa6a0fce yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,7 +26,6 @@
 from yt.utilities.lib.amr_kdtools import \
     Node, \
     find_node, \
-    kd_is_leaf, \
     depth_traverse, \
     depth_first_touch, \
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -127,7 +126,7 @@
         for node in depth_traverse(self.trunk):
             if node.grid == -1:
                 continue
-            if not all_cells and not kd_is_leaf(node):
+            if not all_cells and not node.kd_is_leaf():
                 continue
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds


https://bitbucket.org/yt_analysis/yt/commits/c41c84995c38/
Changeset:   c41c84995c38
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:20:06+00:00
Summary:     changed calls to _kd_is_leaf
Affected #:  1 file

diff -r faf465b5d119467b34d5d16f3f6af929aa6a0fce -r c41c84995c383f09f0b73cc3fe9325f4e2780efa yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -107,11 +107,11 @@
     def kd_traverse(self, viewpoint=None):
         if viewpoint is None:
             for node in depth_traverse(self):
-                if _kd_is_leaf(node) == 1 and node.grid != -1:
+                if node._kd_is_leaf() == 1 and node.grid != -1:
                     yield node
         else:
             for node in viewpoint_traverse(self, viewpoint):
-                if _kd_is_leaf(node) == 1 and node.grid != -1:
+                if node._kd_is_leaf() == 1 and node.grid != -1:
                     yield node
 
     def add_pygrid(self,
@@ -147,7 +147,7 @@
         if not should_i_build(self, rank, size):
             return
 
-        if _kd_is_leaf(self) == 1:
+        if self._kd_is_leaf() == 1:
             self.insert_grid(gle, gre, gid, rank, size)
         else:
             less_id = gle[self.split.dim] < self.split.pos
@@ -215,7 +215,7 @@
         if not should_i_build(self, rank, size):
             return
 
-        if _kd_is_leaf(self) == 1:
+        if self._kd_is_leaf() == 1:
             self.insert_grids(ngrids, gles, gres, gids, rank, size)
             return
 
@@ -570,7 +570,7 @@
         '''
         Takes a single step in the depth-first traversal
         '''
-        if _kd_is_leaf(current) == 1: # At a leaf, move back up
+        if current._kd_is_leaf() == 1: # At a leaf, move back up
             previous = current
             current = current.parent
 
@@ -662,7 +662,7 @@
         Takes a single step in the viewpoint based traversal.  Always
         goes to the node furthest away from viewpoint first.
         '''
-        if _kd_is_leaf(current) == 1: # At a leaf, move back up
+        if current._kd_is_leaf() == 1: # At a leaf, move back up
             previous = current
             current = current.parent
         elif current.split.dim is None: # This is a dead node
@@ -714,7 +714,7 @@
         return inside
 
     cdef Node _find_node(self, np.float64_t[:] point):
-        while _kd_is_leaf(self) == 0:
+        while self._kd_is_leaf() == 0:
             if point[self.split.dim] < self.split.pos:
                 self = self.left
             else:


https://bitbucket.org/yt_analysis/yt/commits/4476eea81199/
Changeset:   4476eea81199
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:33:28+00:00
Summary:     changed calls to depth_traverse
Affected #:  3 files

diff -r c41c84995c383f09f0b73cc3fe9325f4e2780efa -r 4476eea811991d91a75459c60112c4041aec26bc yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,7 +26,6 @@
 from yt.utilities.lib.amr_kdtools import \
     Node, \
     find_node, \
-    depth_traverse, \
     depth_first_touch, \
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
@@ -100,7 +99,7 @@
             self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self.trunk):
+        for node in self.trunk.depth_traverse():
             if node.grid == -1:
                 continue
             grid = self.ds.index.grids[node.grid - self._id_offset]
@@ -123,7 +122,7 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self.trunk):
+        for node in self.trunk.depth_traverse():
             if node.grid == -1:
                 continue
             if not all_cells and not node.kd_is_leaf():
@@ -426,7 +425,7 @@
         if self.comm.rank != 0:
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
-        for node in depth_traverse(self.tree):
+        for node in self.tree.depth_traverse():
             i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
@@ -447,7 +446,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"a")
-            for node in depth_traverse(self.tree):
+            for node in self.tree.depth_traverse():
                 i = node.node_id
                 if node.grid != -1:
                     data = [f["brick_%s_%s" %

diff -r c41c84995c383f09f0b73cc3fe9325f4e2780efa -r 4476eea811991d91a75459c60112c4041aec26bc yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -101,12 +101,12 @@
         return re
 
     def set_dirty(self, bint state):
-        for node in depth_traverse(self):
+        for node in self.depth_traverse():
             node.dirty = state
 
     def kd_traverse(self, viewpoint=None):
         if viewpoint is None:
-            for node in depth_traverse(self):
+            for node in self.depth_traverse():
                 if node._kd_is_leaf() == 1 and node.grid != -1:
                     yield node
         else:
@@ -596,12 +596,12 @@
 
         return current, previous
 
-    def depth_traverse(Node trunk, max_node=None):
+    def depth_traverse(self, max_node=None):
         '''
         Yields a depth-first traversal of the kd tree always going to
         the left child before the right.
         '''
-        current = trunk
+        current = self
         previous = None
         if max_node is None:
             max_node = np.inf

diff -r c41c84995c383f09f0b73cc3fe9325f4e2780efa -r 4476eea811991d91a75459c60112c4041aec26bc yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -14,8 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.lib.amr_kdtools import depth_traverse, \
-        get_left_edge, get_right_edge
+from yt.utilities.lib.amr_kdtools import Node
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -46,7 +45,7 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree.trunk):
+    for node in kd.tree.trunk.depth_traverse():
         if node.grid is None:
             continue
         grid = ds.index.grids[node.grid - kd._id_offset]


https://bitbucket.org/yt_analysis/yt/commits/f0778c44b9e8/
Changeset:   f0778c44b9e8
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:35:45+00:00
Summary:     changed calls to depth_first_touch
Affected #:  2 files

diff -r 4476eea811991d91a75459c60112c4041aec26bc -r f0778c44b9e8e8a9b5e2b753a483e0e4b76b5e1e yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,7 +26,6 @@
 from yt.utilities.lib.amr_kdtools import \
     Node, \
     find_node, \
-    depth_first_touch, \
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -497,7 +496,7 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree.trunk):
+        for node in self.tree.trunk.depth_first_touch():
             nids.append(node.node_id)
             les.append(node.get_left_edge())
             res.append(node.get_right_edge())

diff -r 4476eea811991d91a75459c60112c4041aec26bc -r f0778c44b9e8e8a9b5e2b753a483e0e4b76b5e1e yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -613,12 +613,12 @@
                 current = current.parent
                 previous = current.right
 
-    def depth_first_touch(Node tree, max_node=None):
+    def depth_first_touch(self, max_node=None):
         '''
         Yields a depth-first traversal of the kd tree always going to
         the left child before the right.
         '''
-        current = tree
+        current = self
         previous = None
         if max_node is None:
             max_node = np.inf


https://bitbucket.org/yt_analysis/yt/commits/9959928e456b/
Changeset:   9959928e456b
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:36:59+00:00
Summary:     changed calls to breadth_traverse
Affected #:  1 file

diff -r f0778c44b9e8e8a9b5e2b753a483e0e4b76b5e1e -r 9959928e456b76c9e39d414ac4a34c9298df2349 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -631,12 +631,12 @@
                 current = current.parent
                 previous = current.right
 
-    def breadth_traverse(Node tree):
+    def breadth_traverse(self):
         '''
         Yields a breadth-first traversal of the kd tree always going to
         the left child before the right.
         '''
-        current = tree
+        current = self
         previous = None
         while current is not None:
             yield current


https://bitbucket.org/yt_analysis/yt/commits/11034d61d19d/
Changeset:   11034d61d19d
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:38:28+00:00
Summary:     changed calls to viewpoint_traverse
Affected #:  1 file

diff -r 9959928e456b76c9e39d414ac4a34c9298df2349 -r 11034d61d19dece7538923b3d7ddf8c1941ef595 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -110,7 +110,7 @@
                 if node._kd_is_leaf() == 1 and node.grid != -1:
                     yield node
         else:
-            for node in viewpoint_traverse(self, viewpoint):
+            for node in self.viewpoint_traverse(viewpoint):
                 if node._kd_is_leaf() == 1 and node.grid != -1:
                     yield node
 
@@ -643,13 +643,13 @@
             current, previous = step_depth(current, previous)
 
 
-    def viewpoint_traverse(Node tree, viewpoint):
+    def viewpoint_traverse(self, viewpoint):
         '''
         Yields a viewpoint dependent traversal of the kd-tree.  Starts
         with nodes furthest away from viewpoint.
         '''
 
-        current = tree
+        current = self
         previous = None
         while current is not None:
             yield current


https://bitbucket.org/yt_analysis/yt/commits/8c1b9778af37/
Changeset:   8c1b9778af37
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 18:57:51+00:00
Summary:     changed calls to point_in_node and changed func def in amr_kdtools.pxd
Affected #:  2 files

diff -r 11034d61d19dece7538923b3d7ddf8c1941ef595 -r 8c1b9778af376540b1ca835b6f8d34cbf8e3c260 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -35,6 +35,6 @@
     cdef Split * split
     cdef int level
 
-cdef int point_in_node(Node node, np.float64_t[:] point)
+cdef int point_in_node(np.float64_t[:] point)
 cdef Node _find_node(Node node, np.float64_t[:] point)
-cdef int _kd_is_leaf(Node node)
+cdef int _kd_is_leaf()

diff -r 11034d61d19dece7538923b3d7ddf8c1941ef595 -r 8c1b9778af376540b1ca835b6f8d34cbf8e3c260 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -726,7 +726,7 @@
         """
         Find the AMRKDTree node enclosing a position
         """
-        assert(point_in_node(self, point))
+        assert(self.point_in_node(point))
         return _find_node(self, point)
 
 @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/af4a08945411/
Changeset:   af4a08945411
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 19:01:21+00:00
Summary:     updated calls to _find_node
Affected #:  3 files

diff -r 8c1b9778af376540b1ca835b6f8d34cbf8e3c260 -r af4a0894541171dc03290c2d9f2266312bfc86d1 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -36,5 +36,5 @@
     cdef int level
 
 cdef int point_in_node(np.float64_t[:] point)
-cdef Node _find_node(Node node, np.float64_t[:] point)
+cdef Node _find_node(np.float64_t[:] point)
 cdef int _kd_is_leaf()

diff -r 8c1b9778af376540b1ca835b6f8d34cbf8e3c260 -r af4a0894541171dc03290c2d9f2266312bfc86d1 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -727,7 +727,7 @@
         Find the AMRKDTree node enclosing a position
         """
         assert(self.point_in_node(point))
-        return _find_node(self, point)
+        return self._find_node(point)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 8c1b9778af376540b1ca835b6f8d34cbf8e3c260 -r af4a0894541171dc03290c2d9f2266312bfc86d1 yt/utilities/lib/contour_finding.pyx
--- a/yt/utilities/lib/contour_finding.pyx
+++ b/yt/utilities/lib/contour_finding.pyx
@@ -25,7 +25,7 @@
     OctreeContainer, OctInfo
 from yt.geometry.oct_visitors cimport \
     Oct
-from .amr_kdtools cimport _find_node, Node
+from .amr_kdtools cimport Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
 import sys
@@ -446,7 +446,7 @@
                                 pos[ax] = vc0.dims[ax]
                                 my_pos[ax] = vc0.dims[ax]-1
                             get_spos(vc0, pos[0], pos[1], pos[2], ax, spos)
-                            adj_node = _find_node(trunk, spos)
+                            adj_node = trunk._find_node(spos)
                             vc1 = vcs[adj_node.node_ind]
                             if spos_contained(vc1, spos):
                                 index = vc_index(vc0, my_pos[0],


https://bitbucket.org/yt_analysis/yt/commits/0f9ab25b0078/
Changeset:   0f9ab25b0078
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 19:13:30+00:00
Summary:     changed calls to find_node
Affected #:  1 file

diff -r af4a0894541171dc03290c2d9f2266312bfc86d1 -r 0f9ab25b00780a7e9278f3da523c0d5f1a65d894 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -23,9 +23,7 @@
     receive_and_reduce, \
     send_to_parent, \
     scatter_image
-from yt.utilities.lib.amr_kdtools import \
-    Node, \
-    find_node, \
+from yt.utilities.lib.amr_kdtools import Node
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -238,7 +236,7 @@
         return temp
 
     def locate_node(self, pos):
-        return find_node(self.tree.trunk, pos)
+        return self.tree.trunk.find_node(pos)
 
     def get_reduce_owners(self):
         owners = {}


https://bitbucket.org/yt_analysis/yt/commits/0169594e8e43/
Changeset:   0169594e8e43
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 20:54:16+00:00
Summary:     moved point_in_node, _find_node, and _kd_is_leaf into Node declaration in amr_kdtools.pxd
Affected #:  1 file

diff -r 0f9ab25b00780a7e9278f3da523c0d5f1a65d894 -r 0169594e8e4387c79e99c0a21446f3b293c1572d yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -34,7 +34,6 @@
     cdef public data
     cdef Split * split
     cdef int level
-
-cdef int point_in_node(np.float64_t[:] point)
-cdef Node _find_node(np.float64_t[:] point)
-cdef int _kd_is_leaf()
+    cdef int point_in_node(np.float64_t[:] point)
+    cdef Node _find_node(np.float64_t[:] point)
+    cdef int _kd_is_leaf()


https://bitbucket.org/yt_analysis/yt/commits/a20422f19b70/
Changeset:   a20422f19b70
Branch:      yt
User:        charlie_watson
Date:        2016-06-14 20:56:34+00:00
Summary:     adding node calls back to amr_kdtools.pxd
Affected #:  1 file

diff -r 0169594e8e4387c79e99c0a21446f3b293c1572d -r a20422f19b700ae7dc8860c08bf0ab4992c17da9 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -34,6 +34,6 @@
     cdef public data
     cdef Split * split
     cdef int level
-    cdef int point_in_node(np.float64_t[:] point)
-    cdef Node _find_node(np.float64_t[:] point)
-    cdef int _kd_is_leaf()
+    cdef int point_in_node(Node node, np.float64_t[:] point)
+    cdef Node _find_node(Node node, np.float64_t[:] point)
+    cdef int _kd_is_leaf(Node node)


https://bitbucket.org/yt_analysis/yt/commits/90670cd8a7f1/
Changeset:   90670cd8a7f1
Branch:      yt
User:        charlie_watson
Date:        2016-06-15 01:13:57+00:00
Summary:     updating amr_kdtools.pxd and moving step_depth and step_viewpoint out of Node class def
Affected #:  2 files

diff -r a20422f19b700ae7dc8860c08bf0ab4992c17da9 -r 90670cd8a7f1f8c3c5177ce26921cba88219e446 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -34,6 +34,51 @@
     cdef public data
     cdef Split * split
     cdef int level
-    cdef int point_in_node(Node node, np.float64_t[:] point)
-    cdef Node _find_node(Node node, np.float64_t[:] point)
-    cdef int _kd_is_leaf(Node node)
+    cdef int point_in_node(self, np.float64_t[:] point)
+    cdef Node _find_node(self, np.float64_t[:] point)
+    cdef int _kd_is_leaf(self)
+    cdef add_grid(self, np.float64_t[:] gle, np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size)
+    cdef insert_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int grid_id,
+                       int rank,
+                       int size)
+    cpdef add_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef int should_i_split(self, int rank, int size)
+    cdef void insert_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef split_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size)
+    cdef int split_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef geo_split(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int grid_id,
+                       int rank,
+                       int size)
+    cdef void divide(self, Split * split)

diff -r a20422f19b700ae7dc8860c08bf0ab4992c17da9 -r 90670cd8a7f1f8c3c5177ce26921cba88219e446 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -566,36 +566,6 @@
             return 1
         return 0
 
-    def step_depth(Node current, Node previous):
-        '''
-        Takes a single step in the depth-first traversal
-        '''
-        if current._kd_is_leaf() == 1: # At a leaf, move back up
-            previous = current
-            current = current.parent
-
-        elif current.parent is previous: # Moving down, go left first
-            previous = current
-            if current.left is not None:
-                current = current.left
-            elif current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-
-        elif current.left is previous: # Moving up from left, go right
-            previous = current
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-
-        elif current.right is previous: # Moving up from right child, move up
-            previous = current
-            current = current.parent
-
-        return current, previous
-
     def depth_traverse(self, max_node=None):
         '''
         Yields a depth-first traversal of the kd tree always going to
@@ -655,55 +625,6 @@
             yield current
             current, previous = step_viewpoint(current, previous, viewpoint)
 
-    def step_viewpoint(Node current,
-                       Node previous,
-                       viewpoint):
-        '''
-        Takes a single step in the viewpoint based traversal.  Always
-        goes to the node furthest away from viewpoint first.
-        '''
-        if current._kd_is_leaf() == 1: # At a leaf, move back up
-            previous = current
-            current = current.parent
-        elif current.split.dim is None: # This is a dead node
-            previous = current
-            current = current.parent
-
-        elif current.parent is previous: # Moving down
-            previous = current
-            if viewpoint[current.split.dim] <= current.split.pos:
-                if current.right is not None:
-                    current = current.right
-                else:
-                    previous = current.right
-            else:
-                if current.left is not None:
-                    current = current.left
-                else:
-                    previous = current.left
-
-        elif current.right is previous: # Moving up from right
-            previous = current
-            if viewpoint[current.split.dim] <= current.split.pos:
-                if current.left is not None:
-                    current = current.left
-                else:
-                    current = current.parent
-            else:
-                current = current.parent
-
-        elif current.left is previous: # Moving up from left child
-            previous = current
-            if viewpoint[current.split.dim] > current.split.pos:
-                if current.right is not None:
-                    current = current.right
-                else:
-                    current = current.parent
-            else:
-                current = current.parent
-
-        return current, previous
-
     cdef int point_in_node(self,
                            np.float64_t[:] point):
         cdef int i
@@ -758,6 +679,85 @@
     else:
         return 0
 
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if current._kd_is_leaf() == 1: # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+
+def step_viewpoint(Node current,
+                   Node previous,
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if current._kd_is_leaf() == 1: # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt/commits/24500c046977/
Changeset:   24500c046977
Branch:      yt
User:        MatthewTurk
Date:        2016-06-15 20:11:49+00:00
Summary:     Declare the iteration variable
Affected #:  1 file

diff -r 90670cd8a7f1f8c3c5177ce26921cba88219e446 -r 24500c04697758496a05e65f3ff5c994c0d800fb yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -105,6 +105,7 @@
             node.dirty = state
 
     def kd_traverse(self, viewpoint=None):
+        cdef Node node
         if viewpoint is None:
             for node in self.depth_traverse():
                 if node._kd_is_leaf() == 1 and node.grid != -1:


https://bitbucket.org/yt_analysis/yt/commits/ca819d59f0e4/
Changeset:   ca819d59f0e4
Branch:      yt
User:        charlie_watson
Date:        2016-06-16 00:03:49+00:00
Summary:     following Matt's lead, defining Node node explicitly
Affected #:  2 files

diff -r 24500c04697758496a05e65f3ff5c994c0d800fb -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -19,7 +19,7 @@
     int dim
     np.float64_t pos
 
-cdef class Node
+#cdef class Node
 
 cdef class Node:
     cdef public Node left

diff -r 24500c04697758496a05e65f3ff5c994c0d800fb -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -86,7 +86,7 @@
     def __dealloc__(self):
         if self.split != NULL: free(self.split)
 
-# Begin input of converted methods
+    # Begin input of converted methods
 
     def get_left_edge(self):
         le = np.empty(3, dtype='float64')
@@ -101,6 +101,7 @@
         return re
 
     def set_dirty(self, bint state):
+        cdef Node node
         for node in self.depth_traverse():
             node.dirty = state
 


https://bitbucket.org/yt_analysis/yt/commits/f90f0090a782/
Changeset:   f90f0090a782
Branch:      yt
User:        charlie_watson
Date:        2016-06-16 00:05:17+00:00
Summary:     Merging with tip
Affected #:  57 files

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -651,7 +651,7 @@
 .. _multiple-PRs:
 
 Working with Multiple BitBucket Pull Requests
-+++++++++++++++++++++++++++++++++++++++++++++
+---------------------------------------------
 
 Once you become active developing for yt, you may be working on
 various aspects of the code or bugfixes at the same time.  Currently,

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -704,7 +704,7 @@
 if type -P curl &>/dev/null
 then
     echo "Using curl"
-    export GETFILE="curl -sSO"
+    export GETFILE="curl -sSOL"
 else
     echo "Using wget"
     export GETFILE="wget -nv"

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b doc/source/_static/yt_icon.png
Binary file doc/source/_static/yt_icon.png has changed

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -98,22 +98,31 @@
 .. code-block:: python
 
     ad = ds.all_data()
+
+    # just a field name
     density = ad['density']
+
+    # field tuple with no parentheses
     density = ad['gas', 'density']
+
+    # full field tuple
     density = ad[('gas', 'density')]
-    dnesity = ad[ds.fields.gas.density]
+
+    # through the ds.fields object
+    density = ad[ds.fields.gas.density]
 
 The first data access example is the simplest. In that example, the field type
 is inferred from the name of the field. The next two examples use the field type
 explicitly, this might be necessary if there is more than one field type with a
-"density" field defined in the same simulation. The third example is a slightly
-more verbose and is syntactically identical to the second example due to the way
-indexing functions in Python. The final example uses the ``ds.fields` object
-described above. This way of accessing fields lends itself to interactive use,
-especially if you make heavy use of IPython's tab completion features. Any of
-these ways of denoting the ``('gas', 'density')`` field can be used when
-supplying a field name to a yt data object, analysis routines, or plotting and
-visualization function.
+"density" field defined in the same dataset. The third example is slightly more
+verbose but is syntactically identical to the second example due to the way
+indexing works in the Python language.
+
+The final example uses the ``ds.fields`` object described above. This way of
+accessing fields lends itself to interactive use, especially if you make heavy
+use of IPython's tab completion features. Any of these ways of denoting the
+``('gas', 'density')`` field can be used when supplying a field name to a yt
+data object, analysis routines, or plotting and visualization function.
 
 Accessing Fields without a Field Type
 -------------------------------------

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,114 +83,70 @@
     Extension("yt.geometry.grid_visitors",
               ["yt/geometry/grid_visitors.pyx"],
               include_dirs=["yt/utilities/lib"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/grid_visitors.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.grid_container",
               ["yt/geometry/grid_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/grid_container.pxd",
-                       "yt/geometry/grid_visitors.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.oct_container",
               ["yt/geometry/oct_container.pyx",
                "yt/utilities/lib/tsearch.c"],
               include_dirs=["yt/utilities/lib"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.oct_visitors",
               ["yt/geometry/oct_visitors.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_oct_container",
               ["yt/geometry/particle_oct_container.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.selection_routines",
               ["yt/geometry/selection_routines.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/grid_traversal.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/oct_visitors.pxd",
-                       "yt/geometry/grid_container.pxd",
-                       "yt/geometry/grid_visitors.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_deposit",
               ["yt/geometry/particle_deposit.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd",
-                       "yt/geometry/particle_deposit.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.particle_smooth",
               ["yt/geometry/particle_smooth.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd",
-                       "yt/geometry/particle_deposit.pxd",
-                       "yt/geometry/particle_smooth.pxd"]),
+              libraries=std_libs),
     Extension("yt.geometry.fake_octree",
               ["yt/geometry/fake_octree.pyx"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/geometry/oct_container.pxd",
-                       "yt/geometry/selection_routines.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.spatial.ckdtree",
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
-              libraries=std_libs, depends=["yt/utilities/lib/bitarray.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               include_dirs=["yt/utilities/lib/"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
               libraries=std_libs,
-              depends=["yt/utilities/lib/element_mappings.pxd",
-                       "yt/utilities/lib/mesh_triangulation.h",
-                       "yt/utilities/lib/vec3_ops.pxd",
-                       "yt/utilities/lib/primitives.pxd"]),
+              depends=["yt/utilities/lib/mesh_triangulation.h"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
                             "yt/geometry/"],
-              libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/amr_kdtools.pxd",
-                       "yt/utilities/lib/grid_traversal.pxd",
-                       "yt/utilities/lib/contour_finding.pxd",
-                       "yt/geometry/oct_container.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.geometry_utils",
               ["yt/utilities/lib/geometry_utils.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.marching_cubes",
               ["yt/utilities/lib/marching_cubes.pyx",
                "yt/utilities/lib/fixed_interpolator.c"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/fixed_interpolator.h",
-                       ]),
+              depends=["yt/utilities/lib/fixed_interpolator.h"]),
     Extension("yt.utilities.lib.mesh_triangulation",
               ["yt/utilities/lib/mesh_triangulation.pyx"],
               depends=["yt/utilities/lib/mesh_triangulation.h"]),
@@ -198,15 +154,11 @@
               ["yt/utilities/lib/pixelization_routines.pyx",
                "yt/utilities/lib/pixelization_constants.c"],
               include_dirs=["yt/utilities/lib/"],
-              libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd",
-                                        "yt/utilities/lib/pixelization_constants.h",
-                                        "yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs,
+              depends=["yt/utilities/lib/pixelization_constants.h"]),
     Extension("yt.utilities.lib.primitives",
               ["yt/utilities/lib/primitives.pyx"],
-              libraries=std_libs, 
-              depends=["yt/utilities/lib/primitives.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd",
-                       "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.origami",
               ["yt/utilities/lib/origami.pyx",
                "yt/utilities/lib/origami_tags.c"],
@@ -220,15 +172,11 @@
               libraries=std_libs,
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              depends=["yt/utilities/lib/fp_utils.pxd",
-                       "yt/utilities/lib/kdtree.h",
-                       "yt/utilities/lib/fixed_interpolator.h",
-                       "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/field_interpolation_tables.pxd",
-                       "yt/utilities/lib/vec3_ops.pxd"]),
+              depends=["yt/utilities/lib/kdtree.h",
+                       "yt/utilities/lib/fixed_interpolator.h"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
-              libraries=std_libs, depends=["yt/utilities/lib/element_mappings.pxd"]),
+              libraries=std_libs),
     Extension("yt.utilities.lib.alt_ray_tracers",
               ["yt/utilities/lib/alt_ray_tracers.pyx"],
               libraries=std_libs),
@@ -244,7 +192,7 @@
     cython_extensions.append(
         Extension("yt.utilities.lib.{}".format(ext_name),
                   ["yt/utilities/lib/{}.pyx".format(ext_name)],
-                  libraries=std_libs, depends=["yt/utilities/lib/fp_utils.pxd"]))
+                  libraries=std_libs))
 
 lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
 for ext_name in lib_exts:
@@ -265,19 +213,12 @@
               include_dirs=["yt/frontends/artio/artio_headers/",
                             "yt/geometry/",
                             "yt/utilities/lib/"],
-              depends=glob.glob("yt/frontends/artio/artio_headers/*.c") +
-              ["yt/utilities/lib/fp_utils.pxd",
-               "yt/geometry/oct_container.pxd",
-               "yt/geometry/selection_routines.pxd",
-               "yt/geometry/particle_deposit.pxd"]),
+              depends=glob.glob("yt/frontends/artio/artio_headers/*.c")),
     Extension("yt.utilities.spatial._distance_wrap",
               glob.glob("yt/utilities/spatial/src/*.c")),
     Extension("yt.visualization._MPL",
               ["yt/visualization/_MPL.c"],
               libraries=std_libs),
-    Extension("yt.utilities.data_point_utilities",
-              ["yt/utilities/data_point_utilities.c"],
-              libraries=std_libs),
 ]
 
 # EMBREE
@@ -285,31 +226,13 @@
     embree_extensions = [
         Extension("yt.utilities.lib.mesh_construction",
                   ["yt/utilities/lib/mesh_construction.pyx"],
-                  depends=["yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/mesh_triangulation.h",
-                           "yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/mesh_traversal.pxd"]),
+                  depends=["yt/utilities/lib/mesh_triangulation.h"]),
         Extension("yt.utilities.lib.mesh_traversal",
-                  ["yt/utilities/lib/mesh_traversal.pyx"],
-                  depends=["yt/utilities/lib/mesh_traversal.pxd",
-                           "yt/utilities/lib/grid_traversal.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+                  ["yt/utilities/lib/mesh_traversal.pyx"]),
         Extension("yt.utilities.lib.mesh_samplers",
-                  ["yt/utilities/lib/mesh_samplers.pyx"],
-                  depends=["yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/element_mappings.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                           "yt/utilities/lib/primitives.pxd"]),
+                  ["yt/utilities/lib/mesh_samplers.pyx"]),
         Extension("yt.utilities.lib.mesh_intersection",
-                  ["yt/utilities/lib/mesh_intersection.pyx"],
-                  depends=["yt/utilities/lib/mesh_intersection.pxd",
-                           "yt/utilities/lib/mesh_construction.pxd",
-                           "yt/utilities/lib/bounding_volume_hierarchy.pxd",
-                           "yt/utilities/lib/mesh_samplers.pxd",
-                           "yt/utilities/lib/primitives.pxd",
-                           "yt/utilities/lib/vec3_ops.pxd"]),
+                  ["yt/utilities/lib/mesh_intersection.pyx"]),
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())
@@ -385,9 +308,12 @@
         _build_py.run(self)
 
 class build_ext(_build_ext):
-    # subclass setuptools extension builder to avoid importing numpy
+    # subclass setuptools extension builder to avoid importing cython and numpy
     # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
     def finalize_options(self):
+        from Cython.Build import cythonize
+        self.distribution.ext_modules[:] = cythonize(
+                self.distribution.ext_modules)
         _build_ext.finalize_options(self)
         # Prevent numpy from thinking it is still in its setup process
         # see http://stackoverflow.com/a/21621493/1382869
@@ -437,6 +363,7 @@
     ]
     },
     packages=find_packages(),
+    package_data = {'':['*.pxd']},
     setup_requires=[
         'numpy',
         'cython>=0.22',
@@ -457,7 +384,7 @@
     zip_safe=False,
     scripts=["scripts/iyt"],
     data_files=MAPSERVER_FILES + [(SHADERS_DIR, SHADERS_FILES)],
-    ext_modules=cython_extensions + extensions
+    ext_modules=cython_extensions + extensions,
 )
 
 # This info about 'ckdtree' should be incorporated somehow...

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -26,7 +26,7 @@
   local_gdf_000:
     - yt/frontends/gdf/tests/test_outputs.py
 
-  local_gizmo_000:
+  local_gizmo_001:
     - yt/frontends/gizmo/tests/test_outputs.py
 
   local_halos_000:

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -57,6 +57,9 @@
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
 
+        if parent is not None:
+            self.data.parent = self.parent.data
+
         # List containing characteristics about clumps that are to be written 
         # out by the write routines.
         if clump_info is None:

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/analysis_modules/level_sets/tests/test_clump_finding.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/tests/test_clump_finding.py
@@ -0,0 +1,74 @@
+"""
+Clump finder tests
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.level_sets.api import \
+    Clump, \
+    find_clumps, \
+    get_lowest_clumps
+from yt.frontends.stream.api import \
+    load_uniform_grid
+from yt.testing import \
+    assert_array_equal, \
+    assert_equal
+
+def test_clump_finding():
+    n_c = 8
+    n_p = 1
+    dims = (n_c, n_c, n_c)
+
+    density = np.ones(dims)
+    high_rho = 10.
+    # add a couple disconnected density enhancements
+    density[2, 2, 2] = high_rho
+    density[6, 6, 6] = high_rho
+
+    # put a particle at the center of one of them
+    dx = 1. / n_c
+    px = 2.5 * dx * np.ones(n_p)
+    
+    data = {"density": density,
+            "particle_mass": np.ones(n_p),
+            "particle_position_x": px,
+            "particle_position_y": px,
+            "particle_position_z": px,
+            "number_of_particles": n_p}
+
+    ds = load_uniform_grid(data, dims)
+
+    ad = ds.all_data()
+    master_clump = Clump(ad, ("gas", "density"))
+    master_clump.add_validator("min_cells", 1)
+
+    find_clumps(master_clump, 0.5, 2. * high_rho, 10.)
+
+    # there should be two children
+    assert_equal(len(master_clump.children), 2)
+
+    leaf_clumps = get_lowest_clumps(master_clump)
+    # two leaf clumps
+    assert_equal(len(leaf_clumps), 2)
+
+
+    # check some clump fields
+    assert_equal(master_clump.children[0]["density"][0].size, 1)
+    assert_equal(master_clump.children[0]["density"][0], ad["density"].max())
+    assert_equal(master_clump.children[0]["particle_mass"].size, 1)
+    assert_array_equal(master_clump.children[0]["particle_mass"], ad["particle_mass"])
+    assert_equal(master_clump.children[1]["density"][0].size, 1)
+    assert_equal(master_clump.children[1]["density"][0], ad["density"].max())
+    assert_equal(master_clump.children[1]["particle_mass"].size, 0)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -1194,8 +1194,9 @@
         col6 = pyfits.Column(name='FLUX', format='D', array=np.array([flux.value]))
         col7 = pyfits.Column(name='SPECTRUM', format='80A', array=np.array([phfile+"[PHLIST,1]"]))
         col8 = pyfits.Column(name='IMAGE', format='80A', array=np.array([phfile+"[PHLIST,1]"]))
+        col9 = pyfits.Column(name='SRC_NAME', format='80A', array=np.array(["yt_src"]))
 
-        coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8])
+        coldefs = pyfits.ColDefs([col1, col2, col3, col4, col5, col6, col7, col8, col9])
 
         wrhdu = pyfits.BinTableHDU.from_columns(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
@@ -1350,13 +1351,17 @@
             f = pyfits.open(self.parameters["RMF"])
             nchan = int(f["EBOUNDS"].header["DETCHANS"])
             num = 0
-            for i in range(1,len(f["EBOUNDS"].columns)+1):
-                if f["EBOUNDS"].header["TTYPE%d" % i] == "CHANNEL":
+            if "MATRIX" in f:
+                mat_key = "MATRIX"
+            elif "SPECRESP MATRIX" in f:
+                mat_key = "SPECRESP MATRIX"
+            for i in range(1,len(f[mat_key].columns)+1):
+                if f[mat_key].header["TTYPE%d" % i] == "F_CHAN":
                     num = i
                     break
             if num > 0:
                 tlmin = "TLMIN%d" % num
-                cmin = int(f["EBOUNDS"].header[tlmin])
+                cmin = int(f[mat_key].header[tlmin])
             else:
                 mylog.warning("Cannot determine minimum allowed value for channel. " +
                               "Setting to 0, which may be wrong.")

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -264,7 +264,7 @@
         emid = self.emid.d
         if self.thermal_broad:
             sigma = E0*np.sqrt(2.*kT*erg_per_keV/(self.A[element]*amu_grams))/cl
-            vec = broaden_lines(E0, sigma, amp, emid)*de
+            vec = broaden_lines(E0, sigma, amp, ebins)
         else:
             vec = np.histogram(E0, ebins, weights=amp)[0]
         tmpspec += vec

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/analysis_modules/photon_simulator/utils.pyx
--- a/yt/analysis_modules/photon_simulator/utils.pyx
+++ b/yt/analysis_modules/photon_simulator/utils.pyx
@@ -1,31 +1,30 @@
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.math cimport exp
-
-cdef double gfac = 1.0/np.sqrt(np.pi)
-
+from libc.math cimport erf
+    
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def broaden_lines(np.ndarray[np.float64_t, ndim=1] E0,
                   np.ndarray[np.float64_t, ndim=1] sigma,
                   np.ndarray[np.float64_t, ndim=1] amp,
-                  np.ndarray[np.float64_t, ndim=1] E):
+                  np.ndarray[np.float64_t, ndim=1] ebins):
 
-    cdef int i, j, n
-    cdef double x, isigma, iamp
-    cdef np.ndarray[np.float64_t, ndim=1] lines
+    cdef int i, j, n, m
+    cdef double x, isigma
+    cdef np.ndarray[np.float64_t, ndim=1] cdf, vec
 
     n = E0.shape[0]
-    m = E.shape[0]
-    lines = np.zeros(m)
-
+    m = ebins.shape[0]
+    cdf = np.zeros(m)
+    vec = np.zeros(m-1)
+    
     for i in range(n):
         isigma = 1.0/sigma[i]
-        iamp = gfac*amp[i]*isigma
         for j in range(m):
-            x = (E[j]-E0[i])*isigma
-            lines[j] += iamp*exp(-x*x)
-
-    return lines
+            x = (ebins[j]-E0[i])*isigma
+            cdf[j] = 0.5*(1+erf(x))
+        for j in range(m-1):
+            vec[j] = vec[j] + (cdf[j+1] - cdf[j])*amp[i]
+    return vec

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -24,12 +24,13 @@
     iterable, \
     validate_width_tuple, \
     fix_length
+from yt.geometry.selection_routines import \
+    points_in_cells
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.exceptions import \
     YTSphereTooSmall, \
     YTIllDefinedCutRegion, \
-    YTMixedCutRegion, \
     YTEllipsoidOrdering
 from yt.utilities.minimal_representation import \
     MinimalSliceData
@@ -793,8 +794,10 @@
         for field in fields:
             f = self.base_object[field]
             if f.shape != ind.shape:
-                raise YTMixedCutRegion(self.conditionals, field)
-            self.field_data[field] = self.base_object[field][ind]
+                parent = getattr(self, "parent", self.base_object)
+                self.field_data[field] = parent[field][self._part_ind]
+            else:
+                self.field_data[field] = self.base_object[field][ind]
 
     @property
     def blocks(self):
@@ -822,6 +825,22 @@
                 np.logical_and(res, ind, ind)
         return ind
 
+    _particle_mask = None
+    @property
+    def _part_ind(self):
+        if self._particle_mask is None:
+            parent = getattr(self, "parent", self.base_object)
+            units = "code_length"
+            mask = points_in_cells(
+                self["x"].to(units), self["y"].to(units),
+                self["z"].to(units), self["dx"].to(units),
+                self["dy"].to(units), self["dz"].to(units),
+                parent["particle_position_x"].to(units),
+                parent["particle_position_y"].to(units),
+                parent["particle_position_z"].to(units))
+            self._particle_mask = mask
+        return self._particle_mask
+
     @property
     def icoords(self):
         return self.base_object.icoords[self._cond_ind,:]

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/fields/astro_fields.py
--- a/yt/fields/astro_fields.py
+++ b/yt/fields/astro_fields.py
@@ -136,7 +136,8 @@
     registry.add_field((ftype, "sz_kinetic"),
                        function=_sz_kinetic,
                        units=unit_system["length"]**-1,
-                       validators=[ValidateParameter("axis")])
+                       validators=[
+                           ValidateParameter("axis", {'axis': [0, 1, 2]})])
 
     def _szy(field, data):
         scale = 0.88 / mh * kboltz / (me * clight*clight) * sigma_thompson

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -23,6 +23,7 @@
     NeedsDataField, \
     NeedsProperty, \
     NeedsParameter, \
+    NeedsParameterValue, \
     FieldUnitsError
 from .field_detector import \
     FieldDetector
@@ -256,14 +257,21 @@
     pass
 
 class ValidateParameter(FieldValidator):
-    def __init__(self, parameters):
+    def __init__(self, parameters, parameter_values=None):
         """
         This validator ensures that the dataset has a given parameter.
+
+        If *parameter_values* is supplied, this will also ensure that the field
+        is available for all permutations of the field parameter.
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
+        self.parameter_values = parameter_values
     def __call__(self, data):
         doesnt_have = []
+        if self.parameter_values is not None:
+            if isinstance(data, FieldDetector):
+                raise NeedsParameterValue(self.parameter_values)
         for p in self.parameters:
             if not data.has_field_parameter(p):
                 doesnt_have.append(p)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -17,7 +17,8 @@
 from collections import defaultdict
 from yt.units.yt_array import YTArray
 from .field_exceptions import \
-    NeedsGridType
+    NeedsGridType, \
+    NeedsParameterValue
 
 class FieldDetector(defaultdict):
     Level = 1
@@ -26,7 +27,7 @@
     _id_offset = 0
     domain_id = 0
 
-    def __init__(self, nd = 16, ds = None, flat = False):
+    def __init__(self, nd = 16, ds = None, flat = False, field_parameters=None):
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
@@ -36,6 +37,7 @@
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = np.ones(3, "float64")
+        self.field_parameters = field_parameters
         class fake_dataset(defaultdict):
             pass
 
@@ -106,6 +108,32 @@
                 for i in nfd.requested_parameters:
                     if i not in self.requested_parameters:
                         self.requested_parameters.append(i)
+            except NeedsParameterValue as npv:
+                # redo field detection with a new FieldDetector, ensuring
+                # all needed field parameter values are set
+                for param in npv.parameter_values:
+                    # temporarily remove any ValidateParameter instances for
+                    # this field to avoid infinitely re-raising
+                    # NeedsParameterValue exceptions
+                    saved_validators = []
+                    for i, validator in enumerate(finfo.validators):
+                        params = getattr(validator, 'parameters', [])
+                        if param in params:
+                            saved_validators.append(validator)
+                            del finfo.validators[i]
+
+                    for pv in npv.parameter_values[param]:
+                        nfd = FieldDetector(self.nd, ds=self.ds,
+                                            field_parameters={param: pv})
+                        vv = finfo(nfd)
+                        for i in nfd.requested:
+                            if i not in self.requested:
+                                self.requested.append(i)
+                        for i in nfd.requested_parameters:
+                            if i not in self.requested_parameters:
+                                self.requested_parameters.append(i)
+
+                    finfo.validators.extend(saved_validators)
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
@@ -176,6 +204,8 @@
         }
 
     def get_field_parameter(self, param, default = 0.0):
+        if self.field_parameters and param in self.field_parameters:
+            return self.field_parameters[param]
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return self.ds.arr(np.random.random(3) * 1e-2, self.fp_units[param])

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/fields/field_exceptions.py
--- a/yt/fields/field_exceptions.py
+++ b/yt/fields/field_exceptions.py
@@ -46,6 +46,10 @@
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
+class NeedsParameterValue(ValidationException):
+    def __init__(self, parameter_values):
+        self.parameter_values = parameter_values
+
 class NeedsConfiguration(ValidationException):
     def __init__(self, parameter, value):
         self.parameter = parameter

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -90,6 +90,10 @@
                        particle_type = particle_type,
                        units = unit_system["number_density"])
 
+    return [(ftype, "%s_number_density" % species),
+            (ftype, "%s_density" % species),
+            (ftype, "%s_mass" % species)]
+
 def add_species_field_by_fraction(registry, ftype, species, 
                                   particle_type = False):
     """
@@ -114,6 +118,10 @@
                        particle_type = particle_type,
                        units = unit_system["number_density"])
 
+    return [(ftype, "%s_number_density" % species),
+            (ftype, "%s_density" % species),
+            (ftype, "%s_mass" % species)]
+
 def add_species_aliases(registry, ftype, alias_species, species):
     """
     This takes a field registry, a fluid type, and two species names.  

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ b/yt/frontends/art/api.py
@@ -17,7 +17,8 @@
       ARTDomainFile,\
       ARTDomainSubset,\
       ARTIndex,\
-      ARTDataset
+      ARTDataset, \
+      DarkMatterARTDataset
 
 from .fields import \
       ARTFieldInfo

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -633,6 +633,10 @@
             return False
         if not f.endswith(suffix):
             return False
+        if "s0" not in f:
+            # ATOMIC.DAT, for instance, passes the other tests, but then dies
+            # during _find_files because it can't be split.
+            return False
         with open(f, 'rb') as fh:
             try:
                 amr_prefix, amr_suffix = filename_pattern['amr']

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/eagle/tests/test_outputs.py
--- a/yt/frontends/eagle/tests/test_outputs.py
+++ b/yt/frontends/eagle/tests/test_outputs.py
@@ -24,3 +24,10 @@
 @requires_file(s28)
 def test_EagleDataset():
     assert isinstance(data_dir_load(s28), EagleDataset)
+
+s399 = "snipshot_399_z000p000/snip_399_z000p000.0.hdf5"
+@requires_file(s399)
+def test_Snipshot():
+    ds = data_dir_load(s399)
+    ds.index
+    assert isinstance(ds, EagleDataset)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -25,7 +25,6 @@
 
 isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
 isothermal_bin = "IsothermalCollapse/snap_505"
-g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
 
 # This maps from field names to weight field names to use for projections
 iso_fields = OrderedDict(
@@ -42,11 +41,6 @@
 )
 iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
 
-g64_fields = iso_fields.copy()
-g64_fields["deposit", "PartType4_density"] = None
-g64_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
-
-
 @requires_file(isothermal_h5)
 @requires_file(isothermal_bin)
 def test_GadgetDataset():
@@ -62,10 +56,3 @@
     for test in sph_answer(ds, 'snap_505', 2**17, iso_fields):
         test_iso_collapse.__name__ = test.description
         yield test
-
-@requires_ds(g64, big_data=True)
-def test_gizmo_64():
-    ds = data_dir_load(g64, kwargs=g64_kwargs)
-    for test in sph_answer(ds, 'snap_N64L16_135', 524288, g64_fields):
-        test_gizmo_64.__name__ = test.description
-        yield test

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -23,3 +23,5 @@
 
 from .io import \
       IOHandlerGDFHDF5
+
+from . import tests

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -99,6 +99,15 @@
               data[ptype, "%s_metallicity" % species]
 
         num_neighbors = 64
+        for species in ['H', 'H_p0', 'H_p1']:
+            for suf in ["_density", "_number_density"]:
+                field = "%s%s" % (species, suf)
+                fn = add_volume_weighted_smoothed_field(
+                    ptype, "particle_position", "particle_mass",
+                    "smoothing_length", "density", field,
+                    self, num_neighbors)
+                self.alias(("gas", field), fn[0])
+
         for species in self.nuclei_names:
             self.add_field(
                 (ptype, "%s_nuclei_mass_density" % species),

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/gizmo/tests/test_outputs.py
--- a/yt/frontends/gizmo/tests/test_outputs.py
+++ b/yt/frontends/gizmo/tests/test_outputs.py
@@ -34,13 +34,16 @@
         (('gas', 'velocity_magnitude'), None),
         (("deposit", "all_count"), None),
         (("deposit", "all_cic"), None),
+        (("deposit", "PartType0_density"), None),
     ]
 )
 
-@requires_ds(FIRE_m12i)
-def test_GizmoDataset():
-    ds = data_dir_load(FIRE_m12i)
+g64 = "gizmo_64/output/snap_N64L16_135.hdf5"
+
+@requires_ds(g64, big_data=True)
+def test_gizmo_64():
+    ds = data_dir_load(g64)
     assert isinstance(ds, GizmoDataset)
-    for test in sph_answer(ds, 'snapshot_600', 4786950, fields):
-        test_GizmoDataset.__name__ = test.description
+    for test in sph_answer(ds, 'snap_N64L16_135', 524288, fields):
+        test_gizmo_64.__name__ = test.description
         yield test

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/frontends/owls/fields.py
--- a/yt/frontends/owls/fields.py
+++ b/yt/frontends/owls/fields.py
@@ -76,8 +76,6 @@
 
         smoothed_suffixes = ("_number_density", "_density", "_mass")
 
-
-
         # we add particle element fields for stars and gas
         #-----------------------------------------------------
         if ptype in self._add_elements:
@@ -144,6 +142,9 @@
                     symbol = ion[0:1].capitalize()
                     roman = int(ion[1:])
 
+                if (ptype, symbol + "_fraction") not in self.field_aliases:
+                    continue
+
                 pstr = "_p" + str(roman-1)
                 yt_ion = symbol + pstr
 
@@ -166,6 +167,9 @@
                     symbol = ion[0:1].capitalize()
                     roman = int(ion[1:])
 
+                if (ptype, symbol + "_fraction") not in self.field_aliases:
+                    continue
+
                 pstr = "_p" + str(roman-1)
                 yt_ion = symbol + pstr
 
@@ -201,6 +205,9 @@
                 symbol = ion[0:1].capitalize()
                 roman = int(ion[1:])
 
+            if (ptype, symbol + "_fraction") not in self.field_aliases:
+                continue
+
             pstr = "_p" + str(roman-1)
             yt_ion = symbol + pstr
             ftype = ptype

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -963,3 +963,24 @@
     except ImportError:
         requests = None
     return requests
+
+@contextlib.contextmanager
+def dummy_context_manager(*args, **kwargs):
+    yield
+
+def matplotlib_style_context(style_name=None, after_reset=True):
+    """Returns a context manager for controlling matplotlib style.
+
+    Arguments are passed to matplotlib.style.context() if specified. Defaults
+    to setting "classic" style, after resetting to the default config parameters.
+
+    On older matplotlib versions (<=1.5.0) where matplotlib.style isn't
+    available, returns a dummy context manager.
+    """
+    if style_name is None:
+        style_name = 'classic'
+    try:
+        import matplotlib.style
+        return matplotlib.style.context(style_name, after_reset=after_reset)
+    except ImportError:
+        return dummy_context_manager()

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -29,6 +29,7 @@
 
 
 class CartesianCoordinateHandler(CoordinateHandler):
+    name = "cartesian"
 
     def __init__(self, ds, ordering = ('x','y','z')):
         super(CartesianCoordinateHandler, self).__init__(ds, ordering)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -71,6 +71,7 @@
                     ds.quan(width[0], fix_unitary(width[1])))
 
 class CoordinateHandler(object):
+    name = None
     
     def __init__(self, ds, ordering):
         self.ds = weakref.proxy(ds)
@@ -132,10 +133,13 @@
         self._axis_id = ai
         return ai
 
+    _image_axis_name = None
     @property
     def image_axis_name(self):
         # Default
-        rv = {}
+        if self._image_axis_name is not None:
+            return self._image_axis_name
+        self._image_axis_name = rv = {}
         for i in range(3):
             rv[i] = (self.axis_name[self.x_axis[i]],
                      self.axis_name[self.y_axis[i]])

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -29,6 +29,7 @@
 #
 
 class CylindricalCoordinateHandler(CoordinateHandler):
+    name = "cylindrical"
 
     def __init__(self, ds, ordering = ('r', 'z', 'theta')):
         super(CylindricalCoordinateHandler, self).__init__(ds, ordering)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/geographic_coordinates.py
--- a/yt/geometry/coordinates/geographic_coordinates.py
+++ b/yt/geometry/coordinates/geographic_coordinates.py
@@ -23,6 +23,7 @@
     pixelize_cylinder, pixelize_aitoff
 
 class GeographicCoordinateHandler(CoordinateHandler):
+    name = "geographic"
 
     def __init__(self, ds, ordering = ('latitude', 'longitude', 'altitude')):
         super(GeographicCoordinateHandler, self).__init__(ds, ordering)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/polar_coordinates.py
--- a/yt/geometry/coordinates/polar_coordinates.py
+++ b/yt/geometry/coordinates/polar_coordinates.py
@@ -18,6 +18,7 @@
 
 
 class PolarCoordinateHandler(CylindricalCoordinateHandler):
+    name = "polar"
 
     def __init__(self, ds, ordering = ('r', 'theta', 'z')):
         super(PolarCoordinateHandler, self).__init__(ds, ordering)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/spec_cube_coordinates.py
--- a/yt/geometry/coordinates/spec_cube_coordinates.py
+++ b/yt/geometry/coordinates/spec_cube_coordinates.py
@@ -20,6 +20,7 @@
     _get_coord_fields
 
 class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+    name = "spectral_cube"
 
     def __init__(self, ds, ordering = ('x', 'y', 'z')):
         ordering = tuple("xyz"[axis] for axis in

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/coordinates/spherical_coordinates.py
--- a/yt/geometry/coordinates/spherical_coordinates.py
+++ b/yt/geometry/coordinates/spherical_coordinates.py
@@ -23,6 +23,7 @@
     pixelize_cylinder, pixelize_aitoff
 
 class SphericalCoordinateHandler(CoordinateHandler):
+    name = "spherical"
 
     def __init__(self, ds, ordering = ('r', 'theta', 'phi')):
         super(SphericalCoordinateHandler, self).__init__(ds, ordering)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -2046,3 +2046,42 @@
         return ("halo_particles", self.halo_id)
 
 halo_particles_selector = HaloParticlesSelector
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def points_in_cells(
+        np.float64_t[:] cx,
+        np.float64_t[:] cy,
+        np.float64_t[:] cz,
+        np.float64_t[:] dx,
+        np.float64_t[:] dy,
+        np.float64_t[:] dz,
+        np.float64_t[:] px,
+        np.float64_t[:] py,
+        np.float64_t[:] pz):
+    # Take a list of cells and particles and calculate which particles
+    # are enclosed within one of the cells.  This is used for querying
+    # particle fields on clump/contour objects.
+    # We use brute force since the cells are a relatively unordered collection.
+
+    cdef int p, c, n_p, n_c
+
+    n_p = px.size
+    n_c = cx.size
+    mask = np.ones(n_p, dtype="bool")
+
+    for p in range(n_p):
+        for c in range(n_c):
+            if fabs(px[p] - cx[c]) > 0.5 * dx[c]:
+                mask[p] = False
+                continue
+            if fabs(py[p] - cy[c]) > 0.5 * dy[c]:
+                mask[p] = False
+                continue
+            if fabs(pz[p] - cz[c]) > 0.5 * dz[c]:
+                mask[p] = False
+                continue
+            if mask[p]: break
+
+    return mask

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -477,6 +477,9 @@
     test_unit = Unit('cm**-3', base_value=1.0, registry=ds.unit_registry)
     assert_equal(test_unit.latex_repr, '\\frac{1}{\\rm{cm}^{3}}')
 
+    test_unit = Unit('m_geom/l_geom**3')
+    assert_equal(test_unit.latex_repr, '\\frac{1}{M_\\odot^{2}}')
+
 def test_latitude_longitude():
     lat = unit_symbols.lat
     lon = unit_symbols.lon

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -626,6 +626,29 @@
     new_length = fix_length(length, ds=ds)
     yield assert_equal, YTQuantity(10, 'cm'), new_length
 
+def test_code_unit_combinations():
+    """
+    Test comparing code units coming from different datasets
+    """
+    ds1 = fake_random_ds(64, nprocs=1, length_unit=1)
+    ds2 = fake_random_ds(64, nprocs=1, length_unit=10)
+
+    q1 = ds1.quan(1, 'code_length')
+    q2 = ds2.quan(1, 'code_length')
+
+    assert_equal(10*q1, q2)
+    assert_equal(q1/q2, 0.1)
+    assert_true(q1 < q2)
+    assert_true(q2 > q1)
+    assert_true(not bool(q1 > q2))
+    assert_true(not bool(q2 < q1))
+    assert_true(q1 != q2)
+    assert_true(not bool(q1 == q2))
+
+    assert_equal((q1 + q2).in_cgs().value, 11)
+    assert_equal((q2 + q1).in_cgs().value, 11)
+    assert_equal((q1 - q2).in_cgs().value, -9)
+    assert_equal((q2 - q1).in_cgs().value, 9)
 
 def test_ytarray_pickle():
     ds = fake_random_ds(64, nprocs=1)

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -23,7 +23,7 @@
     planck_charge_esu, planck_energy_erg, planck_mass_grams, \
     planck_temperature_K, planck_time_s, mass_hydrogen_grams, \
     grams_per_pound, standard_gravity_cm_per_s2, pascal_per_atm, \
-    newton_cgs
+    newton_cgs, cm_per_rearth, cm_per_rjup
 import numpy as np
 
 # Lookup a unit symbol with the symbol string, and provide a tuple with the
@@ -87,6 +87,8 @@
     "msun": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
     "Rsun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"),
     "rsun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"),
+    "R_sun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"),
+    "r_sun": (cm_per_rsun, dimensions.length, 0.0, r"R_\odot"),
     "Lsun": (luminosity_sun_ergs_per_sec, dimensions.power, 0.0, r"L_\odot"),
     "Tsun": (temp_sun_kelvin, dimensions.temperature, 0.0, r"T_\odot"),
     "Zsun": (metallicity_sun, dimensions.dimensionless, 0.0, r"Z_\odot"),
@@ -151,6 +153,12 @@
     "m_geom": (mass_sun_grams, dimensions.mass, 0.0, r"M_\odot"),
     "l_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**2, dimensions.length, 0.0, r"M_\odot"),
     "t_geom": (newton_cgs*mass_sun_grams/speed_of_light_cm_per_s**3, dimensions.time, 0.0, r"M_\odot"),
+
+    # Some Solar System units
+    "R_earth": (cm_per_rearth, dimensions.length, 0.0, r"R_\oplus"),
+    "r_earth": (cm_per_rearth, dimensions.length, 0.0, r"R_\oplus"),
+    "R_jup": (cm_per_rjup, dimensions.length, 0.0, r"R_\mathrm{Jup}"),
+    "r_jup": (cm_per_rjup, dimensions.length, 0.0, r"R_\mathrm{Jup}"),
 }
 
 # This dictionary formatting from magnitude package, credit to Juan Reyero.

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -110,8 +110,25 @@
             symbol_table[ex] = registry.lut[str(ex)][3]
         except:
             symbol_table[ex] = r"\rm{" + str(ex).replace('_', '\ ') + "}"
+
+    # invert the symbol table dict to look for keys with identical values
+    invert_symbols = {}
+    for key, value in symbol_table.items():
+        if value not in invert_symbols:
+            invert_symbols[value] = [key]
+        else:
+            invert_symbols[value].append(key)
+
+    # if there are any units with identical latex representations, substitute
+    # units to avoid  uncanceled terms in the final latex expresion.
+    for val in invert_symbols:
+        symbols = invert_symbols[val]
+        for i in range(1, len(symbols)):
+            expr = expr.subs(symbols[i], symbols[0])
+
     latex_repr = latex(expr, symbol_names=symbol_table, mul_symbol="dot",
                        fold_frac_powers=True, fold_short_frac=True)
+
     if latex_repr == '1':
         return ''
     else:
@@ -258,7 +275,11 @@
     def latex_repr(self):
         if self._latex_repr is not None:
             return self._latex_repr
-        self._latex_repr = get_latex_representation(self.expr, self.registry)
+        if self.expr.is_Atom:
+            expr = self.expr
+        else:
+            expr = self.expr.copy()
+        self._latex_repr = get_latex_representation(expr, self.registry)
         return self._latex_repr
 
     ### Some sympy conventions

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/unit_symbols.py
--- a/yt/units/unit_symbols.py
+++ b/yt/units/unit_symbols.py
@@ -116,11 +116,11 @@
 
 Msun = solar_mass = quan(1.0, "Msun")
 msun = quan(1.0, "msun")
-Rsun = solar_radius = quan(1.0, "Rsun")
-rsun = quan(1.0, "rsun")
-Lsun = lsun = solar_luminosity = quan(1.0, "Lsun")
-Tsun = solar_temperature = quan(1.0, "Tsun")
-Zsun = solar_metallicity = quan(1.0, "Zsun")
+Rsun = R_sun = solar_radius = quan(1.0, "Rsun")
+rsun = r_sun = quan(1.0, "rsun")
+Lsun = lsun = l_sun = solar_luminosity = quan(1.0, "Lsun")
+Tsun = T_sun = solar_temperature = quan(1.0, "Tsun")
+Zsun = Z_sun = solar_metallicity = quan(1.0, "Zsun")
 
 #
 # Misc Astronomical units
@@ -129,6 +129,10 @@
 AU = astronomical_unit = quan(1.0, "AU")
 au = quan(1.0, "au")
 ly = light_year = quan(1.0, "ly")
+Rearth = R_earth = earth_radius = quan(1.0, 'R_earth')
+rearth = r_earth = quan(1.0, 'r_earth')
+Rjup = R_jup = jupiter_radius = quan(1.0, 'R_jup')
+rjup = r_jup = quan(1.0, 'r_jup')
 
 #
 # Physical units

diff -r ca819d59f0e41fce50b1fb28ef8e18ac7a25bdf7 -r f90f0090a782564903c8a64cdcb79bd55db5587b yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -167,7 +167,8 @@
     # Check that other is a YTArray.
     if hasattr(other, 'units'):
         if this.units.expr is other.units.expr:
-            return other
+            if this.units.base_value == other.units.base_value:
+                return other
         if not this.units.same_dimensions_as(other.units):
             raise YTUnitOperationError(op_string, this.units, other.units)
         return other.in_units(this.units)

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/47669dc82f69/
Changeset:   47669dc82f69
Branch:      yt
User:        charlie_watson
Date:        2016-06-16 21:04:18+00:00
Summary:     deleting erroneous comment and eliminating extra Node import
Affected #:  2 files

diff -r f90f0090a782564903c8a64cdcb79bd55db5587b -r 47669dc82f69fc1826880e8d26c25e7c125f94b2 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -19,8 +19,6 @@
     int dim
     np.float64_t pos
 
-#cdef class Node
-
 cdef class Node:
     cdef public Node left
     cdef public Node right

diff -r f90f0090a782564903c8a64cdcb79bd55db5587b -r 47669dc82f69fc1826880e8d26c25e7c125f94b2 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -14,7 +14,9 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.lib.amr_kdtools import Node
+# Much like the test that uses the import statement below, this is also disabled
+# until needed. Fido was taking issue with it so it has been commented out.
+#from yt.utilities.lib.amr_kdtools import Node
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr


https://bitbucket.org/yt_analysis/yt/commits/7bf1796eef82/
Changeset:   7bf1796eef82
Branch:      yt
User:        charlie_watson
Date:        2016-06-17 01:12:31+00:00
Summary:     removed commented out import in test_amr_kdtree.py
Affected #:  1 file

diff -r 47669dc82f69fc1826880e8d26c25e7c125f94b2 -r 7bf1796eef82c4e526c1118177ee2b0b80dce7b0 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -14,9 +14,6 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-# Much like the test that uses the import statement below, this is also disabled
-# until needed. Fido was taking issue with it so it has been commented out.
-#from yt.utilities.lib.amr_kdtools import Node
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr


https://bitbucket.org/yt_analysis/yt/commits/5fc7e0730583/
Changeset:   5fc7e0730583
Branch:      yt
User:        charlie_watson
Date:        2016-06-27 22:50:57+00:00
Summary:     adding explicit Node definition to speed up should_i_build
Affected #:  1 file

diff -r 7bf1796eef82c4e526c1118177ee2b0b80dce7b0 -r 5fc7e0730583f85c79dd7cc09e75cb1378ff2c5b yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -674,6 +674,7 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_build(self, int rank, int size):
+    Node self
     if (self.node_id < size) or (self.node_id >= 2*size):
         return 1
     elif self.node_id - size == rank:


https://bitbucket.org/yt_analysis/yt/commits/19ea66b61c5f/
Changeset:   19ea66b61c5f
Branch:      yt
User:        charlie_watson
Date:        2016-06-28 00:53:37+00:00
Summary:     adding cdef to prior commit
Affected #:  1 file

diff -r 5fc7e0730583f85c79dd7cc09e75cb1378ff2c5b -r 19ea66b61c5ff8584c69dc7687d5229175d37f32 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -674,10 +674,11 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_build(self, int rank, int size):
-    Node self
-    if (self.node_id < size) or (self.node_id >= 2*size):
+    cdef Node node
+    node = self
+    if (node.node_id < size) or (node.node_id >= 2*size):
         return 1
-    elif self.node_id - size == rank:
+    elif node.node_id - size == rank:
         return 1
     else:
         return 0


https://bitbucket.org/yt_analysis/yt/commits/f499a0db58f4/
Changeset:   f499a0db58f4
Branch:      yt
User:        charlie_watson
Date:        2016-06-28 01:27:32+00:00
Summary:     changing name self to node
Affected #:  1 file

diff -r 19ea66b61c5ff8584c69dc7687d5229175d37f32 -r f499a0db58f4c258c2151aac572b942d3502f87f yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -673,9 +673,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef int should_i_build(self, int rank, int size):
-    cdef Node node
-    node = self
+cdef int should_i_build(Node node, int rank, int size):
     if (node.node_id < size) or (node.node_id >= 2*size):
         return 1
     elif node.node_id - size == rank:


https://bitbucket.org/yt_analysis/yt/commits/3ca8a739e529/
Changeset:   3ca8a739e529
Branch:      yt
User:        MatthewTurk
Date:        2016-06-28 15:29:48+00:00
Summary:     Merged in charlie_watson/yt (pull request #2240)

Changing amr_kdtree functions to be Node class methods
Affected #:  5 files

diff -r d48c4a143f6e3c69c9a1016725d8328f8ea3af53 -r 3ca8a739e5296422ea7954ce8d2717908be25dd6 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -23,19 +23,7 @@
     receive_and_reduce, \
     send_to_parent, \
     scatter_image
-from yt.utilities.lib.amr_kdtools import \
-    Node, \
-    add_grids, \
-    find_node, \
-    kd_is_leaf, \
-    set_dirty, \
-    depth_traverse, \
-    depth_first_touch, \
-    kd_traverse, \
-    get_left_edge, \
-    get_right_edge, \
-    kd_sum_volume, \
-    kd_node_check
+from yt.utilities.lib.amr_kdtools import Node
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -95,7 +83,7 @@
         gles = np.array([g.LeftEdge for g in grids])
         gres = np.array([g.RightEdge for g in grids])
         gids = np.array([g.id for g in grids], dtype="int64")
-        add_grids(self.trunk, gids.size, gles, gres, gids,
+        self.trunk.add_grids(gids.size, gles, gres, gids,
                     self.comm_rank, self.comm_size)
         del gles, gres, gids, grids
 
@@ -108,14 +96,14 @@
             self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self.trunk):
+        for node in self.trunk.depth_traverse():
             if node.grid == -1:
                 continue
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            nle = self.ds.arr(get_left_edge(node), input_units="code_length")
-            nre = self.ds.arr(get_right_edge(node), input_units="code_length")
+            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
+            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -125,22 +113,22 @@
             # print grid, dims, li, ri
 
         # Calculate the Volume
-        vol = kd_sum_volume(self.trunk)
+        vol = self.trunk.kd_sum_volume()
         mylog.debug('AMRKDTree volume = %e' % vol)
-        kd_node_check(self.trunk)
+        self.trunk.kd_node_check()
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self.trunk):
+        for node in self.trunk.depth_traverse():
             if node.grid == -1:
                 continue
-            if not all_cells and not kd_is_leaf(node):
+            if not all_cells and not node.kd_is_leaf():
                 continue
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            nle = self.ds.arr(get_left_edge(node), input_units="code_length")
-            nre = self.ds.arr(get_right_edge(node), input_units="code_length")
+            nle = self.ds.arr(node.get_left_edge(), input_units="code_length")
+            nre = self.ds.arr(node.get_right_edge(), input_units="code_length")
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -186,7 +174,7 @@
         regenerate_data = self.fields is None or \
                           len(self.fields) != len(new_fields) or \
                           self.fields != new_fields or force
-        set_dirty(self.tree.trunk, regenerate_data)
+        self.tree.trunk.set_dirty(regenerate_data)
         self.fields = new_fields
 
         if self.log_fields is not None and not regenerate_data:
@@ -214,18 +202,18 @@
         self.set_fields(fields, log_fields, no_ghost)
 
     def traverse(self, viewpoint=None):
-        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+        for node in self.tree.trunk.kd_traverse(viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
     def slice_traverse(self, viewpoint = None):
         if not hasattr(self.ds.index, "grid"):
             raise NotImplementedError
-        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+        for node in self.tree.trunk.kd_traverse(viewpoint=viewpoint):
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge.in_units("code_length").ndarray_view()
-            nle = get_left_edge(node)
-            nre = get_right_edge(node)
+            nle = node.get_left_edge()
+            nre = node.get_right_edge()
             li = np.rint((nle-gle)/dds).astype('int32')
             ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
@@ -248,7 +236,7 @@
         return temp
 
     def locate_node(self, pos):
-        return find_node(self.tree.trunk, pos)
+        return self.tree.trunk.find_node(pos)
 
     def get_reduce_owners(self):
         owners = {}
@@ -290,8 +278,8 @@
         grid = self.ds.index.grids[node.grid - self._id_offset]
         dds = grid.dds.ndarray_view()
         gle = grid.LeftEdge.ndarray_view()
-        nle = get_left_edge(node)
-        nre = get_right_edge(node)
+        nle = node.get_left_edge()
+        nre = node.get_right_edge()
         li = np.rint((nle-gle)/dds).astype('int32')
         ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
@@ -434,7 +422,7 @@
         if self.comm.rank != 0:
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
-        for node in depth_traverse(self.tree):
+        for node in self.tree.depth_traverse():
             i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
@@ -455,7 +443,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         try:
             f = h5py.File(fn,"a")
-            for node in depth_traverse(self.tree):
+            for node in self.tree.depth_traverse():
                 i = node.node_id
                 if node.grid != -1:
                     data = [f["brick_%s_%s" %
@@ -506,7 +494,7 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree.trunk):
+        for node in self.tree.trunk.depth_first_touch():
             nids.append(node.node_id)
             les.append(node.get_left_edge())
             res.append(node.get_right_edge())
@@ -562,11 +550,11 @@
             if splitdims[i] != -1:
                 n.create_split(splitdims[i], splitposs[i])
 
-        mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
+        mylog.info('AMRKDTree rebuilt, Final Volume: %e' % self.tree.trunk.kd_sum_volume())
         return self.tree.trunk
 
     def count_volume(self):
-        return kd_sum_volume(self.tree.trunk)
+        return self.tree.trunk.kd_sum_volume()
 
     def count_cells(self):
         return self.tree.sum_cells()
@@ -581,6 +569,6 @@
     hv = AMRKDTree(ds)
     t2 = time()
 
-    print(kd_sum_volume(hv.tree.trunk))
-    print(kd_node_check(hv.tree.trunk))
+    print(hv.tree.trunk.kd_sum_volume())
+    print(hv.tree.trunk.kd_node_check())
     print('Time: %e seconds' % (t2-t1))

diff -r d48c4a143f6e3c69c9a1016725d8328f8ea3af53 -r 3ca8a739e5296422ea7954ce8d2717908be25dd6 yt/utilities/lib/amr_kdtools.pxd
--- a/yt/utilities/lib/amr_kdtools.pxd
+++ b/yt/utilities/lib/amr_kdtools.pxd
@@ -19,8 +19,6 @@
     int dim
     np.float64_t pos
 
-cdef class Node
-
 cdef class Node:
     cdef public Node left
     cdef public Node right
@@ -34,7 +32,51 @@
     cdef public data
     cdef Split * split
     cdef int level
-
-cdef int point_in_node(Node node, np.float64_t[:] point)
-cdef Node _find_node(Node node, np.float64_t[:] point)
-cdef int _kd_is_leaf(Node node)
+    cdef int point_in_node(self, np.float64_t[:] point)
+    cdef Node _find_node(self, np.float64_t[:] point)
+    cdef int _kd_is_leaf(self)
+    cdef add_grid(self, np.float64_t[:] gle, np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size)
+    cdef insert_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int grid_id,
+                       int rank,
+                       int size)
+    cpdef add_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef int should_i_split(self, int rank, int size)
+    cdef void insert_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef split_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size)
+    cdef int split_grids(self,
+                       int ngrids,
+                       np.float64_t[:,:] gles,
+                       np.float64_t[:,:] gres,
+                       np.int64_t[:] gids,
+                       int rank,
+                       int size)
+    cdef geo_split(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int grid_id,
+                       int rank,
+                       int size)
+    cdef void divide(self, Split * split)

diff -r d48c4a143f6e3c69c9a1016725d8328f8ea3af53 -r 3ca8a739e5296422ea7954ce8d2717908be25dd6 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -67,12 +67,6 @@
         else:
             return np.nan
 
-    def get_left_edge(self):
-        return get_left_edge(self)
-
-    def get_right_edge(self):
-        return get_right_edge(self)
-
     def set_left_edge(self, np.float64_t[:] left_edge):
         cdef int i
         for i in range(3):
@@ -92,17 +86,571 @@
     def __dealloc__(self):
         if self.split != NULL: free(self.split)
 
-def get_left_edge(Node node):
-    le = np.empty(3, dtype='float64')
-    for i in range(3):
-        le[i] = node.left_edge[i]
-    return le
+    # Begin input of converted methods
 
-def get_right_edge(Node node):
-    re = np.empty(3, dtype='float64')
-    for i in range(3):
-        re[i] = node.right_edge[i]
-    return re
+    def get_left_edge(self):
+        le = np.empty(3, dtype='float64')
+        for i in range(3):
+            le[i] = self.left_edge[i]
+        return le
+
+    def get_right_edge(self):
+        re = np.empty(3, dtype='float64')
+        for i in range(3):
+            re[i] = self.right_edge[i]
+        return re
+
+    def set_dirty(self, bint state):
+        cdef Node node
+        for node in self.depth_traverse():
+            node.dirty = state
+
+    def kd_traverse(self, viewpoint=None):
+        cdef Node node
+        if viewpoint is None:
+            for node in self.depth_traverse():
+                if node._kd_is_leaf() == 1 and node.grid != -1:
+                    yield node
+        else:
+            for node in self.viewpoint_traverse(viewpoint):
+                if node._kd_is_leaf() == 1 and node.grid != -1:
+                    yield node
+
+    def add_pygrid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size):
+
+        """
+        The entire purpose of this function is to move everything from ndarrays
+        to internal C pointers.
+        """
+        cdef np.float64_t[:] pgles = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        cdef np.float64_t[:] pgres = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        cdef int j
+        for j in range(3):
+            pgles[j] = gle[j]
+            pgres[j] = gre[j]
+
+        self.add_grid(pgles, pgres, gid, rank, size)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef add_grid(self,
+                       np.float64_t[:] gle,
+                       np.float64_t[:] gre,
+                       int gid,
+                       int rank,
+                       int size):
+
+        if not should_i_build(self, rank, size):
+            return
+
+        if self._kd_is_leaf() == 1:
+            self.insert_grid(gle, gre, gid, rank, size)
+        else:
+            less_id = gle[self.split.dim] < self.split.pos
+            if less_id:
+                self.left.add_grid(gle, gre,
+                         gid, rank, size)
+
+            greater_id = gre[self.split.dim] > self.split.pos
+            if greater_id:
+                self.right.add_grid(gle, gre,
+                         gid, rank, size)
+        return
+
+
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef insert_grid(self,
+                    np.float64_t[:] gle,
+                    np.float64_t[:] gre,
+                    int grid_id,
+                    int rank,
+                    int size):
+        if not should_i_build(self, rank, size):
+            return
+
+        # If we should continue to split based on parallelism, do so!
+        if self.should_i_split(rank, size):
+            self.geo_split(gle, gre, grid_id, rank, size)
+            return
+
+        cdef int contained = 1
+        for i in range(3):
+            if gle[i] > self.left_edge[i] or\
+               gre[i] < self.right_edge[i]:
+                contained *= 0
+
+        if contained == 1:
+            self.grid = grid_id
+            assert(self.grid != -1)
+            return
+
+        # Split the grid
+        cdef int check = self.split_grid(gle, gre, grid_id, rank, size)
+        # If check is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if check == -1:
+            self.grid = -1
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cpdef add_grids(self,
+                        int ngrids,
+                        np.float64_t[:,:] gles,
+                        np.float64_t[:,:] gres,
+                        np.int64_t[:] gids,
+                        int rank,
+                        int size):
+        cdef int i, j, nless, ngreater, index
+        cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+        cdef np.int64_t[:] l_ids, g_ids
+        if not should_i_build(self, rank, size):
+            return
+
+        if self._kd_is_leaf() == 1:
+            self.insert_grids(ngrids, gles, gres, gids, rank, size)
+            return
+
+        less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+        greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+        nless = 0
+        ngreater = 0
+        for i in range(ngrids):
+            if gles[i, self.split.dim] < self.split.pos:
+                less_ids[nless] = i
+                nless += 1
+
+            if gres[i, self.split.dim] > self.split.pos:
+                greater_ids[ngreater] = i
+                ngreater += 1
+
+        #print 'nless: %i' % nless
+        #print 'ngreater: %i' % ngreater
+
+        if nless > 0:
+            less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+            for i in range(nless):
+                index = less_ids[i]
+                l_ids[i] = gids[index]
+                for j in range(3):
+                    less_gles[i,j] = gles[index,j]
+                    less_gres[i,j] = gres[index,j]
+
+            self.left.add_grids(nless, less_gles, less_gres,
+                      l_ids, rank, size)
+
+        if ngreater > 0:
+            greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+            for i in range(ngreater):
+                index = greater_ids[i]
+                g_ids[i] = gids[index]
+                for j in range(3):
+                    greater_gles[i,j] = gles[index,j]
+                    greater_gres[i,j] = gres[index,j]
+
+            self.right.add_grids(ngreater, greater_gles, greater_gres,
+                      g_ids, rank, size)
+
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int should_i_split(self, int rank, int size):
+        if self.node_id < size and self.node_id > 0:
+            return 1
+        return 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void insert_grids(self,
+                           int ngrids,
+                           np.float64_t[:,:] gles,
+                           np.float64_t[:,:] gres,
+                           np.int64_t[:] gids,
+                           int rank,
+                           int size):
+
+        if not should_i_build(self, rank, size) or ngrids == 0:
+            return
+        cdef int contained = 1
+        cdef int check
+
+        if ngrids == 1:
+            # If we should continue to split based on parallelism, do so!
+            if self.should_i_split(rank, size):
+                self.geo_split(gles[0,:], gres[0,:], gids[0], rank, size)
+                return
+
+            for i in range(3):
+                contained *= gles[0,i] <= self.left_edge[i]
+                contained *= gres[0,i] >= self.right_edge[i]
+
+            if contained == 1:
+                # print 'Node fully contained, setting to grid: %i' % gids[0]
+                self.grid = gids[0]
+                assert(self.grid != -1)
+                return
+
+        # Split the grids
+        check = self.split_grids(ngrids, gles, gres, gids, rank, size)
+        # If check is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if check == -1:
+            self.grid = -1
+        return
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef split_grid(self,
+                   np.float64_t[:] gle,
+                   np.float64_t[:] gre,
+                   int gid,
+                   int rank,
+                   int size):
+
+        cdef int j
+        cdef np.uint8_t[:] less_ids, greater_ids
+        data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
+        for j in range(3):
+            data[0,0,j] = gle[j]
+            data[0,1,j] = gre[j]
+
+        less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+        greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
+
+        best_dim, split_pos, nless, ngreater = \
+            kdtree_get_choices(1, data, self.left_edge, self.right_edge,
+                              less_ids, greater_ids)
+
+        # If best_dim is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if best_dim == -1:
+            return -1
+
+
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = best_dim
+        split.pos = split_pos
+
+        # Create a Split
+        self.divide(split)
+
+        # Populate Left Node
+        #print 'Inserting left node', self.left_edge, self.right_edge
+        if nless == 1:
+            self.left.insert_grid(gle, gre,
+                         gid, rank, size)
+
+        # Populate Right Node
+        #print 'Inserting right node', self.left_edge, self.right_edge
+        if ngreater == 1:
+            self.right.insert_grid(gle, gre,
+                         gid, rank, size)
+
+        return 0
+
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    #@cython.cdivision(True)
+    cdef int split_grids(self,
+                           int ngrids,
+                           np.float64_t[:,:] gles,
+                           np.float64_t[:,:] gres,
+                           np.int64_t[:] gids,
+                           int rank,
+                           int size):
+        # Find a Split
+        cdef int i, j, index
+        cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
+        cdef np.int64_t[:] l_ids, g_ids
+        if ngrids == 0: return 0
+
+        data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
+
+        for i in range(ngrids):
+            for j in range(3):
+                data[i,0,j] = gles[i,j]
+                data[i,1,j] = gres[i,j]
+
+        less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+        greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
+
+        best_dim, split_pos, nless, ngreater = \
+            kdtree_get_choices(ngrids, data, self.left_edge, self.right_edge,
+                              less_ids, greater_ids)
+
+
+        # If best_dim is -1, then we have found a place where there are no choices.
+        # Exit out and set the node to None.
+        if best_dim == -1:
+            print 'Failed to split grids.'
+            return -1
+
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = best_dim
+        split.pos = split_pos
+
+        # Create a Split
+        self.divide(split)
+
+        less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+        greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
+
+        nless = 0
+        ngreater = 0
+        for i in range(ngrids):
+            if less_ids[i] == 1:
+                less_index[nless] = i
+                nless += 1
+
+            if greater_ids[i] == 1:
+                greater_index[ngreater] = i
+                ngreater += 1
+
+        if nless > 0:
+            less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
+            l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
+
+            for i in range(nless):
+                index = less_index[i]
+                l_ids[i] = gids[index]
+                for j in range(3):
+                    less_gles[i,j] = gles[index,j]
+                    less_gres[i,j] = gres[index,j]
+
+            # Populate Left Node
+            #print 'Inserting left node', self.left_edge, self.right_edge
+            self.left.insert_grids(nless, less_gles, less_gres,
+                         l_ids, rank, size)
+
+        if ngreater > 0:
+            greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
+            g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
+
+            for i in range(ngreater):
+                index = greater_index[i]
+                g_ids[i] = gids[index]
+                for j in range(3):
+                    greater_gles[i,j] = gles[index,j]
+                    greater_gres[i,j] = gres[index,j]
+
+            # Populate Right Node
+            #print 'Inserting right node', self.left_edge, self.right_edge
+            self.right.insert_grids(ngreater, greater_gles, greater_gres,
+                         g_ids, rank, size)
+
+        return 0
+
+    cdef geo_split(self,
+                   np.float64_t[:] gle,
+                   np.float64_t[:] gre,
+                   int grid_id,
+                   int rank,
+                   int size):
+        cdef int big_dim = 0
+        cdef int i
+        cdef np.float64_t v, my_max = 0.0
+
+        for i in range(3):
+            v = gre[i] - gle[i]
+            if v > my_max:
+                my_max = v
+                big_dim = i
+
+        new_pos = (gre[big_dim] + gle[big_dim])/2.
+
+        lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+        rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
+
+        for j in range(3):
+            lnew_gle[j] = gle[j]
+            lnew_gre[j] = gre[j]
+            rnew_gle[j] = gle[j]
+            rnew_gre[j] = gre[j]
+
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = big_dim
+        split.pos = new_pos
+
+        # Create a Split
+        self.divide(split)
+
+        #lnew_gre[big_dim] = new_pos
+        # Populate Left Node
+        #print 'Inserting left node', self.left_edge, self.right_edge
+        self.left.insert_grid(lnew_gle, lnew_gre,
+                grid_id, rank, size)
+
+        #rnew_gle[big_dim] = new_pos
+        # Populate Right Node
+        #print 'Inserting right node', self.left_edge, self.right_edge
+        self.right.insert_grid(rnew_gle, rnew_gre,
+                grid_id, rank, size)
+        return
+
+    cdef void divide(self, Split * split):
+        # Create a Split
+        self.split = split
+
+        cdef np.float64_t[:] le = np.empty(3, dtype='float64')
+        cdef np.float64_t[:] re = np.empty(3, dtype='float64')
+
+        cdef int i
+        for i in range(3):
+            le[i] = self.left_edge[i]
+            re[i] = self.right_edge[i]
+        re[split.dim] = split.pos
+
+        self.left = Node(self, None, None,
+                         le, re, self.grid,
+                         _lchild_id(self.node_id))
+
+        re[split.dim] = self.right_edge[split.dim]
+        le[split.dim] = split.pos
+        self.right = Node(self, None, None,
+                          le, re, self.grid,
+                          _rchild_id(self.node_id))
+
+        return
+    #
+    def kd_sum_volume(self):
+        cdef np.float64_t vol = 1.0
+        if (self.left is None) and (self.right is None):
+            if self.grid == -1:
+                return 0.0
+            for i in range(3):
+                vol *= self.right_edge[i] - self.left_edge[i]
+            return vol
+        else:
+            return self.left.kd_sum_volume() + self.right.kd_sum_volume()
+
+    def kd_node_check(self):
+        assert (self.left is None) == (self.right is None)
+        if (self.left is None) and (self.right is None):
+            if self.grid != -1:
+                return np.prod(self.right_edge - self.left_edge)
+            else: return 0.0
+        else:
+            return self.left.kd_node_check()+self.right.kd_node_check()
+
+    def kd_is_leaf(self):
+        cdef int has_l_child = self.left == None
+        cdef int has_r_child = self.right == None
+        assert has_l_child == has_r_child
+        return has_l_child
+
+    cdef int _kd_is_leaf(self):
+        if self.left is None or self.right is None:
+            return 1
+        return 0
+
+    def depth_traverse(self, max_node=None):
+        '''
+        Yields a depth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = self
+        previous = None
+        if max_node is None:
+            max_node = np.inf
+        while current is not None:
+            yield current
+            current, previous = step_depth(current, previous)
+            if current is None: break
+            if current.node_id >= max_node:
+                current = current.parent
+                previous = current.right
+
+    def depth_first_touch(self, max_node=None):
+        '''
+        Yields a depth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = self
+        previous = None
+        if max_node is None:
+            max_node = np.inf
+        while current is not None:
+            if previous is None or previous.parent != current:
+                yield current
+            current, previous = step_depth(current, previous)
+            if current is None: break
+            if current.node_id >= max_node:
+                current = current.parent
+                previous = current.right
+
+    def breadth_traverse(self):
+        '''
+        Yields a breadth-first traversal of the kd tree always going to
+        the left child before the right.
+        '''
+        current = self
+        previous = None
+        while current is not None:
+            yield current
+            current, previous = step_depth(current, previous)
+
+
+    def viewpoint_traverse(self, viewpoint):
+        '''
+        Yields a viewpoint dependent traversal of the kd-tree.  Starts
+        with nodes furthest away from viewpoint.
+        '''
+
+        current = self
+        previous = None
+        while current is not None:
+            yield current
+            current, previous = step_viewpoint(current, previous, viewpoint)
+
+    cdef int point_in_node(self,
+                           np.float64_t[:] point):
+        cdef int i
+        cdef int inside = 1
+        for i in range(3):
+            inside *= self.left_edge[i] <= point[i]
+            inside *= self.right_edge[i] > point[i]
+        return inside
+
+    cdef Node _find_node(self, np.float64_t[:] point):
+        while self._kd_is_leaf() == 0:
+            if point[self.split.dim] < self.split.pos:
+                self = self.left
+            else:
+                self = self.right
+        return self
+
+    def find_node(self,
+                  np.float64_t[:] point):
+        """
+        Find the AMRKDTree node enclosing a position
+        """
+        assert(self.point_in_node(point))
+        return self._find_node(point)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -133,270 +681,84 @@
     else:
         return 0
 
-def set_dirty(Node trunk, bint state):
-    for node in depth_traverse(trunk):
-        node.dirty = state
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if current._kd_is_leaf() == 1: # At a leaf, move back up
+        previous = current
+        current = current.parent
 
-def kd_traverse(Node trunk, viewpoint=None):
-    if viewpoint is None:
-        for node in depth_traverse(trunk):
-            if _kd_is_leaf(node) == 1 and node.grid != -1:
-                yield node
-    else:
-        for node in viewpoint_traverse(trunk, viewpoint):
-            if _kd_is_leaf(node) == 1 and node.grid != -1:
-                yield node
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef add_grid(Node node,
-                   np.float64_t[:] gle,
-                   np.float64_t[:] gre,
-                   int gid,
-                   int rank,
-                   int size):
+    elif current.left is previous: # Moving up from left, go right
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
 
-    if not should_i_build(node, rank, size):
-        return
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
 
-    if _kd_is_leaf(node) == 1:
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
+    return current, previous
 
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-    return
+def step_viewpoint(Node current,
+                   Node previous,
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if current._kd_is_leaf() == 1: # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
 
-def add_pygrid(Node node,
-                   np.float64_t[:] gle,
-                   np.float64_t[:] gre,
-                   int gid,
-                   int rank,
-                   int size):
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
 
-    """
-    The entire purpose of this function is to move everything from ndarrays
-    to internal C pointers.
-    """
-    cdef np.float64_t[:] pgles = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    cdef np.float64_t[:] pgres = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    cdef int j
-    for j in range(3):
-        pgles[j] = gle[j]
-        pgres[j] = gre[j]
+    elif current.right is previous: # Moving up from right
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
 
-    add_grid(node, pgles, pgres, gid, rank, size)
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef insert_grid(Node node,
-                np.float64_t[:] gle,
-                np.float64_t[:] gre,
-                int grid_id,
-                int rank,
-                int size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    cdef int contained = 1
-    for i in range(3):
-        if gle[i] > node.left_edge[i] or\
-           gre[i] < node.right_edge[i]:
-            contained *= 0
-
-    if contained == 1:
-        node.grid = grid_id
-        assert(node.grid != -1)
-        return
-
-    # Split the grid
-    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = -1
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cpdef add_grids(Node node,
-                    int ngrids,
-                    np.float64_t[:,:] gles,
-                    np.float64_t[:,:] gres,
-                    np.int64_t[:] gids,
-                    int rank,
-                    int size):
-    cdef int i, j, nless, ngreater, index
-    cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-    cdef np.int64_t[:] l_ids, g_ids
-    if not should_i_build(node, rank, size):
-        return
-
-    if _kd_is_leaf(node) == 1:
-        insert_grids(node, ngrids, gles, gres, gids, rank, size)
-        return
-
-    less_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-    greater_ids = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-
-    nless = 0
-    ngreater = 0
-    for i in range(ngrids):
-        if gles[i, node.split.dim] < node.split.pos:
-            less_ids[nless] = i
-            nless += 1
-
-        if gres[i, node.split.dim] > node.split.pos:
-            greater_ids[ngreater] = i
-            ngreater += 1
-
-    #print 'nless: %i' % nless
-    #print 'ngreater: %i' % ngreater
-
-    if nless > 0:
-        less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
-
-        for i in range(nless):
-            index = less_ids[i]
-            l_ids[i] = gids[index]
-            for j in range(3):
-                less_gles[i,j] = gles[index,j]
-                less_gres[i,j] = gres[index,j]
-
-        add_grids(node.left, nless, less_gles, less_gres,
-                  l_ids, rank, size)
-
-    if ngreater > 0:
-        greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
-
-        for i in range(ngreater):
-            index = greater_ids[i]
-            g_ids[i] = gids[index]
-            for j in range(3):
-                greater_gles[i,j] = gles[index,j]
-                greater_gres[i,j] = gres[index,j]
-
-        add_grids(node.right, ngreater, greater_gles, greater_gres,
-                  g_ids, rank, size)
-
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef int should_i_split(Node node, int rank, int size):
-    if node.node_id < size and node.node_id > 0:
-        return 1
-    return 0
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void insert_grids(Node node,
-                       int ngrids,
-                       np.float64_t[:,:] gles,
-                       np.float64_t[:,:] gres,
-                       np.int64_t[:] gids,
-                       int rank,
-                       int size):
-
-    if not should_i_build(node, rank, size) or ngrids == 0:
-        return
-    cdef int contained = 1
-    cdef int check
-
-    if ngrids == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles[0,:], gres[0,:], gids[0], rank, size)
-            return
-
-        for i in range(3):
-            contained *= gles[0,i] <= node.left_edge[i]
-            contained *= gres[0,i] >= node.right_edge[i]
-
-        if contained == 1:
-            # print 'Node fully contained, setting to grid: %i' % gids[0]
-            node.grid = gids[0]
-            assert(node.grid != -1)
-            return
-
-    # Split the grids
-    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = -1
-    return
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef split_grid(Node node,
-               np.float64_t[:] gle,
-               np.float64_t[:] gre,
-               int gid,
-               int rank,
-               int size):
-
-    cdef int j
-    cdef np.uint8_t[:] less_ids, greater_ids
-    data = cvarray(format="d", shape=(1,2,3), itemsize=sizeof(np.float64_t))
-    for j in range(3):
-        data[0,0,j] = gle[j]
-        data[0,1,j] = gre[j]
-
-    less_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
-    greater_ids = cvarray(format="B", shape=(1,), itemsize=sizeof(np.uint8_t))
-
-    best_dim, split_pos, nless, ngreater = \
-        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
-                          less_ids, greater_ids)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if nless == 1:
-        insert_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if ngreater == 1:
-        insert_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-    return 0
+    return current, previous
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -465,366 +827,3 @@
 
     # Return out unique values
     return best_dim, split, nless, ngreater
-
-#@cython.boundscheck(False)
-#@cython.wraparound(False)
-#@cython.cdivision(True)
-cdef int split_grids(Node node,
-                       int ngrids,
-                       np.float64_t[:,:] gles,
-                       np.float64_t[:,:] gres,
-                       np.int64_t[:] gids,
-                       int rank,
-                       int size):
-    # Find a Split
-    cdef int i, j, index
-    cdef np.float64_t[:,:] less_gles, less_gres, greater_gles, greater_gres
-    cdef np.int64_t[:] l_ids, g_ids
-    if ngrids == 0: return 0
-
-    data = cvarray(format="d", shape=(ngrids,2,3), itemsize=sizeof(np.float64_t))
-
-    for i in range(ngrids):
-        for j in range(3):
-            data[i,0,j] = gles[i,j]
-            data[i,1,j] = gres[i,j]
-
-    less_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
-    greater_ids = cvarray(format="B", shape=(ngrids,), itemsize=sizeof(np.uint8_t))
-
-    best_dim, split_pos, nless, ngreater = \
-        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
-                          less_ids, greater_ids)
-
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        print 'Failed to split grids.'
-        return -1
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    less_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-    greater_index = cvarray(format="q", shape=(ngrids,), itemsize=sizeof(np.int64_t))
-
-    nless = 0
-    ngreater = 0
-    for i in range(ngrids):
-        if less_ids[i] == 1:
-            less_index[nless] = i
-            nless += 1
-
-        if greater_ids[i] == 1:
-            greater_index[ngreater] = i
-            ngreater += 1
-
-    if nless > 0:
-        less_gles = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        less_gres = cvarray(format="d", shape=(nless,3), itemsize=sizeof(np.float64_t))
-        l_ids = cvarray(format="q", shape=(nless,), itemsize=sizeof(np.int64_t))
-
-        for i in range(nless):
-            index = less_index[i]
-            l_ids[i] = gids[index]
-            for j in range(3):
-                less_gles[i,j] = gles[index,j]
-                less_gres[i,j] = gres[index,j]
-
-        # Populate Left Node
-        #print 'Inserting left node', node.left_edge, node.right_edge
-        insert_grids(node.left, nless, less_gles, less_gres,
-                     l_ids, rank, size)
-
-    if ngreater > 0:
-        greater_gles = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        greater_gres = cvarray(format="d", shape=(ngreater,3), itemsize=sizeof(np.float64_t))
-        g_ids = cvarray(format="q", shape=(ngreater,), itemsize=sizeof(np.int64_t))
-
-        for i in range(ngreater):
-            index = greater_index[i]
-            g_ids[i] = gids[index]
-            for j in range(3):
-                greater_gles[i,j] = gles[index,j]
-                greater_gres[i,j] = gres[index,j]
-
-        # Populate Right Node
-        #print 'Inserting right node', node.left_edge, node.right_edge
-        insert_grids(node.right, ngreater, greater_gles, greater_gres,
-                     g_ids, rank, size)
-
-    return 0
-
-cdef geo_split(Node node,
-               np.float64_t[:] gle,
-               np.float64_t[:] gre,
-               int grid_id,
-               int rank,
-               int size):
-    cdef int big_dim = 0
-    cdef int i
-    cdef np.float64_t v, my_max = 0.0
-
-    for i in range(3):
-        v = gre[i] - gle[i]
-        if v > my_max:
-            my_max = v
-            big_dim = i
-
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-
-    lnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    lnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    rnew_gle = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-    rnew_gre = cvarray(format="d", shape=(3,), itemsize=sizeof(np.float64_t))
-
-    for j in range(3):
-        lnew_gle[j] = gle[j]
-        lnew_gre[j] = gre[j]
-        rnew_gle[j] = gle[j]
-        rnew_gre[j] = gre[j]
-
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = big_dim
-    split.pos = new_pos
-
-    # Create a Split
-    divide(node, split)
-
-    #lnew_gre[big_dim] = new_pos
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, lnew_gle, lnew_gre,
-            grid_id, rank, size)
-
-    #rnew_gle[big_dim] = new_pos
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, rnew_gle, rnew_gre,
-            grid_id, rank, size)
-    return
-
-cdef void divide(Node node, Split * split):
-    # Create a Split
-    node.split = split
-
-    cdef np.float64_t[:] le = np.empty(3, dtype='float64')
-    cdef np.float64_t[:] re = np.empty(3, dtype='float64')
-
-    cdef int i
-    for i in range(3):
-        le[i] = node.left_edge[i]
-        re[i] = node.right_edge[i]
-    re[split.dim] = split.pos
-
-    node.left = Node(node, None, None,
-                     le, re, node.grid,
-                     _lchild_id(node.node_id))
-
-    re[split.dim] = node.right_edge[split.dim]
-    le[split.dim] = split.pos
-    node.right = Node(node, None, None,
-                      le, re, node.grid,
-                      _rchild_id(node.node_id))
-
-    return
-#
-def kd_sum_volume(Node node):
-    cdef np.float64_t vol = 1.0
-    if (node.left is None) and (node.right is None):
-        if node.grid == -1:
-            return 0.0
-        for i in range(3):
-            vol *= node.right_edge[i] - node.left_edge[i]
-        return vol
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_node_check(Node node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid != -1:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(Node node):
-    cdef int has_l_child = node.left == None
-    cdef int has_r_child = node.right == None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-cdef int _kd_is_leaf(Node node):
-    if node.left is None or node.right is None:
-        return 1
-    return 0
-
-def step_depth(Node current, Node previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if _kd_is_leaf(current) == 1: # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(Node trunk, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.node_id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(Node tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.node_id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(Node tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(Node tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(Node current,
-                   Node previous,
-                   viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if _kd_is_leaf(current) == 1: # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
-
-cdef int point_in_node(Node node,
-                       np.float64_t[:] point):
-    cdef int i
-    cdef int inside = 1
-    for i in range(3):
-        inside *= node.left_edge[i] <= point[i]
-        inside *= node.right_edge[i] > point[i]
-    return inside
-
-cdef Node _find_node(Node node, np.float64_t[:] point):
-    while _kd_is_leaf(node) == 0:
-        if point[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-
-def find_node(Node node,
-              np.float64_t[:] point):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(point_in_node(node, point))
-    return _find_node(node, point)
-

diff -r d48c4a143f6e3c69c9a1016725d8328f8ea3af53 -r 3ca8a739e5296422ea7954ce8d2717908be25dd6 yt/utilities/lib/contour_finding.pyx
--- a/yt/utilities/lib/contour_finding.pyx
+++ b/yt/utilities/lib/contour_finding.pyx
@@ -25,7 +25,7 @@
     OctreeContainer, OctInfo
 from yt.geometry.oct_visitors cimport \
     Oct
-from .amr_kdtools cimport _find_node, Node
+from .amr_kdtools cimport Node
 from .grid_traversal cimport VolumeContainer, PartitionedGrid, \
     vc_index, vc_pos_index
 import sys
@@ -446,7 +446,7 @@
                                 pos[ax] = vc0.dims[ax]
                                 my_pos[ax] = vc0.dims[ax]-1
                             get_spos(vc0, pos[0], pos[1], pos[2], ax, spos)
-                            adj_node = _find_node(trunk, spos)
+                            adj_node = trunk._find_node(spos)
                             vc1 = vcs[adj_node.node_ind]
                             if spos_contained(vc1, spos):
                                 index = vc_index(vc0, my_pos[0],

diff -r d48c4a143f6e3c69c9a1016725d8328f8ea3af53 -r 3ca8a739e5296422ea7954ce8d2717908be25dd6 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -14,8 +14,6 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.lib.amr_kdtools import depth_traverse, \
-        get_left_edge, get_right_edge
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -46,14 +44,14 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree.trunk):
+    for node in kd.tree.trunk.depth_traverse():
         if node.grid is None:
             continue
         grid = ds.index.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        nle = get_left_edge(node)
-        nre = get_right_edge(node)
+        nle = node.get_left_edge()
+        nre = node.get_right_edge()
         li = np.rint((nle-gle)/dds).astype('int32')
         ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list