[yt-svn] commit/yt: 23 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Feb 24 09:37:22 PST 2016


23 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/e9dd3ea87dc9/
Changeset:   e9dd3ea87dc9
Branch:      yt
User:        atmyers
Date:        2016-02-03 07:13:21+00:00
Summary:     initial check-in of bounding volume hierarchy code
Affected #:  3 files

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r e9dd3ea87dc98946ddf8ff59530aee1209982f11 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
 yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r e9dd3ea87dc98946ddf8ff59530aee1209982f11 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -0,0 +1,126 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+from libc.math cimport fabs, fmax, fmin
+from libc.stdlib cimport malloc, free
+
+cdef extern from "mesh_construction.h":
+    enum:
+        MAX_NUM_TRI
+    int triangulate_hex[MAX_NUM_TRI][3]
+
+cdef struct BBox:
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    
+cdef struct BVHNode:
+    np.int64_t begin
+    np.int64_t end
+    BVHNode* left
+    BVHNode* right
+    BBox bbox
+    
+cdef struct Triangle:
+    np.float64_t centroid[3]
+    BBox bbox
+
+cdef class BVH:
+    cdef BVHNode* root
+    cdef Triangle* triangles
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __init__(self,
+                 np.float64_t[:, ::1] vertices,
+                 np.int64_t[:, ::1] indices):
+        
+        cdef np.int64_t num_elem = indices.shape[0]
+        cdef np.int64_t num_tri = 12*num_elem
+        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
+        
+        cdef np.int64_t i, j, k
+        cdef np.int64_t offset, tri_index
+        cdef np.int64_t v0, v1, v2
+        cdef np.float64_t[:] p0
+        cdef np.float64_t[:] p1
+        cdef np.float64_t[:] p2
+        cdef Triangle* tri
+        for i in range(num_elem):
+            offset = 12*i
+            for j in range(12):
+                tri_index = offset + j
+                tri = &(self.triangles[tri_index])
+                p0 = vertices[indices[i][triangulate_hex[j][0]]]
+                p1 = vertices[indices[i][triangulate_hex[j][1]]]
+                p2 = vertices[indices[i][triangulate_hex[j][2]]]
+                for k in range(3):
+                    tri.centroid[k] = (p0[k] + p1[k] + p2[k]) / 3.0
+                    tri.bbox.left_edge[k] = fmin(fmin(p0[k], p1[k]), p2[k])
+                    tri.bbox.right_edge[k] = fmax(fmax(p0[k], p1[k]), p2[k])
+                    
+        self.root = self.build(0, num_tri)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
+                              np.int64_t ax, np.float64_t split):
+        cdef np.int64_t mid = begin
+        while (begin != end):
+            if self.triangles[mid].centroid[ax] > split:
+                mid += 1
+            elif self.triangles[begin].centroid[ax] > split:
+                self.triangles[mid], self.triangles[begin] = \
+                self.triangles[begin], self.triangles[mid]
+                mid += 1
+            begin += 1
+        return mid
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void _get_node_bbox(self, BVHNode* node, 
+                             np.int64_t begin, np.int64_t end):
+        cdef np.int64_t i, j
+        cdef BBox box = self.triangles[begin].bbox
+        for i in range(begin+1, end):
+            for j in range(3):
+                box.left_edge[j] = fmax(box.left_edge[j], 
+                                        self.triangles[i].bbox.left_edge[j])
+                box.right_edge[j] = fmin(box.right_edge[j], 
+                                         self.triangles[i].bbox.right_edge[j])
+        node.bbox = box       
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef BVHNode* build(self, np.int64_t begin, np.int64_t end):
+        cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
+        node.begin = begin
+        node.end = end
+
+        self._get_node_bbox(node, begin, end)
+        
+        if (end - begin) == 1:
+            return node
+        
+        cdef np.int64_t ax = 0
+        cdef np.float64_t d = fabs(node.bbox.right_edge[0] - 
+                                   node.bbox.left_edge[0])
+        if fabs(node.bbox.right_edge[1] - node.bbox.left_edge[1]) > d:
+            ax = 1
+        if fabs(node.bbox.right_edge[2] - node.bbox.left_edge[2]) > d:
+            ax = 2
+
+        cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] - 
+                                       node.bbox.left_edge[ax])
+
+        cdef np.int64_t mid = self.partition(begin, end, ax, split)
+        if(mid == begin or mid == end):
+            mid = begin + (end-begin)/2
+
+        node.left = self.build(begin, mid)
+        node.right = self.build(mid, end)
+
+        return node

diff -r 5beb2280f5c889b80eaa6cfa962e298b105aff1d -r e9dd3ea87dc98946ddf8ff59530aee1209982f11 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -116,6 +116,9 @@
     config.add_extension("bitarray", 
                 ["yt/utilities/lib/bitarray.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"])
+    config.add_extension("bounding_volume_hierarchy", 
+                ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+                libraries=["m"])    
     config.add_extension("particle_mesh_operations", 
                 ["yt/utilities/lib/particle_mesh_operations.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/0c03f14716b3/
Changeset:   0c03f14716b3
Branch:      yt
User:        atmyers
Date:        2016-02-03 08:12:38+00:00
Summary:     some work towards implementing the ray-trace
Affected #:  1 file

diff -r e9dd3ea87dc98946ddf8ff59530aee1209982f11 -r 0c03f14716b348f1fa198e8a99811a3b14eae580 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -9,10 +9,21 @@
         MAX_NUM_TRI
     int triangulate_hex[MAX_NUM_TRI][3]
 
+# ray data structure
+cdef struct Ray:
+    np.float64_t origin[3]
+    np.float64_t direction[3]
+    np.float64_t data_val
+    np.float64_t t_near
+    np.float64_t t_far
+    np.int64_t  elem_id
+
+# axis-aligned bounding box
 cdef struct BBox:
     np.float64_t left_edge[3]
     np.float64_t right_edge[3]
-    
+
+# node for the bounding volume hierarchy
 cdef struct BVHNode:
     np.int64_t begin
     np.int64_t end
@@ -20,14 +31,18 @@
     BVHNode* right
     BBox bbox
     
+# triangle data structure
 cdef struct Triangle:
+    np.int64_t v0, v1, v2
+    np.int64_t elem_id
     np.float64_t centroid[3]
     BBox bbox
 
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
-    
+    cdef np.float64_t[:, ::1] vertices
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -35,13 +50,13 @@
                  np.float64_t[:, ::1] vertices,
                  np.int64_t[:, ::1] indices):
         
+        self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
         self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
         
         cdef np.int64_t i, j, k
         cdef np.int64_t offset, tri_index
-        cdef np.int64_t v0, v1, v2
         cdef np.float64_t[:] p0
         cdef np.float64_t[:] p1
         cdef np.float64_t[:] p2
@@ -51,9 +66,13 @@
             for j in range(12):
                 tri_index = offset + j
                 tri = &(self.triangles[tri_index])
-                p0 = vertices[indices[i][triangulate_hex[j][0]]]
-                p1 = vertices[indices[i][triangulate_hex[j][1]]]
-                p2 = vertices[indices[i][triangulate_hex[j][2]]]
+                tri.elem_id = i
+                tri.v0 = indices[i][triangulate_hex[j][0]]
+                tri.v1 = indices[i][triangulate_hex[j][1]]
+                tri.v2 = indices[i][triangulate_hex[j][2]]                
+                p0 = vertices[tri.v0]
+                p1 = vertices[tri.v1]
+                p2 = vertices[tri.v2]
                 for k in range(3):
                     tri.centroid[k] = (p0[k] + p1[k] + p2[k]) / 3.0
                     tri.bbox.left_edge[k] = fmin(fmin(p0[k], p1[k]), p2[k])


https://bitbucket.org/yt_analysis/yt/commits/0c2780fd34fa/
Changeset:   0c2780fd34fa
Branch:      yt
User:        atmyers
Date:        2016-02-05 22:09:21+00:00
Summary:     adding code for ray triangle intersection
Affected #:  1 file

diff -r 0c03f14716b348f1fa198e8a99811a3b14eae580 -r 0c2780fd34fa6239fa04063a74d1a9cc9eeb92a7 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -38,6 +38,30 @@
     np.float64_t centroid[3]
     BBox bbox
 
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.float64_t dot(const np.float64_t* a, 
+                      const np.float64_t* b) nogil:
+    cdef np.int64_t i
+    cdef np.float64_t rv = 0.0
+    for i in range(3):
+        rv += a[i]*b[i]
+    return rv
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void cross(const np.float64_t* a, 
+                const np.float64_t* b,
+                np.float64_t* c) nogil:
+    c[0] = a[1]*b[2] - a[2]*b[1]
+    c[1] = a[2]*b[0] - a[0]*b[2]
+    c[2] = a[0]*b[1] - a[1]*b[0]
+
+
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
@@ -110,7 +134,124 @@
                 box.right_edge[j] = fmin(box.right_edge[j], 
                                          self.triangles[i].bbox.right_edge[j])
         node.bbox = box       
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.int64_t _ray_triangle_intersect(self, Ray* ray, np.int64_t tri_index):
+        # cribbed from 3D Math Primer for Graphics and Game Development, section A.16
+        cdef np.float64_t p0[3]
+        cdef np.float64_t p1[3]
+        cdef np.float64_t p2[3]
+        cdef np.float64_t e1[3]
+        cdef np.float64_t e2[3]
+
+        cdef Triangle* tri
+        tri = &(self.triangles[tri_index])
+        
+        cdef int i
+        for i in range(3):
+            p0[i] = self.vertices[tri.v0][i]
+            p1[i] = self.vertices[tri.v1][i]
+            p2[i] = self.vertices[tri.v2][i]
+            e1[i] = p1[i] - p0[i];
+            e2[i] = p2[i] - p1[i];
+        
+        cdef np.float64_t N[3]
+        cross(e1, e1, N);
+        cdef np.float64_t dotprod = dot(N, ray.direction)
+
+        if not dotprod < 0.0:
+            # ray is travelling the wrong direction
+            return False
+
+        cdef np.float64_t d = dot(N, p0)
+        cdef np.float64_t t = d - dot(N, ray.origin) 
+
+        if not t <= 0.0:
+            # ray origin is behind triangle
+            return False
+
+        t /= dotprod
+
+        if not t >= ray.t_far:
+            # closer intersection already found
+            return False
+
+        assert(t >= 0.0)
+        assert(t <= ray.t_far)
+
+        cdef np.float64_t p[3]
+        for i in range(3):
+            p[i] = ray.origin[i] + ray.direction[i]*t
+        
+        cdef np.float64_t u0, u1, u2, v0, v1, v2
+        
+        if fabs(N[0]) > fabs(N[1]):
+            if fabs(N[0]) > fabs(N[2]):
+                u0 = p[1]  - p0[1]
+                u1 = p1[1] - p0[1]
+                u2 = p2[1] - p0[1]
+
+                v0 = p[2]  - p0[2]
+                v1 = p1[2] - p0[2]
+                v2 = p2[2] - p0[2]
     
+            else:
+                u0 = p[0]  - p0[0]
+                u1 = p1[0] - p0[0]
+                u2 = p2[0] - p0[0]
+
+                v0 = p[1]  - p0[1]
+                v1 = p1[1] - p0[1]
+                v2 = p2[1] - p0[1]
+        else:
+            if fabs(N[1]) > fabs(N[2]):
+                u0 = p[0]  - p0[0]
+                u1 = p1[0] - p0[0]
+                u2 = p2[0] - p0[0]
+
+                v0 = p[2]  - p0[2]
+                v1 = p1[2] - p0[2]
+                v2 = p2[2] - p0[2]
+            else:
+                u0 = p[0]  - p0[0]
+                u1 = p1[0] - p0[0]
+                u2 = p2[0] - p0[0]
+
+                v0 = p[1]  - p0[1]
+                v1 = p1[1] - p0[1]
+                v2 = p2[1] - p0[1]
+
+        cdef np.float64_t temp = u1*v2 - u2*v1
+
+        if not (temp is not  0.0):
+            # denominator is invalid
+            return False
+
+        temp = 1.0/temp
+
+        cdef np.float64_t a = (u0*v2 - u2*v0)*temp
+        if not (a >= 0.0):
+            # barycentric coord out of range
+            return False
+
+        cdef np.float64_t b = (u1*v0 - u0*v1)*temp
+        if not (b >= 0.0):
+            # barycentric coord out of range
+            return False
+
+        cdef np.float64_t c = 1.0 - a - b
+        if not (c >= 0.0):
+            # barycentric coord out of range
+            return False
+
+        # we have a hit, update ray
+        ray.t_far = t
+        ray.elem_id = tri.elem_id
+
+        return True
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt/commits/d18c05042bff/
Changeset:   d18c05042bff
Branch:      yt
User:        atmyers
Date:        2016-02-06 00:08:50+00:00
Summary:     adding ray / bounding box intersection check
Affected #:  1 file

diff -r 0c2780fd34fa6239fa04063a74d1a9cc9eeb92a7 -r d18c05042bff11ea2866b278e0f7e0397c7f463a yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -13,10 +13,11 @@
 cdef struct Ray:
     np.float64_t origin[3]
     np.float64_t direction[3]
+    np.float64_t inv_dir[3]
     np.float64_t data_val
     np.float64_t t_near
     np.float64_t t_far
-    np.int64_t  elem_id
+    np.int64_t elem_id
 
 # axis-aligned bounding box
 cdef struct BBox:
@@ -33,7 +34,9 @@
     
 # triangle data structure
 cdef struct Triangle:
-    np.int64_t v0, v1, v2
+    np.float64_t p0[3]
+    np.float64_t p1[3]
+    np.float64_t p2[3]
     np.int64_t elem_id
     np.float64_t centroid[3]
     BBox bbox
@@ -62,6 +65,154 @@
     c[2] = a[0]*b[1] - a[1]*b[0]
 
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri):
+    # guts of this are cribbed from "3D Math Primer for Graphics 
+    # and Game Development", section A.16
+
+    # edge vectors
+    cdef np.float64_t e1[3]
+    cdef np.float64_t e2[3]
+    cdef int i
+    for i in range(3):
+        e1[i] = tri.p1[i] - tri.p0[i];
+        e2[i] = tri.p2[i] - tri.p1[i];
+        
+    # normal vector
+    cdef np.float64_t N[3]
+    cross(e1, e1, N);
+
+    cdef np.float64_t dotprod = dot(N, ray.direction)
+    if not dotprod < 0.0:
+        # ray is travelling the wrong direction
+        return False
+
+    # distance along ray
+    cdef np.float64_t d = dot(N, tri.p0)
+    cdef np.float64_t t = d - dot(N, ray.origin) 
+
+    if not t <= 0.0:
+        # ray origin is behind triangle
+        return False
+
+    t /= dotprod
+
+    if not t >= ray.t_far:
+        # closer intersection already found
+        return False
+
+#    assert(t >= 0.0)
+#    assert(t <= ray.t_far)
+    
+    # 3D point of intersection
+    cdef np.float64_t p[3]
+    for i in range(3):
+        p[i] = ray.origin[i] + ray.direction[i]*t
+        
+    cdef np.float64_t u0, u1, u2, v0, v1, v2
+        
+    if fabs(N[0]) > fabs(N[1]):
+        if fabs(N[0]) > fabs(N[2]):
+            u0 = p[1]      - tri.p0[1]
+            u1 = tri.p1[1] - tri.p0[1]
+            u2 = tri.p2[1] - tri.p0[1]
+
+            v0 = p[2]      - tri.p0[2]
+            v1 = tri.p1[2] - tri.p0[2]
+            v2 = tri.p2[2] - tri.p0[2]
+    
+        else:
+            u0 = p[0]      - tri.p0[0]
+            u1 = tri.p1[0] - tri.p0[0]
+            u2 = tri.p2[0] - tri.p0[0]
+            
+            v0 = p[1]      - tri.p0[1]
+            v1 = tri.p1[1] - tri.p0[1]
+            v2 = tri.p2[1] - tri.p0[1]
+    else:
+        if fabs(N[1]) > fabs(N[2]):
+            u0 = p[0]      - tri.p0[0]
+            u1 = tri.p1[0] - tri.p0[0]
+            u2 = tri.p2[0] - tri.p0[0]
+
+            v0 = p[2]      - tri.p0[2]
+            v1 = tri.p1[2] - tri.p0[2]
+            v2 = tri.p2[2] - tri.p0[2]
+        else:
+            u0 = p[0]      - tri.p0[0]
+            u1 = tri.p1[0] - tri.p0[0]
+            u2 = tri.p2[0] - tri.p0[0]
+
+            v0 = p[1]      - tri.p0[1]
+            v1 = tri.p1[1] - tri.p0[1]
+            v2 = tri.p2[1] - tri.p0[1]
+
+    cdef np.float64_t temp = u1*v2 - u2*v1
+
+    if not (temp is not  0.0):
+        # denominator is invalid
+        return False
+
+    temp = 1.0/temp
+
+    cdef np.float64_t a = (u0*v2 - u2*v0)*temp
+    if not (a >= 0.0):
+        # barycentric coord out of range
+        return False
+
+    cdef np.float64_t b = (u1*v0 - u0*v1)*temp
+    if not (b >= 0.0):
+        # barycentric coord out of range
+        return False
+
+    cdef np.float64_t c = 1.0 - a - b
+    if not (c >= 0.0):
+        # barycentric coord out of range
+        return False
+
+    # we have a hit, update ray
+    ray.t_far = t
+    ray.elem_id = tri.elem_id
+
+    return True
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox* bbox):
+
+    cdef np.float64_t tx1 = (bbox.left_edge[0]  - 
+                             ray.origin[0])*ray.inv_dir[0]
+    cdef np.float64_t tx2 = (bbox.right_edge[0] -
+                             ray.origin[0])*ray.inv_dir[0]
+ 
+    cdef np.float64_t tmin = fmin(tx1, tx2)
+    cdef np.float64_t tmax = fmax(tx1, tx2)
+
+    cdef np.float64_t ty1 = (bbox.left_edge[1]  -
+                             ray.origin[1])*ray.inv_dir[1]
+    cdef np.float64_t ty2 = (bbox.right_edge[1] -
+                             ray.origin[1])*ray.inv_dir[1]
+ 
+    tmin = fmax(tmin, fmin(ty1, ty2))
+    tmax = fmin(tmax, fmax(ty1, ty2))
+
+    cdef np.float64_t tz1 = (bbox.left_edge[2]  -
+                             ray.origin[2])*ray.inv_dir[2]
+    cdef np.float64_t tz2 = (bbox.right_edge[2] -
+                             ray.origin[2])*ray.inv_dir[2]
+ 
+    tmin = fmax(tmin, fmin(tz1, tz2))
+    tmax = fmin(tmax, fmax(tz1, tz2))
+
+    if not (tmax > 0):
+        return False
+ 
+    return tmax >= tmin;
+
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
@@ -81,9 +232,7 @@
         
         cdef np.int64_t i, j, k
         cdef np.int64_t offset, tri_index
-        cdef np.float64_t[:] p0
-        cdef np.float64_t[:] p1
-        cdef np.float64_t[:] p2
+        cdef np.int64_t v0, v1, v2
         cdef Triangle* tri
         for i in range(num_elem):
             offset = 12*i
@@ -91,16 +240,16 @@
                 tri_index = offset + j
                 tri = &(self.triangles[tri_index])
                 tri.elem_id = i
-                tri.v0 = indices[i][triangulate_hex[j][0]]
-                tri.v1 = indices[i][triangulate_hex[j][1]]
-                tri.v2 = indices[i][triangulate_hex[j][2]]                
-                p0 = vertices[tri.v0]
-                p1 = vertices[tri.v1]
-                p2 = vertices[tri.v2]
+                v0 = indices[i][triangulate_hex[j][0]]
+                v1 = indices[i][triangulate_hex[j][1]]
+                v2 = indices[i][triangulate_hex[j][2]]
                 for k in range(3):
-                    tri.centroid[k] = (p0[k] + p1[k] + p2[k]) / 3.0
-                    tri.bbox.left_edge[k] = fmin(fmin(p0[k], p1[k]), p2[k])
-                    tri.bbox.right_edge[k] = fmax(fmax(p0[k], p1[k]), p2[k])
+                    tri.p0[k] = vertices[v0][k]
+                    tri.p1[k] = vertices[v1][k]
+                    tri.p2[k] = vertices[v2][k]
+                    tri.centroid[k] = (tri.p0[k] + tri.p1[k] + tri.p2[k]) / 3.0
+                    tri.bbox.left_edge[k] = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
+                    tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
                     
         self.root = self.build(0, num_tri)
 
@@ -138,123 +287,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef np.int64_t _ray_triangle_intersect(self, Ray* ray, np.int64_t tri_index):
-        # cribbed from 3D Math Primer for Graphics and Game Development, section A.16
-        cdef np.float64_t p0[3]
-        cdef np.float64_t p1[3]
-        cdef np.float64_t p2[3]
-        cdef np.float64_t e1[3]
-        cdef np.float64_t e2[3]
-
-        cdef Triangle* tri
-        tri = &(self.triangles[tri_index])
-        
-        cdef int i
-        for i in range(3):
-            p0[i] = self.vertices[tri.v0][i]
-            p1[i] = self.vertices[tri.v1][i]
-            p2[i] = self.vertices[tri.v2][i]
-            e1[i] = p1[i] - p0[i];
-            e2[i] = p2[i] - p1[i];
-        
-        cdef np.float64_t N[3]
-        cross(e1, e1, N);
-        cdef np.float64_t dotprod = dot(N, ray.direction)
-
-        if not dotprod < 0.0:
-            # ray is travelling the wrong direction
-            return False
-
-        cdef np.float64_t d = dot(N, p0)
-        cdef np.float64_t t = d - dot(N, ray.origin) 
-
-        if not t <= 0.0:
-            # ray origin is behind triangle
-            return False
-
-        t /= dotprod
-
-        if not t >= ray.t_far:
-            # closer intersection already found
-            return False
-
-        assert(t >= 0.0)
-        assert(t <= ray.t_far)
-
-        cdef np.float64_t p[3]
-        for i in range(3):
-            p[i] = ray.origin[i] + ray.direction[i]*t
-        
-        cdef np.float64_t u0, u1, u2, v0, v1, v2
-        
-        if fabs(N[0]) > fabs(N[1]):
-            if fabs(N[0]) > fabs(N[2]):
-                u0 = p[1]  - p0[1]
-                u1 = p1[1] - p0[1]
-                u2 = p2[1] - p0[1]
-
-                v0 = p[2]  - p0[2]
-                v1 = p1[2] - p0[2]
-                v2 = p2[2] - p0[2]
-    
-            else:
-                u0 = p[0]  - p0[0]
-                u1 = p1[0] - p0[0]
-                u2 = p2[0] - p0[0]
-
-                v0 = p[1]  - p0[1]
-                v1 = p1[1] - p0[1]
-                v2 = p2[1] - p0[1]
-        else:
-            if fabs(N[1]) > fabs(N[2]):
-                u0 = p[0]  - p0[0]
-                u1 = p1[0] - p0[0]
-                u2 = p2[0] - p0[0]
-
-                v0 = p[2]  - p0[2]
-                v1 = p1[2] - p0[2]
-                v2 = p2[2] - p0[2]
-            else:
-                u0 = p[0]  - p0[0]
-                u1 = p1[0] - p0[0]
-                u2 = p2[0] - p0[0]
-
-                v0 = p[1]  - p0[1]
-                v1 = p1[1] - p0[1]
-                v2 = p2[1] - p0[1]
-
-        cdef np.float64_t temp = u1*v2 - u2*v1
-
-        if not (temp is not  0.0):
-            # denominator is invalid
-            return False
-
-        temp = 1.0/temp
-
-        cdef np.float64_t a = (u0*v2 - u2*v0)*temp
-        if not (a >= 0.0):
-            # barycentric coord out of range
-            return False
-
-        cdef np.float64_t b = (u1*v0 - u0*v1)*temp
-        if not (b >= 0.0):
-            # barycentric coord out of range
-            return False
-
-        cdef np.float64_t c = 1.0 - a - b
-        if not (c >= 0.0):
-            # barycentric coord out of range
-            return False
-
-        # we have a hit, update ray
-        ray.t_far = t
-        ray.elem_id = tri.elem_id
-
-        return True
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     cdef BVHNode* build(self, np.int64_t begin, np.int64_t end):
         cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
         node.begin = begin


https://bitbucket.org/yt_analysis/yt/commits/c22c1e797f64/
Changeset:   c22c1e797f64
Branch:      yt
User:        atmyers
Date:        2016-02-06 00:40:49+00:00
Summary:     adding recursive intersect test. currently borked.
Affected #:  1 file

diff -r d18c05042bff11ea2866b278e0f7e0397c7f463a -r c22c1e797f64ea9da5692ec828c30c4b96286d2a yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -182,7 +182,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox* bbox):
+cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox):
 
     cdef np.float64_t tx1 = (bbox.left_edge[0]  - 
                              ray.origin[0])*ray.inv_dir[0]
@@ -228,12 +228,13 @@
         self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
-        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
-        
+
+        # fill our array of triangles
         cdef np.int64_t i, j, k
         cdef np.int64_t offset, tri_index
         cdef np.int64_t v0, v1, v2
         cdef Triangle* tri
+        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
         for i in range(num_elem):
             offset = 12*i
             for j in range(12):
@@ -250,8 +251,11 @@
                     tri.centroid[k] = (tri.p0[k] + tri.p1[k] + tri.p2[k]) / 3.0
                     tri.bbox.left_edge[k] = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
                     tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
-                    
-        self.root = self.build(0, num_tri)
+
+        # recursive build
+        self.root = self._build(0, num_tri)
+
+        self.intersect()
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -287,16 +291,63 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef BVHNode* build(self, np.int64_t begin, np.int64_t end):
+    cdef void intersect(self):
+        cdef Ray ray
+        ray.origin[0]     = -5.0
+        ray.origin[1]     = 0.0
+        ray.origin[2]     = 1.0
+        ray.direction[0]  = 1.0
+        ray.direction[1]  = 0.0
+        ray.direction[2]  = 0.0
+        ray.t_near        = 0.0
+        ray.t_far         = 1e300
+        
+        cdef int i 
+        for i in range(3):
+            ray.inv_dir[i] = 1.0 / ray.direction[i]
+        
+        self._recursive_intersect(&ray, self.root)
+        print ray.elem_id
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node):
+
+        # check for bbox intersection:
+        if not ray_bbox_intersect(ray, node.bbox):
+            return
+
+        # check for leaf
+        cdef np.int64_t i, hit
+        cdef Triangle* tri
+        if (node.end - node.begin) == 1:
+            print 'testing triangles'
+            for i in range(node.begin, node.end):
+                tri = &(self.triangles[i])
+                hit = ray_triangle_intersect(ray, tri)
+                if hit:
+                    print hit
+
+        # if not leaf, intersect with left and right children
+        self._recursive_intersect(ray, node.left)
+        self._recursive_intersect(ray, node.right)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end):
         cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
         node.begin = begin
         node.end = end
 
         self._get_node_bbox(node, begin, end)
         
+        # check for leaf
         if (end - begin) == 1:
             return node
         
+        # compute longest dimension
         cdef np.int64_t ax = 0
         cdef np.float64_t d = fabs(node.bbox.right_edge[0] - 
                                    node.bbox.left_edge[0])
@@ -305,14 +356,16 @@
         if fabs(node.bbox.right_edge[2] - node.bbox.left_edge[2]) > d:
             ax = 2
 
+        # split in half along that dimension
         cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] - 
                                        node.bbox.left_edge[ax])
 
+        # sort triangle list
         cdef np.int64_t mid = self.partition(begin, end, ax, split)
         if(mid == begin or mid == end):
             mid = begin + (end-begin)/2
 
-        node.left = self.build(begin, mid)
-        node.right = self.build(mid, end)
+        node.left = self._build(begin, mid)
+        node.right = self._build(mid, end)
 
         return node


https://bitbucket.org/yt_analysis/yt/commits/f580b756264e/
Changeset:   f580b756264e
Branch:      yt
User:        atmyers
Date:        2016-02-06 00:47:55+00:00
Summary:     some temp debug code
Affected #:  1 file

diff -r c22c1e797f64ea9da5692ec828c30c4b96286d2a -r f580b756264ee29fabc2abf45d372ab089bc5875 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -293,15 +293,16 @@
     @cython.cdivision(True)
     cdef void intersect(self):
         cdef Ray ray
-        ray.origin[0]     = -5.0
+        ray.origin[0]     = 0.0
         ray.origin[1]     = 0.0
-        ray.origin[2]     = 1.0
-        ray.direction[0]  = 1.0
+        ray.origin[2]     = -5.0
+        ray.direction[0]  = 0.0
         ray.direction[1]  = 0.0
-        ray.direction[2]  = 0.0
+        ray.direction[2]  = 1.0
         ray.t_near        = 0.0
         ray.t_far         = 1e300
-        
+        ray.elem_id       = -1
+
         cdef int i 
         for i in range(3):
             ray.inv_dir[i] = 1.0 / ray.direction[i]
@@ -316,6 +317,9 @@
 
         # check for bbox intersection:
         if not ray_bbox_intersect(ray, node.bbox):
+            print 'bailing'
+            print node.bbox.left_edge[0], node.bbox.left_edge[1], node.bbox.left_edge[2]
+            print node.bbox.right_edge[0], node.bbox.right_edge[1], node.bbox.right_edge[2]
             return
 
         # check for leaf


https://bitbucket.org/yt_analysis/yt/commits/accb8e801473/
Changeset:   accb8e801473
Branch:      yt
User:        atmyers
Date:        2016-02-06 03:01:47+00:00
Summary:     fixing some stupid bugs
Affected #:  1 file

diff -r f580b756264ee29fabc2abf45d372ab089bc5875 -r accb8e801473e03ae8c7b0c89b7d402c1212c16f yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -82,7 +82,7 @@
         
     # normal vector
     cdef np.float64_t N[3]
-    cross(e1, e1, N);
+    cross(e1, e2, N);
 
     cdef np.float64_t dotprod = dot(N, ray.direction)
     if not dotprod < 0.0:
@@ -99,12 +99,12 @@
 
     t /= dotprod
 
-    if not t >= ray.t_far:
+    if not t <= ray.t_far:
         # closer intersection already found
         return False
 
-#    assert(t >= 0.0)
-#    assert(t <= ray.t_far)
+    assert(t >= 0.0)
+    assert(t <= ray.t_far)
     
     # 3D point of intersection
     cdef np.float64_t p[3]
@@ -122,7 +122,6 @@
             v0 = p[2]      - tri.p0[2]
             v1 = tri.p1[2] - tri.p0[2]
             v2 = tri.p2[2] - tri.p0[2]
-    
         else:
             u0 = p[0]      - tri.p0[0]
             u1 = tri.p1[0] - tri.p0[0]
@@ -150,7 +149,6 @@
             v2 = tri.p2[1] - tri.p0[1]
 
     cdef np.float64_t temp = u1*v2 - u2*v1
-
     if not (temp is not  0.0):
         # denominator is invalid
         return False
@@ -184,34 +182,20 @@
 @cython.cdivision(True)
 cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox):
 
-    cdef np.float64_t tx1 = (bbox.left_edge[0]  - 
-                             ray.origin[0])*ray.inv_dir[0]
-    cdef np.float64_t tx2 = (bbox.right_edge[0] -
-                             ray.origin[0])*ray.inv_dir[0]
+    cdef np.float64_t tmin = -1.0e300
+    cdef np.float64_t tmax =  1.0e300
  
-    cdef np.float64_t tmin = fmin(tx1, tx2)
-    cdef np.float64_t tmax = fmax(tx1, tx2)
+    cdef np.int64_t i
+    cdef np.float64_t t1, t2
+    for i in range(3):
+        t1 = (bbox.left_edge[i]  - ray.origin[i])*ray.inv_dir[i]
+        t2 = (bbox.right_edge[i] - ray.origin[i])*ray.inv_dir[i]
+ 
+        tmin = fmax(tmin, fmin(t1, t2))
+        tmax = fmin(tmax, fmax(t1, t2))
+ 
+    return tmax > fmax(tmin, 0.0)
 
-    cdef np.float64_t ty1 = (bbox.left_edge[1]  -
-                             ray.origin[1])*ray.inv_dir[1]
-    cdef np.float64_t ty2 = (bbox.right_edge[1] -
-                             ray.origin[1])*ray.inv_dir[1]
- 
-    tmin = fmax(tmin, fmin(ty1, ty2))
-    tmax = fmin(tmax, fmax(ty1, ty2))
-
-    cdef np.float64_t tz1 = (bbox.left_edge[2]  -
-                             ray.origin[2])*ray.inv_dir[2]
-    cdef np.float64_t tz2 = (bbox.right_edge[2] -
-                             ray.origin[2])*ray.inv_dir[2]
- 
-    tmin = fmax(tmin, fmin(tz1, tz2))
-    tmax = fmin(tmax, fmax(tz1, tz2))
-
-    if not (tmax > 0):
-        return False
- 
-    return tmax >= tmin;
 
 cdef class BVH:
     cdef BVHNode* root
@@ -249,7 +233,7 @@
                     tri.p1[k] = vertices[v1][k]
                     tri.p2[k] = vertices[v2][k]
                     tri.centroid[k] = (tri.p0[k] + tri.p1[k] + tri.p2[k]) / 3.0
-                    tri.bbox.left_edge[k] = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
+                    tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
                     tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
 
         # recursive build
@@ -282,11 +266,11 @@
         cdef BBox box = self.triangles[begin].bbox
         for i in range(begin+1, end):
             for j in range(3):
-                box.left_edge[j] = fmax(box.left_edge[j], 
+                box.left_edge[j] = fmin(box.left_edge[j],
                                         self.triangles[i].bbox.left_edge[j])
-                box.right_edge[j] = fmin(box.right_edge[j], 
+                box.right_edge[j] = fmax(box.right_edge[j], 
                                          self.triangles[i].bbox.right_edge[j])
-        node.bbox = box       
+        node.bbox = box
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -300,7 +284,7 @@
         ray.direction[1]  = 0.0
         ray.direction[2]  = 1.0
         ray.t_near        = 0.0
-        ray.t_far         = 1e300
+        ray.t_far         = 1.0e300
         ray.elem_id       = -1
 
         cdef int i 
@@ -315,24 +299,20 @@
     @cython.cdivision(True)
     cdef void _recursive_intersect(self, Ray* ray, BVHNode* node):
 
+        
+
         # check for bbox intersection:
         if not ray_bbox_intersect(ray, node.bbox):
-            print 'bailing'
-            print node.bbox.left_edge[0], node.bbox.left_edge[1], node.bbox.left_edge[2]
-            print node.bbox.right_edge[0], node.bbox.right_edge[1], node.bbox.right_edge[2]
             return
 
         # check for leaf
         cdef np.int64_t i, hit
         cdef Triangle* tri
         if (node.end - node.begin) == 1:
-            print 'testing triangles'
             for i in range(node.begin, node.end):
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
-                if hit:
-                    print hit
-
+            return
         # if not leaf, intersect with left and right children
         self._recursive_intersect(ray, node.left)
         self._recursive_intersect(ray, node.right)


https://bitbucket.org/yt_analysis/yt/commits/5df21776949e/
Changeset:   5df21776949e
Branch:      yt
User:        atmyers
Date:        2016-02-06 03:54:20+00:00
Summary:     splitting into pxd and pyx files
Affected #:  3 files

diff -r accb8e801473e03ae8c7b0c89b7d402c1212c16f -r 5df21776949eded6be255444ab8825d3fbcfd0bd yt/utilities/lib/bounding_volume_hierarchy.pxd
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -0,0 +1,47 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+
+# ray data structure
+cdef struct Ray:
+    np.float64_t origin[3]
+    np.float64_t direction[3]
+    np.float64_t inv_dir[3]
+    np.float64_t data_val
+    np.float64_t t_near
+    np.float64_t t_far
+    np.int64_t elem_id
+
+# axis-aligned bounding box
+cdef struct BBox:
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+
+# node for the bounding volume hierarchy
+cdef struct BVHNode:
+    np.int64_t begin
+    np.int64_t end
+    BVHNode* left
+    BVHNode* right
+    BBox bbox
+    
+# triangle data structure
+cdef struct Triangle:
+    np.float64_t p0[3]
+    np.float64_t p1[3]
+    np.float64_t p2[3]
+    np.int64_t elem_id
+    np.float64_t centroid[3]
+    BBox bbox
+
+cdef class BVH:
+    cdef BVHNode* root
+    cdef Triangle* triangles
+    cdef np.float64_t[:, ::1] vertices
+    cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
+                              np.int64_t ax, np.float64_t split)
+    cdef void intersect(self, Ray* ray)
+    cdef void _get_node_bbox(self, BVHNode* node, 
+                             np.int64_t begin, np.int64_t end)
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node)
+    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end)

diff -r accb8e801473e03ae8c7b0c89b7d402c1212c16f -r 5df21776949eded6be255444ab8825d3fbcfd0bd yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -9,38 +9,6 @@
         MAX_NUM_TRI
     int triangulate_hex[MAX_NUM_TRI][3]
 
-# ray data structure
-cdef struct Ray:
-    np.float64_t origin[3]
-    np.float64_t direction[3]
-    np.float64_t inv_dir[3]
-    np.float64_t data_val
-    np.float64_t t_near
-    np.float64_t t_far
-    np.int64_t elem_id
-
-# axis-aligned bounding box
-cdef struct BBox:
-    np.float64_t left_edge[3]
-    np.float64_t right_edge[3]
-
-# node for the bounding volume hierarchy
-cdef struct BVHNode:
-    np.int64_t begin
-    np.int64_t end
-    BVHNode* left
-    BVHNode* right
-    BBox bbox
-    
-# triangle data structure
-cdef struct Triangle:
-    np.float64_t p0[3]
-    np.float64_t p1[3]
-    np.float64_t p2[3]
-    np.int64_t elem_id
-    np.float64_t centroid[3]
-    BBox bbox
-
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -198,9 +166,6 @@
 
 
 cdef class BVH:
-    cdef BVHNode* root
-    cdef Triangle* triangles
-    cdef np.float64_t[:, ::1] vertices
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -239,7 +204,6 @@
         # recursive build
         self.root = self._build(0, num_tri)
 
-        self.intersect()
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -275,32 +239,13 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void intersect(self):
-        cdef Ray ray
-        ray.origin[0]     = 0.0
-        ray.origin[1]     = 0.0
-        ray.origin[2]     = -5.0
-        ray.direction[0]  = 0.0
-        ray.direction[1]  = 0.0
-        ray.direction[2]  = 1.0
-        ray.t_near        = 0.0
-        ray.t_far         = 1.0e300
-        ray.elem_id       = -1
-
-        cdef int i 
-        for i in range(3):
-            ray.inv_dir[i] = 1.0 / ray.direction[i]
-        
-        self._recursive_intersect(&ray, self.root)
-        print ray.elem_id
+    cdef void intersect(self, Ray* ray):
+        self._recursive_intersect(ray, self.root)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef void _recursive_intersect(self, Ray* ray, BVHNode* node):
-
-        
-
         # check for bbox intersection:
         if not ray_bbox_intersect(ray, node.bbox):
             return

diff -r accb8e801473e03ae8c7b0c89b7d402c1212c16f -r 5df21776949eded6be255444ab8825d3fbcfd0bd yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -117,8 +117,9 @@
                 ["yt/utilities/lib/bitarray.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"])
     config.add_extension("bounding_volume_hierarchy", 
-                ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
-                libraries=["m"])    
+                         ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+                         libraries=["m"], 
+                         depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"])
     config.add_extension("particle_mesh_operations", 
                 ["yt/utilities/lib/particle_mesh_operations.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/ff9c9af94f0f/
Changeset:   ff9c9af94f0f
Branch:      yt
User:        atmyers
Date:        2016-02-06 08:07:22+00:00
Summary:     fix calculation of biggest box dimension
Affected #:  1 file

diff -r 5df21776949eded6be255444ab8825d3fbcfd0bd -r ff9c9af94f0fb376c0805facc5ea8d088e7ee0c4 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -246,6 +246,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef void _recursive_intersect(self, Ray* ray, BVHNode* node):
+
         # check for bbox intersection:
         if not ray_bbox_intersect(ray, node.bbox):
             return
@@ -286,14 +287,15 @@
             ax = 2
 
         # split in half along that dimension
-        cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] - 
+        cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] +
                                        node.bbox.left_edge[ax])
 
         # sort triangle list
         cdef np.int64_t mid = self.partition(begin, end, ax, split)
+
         if(mid == begin or mid == end):
             mid = begin + (end-begin)/2
-
+            
         node.left = self._build(begin, mid)
         node.right = self._build(mid, end)
 


https://bitbucket.org/yt_analysis/yt/commits/cf1dd9809008/
Changeset:   cf1dd9809008
Branch:      yt
User:        atmyers
Date:        2016-02-12 20:15:40+00:00
Summary:     different ray-triangle intersection code
Affected #:  2 files

diff -r ff9c9af94f0fb376c0805facc5ea8d088e7ee0c4 -r cf1dd9809008df9d12c207a951f14ba36b65e576 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -37,118 +37,56 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri):
-    # guts of this are cribbed from "3D Math Primer for Graphics 
-    # and Game Development", section A.16
+# https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
 
     # edge vectors
     cdef np.float64_t e1[3]
     cdef np.float64_t e2[3]
     cdef int i
     for i in range(3):
-        e1[i] = tri.p1[i] - tri.p0[i];
-        e2[i] = tri.p2[i] - tri.p1[i];
-        
-    # normal vector
-    cdef np.float64_t N[3]
-    cross(e1, e2, N);
+        e1[i] = tri.p1[i] - tri.p0[i]
+        e2[i] = tri.p2[i] - tri.p0[i]
 
-    cdef np.float64_t dotprod = dot(N, ray.direction)
-    if not dotprod < 0.0:
-        # ray is travelling the wrong direction
+    cdef np.float64_t P[3]
+    cross(ray.direction, e2, P)
+
+    cdef np.float64_t det, inv_det
+    det = dot(e1, P)
+    if(det > -1.0e-10 and det < 1.0e-10): 
+        return False
+    inv_det = 1.0 / det
+
+    cdef np.float64_t T[3]
+    for i in range(3):
+        T[i] = ray.origin[i] - tri.p0[i] 
+
+    cdef np.float64_t u = dot(T, P) * inv_det
+    if(u < 0.0 or u > 1.0):
         return False
 
-    # distance along ray
-    cdef np.float64_t d = dot(N, tri.p0)
-    cdef np.float64_t t = d - dot(N, ray.origin) 
+    cdef np.float64_t Q[3]
+    cross(T, e1, Q)
 
-    if not t <= 0.0:
-        # ray origin is behind triangle
+    cdef np.float64_t v = dot(ray.direction, Q) * inv_det
+    if(v < 0.0 or u + v  > 1.0):
         return False
 
-    t /= dotprod
+    cdef np.float64_t t = dot(e2, Q) * inv_det
 
-    if not t <= ray.t_far:
-        # closer intersection already found
-        return False
+    if(t > 1.0e-10 and t < ray.t_far):
+        ray.t_far = t
+        ray.data_val = 1.0
+        ray.elem_id = tri.elem_id
+        return True
 
-    assert(t >= 0.0)
-    assert(t <= ray.t_far)
-    
-    # 3D point of intersection
-    cdef np.float64_t p[3]
-    for i in range(3):
-        p[i] = ray.origin[i] + ray.direction[i]*t
-        
-    cdef np.float64_t u0, u1, u2, v0, v1, v2
-        
-    if fabs(N[0]) > fabs(N[1]):
-        if fabs(N[0]) > fabs(N[2]):
-            u0 = p[1]      - tri.p0[1]
-            u1 = tri.p1[1] - tri.p0[1]
-            u2 = tri.p2[1] - tri.p0[1]
-
-            v0 = p[2]      - tri.p0[2]
-            v1 = tri.p1[2] - tri.p0[2]
-            v2 = tri.p2[2] - tri.p0[2]
-        else:
-            u0 = p[0]      - tri.p0[0]
-            u1 = tri.p1[0] - tri.p0[0]
-            u2 = tri.p2[0] - tri.p0[0]
-            
-            v0 = p[1]      - tri.p0[1]
-            v1 = tri.p1[1] - tri.p0[1]
-            v2 = tri.p2[1] - tri.p0[1]
-    else:
-        if fabs(N[1]) > fabs(N[2]):
-            u0 = p[0]      - tri.p0[0]
-            u1 = tri.p1[0] - tri.p0[0]
-            u2 = tri.p2[0] - tri.p0[0]
-
-            v0 = p[2]      - tri.p0[2]
-            v1 = tri.p1[2] - tri.p0[2]
-            v2 = tri.p2[2] - tri.p0[2]
-        else:
-            u0 = p[0]      - tri.p0[0]
-            u1 = tri.p1[0] - tri.p0[0]
-            u2 = tri.p2[0] - tri.p0[0]
-
-            v0 = p[1]      - tri.p0[1]
-            v1 = tri.p1[1] - tri.p0[1]
-            v2 = tri.p2[1] - tri.p0[1]
-
-    cdef np.float64_t temp = u1*v2 - u2*v1
-    if not (temp is not  0.0):
-        # denominator is invalid
-        return False
-
-    temp = 1.0/temp
-
-    cdef np.float64_t a = (u0*v2 - u2*v0)*temp
-    if not (a >= 0.0):
-        # barycentric coord out of range
-        return False
-
-    cdef np.float64_t b = (u1*v0 - u0*v1)*temp
-    if not (b >= 0.0):
-        # barycentric coord out of range
-        return False
-
-    cdef np.float64_t c = 1.0 - a - b
-    if not (c >= 0.0):
-        # barycentric coord out of range
-        return False
-
-    # we have a hit, update ray
-    ray.t_far = t
-    ray.elem_id = tri.elem_id
-
-    return True
+    return False
 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox):
+# https://tavianator.com/fast-branchless-raybounding-box-intersections/
 
     cdef np.float64_t tmin = -1.0e300
     cdef np.float64_t tmax =  1.0e300
@@ -157,12 +95,11 @@
     cdef np.float64_t t1, t2
     for i in range(3):
         t1 = (bbox.left_edge[i]  - ray.origin[i])*ray.inv_dir[i]
-        t2 = (bbox.right_edge[i] - ray.origin[i])*ray.inv_dir[i]
- 
+        t2 = (bbox.right_edge[i] - ray.origin[i])*ray.inv_dir[i] 
         tmin = fmax(tmin, fmin(t1, t2))
         tmax = fmin(tmax, fmax(t1, t2))
  
-    return tmax > fmax(tmin, 0.0)
+    return tmax >= fmax(tmin, 0.0)
 
 
 cdef class BVH:
@@ -204,7 +141,6 @@
         # recursive build
         self.root = self._build(0, num_tri)
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -259,6 +195,7 @@
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
             return
+
         # if not leaf, intersect with left and right children
         self._recursive_intersect(ray, node.left)
         self._recursive_intersect(ray, node.right)

diff -r ff9c9af94f0fb376c0805facc5ea8d088e7ee0c4 -r cf1dd9809008df9d12c207a951f14ba36b65e576 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -116,11 +116,11 @@
     config.add_extension("bitarray", 
                 ["yt/utilities/lib/bitarray.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"])
-    config.add_extension("bounding_volume_hierarchy", 
+    config.add_extension("bounding_volume_hierarchy",
                          ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
-                         libraries=["m"], 
+                         libraries=["m"],
                          depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"])
-    config.add_extension("particle_mesh_operations", 
+    config.add_extension("particle_mesh_operations",
                 ["yt/utilities/lib/particle_mesh_operations.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("contour_finding", 


https://bitbucket.org/yt_analysis/yt/commits/b071f22c8a98/
Changeset:   b071f22c8a98
Branch:      yt
User:        atmyers
Date:        2016-02-12 20:38:34+00:00
Summary:     make the leaf size 8 triangles
Affected #:  2 files

diff -r cf1dd9809008df9d12c207a951f14ba36b65e576 -r b071f22c8a9862e085b76e2fd6c847bc3e6ca57e yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -39,9 +39,9 @@
     cdef Triangle* triangles
     cdef np.float64_t[:, ::1] vertices
     cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
-                              np.int64_t ax, np.float64_t split)
-    cdef void intersect(self, Ray* ray)
+                              np.int64_t ax, np.float64_t split) nogil
+    cdef void intersect(self, Ray* ray) nogil
     cdef void _get_node_bbox(self, BVHNode* node, 
-                             np.int64_t begin, np.int64_t end)
-    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node)
-    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end)
+                             np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil
+    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end) nogil

diff -r cf1dd9809008df9d12c207a951f14ba36b65e576 -r b071f22c8a9862e085b76e2fd6c847bc3e6ca57e yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -36,7 +36,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri):
+cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri) nogil:
 # https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
 
     # edge vectors
@@ -85,7 +85,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox):
+cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
 # https://tavianator.com/fast-branchless-raybounding-box-intersections/
 
     cdef np.float64_t tmin = -1.0e300
@@ -145,7 +145,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
-                              np.int64_t ax, np.float64_t split):
+                              np.int64_t ax, np.float64_t split) nogil:
         cdef np.int64_t mid = begin
         while (begin != end):
             if self.triangles[mid].centroid[ax] > split:
@@ -161,7 +161,7 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef void _get_node_bbox(self, BVHNode* node, 
-                             np.int64_t begin, np.int64_t end):
+                             np.int64_t begin, np.int64_t end) nogil:
         cdef np.int64_t i, j
         cdef BBox box = self.triangles[begin].bbox
         for i in range(begin+1, end):
@@ -175,13 +175,13 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void intersect(self, Ray* ray):
+    cdef void intersect(self, Ray* ray) nogil:
         self._recursive_intersect(ray, self.root)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node):
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil:
 
         # check for bbox intersection:
         if not ray_bbox_intersect(ray, node.bbox):
@@ -190,7 +190,7 @@
         # check for leaf
         cdef np.int64_t i, hit
         cdef Triangle* tri
-        if (node.end - node.begin) == 1:
+        if (node.end - node.begin) <= 8:
             for i in range(node.begin, node.end):
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
@@ -203,7 +203,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end):
+    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end) nogil:
         cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
         node.begin = begin
         node.end = end
@@ -211,7 +211,7 @@
         self._get_node_bbox(node, begin, end)
         
         # check for leaf
-        if (end - begin) == 1:
+        if (end - begin) <= 8:
             return node
         
         # compute longest dimension


https://bitbucket.org/yt_analysis/yt/commits/1b4967ae51cb/
Changeset:   1b4967ae51cb
Branch:      yt
User:        atmyers
Date:        2016-02-12 22:53:06+00:00
Summary:     some comments
Affected #:  1 file

diff -r b071f22c8a9862e085b76e2fd6c847bc3e6ca57e -r 1b4967ae51cb4be7b166b0d1103324e2a643cca6 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -103,6 +103,17 @@
 
 
 cdef class BVH:
+    '''
+
+    This class implements a bounding volume hierarchy (BVH), a spatial acceleration
+    structure for fast ray-tracing. A BVH is like a kd-tree, except that instead of 
+    partitioning the *volume* of the parent to create the children, we partition the 
+    triangles themselves into 'left' or 'right' sub-trees. The bounding volume for a
+    node is then determined by computing the bounding volume of the triangles that
+    belong to it. This allows us to quickly discard triangles that are not close 
+    to intersecting a given ray.
+
+    '''
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -144,8 +155,12 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
-                              np.int64_t ax, np.float64_t split) nogil:
+    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
+                               np.int64_t ax, np.float64_t split) nogil:
+        # this re-orders the triangle array so that all of the triangles 
+        # to the left of mid have centroids less than or equal to "split" 
+        # along the direction "ax". All the triangles to the right of mid 
+        # will have centroids *greater* than "split" along "ax".
         cdef np.int64_t mid = begin
         while (begin != end):
             if self.triangles[mid].centroid[ax] > split:
@@ -214,6 +229,9 @@
         if (end - begin) <= 8:
             return node
         
+        # we use the "split in the middle of the longest axis approach"
+        # see: http://www.vadimkravcenko.com/bvh-tree-building/
+
         # compute longest dimension
         cdef np.int64_t ax = 0
         cdef np.float64_t d = fabs(node.bbox.right_edge[0] - 
@@ -228,11 +246,12 @@
                                        node.bbox.left_edge[ax])
 
         # sort triangle list
-        cdef np.int64_t mid = self.partition(begin, end, ax, split)
+        cdef np.int64_t mid = self._partition(begin, end, ax, split)
 
         if(mid == begin or mid == end):
             mid = begin + (end-begin)/2
-            
+        
+        # recursively build sub-trees
         node.left = self._build(begin, mid)
         node.right = self._build(mid, end)
 


https://bitbucket.org/yt_analysis/yt/commits/3cf9a28c8eec/
Changeset:   3cf9a28c8eec
Branch:      yt
User:        atmyers
Date:        2016-02-16 19:52:52+00:00
Summary:     change name of partition method here as well
Affected #:  1 file

diff -r 1b4967ae51cb4be7b166b0d1103324e2a643cca6 -r 3cf9a28c8eecd0a5059fedda7d35809ca11ccdc7 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -38,8 +38,8 @@
     cdef BVHNode* root
     cdef Triangle* triangles
     cdef np.float64_t[:, ::1] vertices
-    cdef np.int64_t partition(self, np.int64_t begin, np.int64_t end,
-                              np.int64_t ax, np.float64_t split) nogil
+    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
+                               np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil
     cdef void _get_node_bbox(self, BVHNode* node, 
                              np.int64_t begin, np.int64_t end) nogil


https://bitbucket.org/yt_analysis/yt/commits/b64822ed0327/
Changeset:   b64822ed0327
Branch:      yt
User:        atmyers
Date:        2016-02-16 20:33:43+00:00
Summary:     adding test for bounding volume hierarchy code
Affected #:  2 files

diff -r 3cf9a28c8eecd0a5059fedda7d35809ca11ccdc7 -r b64822ed0327496f792cc9a74dd08fade7b91e27 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -256,3 +256,42 @@
         node.right = self._build(mid, end)
 
         return node
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void cast_rays(np.float64_t* image,
+                    const np.float64_t* origins,
+                    const np.float64_t* direction,
+                    const int N, 
+                    BVH bvh) nogil:
+
+    cdef Ray ray
+    cdef int i, j, offset
+    
+    for i in range(3):
+        ray.direction[i] = direction[i]
+        ray.inv_dir[i] = 1.0 / direction[i]      
+   
+    for i in range(N):
+        for j in range(3):
+            ray.origin[j] = origins[N*j + i]
+        ray.t_far = 1e30
+        ray.t_near = 0.0
+        ray.data_val = 0
+        bvh.intersect(&ray)
+        image[i] = ray.data_val
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def test_ray_trace(np.ndarray[np.float64_t, ndim=1] image, 
+                   np.ndarray[np.float64_t, ndim=2] origins,
+                   np.ndarray[np.float64_t, ndim=1] direction,
+                   BVH bvh):
+    
+    cdef int N = origins.shape[0]
+    cast_rays(<np.float64_t*> image.data,
+              <np.float64_t*> origins.data, 
+              <np.float64_t*> direction.data, N, bvh)

diff -r 3cf9a28c8eecd0a5059fedda7d35809ca11ccdc7 -r b64822ed0327496f792cc9a74dd08fade7b91e27 yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -0,0 +1,44 @@
+import yt
+import numpy as np
+from yt.utilities.lib.bounding_volume_hierarchy import BVH, \
+    test_ray_trace
+from yt.visualization.volume_rendering.api import Camera
+from yt.testing import requires_file
+
+
+def get_rays(camera):
+
+    normal_vector = camera.unit_vectors[2].d
+    W = np.array([8.0, 8.0])
+    N = np.array([800, 800])
+    dx = W / N
+    
+    x_points = np.linspace((-N[0]/2 + 0.5)*dx[0], (N[0]/2 - 0.5)*dx[0], N[0])
+    y_points = np.linspace((-N[1]/2 + 0.5)*dx[1], (N[1]/2 - 0.5)*dx[0], N[1])
+    
+    X, Y = np.meshgrid(x_points, y_points)
+    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()]) 
+    vec_origins = result.T + camera.position
+    return np.array(vec_origins), np.array(normal_vector)
+
+
+fn = "MOOSE_sample_data/out.e-s010"
+
+
+ at requires_file(fn)
+def test_bounding_volume_hierarchy():
+    ds = yt.load(fn)
+    vertices = ds.index.meshes[0].connectivity_coords
+    indices  = ds.index.meshes[0].connectivity_indices - 1
+
+    bvh = BVH(vertices, indices)
+
+    cam = Camera()
+    cam.set_position(np.array([8.0, 8.0, 8.0]))
+    cam.focus = np.array([0.0, 0.0, 0.0])
+    origins, direction = get_rays(cam)
+
+    image = np.empty(800*800, np.float64)
+    test_ray_trace(image, origins, direction, bvh)
+    image = image.reshape((800, 800))
+    return image


https://bitbucket.org/yt_analysis/yt/commits/23cf9b595583/
Changeset:   23cf9b595583
Branch:      yt
User:        atmyers
Date:        2016-02-16 20:38:01+00:00
Summary:     merging
Affected #:  112 files

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -330,12 +330,17 @@
 Exodus II Data
 --------------
 
+.. note::
+   To load Exodus II data, you need to have the `netcdf4 <http://unidata.github.io/
+   netcdf4-python/>`_ python interface installed.
+
 Exodus II is a file format for Finite Element datasets that is used by the MOOSE
 framework for file IO. Support for this format (and for unstructured mesh data in 
-general) is a new feature as of yt 3.3, so while we aim to fully support it, we also expect 
-there to be some buggy features at present. Currently, yt can visualize first-order
-mesh types only (4-node quads, 8-node hexes, 3-node triangles, and 4-node tetrahedra).
-Development of higher-order visualization capability is a work in progress.
+general) is a new feature as of yt 3.3, so while we aim to fully support it, we 
+also expect there to be some buggy features at present. Currently, yt can visualize 
+quads, hexes, triangles, and tetrahedral element types at first order. Additionally,
+there is experimental support for the high-order visualization of 20-node hex elements.
+Development of more high-order visualization capability is a work in progress.
 
 To load an Exodus II dataset, you can use the ``yt.load`` command on the Exodus II
 file:
@@ -348,14 +353,15 @@
 Because Exodus II datasets can have multiple steps (which can correspond to time steps, 
 picard iterations, non-linear solve iterations, etc...), you can also specify a step
 argument when you load an Exodus II data that defines the index at which to look when
-you read data from the file.
+you read data from the file. Omitting this argument is the same as passing in 0, and
+setting ``step=-1`` selects the last time output in the file.
 
 You can access the connectivity information directly by doing:
 
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010", step=-1)
    print(ds.index.meshes[0].connectivity_coords)
    print(ds.index.meshes[0].connectivity_indices)
    print(ds.index.meshes[1].connectivity_coords)
@@ -368,7 +374,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    print(ds.field_list)
 
 This will give you a list of field names like ``('connect1', 'diffused')`` and 
@@ -380,7 +386,7 @@
 .. code-block:: python
     
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()  # geometric selection, this just grabs everything
    print(ad['connect1', 'convected'])
 
@@ -390,7 +396,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'convected'].shape)
 
@@ -401,7 +407,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'vertex_x'])
 
@@ -411,7 +417,7 @@
 .. code-block:: python
 
    import yt
-   ds = yt.load("MOOSE_sample_data/out.e-s010", step=0)
+   ds = yt.load("MOOSE_sample_data/out.e-s010")
    ad = ds.all_data()
    print(ad['connect1', 'conv_indicator'].shape)
 
@@ -420,6 +426,61 @@
 For information about visualizing unstructured mesh data, including Exodus II datasets, 
 please see :ref:`unstructured-mesh-slices` and :ref:`unstructured_mesh_rendering`. 
 
+Displacement Fields
+^^^^^^^^^^^^^^^^^^^
+
+Finite element codes often solve for the displacement of each vertex from its 
+original position as a node variable, rather than updating the actual vertex 
+positions with time. For analysis and visualization, it is often useful to turn 
+these displacements on or off, and to be able to scale them arbitrarily to 
+emphasize certain features of the solution. To allow this, if ``yt`` detects 
+displacement fields in an Exodus II dataset (using the convention that they 
+will be named ``disp_x``, ``disp_y``, etc...), it will optionally add these to 
+the mesh vertex positions for the purposes of visualization. Displacement fields 
+can be controlled when a dataset is loaded by passing in an optional dictionary 
+to the ``yt.load`` command. This feature is turned off by default, meaning that 
+a dataset loaded as 
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+will not include the displacements in the vertex positions. The displacements can
+be turned on separately for each mesh in the file by passing in a tuple of 
+(scale, offset) pairs for the meshes you want to enable displacements for. 
+For example, the following code snippet turns displacements on for the second 
+mesh, but not the first:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
+
+The displacements can also be scaled by an arbitrary factor before they are 
+added in to the vertex positions. The following code turns on displacements
+for both ``connect1`` and ``connect2``, scaling the former by a factor of 5.0
+and the latter by a factor of 10.0:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                 displacements={'connect1': (5.0, [0.0, 0.0, 0.0]),
+                                'connect2': (10.0, [0.0, 0.0, 0.0])})
+
+Finally, we can also apply an arbitrary offset to the mesh vertices after 
+the scale factor is applied. For example, the following code scales all
+displacements in the second mesh by a factor of 5.0, and then shifts
+each vertex in the mesh by 1.0 unit in the z-direction:
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                  displacements={'connect2': (5.0, [0.0, 0.0, 1.0])})
+
 
 FITS Data
 ---------
@@ -1232,10 +1293,9 @@
 
 yt has support for reading halo catalogs produced by Rockstar and the inline 
 FOF/SUBFIND halo finders of Gadget and OWLS.  The halo catalogs are treated as 
-particle datasets where each particle represents a single halo.  At this time, 
-yt does not have the ability to load the member particles for a given halo.  
-However, once loaded, further halo analysis can be performed using 
-:ref:`halo_catalog`.
+particle datasets where each particle represents a single halo.  Member particles
+for individual halos can be accessed through halo data containers.  Further halo
+analysis can be performed using :ref:`halo_catalog`.
 
 In the case where halo catalogs are written to multiple files, one must only 
 give the path to one of them.
@@ -1269,11 +1329,39 @@
    # x component of the spin
    print(ad["Subhalo", "SubhaloSpin_0"])
 
+Halo member particles are accessed by creating halo data containers with the
+type of halo ("Group" or "Subhalo") and the halo id.  Scalar values for halos
+can be accessed in the same way.  Halos also have mass, position, and velocity
+attributes.
+
+.. code-block:: python
+
+   halo = ds.halo("Group", 0)
+   # member particles for this halo
+   print(halo["member_ids"])
+   # halo virial radius
+   print(halo["Group_R_Crit200"])
+   # halo mass
+   print(halo.mass)
+
+Subhalo containers can be created using either their absolute ids or their
+subhalo ids.
+
+.. code-block:: python
+
+   # first subhalo of the first halo
+   subhalo = ds.halo("Subhalo", (0, 0))
+   # this subhalo's absolute id
+   print(subhalo.group_identifier)
+   # member particles
+   print(subhalo["member_ids"])
+
 OWLS FOF/SUBFIND
 ^^^^^^^^^^^^^^^^
 
 OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.  
-The two field types are "FOF" and "SUBFIND".
+The two field types are "FOF" and "SUBFIND".  At this time, halo member particles
+cannot be loaded.
 
 .. code-block:: python
 

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -14,7 +14,7 @@
 
 .. code-block:: bash
 
-    conda install -c http://use.yt/with_conda/ yt
+    conda install -c http://use.yt/with_conda/ yt=3.3_dev
 
 If you want to install from source, you can use the ``get_yt.sh`` script.
 Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the 
@@ -114,7 +114,7 @@
     cam = sc.camera
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
 
     # increase the default resolution
@@ -142,7 +142,7 @@
     cam = sc.camera
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
 
     # increase the default resolution
@@ -174,7 +174,7 @@
     cam = sc.camera
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
     # increase the default resolution
@@ -205,7 +205,7 @@
     cam = sc.camera
     camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
     cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
-    north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, 0.0], 'dimensionless')
     cam.set_position(camera_position, north_vector)
 
     # increase the default resolution
@@ -236,7 +236,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.04, 0.04, 0.04], 'code_length')
     cam.set_position(camera_position, north_vector)
 
@@ -248,6 +248,43 @@
     sc.annotate_mesh_lines()
     sc.save()
 
+The dataset in the above example contains displacement fields, so this is a good
+opportunity to demonstrate their use. The following example is exactly like the
+above, except we scale the displacements by a factor of 10.0, and additionally 
+add an offset to the mesh of 0.01 units in the x-direction:
+
+.. python-script::
+
+    import yt
+
+    # We load the last time frame
+    ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1,
+                 displacements={'connect2': (10.0, [0.01, 0.0, 0.0])})
+
+    # create a default scene
+    sc = yt.create_scene(ds, ("connect2", "temp"))
+
+    # override the default colormap. This time we also override
+    # the default color bounds
+    ms = sc.get_source(0)
+    ms.cmap = 'hot'
+    ms.color_bounds = (500.0, 1700.0)
+
+    # adjust the camera position and orientation
+    cam = sc.camera
+    camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
+    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
+    cam.set_position(camera_position, north_vector)
+    
+    # increase the default resolution
+    cam.resolution = (800, 800)
+
+    # render, draw the element boundaries, and save
+    sc.render()
+    sc.annotate_mesh_lines()
+    sc.save()
+
 As with other volume renderings in yt, you can swap out different lenses. Here is 
 an example that uses a "perspective" lens, for which the rays diverge from the 
 camera position according to some opening angle:
@@ -270,7 +307,7 @@
     cam = Camera(ds, lens_type='perspective')
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
     # tell our scene to use it
@@ -303,7 +340,7 @@
     cam = Camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
-    ds.arr([0.0, 1.0, 0.0], 'dimensionless'))
+                     ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
     cam.set_width = ds.arr([8.0, 8.0, 8.0], 'code_length')
     cam.resolution = (800, 800)
 
@@ -348,7 +385,7 @@
     cam = sc.camera
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-3.0, 3.0, -3.0], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
 
     # increase the default resolution
@@ -398,7 +435,7 @@
 	cam = Camera(ds)
 	camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
 	cam.focus = ds.domain_center
-	north_vector = ds.arr([0.3032476, 0.71782557, -0.62671153], 'dimensionless')
+	north_vector = ds.arr([-0.3032476, -0.71782557, 0.62671153], 'dimensionless')
 	cam.width = ds.arr([ 0.04,  0.04,  0.04], 'code_length')
 	cam.resolution = (800, 800)
 	cam.set_position(camera_position, north_vector)

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,27 +1,20 @@
 import os
-import os.path
 import glob
 import sys
-import time
-import subprocess
-import shutil
-import glob
+from sys import platform as _platform
+from setuptools import setup, find_packages
+from setuptools.extension import Extension
+from setuptools.command.build_ext import build_ext as _build_ext
+from setuptools.command.build_py import build_py as _build_py
+from setupext import \
+    check_for_openmp, check_for_pyembree, read_embree_location, \
+    get_mercurial_changeset_id
 
 if sys.version_info < (2, 7):
     print("yt currently requires Python version 2.7")
     print("certain features may fail unexpectedly and silently with older versions.")
     sys.exit(1)
 
-import setuptools
-from distutils.command.build_py import build_py
-from numpy.distutils.misc_util import appendpath
-from numpy.distutils.command import install_data as np_install_data
-from numpy.distutils import log
-from distutils import version
-
-from distutils.core import Command
-from distutils.spawn import find_executable
-
 MAPSERVER_FILES = []
 MAPSERVER_DIRS = [
     "",
@@ -36,109 +29,277 @@
         files += glob.glob("%s/*.%s" % (dir_name, ext))
     MAPSERVER_FILES.append((dir_name, files))
 
-# Verify that we have Cython installed
-REQ_CYTHON = '0.22'
-try:
-    import Cython
-    needs_cython = \
-        version.LooseVersion(Cython.__version__) < version.LooseVersion(REQ_CYTHON)
-except ImportError as e:
-    needs_cython = True
-
-if needs_cython:
-    print("Cython is a build-time requirement for the source tree of yt.")
-    print("Please either install yt from a provided, release tarball,")
-    print("or install Cython (version %s or higher)." % REQ_CYTHON)
-    print("You may be able to accomplish this by typing:")
-    print("     pip install -U Cython")
-    sys.exit(1)
-
-######
-# This next bit comes from Matthew Brett, to get Cython working with NumPy
-# distutils.  I added a bit to get C++ Cython working.
-from os.path import join as pjoin, dirname
-from distutils.dep_util import newer_group
-from distutils.errors import DistutilsError
-
-
-def generate_a_pyrex_source(self, base, ext_name, source, extension):
-    ''' Monkey patch for numpy build_src.build_src method
-
-    Uses Cython instead of Pyrex.
-
-    Assumes Cython is present
-    '''
-    if self.inplace:
-        target_dir = dirname(base)
-    else:
-        target_dir = appendpath(self.build_src, dirname(base))
-    if extension.language == "c++":
-        cplus = True
-        file_ext = ".cpp"
-    else:
-        cplus = False
-        file_ext = ".c"
-    target_file = pjoin(target_dir, ext_name + file_ext)
-    depends = [source] + extension.depends
-    if self.force or newer_group(depends, target_file, 'newer'):
-        import Cython.Compiler.Main
-        log.info("cythonc:> %s" % (target_file))
-        self.mkpath(target_dir)
-        options = Cython.Compiler.Main.CompilationOptions(
-            defaults=Cython.Compiler.Main.default_options,
-            include_path=extension.include_dirs,
-            cplus=cplus,
-            output_file=target_file)
-        cython_result = Cython.Compiler.Main.compile(source,
-                                                     options=options)
-        if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython"
-                                 % (cython_result.num_errors, source))
-    return target_file
-
-
-from numpy.distutils.command import build_src
-build_src.build_src.generate_a_pyrex_source = generate_a_pyrex_source
-# End snippet
-######
-
 VERSION = "3.3.dev0"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
 
 
-def get_mercurial_changeset_id(target_dir):
-    """adapted from a script by Jason F. Harris, published at
+if check_for_openmp() is True:
+    omp_args = ['-fopenmp']
+else:
+    omp_args = None
 
-    http://jasonfharris.com/blog/2010/05/versioning-your-application-with-the-mercurial-changeset-hash/
 
-    """
-    import subprocess
-    import re
-    get_changeset = subprocess.Popen('hg identify -b -i',
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     shell=True)
+cython_extensions = [
+    Extension("yt.analysis_modules.photon_simulator.utils",
+              ["yt/analysis_modules/photon_simulator/utils.pyx"]),
+    Extension("yt.analysis_modules.ppv_cube.ppv_utils",
+              ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
+              libraries=["m"]),
+    Extension("yt.geometry.grid_visitors",
+              ["yt/geometry/grid_visitors.pyx"],
+              include_dirs=["yt/utilities/lib"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/grid_visitors.pxd"]),
+    Extension("yt.geometry.grid_container",
+              ["yt/geometry/grid_container.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/grid_container.pxd",
+                       "yt/geometry/grid_visitors.pxd"]),
+    Extension("yt.geometry.oct_container",
+              ["yt/geometry/oct_container.pyx"],
+              include_dirs=["yt/utilities/lib"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
+    Extension("yt.geometry.oct_visitors",
+              ["yt/geometry/oct_visitors.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
+    Extension("yt.geometry.particle_oct_container",
+              ["yt/geometry/particle_oct_container.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
+    Extension("yt.geometry.selection_routines",
+              ["yt/geometry/selection_routines.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/utilities/lib/grid_traversal.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/oct_visitors.pxd",
+                       "yt/geometry/grid_container.pxd",
+                       "yt/geometry/grid_visitors.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
+    Extension("yt.geometry.particle_deposit",
+              ["yt/geometry/particle_deposit.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd",
+                       "yt/geometry/particle_deposit.pxd"]),
+    Extension("yt.geometry.particle_smooth",
+              ["yt/geometry/particle_smooth.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd",
+                       "yt/geometry/particle_deposit.pxd",
+                       "yt/geometry/particle_smooth.pxd"]),
+    Extension("yt.geometry.fake_octree",
+              ["yt/geometry/fake_octree.pyx"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/geometry/oct_container.pxd",
+                       "yt/geometry/selection_routines.pxd"]),
+    Extension("yt.utilities.spatial.ckdtree",
+              ["yt/utilities/spatial/ckdtree.pyx"],
+              libraries=["m"]),
+    Extension("yt.utilities.lib.bitarray",
+              ["yt/utilities/lib/bitarray.pyx"],
+              libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.contour_finding",
+              ["yt/utilities/lib/contour_finding.pyx"],
+              include_dirs=["yt/utilities/lib/",
+                            "yt/geometry/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/utilities/lib/amr_kdtools.pxd",
+                       "yt/utilities/lib/grid_traversal.pxd",
+                       "yt/utilities/lib/contour_finding.pxd",
+                       "yt/geometry/oct_container.pxd"]),
+    Extension("yt.utilities.lib.geometry_utils",
+              ["yt/utilities/lib/geometry_utils.pyx"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
+              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]),
+    Extension("yt.utilities.lib.marching_cubes",
+              ["yt/utilities/lib/marching_cubes.pyx",
+               "yt/utilities/lib/fixed_interpolator.c"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/utilities/lib/fixed_interpolator.pxd",
+                       "yt/utilities/lib/fixed_interpolator.h",
+                       ]),
+    Extension("yt.utilities.lib.pixelization_routines",
+              ["yt/utilities/lib/pixelization_routines.pyx",
+               "yt/utilities/lib/pixelization_constants.c"],
+              include_dirs=["yt/utilities/lib/"],
+              language="c++",
+              libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+                                        "yt/utilities/lib/pixelization_constants.h",
+                                        "yt/utilities/lib/element_mappings.pxd"]),
+    Extension("yt.utilities.lib.origami",
+              ["yt/utilities/lib/origami.pyx",
+               "yt/utilities/lib/origami_tags.c"],
+              include_dirs=["yt/utilities/lib/"],
+              depends=["yt/utilities/lib/origami_tags.h"]),
+    Extension("yt.utilities.lib.grid_traversal",
+              ["yt/utilities/lib/grid_traversal.pyx",
+               "yt/utilities/lib/fixed_interpolator.c",
+               "yt/utilities/lib/kdtree.c"],
+              include_dirs=["yt/utilities/lib/"],
+              libraries=["m"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
+              depends=["yt/utilities/lib/fp_utils.pxd",
+                       "yt/utilities/lib/kdtree.h",
+                       "yt/utilities/lib/fixed_interpolator.h",
+                       "yt/utilities/lib/fixed_interpolator.pxd",
+                       "yt/utilities/lib/field_interpolation_tables.pxd"]),
+    Extension("yt.utilities.lib.element_mappings",
+              ["yt/utilities/lib/element_mappings.pyx"],
+              libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),
+    Extension("yt.utilities.lib.alt_ray_tracers",
+              ["yt/utilities/lib/alt_ray_tracers.pyx"],
+              libraries=["m"]),
+]
 
-    if (get_changeset.stderr.read() != ""):
-        print("Error in obtaining current changeset of the Mercurial repository")
-        changeset = None
+lib_exts = [
+    "particle_mesh_operations", "depth_first_octree", "fortran_reader",
+    "interpolators", "misc_utilities", "basic_octree", "image_utilities",
+    "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
+    "amr_kdtools"
+]
+for ext_name in lib_exts:
+    cython_extensions.append(
+        Extension("yt.utilities.lib.{}".format(ext_name),
+                  ["yt/utilities/lib/{}.pyx".format(ext_name)],
+                  libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"]))
 
-    changeset = get_changeset.stdout.read().strip().decode("UTF-8")
-    if (not re.search("^[0-9a-f]{12}", changeset)):
-        print("Current changeset of the Mercurial repository is malformed")
-        changeset = None
+lib_exts = ["write_array", "ragged_arrays", "line_integral_convolution"]
+for ext_name in lib_exts:
+    cython_extensions.append(
+        Extension("yt.utilities.lib.{}".format(ext_name),
+                  ["yt/utilities/lib/{}.pyx".format(ext_name)]))
 
-    return changeset
+extensions = [
+    Extension("yt.analysis_modules.halo_finding.fof.EnzoFOF",
+              ["yt/analysis_modules/halo_finding/fof/EnzoFOF.c",
+               "yt/analysis_modules/halo_finding/fof/kd.c"],
+              libraries=["m"]),
+    Extension("yt.analysis_modules.halo_finding.hop.EnzoHop",
+              glob.glob("yt/analysis_modules/halo_finding/hop/*.c")),
+    Extension("yt.frontends.artio._artio_caller",
+              ["yt/frontends/artio/_artio_caller.pyx"] +
+              glob.glob("yt/frontends/artio/artio_headers/*.c"),
+              include_dirs=["yt/frontends/artio/artio_headers/",
+                            "yt/geometry/",
+                            "yt/utilities/lib/"],
+              depends=glob.glob("yt/frontends/artio/artio_headers/*.c") +
+              ["yt/utilities/lib/fp_utils.pxd",
+               "yt/geometry/oct_container.pxd",
+               "yt/geometry/selection_routines.pxd",
+               "yt/geometry/particle_deposit.pxd"]),
+    Extension("yt.utilities.spatial._distance_wrap",
+              glob.glob("yt/utilities/spatial/src/*.c")),
+    Extension("yt.visualization._MPL",
+              ["yt/visualization/_MPL.c"],
+              libraries=["m"]),
+    Extension("yt.utilities.data_point_utilities",
+              ["yt/utilities/data_point_utilities.c"],
+              libraries=["m"]),
+]
 
+# EMBREE
+if check_for_pyembree() is not None:
+    embree_extensions = [
+        Extension("yt.utilities.lib.mesh_construction",
+                  ["yt/utilities/lib/mesh_construction.pyx"],
+                  depends=["yt/utilities/lib/mesh_construction.pxd"]),
+        Extension("yt.utilities.lib.mesh_traversal",
+                  ["yt/utilities/lib/mesh_traversal.pyx"],
+                  depends=["yt/utilities/lib/mesh_traversal.pxd",
+                           "yt/utilities/lib/grid_traversal.pxd"]),
+        Extension("yt.utilities.lib.mesh_samplers",
+                  ["yt/utilities/lib/mesh_samplers.pyx"],
+                  depends=["yt/utilities/lib/mesh_samplers.pxd",
+                           "yt/utilities/lib/element_mappings.pxd",
+                           "yt/utilities/lib/mesh_construction.pxd"]),
+        Extension("yt.utilities.lib.mesh_intersection",
+                  ["yt/utilities/lib/mesh_intersection.pyx"],
+                  depends=["yt/utilities/lib/mesh_intersection.pxd",
+                           "yt/utilities/lib/mesh_construction.pxd"]),
+    ]
 
-class my_build_src(build_src.build_src):
-    def run(self):
-        build_src.build_src.run(self)
+    embree_prefix = os.path.abspath(read_embree_location())
+    if _platform == "darwin":
+        embree_lib_name = "embree.2"
+    else:
+        embree_lib_name = "embree"
 
+    for ext in embree_extensions:
+        ext.include_dirs.append(os.path.join(embree_prefix, 'include'))
+        ext.library_dirs.append(os.path.join(embree_prefix, 'lib'))
+        ext.language = "c++"
+        ext.libraries += ["m", embree_lib_name]
 
-class my_build_py(build_py):
+    cython_extensions += embree_extensions
+
+# ROCKSTAR
+if os.path.exists("rockstar.cfg"):
+    try:
+        rd = open("rockstar.cfg").read().strip()
+    except IOError:
+        print("Reading Rockstar location from rockstar.cfg failed.")
+        print("Please place the base directory of your")
+        print("Rockstar install in rockstar.cfg and restart.")
+        print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )")
+        sys.exit(1)
+
+    rockstar_extdir = "yt/analysis_modules/halo_finding/rockstar"
+    rockstar_extensions = [
+        Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_interface",
+                  sources=[os.path.join(rockstar_extdir, "rockstar_interface.pyx")]),
+        Extension("yt.analysis_modules.halo_finding.rockstar.rockstar_groupies",
+                  sources=[os.path.join(rockstar_extdir, "rockstar_groupies.pyx")])
+    ]
+    for ext in rockstar_extensions:
+        ext.library_dirs.append(rd)
+        ext.libraries.append("rockstar")
+        ext.define_macros.append(("THREADSAFE", ""))
+        ext.include_dirs += [rd,
+                             os.path.join(rd, "io"), os.path.join(rd, "util")]
+    extensions += rockstar_extensions
+
+if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
+    gpd = os.environ["GPERFTOOLS"]
+    idir = os.path.join(gpd, "include")
+    ldir = os.path.join(gpd, "lib")
+    print(("INCLUDE AND LIB DIRS", idir, ldir))
+    cython_extensions.append(
+        Extension("yt.utilities.lib.perftools_wrap",
+                  ["yt/utilities/lib/perftools_wrap.pyx"],
+                  libraries=["profiler"],
+                  library_dirs=[ldir],
+                  include_dirs=[idir]))
+
+class build_py(_build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
@@ -148,68 +309,74 @@
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
                 fobj.write("hg_version = '%s'\n" % changeset)
+        _build_py.run(self)
 
-        build_py.run(self)
+class build_ext(_build_ext):
+    # subclass setuptools extension builder to avoid importing numpy
+    # at top level in setup.py. See http://stackoverflow.com/a/21621689/1382869
+    def finalize_options(self):
+        _build_ext.finalize_options(self)
+        # Prevent numpy from thinking it is still in its setup process
+        # see http://stackoverflow.com/a/21621493/1382869
+        __builtins__.__NUMPY_SETUP__ = False
+        import numpy
+        self.include_dirs.append(numpy.get_include())
 
+setup(
+    name="yt",
+    version=VERSION,
+    description="An analysis and visualization toolkit for Astrophysical "
+                + "simulations, focusing on Adaptive Mesh Refinement data "
+                  "from Enzo, Orion, FLASH, and others.",
+    classifiers=["Development Status :: 5 - Production/Stable",
+                 "Environment :: Console",
+                 "Intended Audience :: Science/Research",
+                 "License :: OSI Approved :: BSD License",
+                 "Operating System :: MacOS :: MacOS X",
+                 "Operating System :: POSIX :: AIX",
+                 "Operating System :: POSIX :: Linux",
+                 "Programming Language :: C",
+                 "Programming Language :: Python",
+                 "Topic :: Scientific/Engineering :: Astronomy",
+                 "Topic :: Scientific/Engineering :: Physics",
+                 "Topic :: Scientific/Engineering :: Visualization"],
+    keywords='astronomy astrophysics visualization ' +
+    'amr adaptivemeshrefinement',
+    entry_points={'console_scripts': [
+        'yt = yt.utilities.command_line:run_main',
+    ],
+        'nose.plugins.0.10': [
+            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+    ]
+    },
+    packages=find_packages(),
+    setup_requires=[
+        'numpy',
+        'cython>=0.22'
+    ],
+    install_requires=[
+        # 'matplotlib',  # messes up nosetests will be fixed in future PRs
+        'sympy',
+        'numpy',
+        'IPython',
+    ],
+    cmdclass={'build_ext': build_ext, 'build_py': build_py},
+    author="The yt project",
+    author_email="yt-dev at lists.spacepope.org",
+    url="http://yt-project.org/",
+    license="BSD",
+    zip_safe=False,
+    scripts=["scripts/iyt"],
+    data_files=MAPSERVER_FILES,
+    ext_modules=cython_extensions + extensions
+)
 
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-
-    config = Configuration(None, parent_package, top_path)
-    config.set_options(ignore_setup_xxx_py=True,
-                       assume_default_configuration=True,
-                       delegate_options_to_subpackages=True,
-                       quiet=True)
-
-    config.make_config_py()
-    # config.make_svn_version_py()
-    config.add_subpackage('yt', 'yt')
-    config.add_scripts("scripts/iyt")
-
-    return config
-
-
-def setup_package():
-
-    from numpy.distutils.core import setup
-
-    setup(
-        name="yt",
-        version=VERSION,
-        description="An analysis and visualization toolkit for Astrophysical "
-                    + "simulations, focusing on Adaptive Mesh Refinement data "
-                      "from Enzo, Orion, FLASH, and others.",
-        classifiers=["Development Status :: 5 - Production/Stable",
-                     "Environment :: Console",
-                     "Intended Audience :: Science/Research",
-                     "License :: OSI Approved :: BSD License",
-                     "Operating System :: MacOS :: MacOS X",
-                     "Operating System :: POSIX :: AIX",
-                     "Operating System :: POSIX :: Linux",
-                     "Programming Language :: C",
-                     "Programming Language :: Python",
-                     "Topic :: Scientific/Engineering :: Astronomy",
-                     "Topic :: Scientific/Engineering :: Physics",
-                     "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' +
-        'amr adaptivemeshrefinement',
-        entry_points={'console_scripts': [
-        'yt = yt.utilities.command_line:run_main',
-        ],
-            'nose.plugins.0.10': [
-                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-            ]
-        },
-        author="The yt project",
-        author_email="yt-dev at lists.spacepope.org",
-        url="http://yt-project.org/",
-        license="BSD",
-        configuration=configuration,
-        zip_safe=False,
-        data_files=MAPSERVER_FILES,
-        cmdclass={'build_py': my_build_py, 'build_src': my_build_src},
-    )
-    return
-
-if __name__ == '__main__':
-    setup_package()
+# This info about 'ckdtree' should be incorporated somehow...
+#    setup(maintainer="SciPy Developers",
+#          author="Anne Archibald",
+#          maintainer_email="scipy-dev at scipy.org",
+#          description="Spatial algorithms and data structures",
+#          url="http://www.scipy.org",
+#          license="SciPy License (BSD Style)",
+#          **configuration(top_path='').todict()
+#   )

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 setupext.py
--- /dev/null
+++ b/setupext.py
@@ -0,0 +1,100 @@
+import os
+from pkg_resources import resource_filename
+import shutil
+import subprocess
+import sys
+import tempfile
+
+def check_for_openmp():
+    """Returns True if local setup supports OpenMP, False otherwise"""
+
+    # See https://bugs.python.org/issue25150
+    if sys.version_info[:3] == (3, 5, 0) or os.name == 'nt':
+        return False
+
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    exit_code = 1
+
+    try:
+        os.chdir(tmpdir)
+
+        # Get compiler invocation
+        compiler = os.getenv('CC', 'cc')
+        compiler = compiler.split(' ')
+
+        # Attempt to compile a test script.
+        # See http://openmp.org/wp/openmp-compilers/
+        filename = r'test.c'
+        file = open(filename, 'wt', 1)
+        file.write(
+            "#include <omp.h>\n"
+            "#include <stdio.h>\n"
+            "int main() {\n"
+            "#pragma omp parallel\n"
+            "printf(\"Hello from thread %d, nthreads %d\\n\", omp_get_thread_num(), omp_get_num_threads());\n"
+            "}"
+        )
+        file.flush()
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call(compiler + ['-fopenmp', filename],
+                                        stdout=fnull, stderr=fnull)
+
+        # Clean up
+        file.close()
+    except OSError:
+        return False
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    return exit_code == 0
+
+
+def check_for_pyembree():
+    try:
+        fn = resource_filename("pyembree", "rtcore.pxd")
+    except ImportError:
+        return None
+    return os.path.dirname(fn)
+
+
+def read_embree_location():
+    '''
+
+    Attempts to locate the embree installation. First, we check for an
+    EMBREE_DIR environment variable. If one is not defined, we look for
+    an embree.cfg file in the root yt source directory. Finally, if that
+    is not present, we default to /usr/local. If embree is installed in a
+    non-standard location and none of the above are set, the compile will
+    not succeed. This only gets called if check_for_pyembree() returns
+    something other than None.
+
+    '''
+
+    rd = os.environ.get('EMBREE_DIR')
+    if rd is not None:
+        return rd
+    print("EMBREE_DIR not set. Attempting to read embree.cfg")
+    try:
+        rd = open("embree.cfg").read().strip()
+        return rd
+    except IOError:
+        print("Reading Embree location from embree.cfg failed.")
+        print("If compilation fails, please place the base directory")
+        print("of your Embree install in embree.cfg and restart.")
+        return '/usr/local'
+
+
+def get_mercurial_changeset_id(target_dir):
+    '''
+    Returns changeset and branch using hglib
+    '''
+    try:
+        import hglib
+    except ImportError:
+        return None
+    with hglib.open(target_dir) as repo:
+        changeset = repo.identify(id=True, branch=True).strip().decode('utf8')
+    return changeset

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/cosmological_observation/light_cone/setup.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_cone', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/cosmological_observation/light_ray/setup.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_ray', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/cosmological_observation/setup.py
--- a/yt/analysis_modules/cosmological_observation/setup.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('cosmological_observation', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    config.add_subpackage("light_cone")
-    config.add_subpackage("light_ray")
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/halo_finding/fof/setup.py
--- a/yt/analysis_modules/halo_finding/fof/setup.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('fof', parent_package, top_path)
-    config.add_extension("EnzoFOF", sources=["EnzoFOF.c",
-                                     "kd.c"],
-                                    libraries=["m"])
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/halo_finding/hop/setup.py
--- a/yt/analysis_modules/halo_finding/hop/setup.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('hop', parent_package, top_path)
-    config.add_extension("EnzoHop", sources=["EnzoHop.c",
-                                     "hop_hop.c",
-                                     "hop_kd.c",
-                                     "hop_regroup.c",
-                                     "hop_slice.c",
-                                     "hop_smooth.c"])
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-import os.path
-import sys
-
-def configuration(parent_package='',top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('rockstar',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
-    try:
-        rd = open("rockstar.cfg").read().strip()
-    except IOError:
-        print("Reading Rockstar location from rockstar.cfg failed.")
-        print("Please place the base directory of your")
-        print("Rockstar install in rockstar.cfg and restart.")
-        print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )")
-        sys.exit(1)
-    config.add_extension("rockstar_interface",
-                         "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
-                         library_dirs=[rd],
-                         libraries=["rockstar"],
-                         #define_macros = [("THREADSAFE", "__thread")],
-                         define_macros = [("THREADSAFE", "")],
-                         include_dirs=[rd,
-                                       os.path.join(rd, "io"),
-                                       os.path.join(rd, "util")])
-    config.add_extension("rockstar_groupies",
-                         "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
-                         library_dirs=[rd],
-                         libraries=["rockstar"],
-                         #define_macros = [("THREADSAFE", "__thread")],
-                         define_macros = [("THREADSAFE", "")],
-                         include_dirs=[rd,
-                                       os.path.join(rd, "io"),
-                                       os.path.join(rd, "util")])
-    return config
-

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_finding', parent_package, top_path)
-    config.add_subpackage("fof")
-    config.add_subpackage("hop")
-    if os.path.exists("rockstar.cfg"):
-        config.add_subpackage("rockstar")
-    config.make_config_py()  # installs __config__.py
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/halo_mass_function/setup.py
--- a/yt/analysis_modules/halo_mass_function/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_mass_function', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/level_sets/setup.py
--- a/yt/analysis_modules/level_sets/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('level_sets', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/particle_trajectories/setup.py
--- a/yt/analysis_modules/particle_trajectories/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('particle_trajectories', parent_package, top_path)
-    #config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/photon_simulator/setup.py
--- a/yt/analysis_modules/photon_simulator/setup.py
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('photon_simulator', parent_package, top_path)
-    config.add_extension("utils",
-                         ["yt/analysis_modules/photon_simulator/utils.pyx"])
-    config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/ppv_cube/setup.py
--- a/yt/analysis_modules/ppv_cube/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('ppv_cube', parent_package, top_path)
-    config.add_extension("ppv_utils", 
-                         ["yt/analysis_modules/ppv_cube/ppv_utils.pyx"],
-                         libraries=["m"])
-    config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('analysis_modules', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    config.add_subpackage("absorption_spectrum")
-    config.add_subpackage("cosmological_observation")
-    config.add_subpackage("halo_analysis")
-    config.add_subpackage("halo_finding")
-    config.add_subpackage("halo_mass_function")
-    config.add_subpackage("level_sets")
-    config.add_subpackage("particle_trajectories")
-    config.add_subpackage("photon_simulator")
-    config.add_subpackage("spectral_integrator")
-    config.add_subpackage("star_analysis")
-    config.add_subpackage("two_point_functions")
-    config.add_subpackage("radmc3d_export")
-    config.add_subpackage("sunrise_export")
-    config.add_subpackage("sunyaev_zeldovich")
-    config.add_subpackage("particle_trajectories")
-    config.add_subpackage("photon_simulator")
-    config.add_subpackage("ppv_cube")
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/spectral_integrator/setup.py
--- a/yt/analysis_modules/spectral_integrator/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('spectral_integrator', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/star_analysis/setup.py
--- a/yt/analysis_modules/star_analysis/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('star_analysis', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/sunyaev_zeldovich/setup.py
--- a/yt/analysis_modules/sunyaev_zeldovich/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('sunyaev_zeldovich', parent_package, top_path)
-    config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/analysis_modules/two_point_functions/setup.py
--- a/yt/analysis_modules/two_point_functions/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('two_point_functions', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -39,7 +39,7 @@
     storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoreddatasets = '500',
-    skip_dataset_cache = 'False',
+    skip_dataset_cache = 'True',
     loadfieldplugins = 'True',
     pluginfilename = 'my_plugins.py',
     parallel_traceback = 'False',

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -436,22 +436,26 @@
     """
     def count_values(self, use_gas=True, use_particles=True):
         num_vals = 0
-        if use_gas: num_vals += 4
-        if use_particles: num_vals += 4
+        # create the index if it doesn't exist yet
+        self.data_source.ds.index
+        self.use_gas = use_gas & \
+            (("gas", "cell_mass") in self.data_source.ds.field_info)
+        self.use_particles = use_particles & \
+            (("all", "particle_mass") in self.data_source.ds.field_info)
+        if self.use_gas:
+            num_vals += 4
+        if self.use_particles:
+            num_vals += 4
         self.num_vals = num_vals
 
-    def process_chunk(self, data, use_gas=True, use_particles=True):
-        use_gas &= \
-          (("gas", "cell_mass") in self.data_source.ds.field_info)
-        use_particles &= \
-          (("all", "particle_mass") in self.data_source.ds.field_info)
+    def process_chunk(self, data, **kwargs):
         rvals = []
-        if use_gas:
+        if self.use_gas:
             rvals.extend([(data["gas", "specific_angular_momentum_%s" % axis] *
                            data["gas", "cell_mass"]).sum(dtype=np.float64) \
                           for axis in "xyz"])
             rvals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
-        if use_particles:
+        if self.use_particles:
             rvals.extend([(data["all", "particle_specific_angular_momentum_%s" % axis] *
                            data["all", "particle_mass"]).sum(dtype=np.float64) \
                           for axis in "xyz"])

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -90,10 +90,11 @@
         return self._num_zones + 2*self._num_ghost_zones
 
     def _reshape_vals(self, arr):
-        if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
-            return arr
         nz = self.nz
-        n_oct = arr.shape[0] / (nz**3.0)
+        if len(arr.shape) <= 2:
+            n_oct = arr.shape[0] / (nz**3)
+        else:
+            n_oct = max(arr.shape)
         if arr.size == nz*nz*nz*n_oct:
             new_shape = (nz, nz, nz, n_oct)
         elif arr.size == nz*nz*nz*n_oct * 3:
@@ -115,10 +116,9 @@
 
     def select_blocks(self, selector):
         mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
-        mask = self._reshape_vals(mask)
         slicer = OctreeSubsetBlockSlice(self)
         for i, sl in slicer:
-            yield sl, mask[:,:,:,i]
+            yield sl, mask[i,...]
 
     def select_tcoords(self, dobj):
         # These will not be pre-allocated, which can be a problem for speed and

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('data_objects', parent_package, top_path)
-    config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/extern/setup.py
--- a/yt/extern/setup.py
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-#----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('extern', parent_package, top_path)
-    config.add_subpackage("tqdm")
-    config.make_config_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/fields/setup.py
--- a/yt/fields/setup.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('fields', parent_package, top_path)
-    config.add_subpackage("tests")
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/_skeleton/setup.py
--- a/yt/frontends/_skeleton/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('skeleton', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='',top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('art',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -8,9 +8,7 @@
 from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
     SparseOctreeContainer
-from yt.geometry.oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct, \
-    fill_file_indices_oind, fill_file_indices_rind
+from yt.geometry.oct_visitors cimport Oct
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
 from libc.stdint cimport int32_t, int64_t

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -443,9 +443,18 @@
 
 double cosmology_get_value_from_table(CosmologyParameters *c, double a, double table[])
 {
-  int idx = (int)(c->ndex*(log10(a)-c->la[0]));
+  // This is special case code for boundary conditions
+  double la = log10(a);
+  if (fabs(la - c->la[c->size-1]) < 1.0e-14) {
+    return table[c->size-1];
+  } else if (fabs(la - c->la[0]) < 1.0e-14) {
+    return table[0];
+  }
 
-  ASSERT(idx>=0 && idx<c->size);
+  int idx = (int)(c->ndex*(la-c->la[0]));
+
+  // Note that because we do idx+1 below, we need -1 here.
+  ASSERT(idx>=0 && (idx<c->size-1));
 
   /*
   //  Do it as a function of aUni rather than la to ensure exact inversion

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/artio/setup.py
--- a/yt/frontends/artio/setup.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-import glob
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    artio_sources = glob.glob("yt/frontends/artio/artio_headers/*.c")
-    config = Configuration('artio', parent_package, top_path)
-    config.add_extension("_artio_caller",
-                         ["yt/frontends/artio/_artio_caller.pyx"] +
-                         artio_sources,
-                         include_dirs=["yt/frontends/artio/artio_headers/",
-                                       "yt/geometry/",
-                                       "yt/utilities/lib/"],
-                         depends=artio_sources + 
-                                 ["yt/utilities/lib/fp_utils.pxd",
-                                  "yt/geometry/oct_container.pxd",
-                                  "yt/geometry/selection_routines.pxd",
-                                  "yt/geometry/particle_deposit.pxd"])
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/athena/setup.py
--- a/yt/frontends/athena/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('athena', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/boxlib/setup.py
--- a/yt/frontends/boxlib/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('boxlib', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/chombo/setup.py
--- a/yt/frontends/chombo/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('chombo', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/eagle/setup.py
--- a/yt/frontends/eagle/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('eagle', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -277,14 +277,14 @@
             active_particles = True
             nap = dict((ap_type, []) for ap_type in 
                 params["Physics"]["ActiveParticles"]["ActiveParticlesEnabled"])
-        elif version == 2.2:
-            active_particles = True
+        else:
             nap = {}
-            for type in self.parameters.get("AppendActiveParticleType", []):
-                nap[type] = []
-        else:
-            active_particles = False
-            nap = None
+            if "AppendActiveParticleType" in self.parameters:
+                active_particles = True
+                for type in self.parameters.get("AppendActiveParticleType", []):
+                    nap[type] = []
+            else:
+                active_particles = False
         for grid_id in range(self.num_grids):
             pbar.update(grid_id)
             # We will unroll this list
@@ -394,7 +394,7 @@
         fields = []
         for ptype in self.dataset["AppendActiveParticleType"]:
             select_grids = self.grid_active_particle_count[ptype].flat
-            if np.any(select_grids) is False:
+            if not np.any(select_grids):
                 current_ptypes = self.dataset.particle_types
                 new_ptypes = [p for p in current_ptypes if p != ptype]
                 self.dataset.particle_types = new_ptypes

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/enzo/setup.py
--- a/yt/frontends/enzo/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('enzo', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -42,10 +42,16 @@
 
     def _initialize_mesh(self):
         coords = self.ds._read_coordinates()
-        self.meshes = [ExodusIIUnstructuredMesh(
-            mesh_id, self.index_filename, conn_ind, coords, self)
-                       for mesh_id, conn_ind in
-                       enumerate(self.ds._read_connectivity())]
+        connectivity = self.ds._read_connectivity()
+        self.meshes = []
+        for mesh_id, conn_ind in enumerate(connectivity):
+            displaced_coords = self.ds._apply_displacement(coords, mesh_id)
+            mesh = ExodusIIUnstructuredMesh(mesh_id, 
+                                            self.index_filename,
+                                            conn_ind, 
+                                            displaced_coords, 
+                                            self)
+            self.meshes.append(mesh)
 
     def _detect_output_fields(self):
         elem_names = self.dataset.parameters['elem_names']
@@ -63,13 +69,87 @@
     def __init__(self,
                  filename,
                  step=0,
+                 displacements=None,
                  dataset_type='exodus_ii',
                  storage_filename=None,
                  units_override=None):
+        """
 
+        A class used to represent an on-disk ExodusII dataset. The initializer takes 
+        two extra optional parameters, "step" and "displacements."
+
+        Parameters
+        ----------
+
+        step : integer
+            The step tells which time index to slice at. It throws an Error if
+            the index is larger than the number of time outputs in the ExodusII
+            file. Passing step=-1 picks out the last time output. 
+            Default is 0.
+
+        displacements : dictionary of tuples
+            This is a dictionary that controls whether or not displacement fields
+            will be used with the meshes in this dataset. The keys of the
+            displacements dictionary should be the names of meshes in the file 
+            (e.g., "connect1", "connect2", etc... ), while the values should be 
+            tuples of the form (scale, offset), where "scale" is a floating point
+            value and "offset" is an array-like with one component for each spatial
+            dimension in the dataset. When the displacements for a given mesh are
+            turned on, the coordinates of the vertices in that mesh get transformed
+            as: 
+
+                  vertex_x = vertex_x + disp_x*scale + offset_x
+                  vertex_y = vertex_y + disp_y*scale + offset_y
+                  vertex_z = vertex_z + disp_z*scale + offset_z
+
+            If no displacement 
+            fields (assumed to be named 'disp_x', 'disp_y', etc... ) are detected in
+            the output file, then this dictionary is ignored.
+
+        Examples
+        --------
+
+        This will load the Dataset at time index '0' with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e")
+
+        This will load the Dataset at the final index with displacements turned off.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=-1)
+
+        This will load the Dataset at index 10, turning on displacement fields for 
+        the 2nd mesh without applying any scale or offset:
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect2': (1.0, [0.0, 0.0, 0.0])})
+
+        This will load the Dataset at index 10, scaling the displacements
+        in the 2nd mesh by a factor of 5 while not applying an offset:
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect2': (5.0, [0.0, 0.0, 0.0])})
+        
+        This will load the Dataset at index 10, scaling the displacements for
+        the 2nd mesh by a factor of 5.0 and shifting all the vertices in 
+        the first mesh by 1.0 unit in the z direction.
+
+        >>> import yt
+        >>> ds = yt.load("MOOSE_sample_data/mps_out.e", step=10,
+                         displacements={'connect1': (0.0, [0.0, 0.0, 1.0]),
+                                        'connect2': (5.0, [0.0, 0.0, 0.0])})
+
+        """
         self.parameter_filename = filename
         self.fluid_types += self._get_fluid_types()
         self.step = step
+        if displacements is None:
+            self.displacements = {}
+        else:
+            self.displacements = displacements
         super(ExodusIIDataset, self).__init__(filename, dataset_type,
                                               units_override=units_override)
         self.index_filename = filename
@@ -102,8 +182,7 @@
         self.parameters['num_meshes'] = self._vars['eb_status'].shape[0]
         self.parameters['elem_names'] = self._get_elem_names()
         self.parameters['nod_names'] = self._get_nod_names()
-        self.domain_left_edge = self._load_domain_edge(0)
-        self.domain_right_edge = self._load_domain_edge(1)
+        self.domain_left_edge, self.domain_right_edge = self._load_domain_edge()
 
         # set up pseudo-3D for lodim datasets here
         if self.dimensionality == 2:
@@ -223,12 +302,33 @@
 
         mylog.info("Loading coordinates")
         if "coord" not in self._vars:
-            return np.array([self._vars["coord%s" % ax][:]
-                             for ax in coord_axes]).transpose().copy()
+            coords = np.array([self._vars["coord%s" % ax][:]
+                               for ax in coord_axes]).transpose().copy()
         else:
-            return np.array([coord for coord in
-                             self._vars["coord"][:]]).transpose().copy()
+            coords = np.array([coord for coord in
+                               self._vars["coord"][:]]).transpose().copy()
+        return coords
 
+    def _apply_displacement(self, coords, mesh_id):
+        
+        mesh_name = "connect%d" % (mesh_id + 1)
+        if mesh_name not in self.displacements:
+            new_coords = coords.copy()
+            return new_coords
+
+        new_coords = np.zeros_like(coords)
+        fac = self.displacements[mesh_name][0]
+        offset = self.displacements[mesh_name][1]
+
+        coord_axes = 'xyz'[:self.dimensionality]
+        for i, ax in enumerate(coord_axes):
+            if "disp_%s" % ax in self.parameters['nod_names']:
+                ind = self.parameters['nod_names'].index("disp_%s" % ax)
+                disp = self._vars['vals_nod_var%d' % (ind + 1)][self.step]
+                new_coords[:, i] = coords[:, i] + fac*disp + offset[i]
+
+        return new_coords
+        
     def _read_connectivity(self):
         """
         Loads the connectivity data for the mesh
@@ -239,17 +339,22 @@
             connectivity.append(self._vars["connect%d" % (i+1)][:].astype("i8"))
         return connectivity
 
-    def _load_domain_edge(self, domain_idx):
+    def _load_domain_edge(self):
         """
         Loads the boundaries for the domain edge
 
-        Parameters:
-        - domain_idx: 0 corresponds to the left edge, 1 corresponds to the right edge
         """
-        if domain_idx == 0:
-            return self._read_coordinates().min(axis=0)
-        if domain_idx == 1:
-            return self._read_coordinates().max(axis=0)
+        
+        coords = self._read_coordinates()
+        connectivity = self._read_connectivity()
+
+        mi = 1e300
+        ma = -1e300
+        for mesh_id, _ in enumerate(connectivity):
+            displaced_coords = self._apply_displacement(coords, mesh_id)
+            mi = np.minimum(displaced_coords.min(axis=0), mi)
+            ma = np.maximum(displaced_coords.max(axis=0), ma)
+        return mi, ma
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/exodus_ii/setup.py
--- a/yt/frontends/exodus_ii/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('exodus_ii', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/exodus_ii/tests/test_outputs.py
--- a/yt/frontends/exodus_ii/tests/test_outputs.py
+++ b/yt/frontends/exodus_ii/tests/test_outputs.py
@@ -18,7 +18,10 @@
     assert_array_equal, \
     requires_file
 from yt.utilities.answer_testing.framework import \
-    data_dir_load
+    data_dir_load, \
+    requires_ds, \
+    GenericArrayTest
+
 
 out = "ExodusII/out.e"
 
@@ -69,3 +72,18 @@
     field_list = [('connect1', 'forced')]
     yield assert_equal, str(ds), "gold.e"
     yield assert_array_equal, ds.field_list, field_list 
+
+big_data = "MOOSE_sample_data/mps_out.e"
+
+
+@requires_ds(big_data)
+def test_displacement_fields():
+    displacement_dicts =[{'connect2': (5.0, [0.0, 0.0, 0.0])},
+                         {'connect1': (1.0, [1.0, 2.0, 3.0]), 
+                          'connect2': (0.0, [0.0, 0.0, 0.0])}]
+    for disp in displacement_dicts:
+        ds = data_dir_load(big_data, displacements=disp)
+        for mesh in ds.index.meshes:
+            def array_func():
+                return mesh.connectivity_coords
+            yield GenericArrayTest(ds, array_func, 12)

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/fits/setup.py
--- a/yt/frontends/fits/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('fits', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/flash/setup.py
--- a/yt/frontends/flash/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('flash', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/gadget/setup.py
--- a/yt/frontends/gadget/setup.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('gadget', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r b64822ed0327496f792cc9a74dd08fade7b91e27 -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 yt/frontends/gadget_fof/api.py
--- a/yt/frontends/gadget_fof/api.py
+++ b/yt/frontends/gadget_fof/api.py
@@ -15,12 +15,19 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-     GadgetFOFDataset
+    GadgetFOFParticleIndex, \
+    GadgetFOFHDF5File, \
+    GadgetFOFDataset, \
+    GadgetFOFHaloParticleIndex, \
+    GadgetFOFHaloDataset, \
+    GagdetFOFHaloContainer
 
 from .io import \
-     IOHandlerGadgetFOFHDF5
+    IOHandlerGadgetFOFHDF5, \
+    IOHandlerGadgetFOFHaloHDF5
 
 from .fields import \
-     GadgetFOFFieldInfo
+    GadgetFOFFieldInfo, \
+    GadgetFOFHaloFieldInfo
 
 from . import tests

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/880cc087e986/
Changeset:   880cc087e986
Branch:      yt
User:        atmyers
Date:        2016-02-16 20:44:36+00:00
Summary:     update setup script
Affected #:  1 file

diff -r 23cf9b595583d119e23f8f2fd95aff3971088cd4 -r 880cc087e986a7bfcf7b23161a472d8effea4451 setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,6 +122,9 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.bounding_volume_hierarchy",
+              ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",


https://bitbucket.org/yt_analysis/yt/commits/460ce737b7c4/
Changeset:   460ce737b7c4
Branch:      yt
User:        atmyers
Date:        2016-02-20 19:09:15+00:00
Summary:     perform simple vertex data interpolation during ray trace
Affected #:  3 files

diff -r 880cc087e986a7bfcf7b23161a472d8effea4451 -r 460ce737b7c48f1e1a2a5f66ba28bffec4a41843 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -30,6 +30,7 @@
     np.float64_t p0[3]
     np.float64_t p1[3]
     np.float64_t p2[3]
+    np.float64_t d0, d1, d2
     np.int64_t elem_id
     np.float64_t centroid[3]
     BBox bbox

diff -r 880cc087e986a7bfcf7b23161a472d8effea4451 -r 460ce737b7c48f1e1a2a5f66ba28bffec4a41843 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -75,7 +75,7 @@
 
     if(t > 1.0e-10 and t < ray.t_far):
         ray.t_far = t
-        ray.data_val = 1.0
+        ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
         ray.elem_id = tri.elem_id
         return True
 
@@ -120,7 +120,8 @@
     @cython.cdivision(True)
     def __init__(self,
                  np.float64_t[:, ::1] vertices,
-                 np.int64_t[:, ::1] indices):
+                 np.int64_t[:, ::1] indices,
+                 np.float64_t[:, ::1] field_data):
         
         self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
@@ -141,6 +142,9 @@
                 v0 = indices[i][triangulate_hex[j][0]]
                 v1 = indices[i][triangulate_hex[j][1]]
                 v2 = indices[i][triangulate_hex[j][2]]
+                tri.d0 = field_data[i][triangulate_hex[j][0]]
+                tri.d1 = field_data[i][triangulate_hex[j][1]]
+                tri.d2 = field_data[i][triangulate_hex[j][2]]
                 for k in range(3):
                     tri.p0[k] = vertices[v0][k]
                     tri.p1[k] = vertices[v1][k]

diff -r 880cc087e986a7bfcf7b23161a472d8effea4451 -r 460ce737b7c48f1e1a2a5f66ba28bffec4a41843 yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- a/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -31,7 +31,10 @@
     vertices = ds.index.meshes[0].connectivity_coords
     indices  = ds.index.meshes[0].connectivity_indices - 1
 
-    bvh = BVH(vertices, indices)
+    ad = ds.all_data()
+    field_data = ad['connect1', 'diffused']
+
+    bvh = BVH(vertices, indices, field_data)
 
     cam = Camera()
     cam.set_position(np.array([8.0, 8.0, 8.0]))


https://bitbucket.org/yt_analysis/yt/commits/d5eefa0bd123/
Changeset:   d5eefa0bd123
Branch:      yt
User:        atmyers
Date:        2016-02-21 00:07:30+00:00
Summary:     configure the ray tracer to use OpenMP, if it is available.
Affected #:  2 files

diff -r 460ce737b7c48f1e1a2a5f66ba28bffec4a41843 -r d5eefa0bd123ce2121ca90dbfa13a662187797e3 setup.py
--- a/setup.py
+++ b/setup.py
@@ -124,6 +124,8 @@
               libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
     Extension("yt.utilities.lib.bounding_volume_hierarchy",
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
               libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],

diff -r 460ce737b7c48f1e1a2a5f66ba28bffec4a41843 -r d5eefa0bd123ce2121ca90dbfa13a662187797e3 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -3,6 +3,7 @@
 cimport numpy as np
 from libc.math cimport fabs, fmax, fmin
 from libc.stdlib cimport malloc, free
+from cython.parallel import parallel, prange
 
 cdef extern from "mesh_construction.h":
     enum:
@@ -271,21 +272,27 @@
                     const int N, 
                     BVH bvh) nogil:
 
-    cdef Ray ray
-    cdef int i, j, offset
+    cdef Ray* ray 
+    cdef int i, j, k
     
-    for i in range(3):
-        ray.direction[i] = direction[i]
-        ray.inv_dir[i] = 1.0 / direction[i]      
-   
-    for i in range(N):
-        for j in range(3):
-            ray.origin[j] = origins[N*j + i]
-        ray.t_far = 1e30
-        ray.t_near = 0.0
-        ray.data_val = 0
-        bvh.intersect(&ray)
-        image[i] = ray.data_val
+    with nogil, parallel():
+       
+        ray = <Ray *> malloc(sizeof(Ray))
+    
+        for k in range(3):
+            ray.direction[k] = direction[k]
+            ray.inv_dir[k] = 1.0 / direction[k]
+
+        for i in prange(N):
+            for j in range(3):
+                ray.origin[j] = origins[N*j + i]
+            ray.t_far = 1e30
+            ray.t_near = 0.0
+            ray.data_val = 0
+            bvh.intersect(ray)
+            image[i] = ray.data_val
+
+        free(ray)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/34770ad3c6f8/
Changeset:   34770ad3c6f8
Branch:      yt
User:        atmyers
Date:        2016-02-22 05:08:36+00:00
Summary:     some cython code cleanup
Affected #:  1 file

diff -r d5eefa0bd123ce2121ca90dbfa13a662187797e3 -r 34770ad3c6f8b4b9d7da8c9458f794dd136be489 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -14,21 +14,17 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.float64_t dot(const np.float64_t* a, 
-                      const np.float64_t* b) nogil:
-    cdef np.int64_t i
-    cdef np.float64_t rv = 0.0
-    for i in range(3):
-        rv += a[i]*b[i]
-    return rv
+cdef inline np.float64_t dot(const np.float64_t a[3], 
+                             const np.float64_t b[3]) nogil:
+    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] 
 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef void cross(const np.float64_t* a, 
-                const np.float64_t* b,
-                np.float64_t* c) nogil:
+cdef inline void cross(const np.float64_t a[3], 
+                       const np.float64_t b[3],
+                       np.float64_t c[3]) nogil:
     c[0] = a[1]*b[2] - a[2]*b[1]
     c[1] = a[2]*b[0] - a[0]*b[2]
     c[2] = a[0]*b[1] - a[1]*b[0]
@@ -37,16 +33,25 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef inline void subtract(const np.float64_t a[3], 
+                          const np.float64_t b[3],
+                          np.float64_t c[3]) nogil:
+    c[0] = a[0] - b[0]
+    c[1] = a[1] - b[1]
+    c[2] = a[2] - b[2]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri) nogil:
 # https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
 
     # edge vectors
     cdef np.float64_t e1[3]
     cdef np.float64_t e2[3]
-    cdef int i
-    for i in range(3):
-        e1[i] = tri.p1[i] - tri.p0[i]
-        e2[i] = tri.p2[i] - tri.p0[i]
+    subtract(tri.p1, tri.p0, e1)
+    subtract(tri.p2, tri.p0, e2)
 
     cdef np.float64_t P[3]
     cross(ray.direction, e2, P)
@@ -58,8 +63,7 @@
     inv_det = 1.0 / det
 
     cdef np.float64_t T[3]
-    for i in range(3):
-        T[i] = ray.origin[i] - tri.p0[i] 
+    subtract(ray.origin, tri.p0, T)
 
     cdef np.float64_t u = dot(T, P) * inv_det
     if(u < 0.0 or u > 1.0):
@@ -119,10 +123,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def __init__(self,
-                 np.float64_t[:, ::1] vertices,
-                 np.int64_t[:, ::1] indices,
-                 np.float64_t[:, ::1] field_data):
+    def __cinit__(self,
+                  np.float64_t[:, ::1] vertices,
+                  np.int64_t[:, ::1] indices,
+                  np.float64_t[:, ::1] field_data):
         
         self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
@@ -303,6 +307,4 @@
                    BVH bvh):
     
     cdef int N = origins.shape[0]
-    cast_rays(<np.float64_t*> image.data,
-              <np.float64_t*> origins.data, 
-              <np.float64_t*> direction.data, N, bvh)
+    cast_rays(&image[0], &origins[0, 0], &direction[0], N, bvh)


https://bitbucket.org/yt_analysis/yt/commits/2913a3edcb72/
Changeset:   2913a3edcb72
Branch:      yt
User:        atmyers
Date:        2016-02-22 05:46:21+00:00
Summary:     clean up after myself.
Affected #:  2 files

diff -r 34770ad3c6f8b4b9d7da8c9458f794dd136be489 -r 2913a3edcb72a9ad372c11669f4de62775793373 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -45,4 +45,5 @@
     cdef void _get_node_bbox(self, BVHNode* node, 
                              np.int64_t begin, np.int64_t end) nogil
     cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil
-    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end) nogil
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_free(self, BVHNode* node) nogil

diff -r 34770ad3c6f8b4b9d7da8c9458f794dd136be489 -r 2913a3edcb72a9ad372c11669f4de62775793373 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -158,8 +158,17 @@
                     tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
                     tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
 
-        # recursive build
-        self.root = self._build(0, num_tri)
+        self.root = self._recursive_build(0, num_tri)
+
+    cdef void _recursive_free(self, BVHNode* node) nogil:
+        if node.end - node.begin > 8:
+            self._recursive_free(node.left)
+            self._recursive_free(node.right)
+        free(node)
+
+    def __dealloc__(self):
+        self._recursive_free(self.root)
+        free(self.triangles)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -227,7 +236,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef BVHNode* _build(self, np.int64_t begin, np.int64_t end) nogil:
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil:
         cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
         node.begin = begin
         node.end = end
@@ -261,8 +270,8 @@
             mid = begin + (end-begin)/2
         
         # recursively build sub-trees
-        node.left = self._build(begin, mid)
-        node.right = self._build(mid, end)
+        node.left = self._recursive_build(begin, mid)
+        node.right = self._recursive_build(mid, end)
 
         return node
 


https://bitbucket.org/yt_analysis/yt/commits/2faa6990c356/
Changeset:   2faa6990c356
Branch:      yt
User:        atmyers
Date:        2016-02-22 05:48:25+00:00
Summary:     don't use magic number for the leaf size
Affected #:  2 files

diff -r 2913a3edcb72a9ad372c11669f4de62775793373 -r 2faa6990c356173ba8b710553a6d6916366ab203 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -38,6 +38,7 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
+    cdef np.int64_t leaf_size
     cdef np.float64_t[:, ::1] vertices
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil

diff -r 2913a3edcb72a9ad372c11669f4de62775793373 -r 2faa6990c356173ba8b710553a6d6916366ab203 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -128,6 +128,7 @@
                   np.int64_t[:, ::1] indices,
                   np.float64_t[:, ::1] field_data):
         
+        self.leaf_size = 8
         self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
@@ -161,7 +162,7 @@
         self.root = self._recursive_build(0, num_tri)
 
     cdef void _recursive_free(self, BVHNode* node) nogil:
-        if node.end - node.begin > 8:
+        if node.end - node.begin > self.leaf_size:
             self._recursive_free(node.left)
             self._recursive_free(node.right)
         free(node)
@@ -223,7 +224,7 @@
         # check for leaf
         cdef np.int64_t i, hit
         cdef Triangle* tri
-        if (node.end - node.begin) <= 8:
+        if (node.end - node.begin) <= self.leaf_size:
             for i in range(node.begin, node.end):
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
@@ -244,7 +245,7 @@
         self._get_node_bbox(node, begin, end)
         
         # check for leaf
-        if (end - begin) <= 8:
+        if (end - begin) <= self.leaf_size:
             return node
         
         # we use the "split in the middle of the longest axis approach"


https://bitbucket.org/yt_analysis/yt/commits/dc97a2d7cd52/
Changeset:   dc97a2d7cd52
Branch:      yt
User:        atmyers
Date:        2016-02-22 05:51:26+00:00
Summary:     increase leaf size to 16
Affected #:  1 file

diff -r 2faa6990c356173ba8b710553a6d6916366ab203 -r dc97a2d7cd52edaeee9b5ee8eac2bb401f7e2003 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -128,7 +128,7 @@
                   np.int64_t[:, ::1] indices,
                   np.float64_t[:, ::1] field_data):
         
-        self.leaf_size = 8
+        self.leaf_size = 16
         self.vertices = vertices
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem


https://bitbucket.org/yt_analysis/yt/commits/d18dc7748e19/
Changeset:   d18dc7748e19
Branch:      yt
User:        MatthewTurk
Date:        2016-02-24 17:37:11+00:00
Summary:     Merged in atmyers/yt (pull request #1994)

Pure Cython Ray Tracer for Triangle Meshes
Affected #:  5 files

diff -r aaa54f6b1a1b1f98ea52b203fccce7c51a1ccc91 -r d18dc7748e19ec77816ee9b44147e5c0597c3185 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
 yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/basic_octree.c
 yt/utilities/lib/bitarray.c
+yt/utilities/lib/bounding_volume_hierarchy.c
 yt/utilities/lib/contour_finding.c
 yt/utilities/lib/depth_first_octree.c
 yt/utilities/lib/element_mappings.c

diff -r aaa54f6b1a1b1f98ea52b203fccce7c51a1ccc91 -r d18dc7748e19ec77816ee9b44147e5c0597c3185 setup.py
--- a/setup.py
+++ b/setup.py
@@ -122,6 +122,11 @@
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"]),
+    Extension("yt.utilities.lib.bounding_volume_hierarchy",
+              ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
+              extra_compile_args=omp_args,
+              extra_link_args=omp_args,
+              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",

diff -r aaa54f6b1a1b1f98ea52b203fccce7c51a1ccc91 -r d18dc7748e19ec77816ee9b44147e5c0597c3185 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -0,0 +1,50 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+
+# ray data structure
+cdef struct Ray:
+    np.float64_t origin[3]
+    np.float64_t direction[3]
+    np.float64_t inv_dir[3]
+    np.float64_t data_val
+    np.float64_t t_near
+    np.float64_t t_far
+    np.int64_t elem_id
+
+# axis-aligned bounding box
+cdef struct BBox:
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+
+# node for the bounding volume hierarchy
+cdef struct BVHNode:
+    np.int64_t begin
+    np.int64_t end
+    BVHNode* left
+    BVHNode* right
+    BBox bbox
+    
+# triangle data structure
+cdef struct Triangle:
+    np.float64_t p0[3]
+    np.float64_t p1[3]
+    np.float64_t p2[3]
+    np.float64_t d0, d1, d2
+    np.int64_t elem_id
+    np.float64_t centroid[3]
+    BBox bbox
+
+cdef class BVH:
+    cdef BVHNode* root
+    cdef Triangle* triangles
+    cdef np.int64_t leaf_size
+    cdef np.float64_t[:, ::1] vertices
+    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
+                               np.int64_t ax, np.float64_t split) nogil
+    cdef void intersect(self, Ray* ray) nogil
+    cdef void _get_node_bbox(self, BVHNode* node, 
+                             np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil
+    cdef void _recursive_free(self, BVHNode* node) nogil

diff -r aaa54f6b1a1b1f98ea52b203fccce7c51a1ccc91 -r d18dc7748e19ec77816ee9b44147e5c0597c3185 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- /dev/null
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -0,0 +1,320 @@
+cimport cython 
+import numpy as np
+cimport numpy as np
+from libc.math cimport fabs, fmax, fmin
+from libc.stdlib cimport malloc, free
+from cython.parallel import parallel, prange
+
+cdef extern from "mesh_construction.h":
+    enum:
+        MAX_NUM_TRI
+    int triangulate_hex[MAX_NUM_TRI][3]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.float64_t dot(const np.float64_t a[3], 
+                             const np.float64_t b[3]) nogil:
+    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] 
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void cross(const np.float64_t a[3], 
+                       const np.float64_t b[3],
+                       np.float64_t c[3]) nogil:
+    c[0] = a[1]*b[2] - a[2]*b[1]
+    c[1] = a[2]*b[0] - a[0]*b[2]
+    c[2] = a[0]*b[1] - a[1]*b[0]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void subtract(const np.float64_t a[3], 
+                          const np.float64_t b[3],
+                          np.float64_t c[3]) nogil:
+    c[0] = a[0] - b[0]
+    c[1] = a[1] - b[1]
+    c[2] = a[2] - b[2]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_triangle_intersect(Ray* ray, const Triangle* tri) nogil:
+# https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm
+
+    # edge vectors
+    cdef np.float64_t e1[3]
+    cdef np.float64_t e2[3]
+    subtract(tri.p1, tri.p0, e1)
+    subtract(tri.p2, tri.p0, e2)
+
+    cdef np.float64_t P[3]
+    cross(ray.direction, e2, P)
+
+    cdef np.float64_t det, inv_det
+    det = dot(e1, P)
+    if(det > -1.0e-10 and det < 1.0e-10): 
+        return False
+    inv_det = 1.0 / det
+
+    cdef np.float64_t T[3]
+    subtract(ray.origin, tri.p0, T)
+
+    cdef np.float64_t u = dot(T, P) * inv_det
+    if(u < 0.0 or u > 1.0):
+        return False
+
+    cdef np.float64_t Q[3]
+    cross(T, e1, Q)
+
+    cdef np.float64_t v = dot(ray.direction, Q) * inv_det
+    if(v < 0.0 or u + v  > 1.0):
+        return False
+
+    cdef np.float64_t t = dot(e2, Q) * inv_det
+
+    if(t > 1.0e-10 and t < ray.t_far):
+        ray.t_far = t
+        ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
+        ray.elem_id = tri.elem_id
+        return True
+
+    return False
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
+# https://tavianator.com/fast-branchless-raybounding-box-intersections/
+
+    cdef np.float64_t tmin = -1.0e300
+    cdef np.float64_t tmax =  1.0e300
+ 
+    cdef np.int64_t i
+    cdef np.float64_t t1, t2
+    for i in range(3):
+        t1 = (bbox.left_edge[i]  - ray.origin[i])*ray.inv_dir[i]
+        t2 = (bbox.right_edge[i] - ray.origin[i])*ray.inv_dir[i] 
+        tmin = fmax(tmin, fmin(t1, t2))
+        tmax = fmin(tmax, fmax(t1, t2))
+ 
+    return tmax >= fmax(tmin, 0.0)
+
+
+cdef class BVH:
+    '''
+
+    This class implements a bounding volume hierarchy (BVH), a spatial acceleration
+    structure for fast ray-tracing. A BVH is like a kd-tree, except that instead of 
+    partitioning the *volume* of the parent to create the children, we partition the 
+    triangles themselves into 'left' or 'right' sub-trees. The bounding volume for a
+    node is then determined by computing the bounding volume of the triangles that
+    belong to it. This allows us to quickly discard triangles that are not close 
+    to intersecting a given ray.
+
+    '''
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self,
+                  np.float64_t[:, ::1] vertices,
+                  np.int64_t[:, ::1] indices,
+                  np.float64_t[:, ::1] field_data):
+        
+        self.leaf_size = 16
+        self.vertices = vertices
+        cdef np.int64_t num_elem = indices.shape[0]
+        cdef np.int64_t num_tri = 12*num_elem
+
+        # fill our array of triangles
+        cdef np.int64_t i, j, k
+        cdef np.int64_t offset, tri_index
+        cdef np.int64_t v0, v1, v2
+        cdef Triangle* tri
+        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
+        for i in range(num_elem):
+            offset = 12*i
+            for j in range(12):
+                tri_index = offset + j
+                tri = &(self.triangles[tri_index])
+                tri.elem_id = i
+                v0 = indices[i][triangulate_hex[j][0]]
+                v1 = indices[i][triangulate_hex[j][1]]
+                v2 = indices[i][triangulate_hex[j][2]]
+                tri.d0 = field_data[i][triangulate_hex[j][0]]
+                tri.d1 = field_data[i][triangulate_hex[j][1]]
+                tri.d2 = field_data[i][triangulate_hex[j][2]]
+                for k in range(3):
+                    tri.p0[k] = vertices[v0][k]
+                    tri.p1[k] = vertices[v1][k]
+                    tri.p2[k] = vertices[v2][k]
+                    tri.centroid[k] = (tri.p0[k] + tri.p1[k] + tri.p2[k]) / 3.0
+                    tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
+                    tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
+
+        self.root = self._recursive_build(0, num_tri)
+
+    cdef void _recursive_free(self, BVHNode* node) nogil:
+        if node.end - node.begin > self.leaf_size:
+            self._recursive_free(node.left)
+            self._recursive_free(node.right)
+        free(node)
+
+    def __dealloc__(self):
+        self._recursive_free(self.root)
+        free(self.triangles)
+
    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                               np.int64_t ax, np.float64_t split) nogil:
        # Re-order triangles[begin:end] in place around `split` and return
        # the partition point `mid`.
        # NOTE(review): contrary to the comment in the committed code,
        # tracing the loop shows that triangles *before* `mid` end up with
        # centroid[ax] > split, and triangles from `mid` onward have
        # centroid[ax] <= split (the predicate that advances `mid` is
        # "> split").  Either orientation is fine for the BVH build, which
        # only needs the two groups to be contiguous.
        cdef np.int64_t mid = begin
        while (begin != end):
            if self.triangles[mid].centroid[ax] > split:
                mid += 1
            elif self.triangles[begin].centroid[ax] > split:
                self.triangles[mid], self.triangles[begin] = \
                self.triangles[begin], self.triangles[mid]
                mid += 1
            begin += 1
        return mid
+    
    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cdef void _get_node_bbox(self, BVHNode* node, 
                             np.int64_t begin, np.int64_t end) nogil:
        # Set node.bbox to the axis-aligned box enclosing the bounding
        # boxes of triangles[begin:end].  The running box is seeded from
        # the first triangle (struct assignment copies by value), then
        # grown per axis with fmin/fmax.  Assumes end > begin.
        cdef np.int64_t i, j
        cdef BBox box = self.triangles[begin].bbox
        for i in range(begin+1, end):
            for j in range(3):
                box.left_edge[j] = fmin(box.left_edge[j],
                                        self.triangles[i].bbox.left_edge[j])
                box.right_edge[j] = fmax(box.right_edge[j], 
                                         self.triangles[i].bbox.right_edge[j])
        node.bbox = box
+
    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cdef void intersect(self, Ray* ray) nogil:
        # Public traversal entry point: walk the tree from the root,
        # updating *ray in place (see _recursive_intersect).
        self._recursive_intersect(ray, self.root)
+
    @cython.boundscheck(False)
    @cython.wraparound(False)
    @cython.cdivision(True)
    cdef void _recursive_intersect(self, Ray* ray, BVHNode* node) nogil:
        # Depth-first ray traversal: prune a subtree when the ray misses
        # the node's bounding box; at a leaf, test the ray against every
        # triangle in [begin, end).  Hit information is accumulated
        # inside *ray by the intersection helpers.
        # NOTE(review): `Ray`, `ray_bbox_intersect` and
        # `ray_triangle_intersect` are not defined or cimported anywhere
        # in this changeset -- presumably added by a follow-up commit;
        # verify.

        # check for bbox intersection:
        if not ray_bbox_intersect(ray, node.bbox):
            return

        # check for leaf
        cdef np.int64_t i, hit
        cdef Triangle* tri
        if (node.end - node.begin) <= self.leaf_size:
            for i in range(node.begin, node.end):
                tri = &(self.triangles[i])
                # `hit` is assigned but never read; the helper is
                # expected to record the nearest hit in *ray itself.
                hit = ray_triangle_intersect(ray, tri)
            return

        # if not leaf, intersect with left and right children
        self._recursive_intersect(ray, node.left)
        self._recursive_intersect(ray, node.right)
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef BVHNode* _recursive_build(self, np.int64_t begin, np.int64_t end) nogil:
+        cdef BVHNode *node = <BVHNode* > malloc(sizeof(BVHNode))
+        node.begin = begin
+        node.end = end
+
+        self._get_node_bbox(node, begin, end)
+        
+        # check for leaf
+        if (end - begin) <= self.leaf_size:
+            return node
+        
+        # we use the "split in the middle of the longest axis approach"
+        # see: http://www.vadimkravcenko.com/bvh-tree-building/
+
+        # compute longest dimension
+        cdef np.int64_t ax = 0
+        cdef np.float64_t d = fabs(node.bbox.right_edge[0] - 
+                                   node.bbox.left_edge[0])
+        if fabs(node.bbox.right_edge[1] - node.bbox.left_edge[1]) > d:
+            ax = 1
+        if fabs(node.bbox.right_edge[2] - node.bbox.left_edge[2]) > d:
+            ax = 2
+
+        # split in half along that dimension
+        cdef np.float64_t split = 0.5*(node.bbox.right_edge[ax] +
+                                       node.bbox.left_edge[ax])
+
+        # sort triangle list
+        cdef np.int64_t mid = self._partition(begin, end, ax, split)
+
+        if(mid == begin or mid == end):
+            mid = begin + (end-begin)/2
+        
+        # recursively build sub-trees
+        node.left = self._recursive_build(begin, mid)
+        node.right = self._recursive_build(mid, end)
+
+        return node
+
+
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cdef void cast_rays(np.float64_t* image,
                    const np.float64_t* origins,
                    const np.float64_t* direction,
                    const int N,
                    BVH bvh) nogil:
    # Trace N rays (one shared direction, per-ray origins) through the
    # BVH in parallel, writing each ray's data value into image[i].
    #
    # FIX: the mailing-list archive mangled the three decorator lines
    # above into " at cython...."; restored to "@cython....".
    #
    # NOTE(review): origins is indexed as origins[N*j + i], i.e. the
    # buffer is assumed column-major (component j of ray i at offset
    # N*j + i).  The (N, 3) array built by the accompanying test is only
    # Fortran-ordered by accident of NumPy ufunc layout propagation --
    # confirm, or pass an explicitly ordered array.
    # NOTE(review): `prange`/`parallel` are not cimported in this
    # changeset (cython.parallel) -- verify the final file's imports.

    cdef Ray* ray
    cdef int i, j, k

    with nogil, parallel():

        # one scratch Ray per thread: variables assigned inside a
        # parallel() block are thread-private in Cython
        ray = <Ray *> malloc(sizeof(Ray))

        for k in range(3):
            ray.direction[k] = direction[k]
            ray.inv_dir[k] = 1.0 / direction[k]

        for i in prange(N):
            for j in range(3):
                ray.origin[j] = origins[N*j + i]
            ray.t_far = 1e30
            ray.t_near = 0.0
            ray.data_val = 0
            bvh.intersect(ray)
            image[i] = ray.data_val

        free(ray)
+
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
def test_ray_trace(np.ndarray[np.float64_t, ndim=1] image, 
                   np.ndarray[np.float64_t, ndim=2] origins,
                   np.ndarray[np.float64_t, ndim=1] direction,
                   BVH bvh):
    """Python-visible wrapper: trace ``origins.shape[0]`` rays with the
    common ``direction`` through ``bvh``, filling the flat ``image``.

    FIX: restored the three decorator lines above, which the
    mailing-list archive mangled from "@cython...." into " at cython....".
    """
    cdef int N = origins.shape[0]
    # raw-pointer hand-off: assumes origins is laid out as cast_rays
    # indexes it (component j of ray i at flat offset N*j + i)
    cast_rays(&image[0], &origins[0, 0], &direction[0], N, bvh)

diff -r aaa54f6b1a1b1f98ea52b203fccce7c51a1ccc91 -r d18dc7748e19ec77816ee9b44147e5c0597c3185 yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -0,0 +1,47 @@
+import yt
+import numpy as np
+from yt.utilities.lib.bounding_volume_hierarchy import BVH, \
+    test_ray_trace
+from yt.visualization.volume_rendering.api import Camera
+from yt.testing import requires_file
+
+
def get_rays(camera):
    """Build an 800x800 grid of ray origins in the camera's image plane.

    Returns ``(origins, direction)`` where ``origins`` is an
    (800*800, 3) array of world-space points offset from
    ``camera.position`` and ``direction`` is the camera normal
    (``camera.unit_vectors[2]``).

    BUG FIX: the ``y_points`` linspace used ``dx[0]`` for its upper
    bound where ``dx[1]`` was clearly intended -- harmless here only
    because the window is square (``dx[0] == dx[1]``).
    """
    normal_vector = camera.unit_vectors[2].d
    W = np.array([8.0, 8.0])   # image-plane width (world units)
    N = np.array([800, 800])   # pixels per side
    dx = W / N                 # pixel spacing

    x_points = np.linspace((-N[0]/2 + 0.5)*dx[0], (N[0]/2 - 0.5)*dx[0], N[0])
    y_points = np.linspace((-N[1]/2 + 0.5)*dx[1], (N[1]/2 - 0.5)*dx[1], N[1])

    X, Y = np.meshgrid(x_points, y_points)
    # project pixel offsets onto the camera's in-plane basis vectors
    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()])
    vec_origins = result.T + camera.position
    return np.array(vec_origins), np.array(normal_vector)
+
+
fn = "MOOSE_sample_data/out.e-s010"


@requires_file(fn)
def test_bounding_volume_hierarchy():
    """Smoke-test BVH ray tracing against the MOOSE sample mesh.

    FIX: restored the decorator line above, which the mailing-list
    archive mangled from "@requires_file(fn)" into " at requires_file(fn)".
    """
    ds = yt.load(fn)
    vertices = ds.index.meshes[0].connectivity_coords
    # Exodus II connectivity is 1-based; shift to 0-based indices
    indices = ds.index.meshes[0].connectivity_indices - 1

    ad = ds.all_data()
    field_data = ad['connect1', 'diffused']

    # NOTE(review): BVH.__init__ in this same changeset only accepts
    # (vertices, indices); the field_data argument used here appears to
    # rely on a later commit -- verify the signatures agree.
    bvh = BVH(vertices, indices, field_data)

    cam = Camera()
    cam.set_position(np.array([8.0, 8.0, 8.0]))
    cam.focus = np.array([0.0, 0.0, 0.0])
    origins, direction = get_rays(cam)

    image = np.empty(800*800, np.float64)
    test_ray_trace(image, origins, direction, bvh)
    image = image.reshape((800, 800))
    return image

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the commit notification service enabled
and are the intended recipient of this email.



More information about the yt-svn mailing list