[yt-svn] commit/yt: 20 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Apr 20 11:22:34 PDT 2016


20 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/0ff2ad1af2c5/
Changeset:   0ff2ad1af2c5
Branch:      yt
User:        atmyers
Date:        2016-03-31 02:39:28+00:00
Summary:     hook up proper sampling for hex8 elements
Affected #:  2 files

diff -r bd8a7c0ed718365c84f355f9b30a1a95bd77c98b -r 0ff2ad1af2c5131dfbe856fb04389264fcc36bc5 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -38,6 +38,9 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
+    cdef np.float64_t[:, ::1] vertices
+    cdef np.int64_t[:, ::1] indices
+    cdef np.float64_t[:, ::1] field_data
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil

diff -r bd8a7c0ed718365c84f355f9b30a1a95bd77c98b -r 0ff2ad1af2c5131dfbe856fb04389264fcc36bc5 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -5,6 +5,10 @@
 from libc.stdlib cimport malloc, free
 from cython.parallel import parallel, prange
 from vec3_ops cimport dot, subtract, cross
+from yt.utilities.lib.element_mappings cimport ElementSampler, \
+    Q1Sampler3D
+
+cdef ElementSampler Q1Sampler = Q1Sampler3D()
 
 cdef extern from "mesh_construction.h":
     enum:
@@ -104,6 +108,10 @@
                   np.int64_t[:, ::1] indices,
                   np.float64_t[:, ::1] field_data):
 
+        self.vertices = vertices
+        self.indices = indices
+        self.field_data = field_data
+
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
 
@@ -185,6 +193,33 @@
     @cython.cdivision(True)
     cdef void intersect(self, Ray* ray) nogil:
         self._recursive_intersect(ray, self.root)
+        
+        if ray.elem_id < 0:
+            return
+
+        cdef np.float64_t[3] position
+        cdef np.float64_t[3] direction
+        cdef np.float64_t[8] field_data
+        cdef np.int64_t[8] element_indices
+        cdef np.float64_t[24] vertices
+
+        cdef np.int64_t i
+        for i in range(3):
+            position[i] = ray.origin[i] + ray.t_far*ray.direction[i]
+            
+        for i in range(8):
+            element_indices[i] = self.indices[ray.elem_id][i]
+            field_data[i]      = self.field_data[ray.elem_id][i]
+
+        for i in range(8):
+            vertices[i*3]     = self.vertices[element_indices[i]][0]
+            vertices[i*3 + 1] = self.vertices[element_indices[i]][1]
+            vertices[i*3 + 2] = self.vertices[element_indices[i]][2]   
+
+        cdef double mapped_coord[3]
+        Q1Sampler.map_real_to_unit(mapped_coord, vertices, position)
+        val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
+        ray.data_val = val
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -277,6 +312,7 @@
             ray.t_far = INF
             ray.t_near = 0.0
             ray.data_val = 0
+            ray.elem_id = -1
             bvh.intersect(ray)
             image[i] = ray.data_val
 


https://bitbucket.org/yt_analysis/yt/commits/a049e9a49d9a/
Changeset:   a049e9a49d9a
Branch:      yt
User:        atmyers
Date:        2016-03-31 03:48:07+00:00
Summary:     Don't need data on the triangle nodes now
Affected #:  2 files

diff -r 0ff2ad1af2c5131dfbe856fb04389264fcc36bc5 -r a049e9a49d9aaeb782cd718c8ac14cc261a0eb42 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -30,7 +30,6 @@
     np.float64_t p0[3]
     np.float64_t p1[3]
     np.float64_t p2[3]
-    np.float64_t d0, d1, d2
     np.int64_t elem_id
     np.float64_t centroid[3]
     BBox bbox

diff -r 0ff2ad1af2c5131dfbe856fb04389264fcc36bc5 -r a049e9a49d9aaeb782cd718c8ac14cc261a0eb42 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -60,7 +60,6 @@
 
     if(t > DETERMINANT_EPS and t < ray.t_far):
         ray.t_far = t
-        ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
         ray.elem_id = tri.elem_id
         return True
 
@@ -113,6 +112,7 @@
         self.field_data = field_data
 
         cdef np.int64_t num_elem = indices.shape[0]
+        cdef np.int64_t num_verts_per_elem = indices.shape[1]
         cdef np.int64_t num_tri = 12*num_elem
 
         # fill our array of triangles
@@ -130,9 +130,6 @@
                 v0 = indices[i][triangulate_hex[j][0]]
                 v1 = indices[i][triangulate_hex[j][1]]
                 v2 = indices[i][triangulate_hex[j][2]]
-                tri.d0 = field_data[i][triangulate_hex[j][0]]
-                tri.d1 = field_data[i][triangulate_hex[j][1]]
-                tri.d2 = field_data[i][triangulate_hex[j][2]]
                 for k in range(3):
                     tri.p0[k] = vertices[v0][k]
                     tri.p1[k] = vertices[v1][k]


https://bitbucket.org/yt_analysis/yt/commits/22f8c1d1a8c7/
Changeset:   22f8c1d1a8c7
Branch:      yt
User:        atmyers
Date:        2016-04-02 17:39:23+00:00
Summary:     pre-compute a flattened data structure for faster ray tracing.
Affected #:  2 files

diff -r a049e9a49d9aaeb782cd718c8ac14cc261a0eb42 -r 22f8c1d1a8c78492d197d49a706cafd5c985bf3e yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -1,6 +1,7 @@
 cimport cython 
 import numpy as np
 cimport numpy as np
+from yt.utilities.lib.element_mappings cimport ElementSampler
 
 # ray data structure
 cdef struct Ray:
@@ -37,9 +38,13 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
-    cdef np.float64_t[:, ::1] vertices
-    cdef np.int64_t[:, ::1] indices
-    cdef np.float64_t[:, ::1] field_data
+    cdef np.float64_t* vertices
+    cdef np.float64_t* field_data
+    cdef np.int64_t num_tri_per_elem
+    cdef np.int64_t num_tri
+    cdef np.int64_t num_elem
+    cdef np.int64_t num_verts_per_elem
+    cdef ElementSampler sampler
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil

diff -r a049e9a49d9aaeb782cd718c8ac14cc261a0eb42 -r 22f8c1d1a8c78492d197d49a706cafd5c985bf3e yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -5,10 +5,16 @@
 from libc.stdlib cimport malloc, free
 from cython.parallel import parallel, prange
 from vec3_ops cimport dot, subtract, cross
-from yt.utilities.lib.element_mappings cimport ElementSampler, \
-    Q1Sampler3D
+from yt.utilities.lib.element_mappings cimport \
+    ElementSampler, \
+    Q1Sampler3D, \
+    P1Sampler3D, \
+    Q1Sampler3D, \
+    W1Sampler3D
 
 cdef ElementSampler Q1Sampler = Q1Sampler3D()
+cdef ElementSampler P1Sampler = P1Sampler3D()
+cdef ElementSampler W1Sampler = W1Sampler3D()
 
 cdef extern from "mesh_construction.h":
     enum:
@@ -103,27 +109,40 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def __cinit__(self,
-                  np.float64_t[:, ::1] vertices,
-                  np.int64_t[:, ::1] indices,
-                  np.float64_t[:, ::1] field_data):
+                  np.float64_t[:, :] vertices,
+                  np.int64_t[:, :] indices,
+                  np.float64_t[:, :] field_data):
 
-        self.vertices = vertices
-        self.indices = indices
-        self.field_data = field_data
+        self.num_elem = indices.shape[0]
+        self.num_verts_per_elem = indices.shape[1]
+        self.num_tri_per_elem = 12
+        self.num_tri = self.num_tri_per_elem*self.num_elem
+        self.sampler = Q1Sampler
 
-        cdef np.int64_t num_elem = indices.shape[0]
-        cdef np.int64_t num_verts_per_elem = indices.shape[1]
-        cdef np.int64_t num_tri = 12*num_elem
+        # allocate storage
+        cdef np.int64_t size = self.num_verts_per_elem * self.num_elem
+        self.vertices = <np.float64_t*> malloc(size * 3 * sizeof(np.float64_t))
+        self.field_data = <np.float64_t*> malloc(size * sizeof(np.float64_t))
+
+        # create data buffers
+        cdef np.int64_t i, j, k
+        cdef np.int64_t field_offset, vertex_offset
+        for i in range(self.num_elem):
+            field_offset = i*self.num_verts_per_elem
+            for j in range(self.num_verts_per_elem):
+                vertex_offset = i*self.num_verts_per_elem*3 + j*3
+                self.field_data[field_offset + j] = field_data[i][j]
+                for k in range(3):
+                    self.vertices[vertex_offset + k] = vertices[indices[i,j]][k]
 
         # fill our array of triangles
-        cdef np.int64_t i, j, k
         cdef np.int64_t offset, tri_index
         cdef np.int64_t v0, v1, v2
         cdef Triangle* tri
-        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
-        for i in range(num_elem):
-            offset = 12*i
-            for j in range(12):
+        self.triangles = <Triangle*> malloc(self.num_tri * sizeof(Triangle))
+        for i in range(self.num_elem):
+            offset = self.num_tri_per_elem*i
+            for j in range(self.num_tri_per_elem):
                 tri_index = offset + j
                 tri = &(self.triangles[tri_index])
                 tri.elem_id = i
@@ -138,7 +157,7 @@
                     tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
                     tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
 
-        self.root = self._recursive_build(0, num_tri)
+        self.root = self._recursive_build(0, self.num_tri)
 
     cdef void _recursive_free(self, BVHNode* node) nogil:
         if node.end - node.begin > LEAF_SIZE:
@@ -149,6 +168,8 @@
     def __dealloc__(self):
         self._recursive_free(self.root)
         free(self.triangles)
+        free(self.field_data)
+        free(self.vertices)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -195,28 +216,17 @@
             return
 
         cdef np.float64_t[3] position
-        cdef np.float64_t[3] direction
-        cdef np.float64_t[8] field_data
-        cdef np.int64_t[8] element_indices
-        cdef np.float64_t[24] vertices
-
         cdef np.int64_t i
         for i in range(3):
             position[i] = ray.origin[i] + ray.t_far*ray.direction[i]
             
-        for i in range(8):
-            element_indices[i] = self.indices[ray.elem_id][i]
-            field_data[i]      = self.field_data[ray.elem_id][i]
-
-        for i in range(8):
-            vertices[i*3]     = self.vertices[element_indices[i]][0]
-            vertices[i*3 + 1] = self.vertices[element_indices[i]][1]
-            vertices[i*3 + 2] = self.vertices[element_indices[i]][2]   
-
-        cdef double mapped_coord[3]
-        Q1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-        val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
-        ray.data_val = val
+        cdef np.float64_t* vertex_ptr
+        cdef np.float64_t* field_ptr         
+        vertex_ptr = self.vertices + ray.elem_id*self.num_verts_per_elem*3
+        field_ptr = self.field_data + ray.elem_id*self.num_verts_per_elem
+        ray.data_val = self.sampler.sample_at_real_point(vertex_ptr,
+                                                         field_ptr,
+                                                         position)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)


https://bitbucket.org/yt_analysis/yt/commits/49944aba410d/
Changeset:   49944aba410d
Branch:      yt
User:        atmyers
Date:        2016-04-02 17:53:20+00:00
Summary:     allow bvh to work with other mesh types
Affected #:  2 files

diff -r 22f8c1d1a8c78492d197d49a706cafd5c985bf3e -r 49944aba410d3708ab7ed03b421bd61226a5ce29 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -19,7 +19,16 @@
 cdef extern from "mesh_construction.h":
     enum:
         MAX_NUM_TRI
+
+    int HEX_NV
+    int HEX_NT
+    int TETRA_NV
+    int TETRA_NT
+    int WEDGE_NV
+    int WEDGE_NT
     int triangulate_hex[MAX_NUM_TRI][3]
+    int triangulate_tetra[MAX_NUM_TRI][3]
+    int triangulate_wedge[MAX_NUM_TRI][3]
 
 # define some constants
 cdef np.float64_t DETERMINANT_EPS = 1.0e-10
@@ -115,9 +124,22 @@
 
         self.num_elem = indices.shape[0]
         self.num_verts_per_elem = indices.shape[1]
-        self.num_tri_per_elem = 12
+
+        # We need to figure out what kind of elements we've been handed.
+        cdef int[MAX_NUM_TRI][3] tri_array
+        if self.num_verts_per_elem == 8:
+            self.num_tri_per_elem = HEX_NT
+            tri_array = triangulate_hex
+            self.sampler = Q1Sampler
+        elif self.num_verts_per_elem == 6:
+            self.num_tri_per_elem = WEDGE_NT
+            tri_array = triangulate_wedge
+            self.sampler = W1Sampler
+        elif self.num_verts_per_elem == 4:
+            self.num_tri_per_elem = TETRA_NT
+            tri_array = triangulate_tetra
+            self.sampler = P1Sampler
         self.num_tri = self.num_tri_per_elem*self.num_elem
-        self.sampler = Q1Sampler
 
         # allocate storage
         cdef np.int64_t size = self.num_verts_per_elem * self.num_elem
@@ -146,9 +168,9 @@
                 tri_index = offset + j
                 tri = &(self.triangles[tri_index])
                 tri.elem_id = i
-                v0 = indices[i][triangulate_hex[j][0]]
-                v1 = indices[i][triangulate_hex[j][1]]
-                v2 = indices[i][triangulate_hex[j][2]]
+                v0 = indices[i][tri_array[j][0]]
+                v1 = indices[i][tri_array[j][1]]
+                v2 = indices[i][tri_array[j][2]]
                 for k in range(3):
                     tri.p0[k] = vertices[v0][k]
                     tri.p1[k] = vertices[v1][k]

diff -r 22f8c1d1a8c78492d197d49a706cafd5c985bf3e -r 49944aba410d3708ab7ed03b421bd61226a5ce29 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -104,7 +104,7 @@
                  np.ndarray indices,
                  np.ndarray data):
 
-        # We need now to figure out what kind of elements we've been handed.
+        # We need to figure out what kind of elements we've been handed.
         if indices.shape[1] == 8:
             self.vpe = HEX_NV
             self.tpe = HEX_NT


https://bitbucket.org/yt_analysis/yt/commits/e0838f66ad6c/
Changeset:   e0838f66ad6c
Branch:      yt
User:        atmyers
Date:        2016-04-02 22:38:10+00:00
Summary:     begin hooking up BVH to rest of volume rendering code.
Affected #:  3 files

diff -r 49944aba410d3708ab7ed03b421bd61226a5ce29 -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -12,6 +12,7 @@
     np.float64_t t_near
     np.float64_t t_far
     np.int64_t elem_id
+    np.int64_t near_boundary
 
 # axis-aligned bounding box
 cdef struct BBox:

diff -r 49944aba410d3708ab7ed03b421bd61226a5ce29 -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -1,4 +1,4 @@
-cimport cython 
+cimport cython
 import numpy as np
 cimport numpy as np
 from libc.math cimport fabs, fmax, fmin
@@ -250,6 +250,8 @@
                                                          field_ptr,
                                                          position)
 
+        ray.near_boundary = -1
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

diff -r 49944aba410d3708ab7ed03b421bd61226a5ce29 -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -25,6 +25,7 @@
     ImageContainer
 from cython.parallel import prange, parallel, threadid
 from yt.visualization.image_writer import apply_colormap
+from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray 
 
 rtc.rtcInit(NULL)
 rtc.rtcSetErrorFunction(error_printer)
@@ -109,3 +110,71 @@
         self.zbuffer = zbuffer
         free(v_pos)
         free(v_dir)
+
+
+cdef class BVHMeshSampler(ImageSampler):
+
+    cdef public object image_used
+    cdef public object mesh_lines
+    cdef public object zbuffer
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, 
+                 BVH bvh,
+                 int num_threads = 0):
+        '''
+
+        This function is supposed to cast the rays and return the
+        image.
+
+        '''
+
+        cdef int vi, vj, i, j
+        cdef ImageContainer *im = self.image
+        cdef np.float64_t *v_pos
+        cdef np.float64_t *v_dir
+        cdef np.int64_t nx, ny, size
+        cdef np.float64_t width[3]
+        for i in range(3):
+            width[i] = self.width[i]
+        cdef np.ndarray[np.float64_t, ndim=1] data
+        cdef np.ndarray[np.int64_t, ndim=1] used
+        nx = im.nv[0]
+        ny = im.nv[1]
+        size = nx * ny
+        used = np.empty(size, dtype="int64")
+        mesh = np.empty(size, dtype="int64")
+        data = np.empty(size, dtype="float64")
+        zbuffer = np.empty(size, dtype="float64")
+        cdef Ray* ray
+        with nogil, parallel(num_threads = num_threads):
+            ray = <Ray *> malloc(sizeof(Ray))
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size):
+                vj = j % ny
+                vi = (j - vj) / ny
+                vj = vj
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(3):
+                    ray.origin[i] = v_pos[i]
+                    ray.direction[i] = v_dir[i]
+                    ray.inv_dir[i] = 1.0 / v_dir[i]
+                ray.t_far = np.inf
+                ray.t_near = 0.0
+                ray.data_val = 0
+                ray.elem_id = -1
+                bvh.intersect(ray)
+                data[j] = ray.data_val
+                used[j] = ray.elem_id
+                mesh[j] = ray.near_boundary
+                zbuffer[j] = ray.t_far
+        self.aimage = data
+        self.image_used = used
+        self.mesh_lines = mesh
+        self.zbuffer = zbuffer
+        free(v_pos)
+        free(v_dir)
+        free(ray)


https://bitbucket.org/yt_analysis/yt/commits/672764585671/
Changeset:   672764585671
Branch:      yt
User:        atmyers
Date:        2016-04-03 20:52:13+00:00
Summary:     merging with tip
Affected #:  17 files

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -7,12 +7,12 @@
 
 % To make this come out properly in landscape mode, do one of the following
 % 1.
-%  pdflatex latexsheet.tex
+%  pdflatex cheatsheet.tex
 %
 % 2.
-%  latex latexsheet.tex
-%  dvips -P pdf  -t landscape latexsheet.dvi
-%  ps2pdf latexsheet.ps
+%  latex cheatsheet.tex
+%  dvips -P pdf  -t landscape cheatsheet.dvi
+%  ps2pdf cheatsheet.ps
 
 
 % If you're reading this, be prepared for confusion.  Making this was
@@ -45,7 +45,7 @@
 
 % Turn off header and footer
 \pagestyle{empty}
- 
+
 
 % Redefine section commands to use less space
 \makeatletter
@@ -117,26 +117,26 @@
 including a list of the available flags.
 
 \texttt{iyt}\textemdash\ Load yt and IPython. \\
-\texttt{yt load} {\it dataset}   \textemdash\ Load a single dataset.  \\
+\texttt{yt load} \textit{dataset}   \textemdash\ Load a single dataset.  \\
 \texttt{yt help} \textemdash\ Print yt help information. \\
-\texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
+\texttt{yt stats} \textit{dataset} \textemdash\ Print stats of a dataset. \\
 \texttt{yt update} \textemdash\ Update yt to most recent version.\\
 \texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
 \texttt{yt version} \textemdash\ yt installation information. \\
 \texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
-\texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
-\texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
-\texttt{yt render} {\it dataset} \textemdash\ Create a simple
+\texttt{yt upload\_image} \textit{image.png} \textemdash\ Upload PNG image to imgur.com. \\
+\texttt{yt upload\_notebook} \textit{notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
+\texttt{yt plot} \textit{dataset} \textemdash\ Create a set of images.\\
+\texttt{yt render} \textit{dataset} \textemdash\ Create a simple
  volume rendering. \\
-\texttt{yt mapserver} {\it dataset} \textemdash\ View a plot/projection in a Gmaps-like
+\texttt{yt mapserver} \textit{dataset} \textemdash\ View a plot/projection in a Gmaps-like
  interface. \\
-\texttt{yt pastebin} {\it text.out} \textemdash\ Post text to the pastebin at
- paste.yt-project.org. \\ 
-\texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
+\texttt{yt pastebin} \textit{text.out} \textemdash\ Post text to the pastebin at
+ paste.yt-project.org. \\
+\texttt{yt pastebin\_grab} \textit{identifier} \textemdash\ Print content of pastebin to
  STDOUT. \\
 \texttt{yt bugreport} \textemdash\ Report a yt bug. \\
-\texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
+\texttt{yt hop} \textit{dataset} \textemdash\  Run hop on a dataset. \\
 
 \subsection{yt Imports}
 In order to use yt, Python must load the relevant yt modules into memory.
@@ -144,15 +144,15 @@
 used as part of a script.
 \newlength{\MyLen}
 \settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{import yt}  \textemdash\ 
+\texttt{import yt}  \textemdash\
 Load yt. \\
-\texttt{from yt.config import ytcfg}  \textemdash\ 
+\texttt{from yt.config import ytcfg}  \textemdash\
 Used to set yt configuration options.
 If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
+\texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\
 Load halo finding modules. Other modules
-are loaded in a similar way by swapping the 
-{\em emphasized} text.
+are loaded in a similar way by swapping the
+\emph{emphasized} text.
 See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
 
 \subsection{YTArray}
@@ -163,32 +163,32 @@
 very brief list of some useful ones.
 \settowidth{\MyLen}{\texttt{multicol} }\\
 \texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
-\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\ 
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\
 \texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
 \texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
 min value of \texttt{a}.\\
-\texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{v = a[}\textit{index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location \textit{index}.\\
+\texttt{b = a[}\textit{i:j}\texttt{]} \textemdash\ Select the slice of values from
 \texttt{a} between
-locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
+locations \textit{i} to \textit{j-1} saved to a new Numpy array \texttt{b} with length \textit{j-i}. \\
 \texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
 \texttt{sel}, of the same shape as \texttt{a},
 that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
 \texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
 elements from \texttt{a} that correspond to elements of \texttt{sel}
-that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
+that are \textit{True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
+\texttt{a.write\_hdf5(\textit{filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file \textit{filename.h5}.\\
 
 \subsection{IPython Tips}
 \settowidth{\MyLen}{\texttt{multicol} }
 These tips work if IPython has been loaded, typically either by invoking
 \texttt{iyt} or \texttt{yt load} on the command line, or using the IPython notebook (\texttt{yt notebook}).
 \texttt{Tab complete} \textemdash\ IPython will attempt to auto-complete a
-variable or function name when the \texttt{Tab} key is pressed, e.g. {\it HaloFi}\textendash\texttt{Tab} would auto-complete
-to {\it HaloFinder}. This also works with imports, e.g. {\it from numpy.random.}\textendash\texttt{Tab}
+variable or function name when the \texttt{Tab} key is pressed, e.g. \textit{HaloFi}\textendash\texttt{Tab} would auto-complete
+to \textit{HaloFinder}. This also works with imports, e.g. \textit{from numpy.random.}\textendash\texttt{Tab}
 would give you a list of random functions (note the trailing period before hitting \texttt{Tab}).\\
 \texttt{?, ??} \textemdash\ Appending one or two question marks at the end of any object gives you
-detailed information about it, e.g. {\it variable\_name}?.\\
+detailed information about it, e.g. \textit{variable\_name}?.\\
 Below a few IPython ``magics'' are listed, which are IPython-specific shortcut commands.\\
 \texttt{\%paste} \textemdash\ Paste content from the system clipboard into the IPython shell.\\
 \texttt{\%hist} \textemdash\ Print recent command history.\\
@@ -204,40 +204,40 @@
 
 \subsection{Load and Access Data}
 The first step in using yt is to reference a simulation snapshot.
-After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
+After that, simulation data is generally accessed in yt using \textit{Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{ds = yt.load(}\textit{dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
 \texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+\texttt{a = dd[}\textit{field\_name}\texttt{]} \textemdash\ Copies the contents of \textit{field} into the
 YTArray \texttt{a}. Similarly for other data containers.\\
 \texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
 \texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
 \texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
-container. {\it cen} may be a coordinate, or ``max'' which 
-centers on the max density point. {\it radius} may be a float in 
-code units or a tuple of ({\it length, unit}).\\
+\texttt{sp = ds.sphere(}\textit{cen}\texttt{,}\textit{radius}\texttt{)} \textemdash\   Create a spherical data
+container. \textit{cen} may be a coordinate, or ``max'' which
+centers on the max density point. \textit{radius} may be a float in
+code units or a tuple of (\textit{length, unit}).\\
 
-\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
-rectilinear data container. {\it cen} is required but not used.
-{\it left} and {\it right edge} are coordinate values that define the region.
+\texttt{re = ds.region(\textit{cen}, \textit{left edge}, \textit{right edge})} \textemdash\ Create a
+rectilinear data container. \textit{cen} is required but not used.
+\textit{left} and \textit{right edge} are coordinate values that define the region.
 
-\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
-Create a cylindrical data container centered at {\it cen} along the 
-direction set by {\it normal},with total length
- 2$\times${\it height} and with radius {\it radius}. \\
- 
-\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{di = ds.disk(\textit{cen}, \textit{normal}, \textit{radius}, \textit{height})} \textemdash\
+Create a cylindrical data container centered at \textit{cen} along the
+direction set by \textit{normal},with total length
+ 2$\times$\textit{height} and with radius \textit{radius}. \\
+
+\texttt{ds.save\_object(sp, \textit{``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object(\textit{``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. 
+\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory.
 Field can either be created before a dataset is loaded using \texttt{add\_field}:
-\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{def \_metal\_mass(\textit{field},\textit{data})}\\
 \texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
 \texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
 Or added to an existing dataset using \texttt{ds.add\_field}:
@@ -245,34 +245,34 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
-\texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
+\texttt{slc = yt.SlicePlot(ds, \textit{axis or normal vector}, \textit{field}, \textit{center=}, \textit{width=}, \textit{weight\_field=}, \textit{additional parameters})} \textemdash\ Make a slice plot
+perpendicular to \textit{axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of \textit{field} weighted by \textit{weight\_field} at (code-units) \textit{center} with
+\textit{width} in code units or a (value, unit) tuple. Hint: try \textit{yt.SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
-Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a {\it plot\_args} dict that is fed to matplotlib annotator. \\
-\texttt{velocity({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
-\texttt{magnetic\_field({\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
-\texttt{quiver({\it field\_x},{\it field\_y},{\it factor=},{\it scale=},{\it scale\_units=}, {\it normalize=})} \\
-\texttt{contour({\it field=},{\it ncont=},{\it factor=},{\it clim=},{\it take\_log=}, {\it additional parameters})} \textemdash Plots a number of contours {\it ncont} to interpolate {\it field} optionally using {\it take\_log}, upper and lower {\it c}ontour{\it lim}its and {\it factor} number of points in the interpolation.\\
-\texttt{grids({\it alpha=}, {\it draw\_ids=}, {\it periodic=}, {\it min\_level=}, {\it max\_level=})} \textemdash Add grid boundaries. \\
-\texttt{streamlines({\it field\_x},{\it field\_y},{\it factor=},{\it density=})}\\
-\texttt{clumps({\it clumplist})} \textemdash\ Generate {\it clumplist} using the clump finder and plot. \\
-\texttt{arrow({\it pos}, {\it code\_size})} Add an arrow at a {\it pos}ition. \\
-\texttt{point({\it pos}, {\it text})} \textemdash\ Add text at a {\it pos}ition. \\
-\texttt{marker({\it pos}, {\it marker=})} \textemdash\ Add a matplotlib-defined marker at a {\it pos}ition. \\
-\texttt{sphere({\it center}, {\it radius}, {\it text=})} \textemdash\ Draw a circle and append {\it text}.\\
-\texttt{hop\_circles({\it hop\_output}, {\it max\_number=}, {\it annotate=}, {\it min\_size=}, {\it max\_size=}, {\it font\_size=}, {\it print\_halo\_size=}, {\it fixed\_radius=}, {\it min\_mass=}, {\it print\_halo\_mass=}, {\it width=})} \textemdash\ Draw a halo, printing it's ID, mass, clipping halos depending on number of particles ({\it size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
-\texttt{hop\_particles({\it hop\_output},{\it max\_number=},{\it p\_size=},\\
-{\it min\_size},{\it alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
-\texttt{particles({\it width},{\it p\_size=},{\it col=}, {\it marker=}, {\it stride=}, {\it ptype=}, {\it stars\_only=}, {\it dm\_only=}, {\it minimum\_mass=}, {\it alpha=})}  \textemdash\  Draw particles of {\it p\_size} pixels in a slab of {\it width} with {\it col}or using a matplotlib {\it marker} plotting only every {\it stride} number of particles.\\
-\texttt{title({\it text})}\\
+Plot callbacks are functions itemized in a registry that is attached to every plot object. They can be accessed and then called like \texttt{ prj.annotate\_velocity(factor=16, normalize=False)}. Most callbacks also accept a \textit{plot\_args} dict that is fed to matplotlib annotator. \\
+\texttt{velocity(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "x-velocity" to draw quivers\\
+\texttt{magnetic\_field(\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \textemdash\ Uses field "Bx" to draw quivers\\
+\texttt{quiver(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{scale=},\textit{scale\_units=}, \textit{normalize=})} \\
+\texttt{contour(\textit{field=},\textit{ncont=},\textit{factor=},\textit{clim=},\textit{take\_log=}, \textit{additional parameters})} \textemdash Plots a number of contours \textit{ncont} to interpolate \textit{field} optionally using \textit{take\_log}, upper and lower \textit{c}ontour\textit{lim}its and \textit{factor} number of points in the interpolation.\\
+\texttt{grids(\textit{alpha=}, \textit{draw\_ids=}, \textit{periodic=}, \textit{min\_level=}, \textit{max\_level=})} \textemdash Add grid boundaries. \\
+\texttt{streamlines(\textit{field\_x},\textit{field\_y},\textit{factor=},\textit{density=})}\\
+\texttt{clumps(\textit{clumplist})} \textemdash\ Generate \textit{clumplist} using the clump finder and plot. \\
+\texttt{arrow(\textit{pos}, \textit{code\_size})} Add an arrow at a \textit{pos}ition. \\
+\texttt{point(\textit{pos}, \textit{text})} \textemdash\ Add text at a \textit{pos}ition. \\
+\texttt{marker(\textit{pos}, \textit{marker=})} \textemdash\ Add a matplotlib-defined marker at a \textit{pos}ition. \\
+\texttt{sphere(\textit{center}, \textit{radius}, \textit{text=})} \textemdash\ Draw a circle and append \textit{text}.\\
+\texttt{hop\_circles(\textit{hop\_output}, \textit{max\_number=}, \textit{annotate=}, \textit{min\_size=}, \textit{max\_size=}, \textit{font\_size=}, \textit{print\_halo\_size=}, \textit{fixed\_radius=}, \textit{min\_mass=}, \textit{print\_halo\_mass=}, \textit{width=})} \textemdash\ Draw a halo, printing its ID, mass, clipping halos depending on number of particles (\textit{size}) and optionally fixing the drawn circle radius to be constant for all halos.\\
+\texttt{hop\_particles(\textit{hop\_output},\textit{max\_number=},\textit{p\_size=},\\
+\textit{min\_size},\textit{alpha=})} \textemdash\ Draw particle positions for member halos with a certain number of pixels per particle.\\
+\texttt{particles(\textit{width},\textit{p\_size=},\textit{col=}, \textit{marker=}, \textit{stride=}, \textit{ptype=}, \textit{stars\_only=}, \textit{dm\_only=}, \textit{minimum\_mass=}, \textit{alpha=})}  \textemdash\  Draw particles of \textit{p\_size} pixels in a slab of \textit{width} with \textit{col}or using a matplotlib \textit{marker} plotting only every \textit{stride} number of particles.\\
+\texttt{title(\textit{text})}\\
 
 \subsection{The $\sim$/.yt/ Directory}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -297,12 +297,12 @@
 
 
 \subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}} 
+\settowidth{\MyLen}{\texttt{multicol}}
 Nearly all of yt is parallelized using
-MPI.  The {\it mpi4py} package must be installed for parallelism in yt.  To
-install {\it pip install mpi4py} on the command line usually works.
+MPI\@.  The \textit{mpi4py} package must be installed for parallelism in yt.  To
+install \textit{pip install mpi4py} on the command line usually works.
 Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py}\\
+\textit{mpirun -n 12 python script.py}\\
 The file \texttt{script.py} must call the \texttt{yt.enable\_parallelism()} to
 turn on yt's parallelism.  If this doesn't happen, all cores will execute the
 same serial yt script.  This command may differ for each system on which you use
@@ -320,12 +320,12 @@
 \texttt{hg clone https://bitbucket.org/yt\_analysis/yt} \textemdash\ Clone a copy of yt. \\
 \texttt{hg status} \textemdash\ Files changed in working directory.\\
 \texttt{hg diff} \textemdash\ Print diff of all changed files in working directory. \\
-\texttt{hg diff -r{\it RevX} -r{\it RevY}} \textemdash\ Print diff of all changes between revision {\it RevX} and {\it RevY}.\\
+\texttt{hg diff -r\textit{RevX} -r\textit{RevY}} \textemdash\ Print diff of all changes between revision \textit{RevX} and \textit{RevY}.\\
 \texttt{hg log} \textemdash\ History of changes.\\
-\texttt{hg cat -r{\it RevX file}} \textemdash\ Print the contents of {\it file} from revision {\it RevX}.\\
+\texttt{hg cat -r\textit{RevX file}} \textemdash\ Print the contents of \textit{file} from revision \textit{RevX}.\\
 \texttt{hg heads} \textemdash\ Print all the current heads. \\
-\texttt{hg revert -r{\it RevX file}} \textemdash\ Revert {\it file} to revision {\it RevX}. On-disk changed version is
-moved to {\it file.orig}. \\
+\texttt{hg revert -r\textit{RevX file}} \textemdash\ Revert \textit{file} to revision \textit{RevX}. On-disk changed version is
+moved to \textit{file.orig}. \\
 \texttt{hg commit} \textemdash\ Commit changes to repository. \\
 \texttt{hg push} \textemdash\ Push changes to default remote repository. \\
 \texttt{hg pull} \textemdash\ Pull changes from default remote repository. \\

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -41,11 +41,11 @@
    depending on the user-supplied over density
    threshold parameter. The default is 160.0.
 
-Please see the `HOP method paper
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
-full details and the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+Please see the `HOP method paper 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
+full details and the 
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
+documentation.
 
 .. _fof:
 
@@ -53,8 +53,8 @@
 ---
 
 A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
+documentation.
 
 .. _rockstar:
 

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -11,16 +11,22 @@
 """
 import glob
 import os
+import sys
 import subprocess
 
 
 PARALLEL_TEST = {"rockstar_nest.py": "3"}
+BLACKLIST = ["opengl_ipython.py", "opengl_vr.py"]
 
+if sys.version_info >= (3,0,0):
+    BLACKLIST.append("rockstar_nest.py")
 
 def test_recipe():
     '''Dummy test grabbing all cookbook's recipes'''
     for fname in glob.glob("doc/source/cookbook/*.py"):
         recipe = os.path.basename(fname)
+        if recipe in BLACKLIST:
+            continue
         check_recipe.description = "Testing recipe: %s" % recipe
         if recipe in PARALLEL_TEST:
             yield check_recipe, \

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -42,7 +42,7 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_273:
+  local_varia_274:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe tests/tests_3.5.yaml
--- a/tests/tests_3.5.yaml
+++ b/tests/tests_3.5.yaml
@@ -25,6 +25,8 @@
 
   local_halos_350:
     - yt/frontends/owls_subfind/tests/test_outputs.py
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
   
   local_owls_350:
     - yt/frontends/owls/tests/test_outputs.py
@@ -40,7 +42,7 @@
   local_tipsy_350:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_350:
+  local_varia_351:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -59,7 +59,10 @@
 
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
-        bulk_vel=None, tasks=None, rms_vel=None, supp=None):
+        bulk_vel=None, tasks=None, rms_vel=None, supp=None, ptype=None):
+        if ptype is None:
+            ptype = "all"
+        self.ptype = ptype
         self.halo_list = halo_list
         self._max_dens = halo_list._max_dens
         self.id = id
@@ -276,10 +279,7 @@
         return r.max()
 
     def __getitem__(self, key):
-        if ytcfg.getboolean("yt", "inline") is False:
-            return self.data[key][self.indices]
-        else:
-            return self.data[key][self.indices]
+        return self.data[(self.ptype, key)][self.indices]
 
     def get_sphere(self, center_of_mass=True):
         r"""Returns a sphere source.
@@ -954,7 +954,8 @@
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True, redshift=-1):
+    def __init__(self, data_source, dm_only=True, redshift=-1,
+                 ptype=None):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is True (default), only run it on the dark matter particles,
@@ -963,6 +964,9 @@
         """
         self._data_source = data_source
         self.dm_only = dm_only
+        if ptype is None:
+            ptype = "all"
+        self.ptype = ptype
         self._groups = []
         self._max_dens = {}
         self.__obtain_particles()
@@ -979,14 +983,14 @@
             ii = slice(None)
         self.particle_fields = {}
         for field in self._fields:
-            tot_part = self._data_source[field].size
+            tot_part = self._data_source[(self.ptype, field)].size
             if field == "particle_index":
                 self.particle_fields[field] = \
-                    self._data_source[field][ii].astype('int64')
+                    self._data_source[(self.ptype, field)][ii].astype('int64')
             else:
                 self.particle_fields[field] = \
-                    self._data_source[field][ii].astype('float64')
-            del self._data_source[field]
+                    self._data_source[(self.ptype, field)][ii].astype('float64')
+            del self._data_source[(self.ptype, field)]
         self._base_indices = np.arange(tot_part)[ii]
         gc.collect()
 
@@ -1014,11 +1018,12 @@
                 cp += counts[i + 1]
                 continue
             group_indices = grab_indices[cp:cp_c]
-            self._groups.append(self._halo_class(self, i, group_indices))
+            self._groups.append(self._halo_class(self, i, group_indices,
+                                                 ptype=self.ptype))
             md_i = np.argmax(dens[cp:cp_c])
             px, py, pz = \
                 [self.particle_fields['particle_position_%s' % ax][group_indices]
-                                            for ax in 'xyz']
+                 for ax in 'xyz']
             self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i],
                 py[md_i], pz[md_i])
             cp += counts[i + 1]
@@ -1260,10 +1265,11 @@
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
               ["particle_mass"]
 
-    def __init__(self, data_source, threshold=160.0, dm_only=True):
+    def __init__(self, data_source, threshold=160.0, dm_only=True,
+                 ptype=None):
         self.threshold = threshold
         mylog.info("Initializing HOP")
-        HaloList.__init__(self, data_source, dm_only)
+        HaloList.__init__(self, data_source, dm_only, ptype=ptype)
 
     def _run_finder(self):
         self.densities, self.tags = \
@@ -1298,10 +1304,12 @@
     _name = "FOF"
     _halo_class = FOFHalo
 
-    def __init__(self, data_source, link=0.2, dm_only=True, redshift=-1):
+    def __init__(self, data_source, link=0.2, dm_only=True, redshift=-1,
+                 ptype=None):
         self.link = link
         mylog.info("Initializing FOF")
-        HaloList.__init__(self, data_source, dm_only, redshift=redshift)
+        HaloList.__init__(self, data_source, dm_only, redshift=redshift,
+                          ptype=ptype)
 
     def _run_finder(self):
         self.tags = \
@@ -1450,12 +1458,15 @@
             halo += 1
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
-    def __init__(self, ds, data_source, dm_only=True, padding=0.0):
+    def __init__(self, ds, data_source, padding=0.0, ptype=None):
         ParallelAnalysisInterface.__init__(self)
         self.ds = ds
         self.index = ds.index
         self.center = (np.array(data_source.right_edge) +
                        np.array(data_source.left_edge)) / 2.0
+        if ptype is None:
+            ptype = "all"
+        self.ptype = ptype
 
     def _parse_halolist(self, threshold_adjustment):
         groups = []
@@ -1474,7 +1485,7 @@
                     threshold_adjustment
                 max_dens[hi] = [max_dens_temp] + \
                     list(self._max_dens[halo.id])[1:4]
-                groups.append(self._halo_class(self, hi))
+                groups.append(self._halo_class(self, hi, ptype=self.ptype))
                 groups[-1].indices = halo.indices
                 self.comm.claim_object(groups[-1])
                 hi += 1
@@ -1505,9 +1516,11 @@
         # Note: we already know which halos we own!
         after = my_first_id + len(self._groups)
         # One single fake halo, not owned, does the trick
-        self._groups = [self._halo_class(self, i) for i in range(my_first_id)] + \
+        self._groups = [self._halo_class(self, i, ptype=self.ptype)
+                        for i in range(my_first_id)] + \
                        self._groups + \
-                       [self._halo_class(self, i) for i in range(after, nhalos)]
+                       [self._halo_class(self, i, ptype=self.ptype)
+                        for i in range(after, nhalos)]
         id = 0
         for proc in sorted(halo_info.keys()):
             for halo in self._groups[id:id + halo_info[proc]]:
@@ -1540,7 +1553,7 @@
         LE, RE = bounds
         dw = self.ds.domain_right_edge - self.ds.domain_left_edge
         for i, ax in enumerate('xyz'):
-            arr = self._data_source["particle_position_%s" % ax]
+            arr = self._data_source[self.ptype, "particle_position_%s" % ax]
             arr[arr < LE[i] - self.padding] += dw[i]
             arr[arr > RE[i] + self.padding] -= dw[i]
 
@@ -1671,9 +1684,15 @@
         to the full volume automatically.
     threshold : float
         The density threshold used when building halos. Default = 160.0.
-    dm_only : bool
+    dm_only : bool (deprecated)
         If True, only dark matter particles are used when building halos.
+        This has been deprecated.  Instead, the ptype keyword should be
+        used to specify a particle type.
         Default = True.
+    ptype : string
+        When dm_only is set to False, this sets the type of particle to be
+        used for halo finding, with a default of "all".  This should not be
+        used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
 with duplicated particles for halo finding to work. This number
@@ -1697,14 +1716,14 @@
     >>> halos = HaloFinder(ds)
     """
     def __init__(self, ds, subvolume=None, threshold=160, dm_only=True,
-            padding=0.02, total_mass=None):
+                 ptype=None, padding=0.02, total_mass=None):
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
         self.period = ds.domain_right_edge - ds.domain_left_edge
         self._data_source = ds.all_data()
-        GenericHaloFinder.__init__(self, ds, self._data_source, dm_only,
-            padding)
+        GenericHaloFinder.__init__(self, ds, self._data_source, padding,
+                                   ptype=ptype)
         # do it once with no padding so the total_mass is correct
         # (no duplicated particles), and on the entire volume, even if only
         # a small part is actually going to be used.
@@ -1712,6 +1731,21 @@
         padded, LE, RE, self._data_source = \
             self.partition_index_3d(ds=self._data_source,
                 padding=self.padding)
+
+        if dm_only:
+            mylog.warn("dm_only is deprecated.  " +
+                       "Use ptype to specify a particle type, instead.")
+
+        # Don't allow dm_only=True and setting a ptype.
+        if dm_only and ptype is not None:
+            raise RuntimeError(
+                "If dm_only is True, ptype must be None.  " + \
+                "dm_only must be False if ptype is set.")
+
+        if ptype is None:
+            ptype = "all"
+        self.ptype = ptype
+
         # For scaling the threshold, note that it's a passthrough
         if total_mass is None:
             if dm_only:
@@ -1719,7 +1753,9 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
+                total_mass = self.comm.mpi_allreduce(
+                    self._data_source.quantities.total_quantity(
+                        (self.ptype, "particle_mass")).in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -1743,9 +1779,10 @@
             sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
+                self._data_source.quantities.total_quantity(
+                    (self.ptype, "particle_mass")).in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
-            threshold * total_mass / sub_mass, dm_only)
+            threshold * total_mass / sub_mass, dm_only, ptype=self.ptype)
         self._parse_halolist(total_mass / sub_mass)
         self._join_halolists()
 
@@ -1776,9 +1813,15 @@
         average) used to build the halos. If negative, this is taken to be
         the *actual* linking length, and no other calculations will be
         applied.  Default = 0.2.
-    dm_only : bool
+    dm_only : bool (deprecated)
         If True, only dark matter particles are used when building halos.
+        This has been deprecated.  Instead, the ptype keyword should be
+        used to specify a particle type.
         Default = True.
+    ptype : string
+        When dm_only is set to False, this sets the type of particle to be
+        used for halo finding, with a default of "all".  This should not be
+        used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
 with duplicated particles for halo finding to work. This number
@@ -1791,7 +1834,7 @@
     >>> halos = FOFHaloFinder(ds)
     """
     def __init__(self, ds, subvolume=None, link=0.2, dm_only=True,
-        padding=0.02):
+                 ptype=None, padding=0.02):
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -1800,13 +1843,27 @@
         self.index = ds.index
         self.redshift = ds.current_redshift
         self._data_source = ds.all_data()
-        GenericHaloFinder.__init__(self, ds, self._data_source, dm_only,
-                                   padding)
+        GenericHaloFinder.__init__(self, ds, self._data_source, padding)
         self.padding = 0.0  # * ds["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
             self.partition_index_3d(ds=self._data_source,
             padding=self.padding)
+
+        if dm_only:
+            mylog.warn("dm_only is deprecated.  " +
+                       "Use ptype to specify a particle type, instead.")
+
+        # Don't allow dm_only=True and setting a ptype.
+        if dm_only and ptype is not None:
+            raise RuntimeError(
+                "If dm_only is True, ptype must be None.  " + \
+                "dm_only must be False if ptype is set.")
+
+        if ptype is None:
+            ptype = "all"
+        self.ptype = ptype
+
         if link > 0.0:
             n_parts = self.comm.mpi_allreduce(self._data_source["particle_position_x"].size, op='sum')
             # get the average spacing between particles
@@ -1834,7 +1891,7 @@
         # here is where the FOF halo finder is run
         mylog.info("Using a linking length of %0.3e", linking_length)
         FOFHaloList.__init__(self, self._data_source, linking_length, dm_only,
-                             redshift=self.redshift)
+                             redshift=self.redshift, ptype=self.ptype)
         self._parse_halolist(1.)
         self._join_halolists()
 

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/analysis_modules/halo_finding/tests/test_halo_finders.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/tests/test_halo_finders.py
@@ -0,0 +1,61 @@
+"""
+Tests for HOP and FOF halo finders.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.convenience import \
+    load
+from yt.data_objects.particle_filters import \
+    add_particle_filter
+from yt.analysis_modules.halo_analysis.api import \
+    HaloCatalog
+from yt.testing import \
+    requires_file, \
+    assert_array_equal
+from yt.utilities.answer_testing.framework import \
+    data_dir_load
+
+import tempfile
+import os
+import shutil
+
+def dm(pfilter, data):
+    return data["creation_time"] <= 0.
+add_particle_filter("dm", dm, filtered_type='all',
+                    requires=["creation_time"])
+
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+ at requires_file(enzotiny)
+def test_datacontainer_data():
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+    ds = data_dir_load(enzotiny)
+    ds.add_particle_filter("dm")
+
+    for method in ["fof", "hop"]:
+        hc = HaloCatalog(data_ds=ds, finder_method=method,
+                         output_dir="hc1",
+                         finder_kwargs={"dm_only": True})
+        hc.create()
+        hc = HaloCatalog(data_ds=ds, finder_method=method,
+                         output_dir="hc2",
+                         finder_kwargs={"dm_only": False, "ptype": "dm"})
+        hc.create()
+
+        ds1 = load("hc1/hc1.0.h5")
+        ds2 = load("hc2/hc2.0.h5")
+        assert_array_equal(ds1.r["particle_mass"], ds2.r["particle_mass"])
+
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/frontends/gadget_fof/tests/test_outputs.py
--- a/yt/frontends/gadget_fof/tests/test_outputs.py
+++ b/yt/frontends/gadget_fof/tests/test_outputs.py
@@ -62,7 +62,7 @@
     for hid in range(0, ds.index.particle_count["Group"]):
         my_h = ds.halo("Group", hid)
         h_ids = my_h["ID"]
-        for sid in range(my_h["subhalo_number"]):
+        for sid in range(int(my_h["subhalo_number"][0])):
             my_s = ds.halo("Subhalo", (my_h.particle_identifier, sid))
             total_sub += my_s["ID"].size
             total_int += np.intersect1d(h_ids, my_s["ID"]).size

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -492,6 +492,15 @@
     cdef void setup(self, PartitionedGrid pg):
         return
 
+    def __dealloc__(self):
+        self.image.image = None
+        self.image.vp_pos = None
+        self.image.vp_dir = None
+        self.image.zbuffer = None
+        self.image.camera_data = None
+        free(self.image)
+
+
 cdef void projection_sampler(
                  VolumeContainer *vc,
                  np.float64_t v_pos[3],

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -2,6 +2,7 @@
     Vertex, \
     Triangle, \
     Vec3f
+cimport numpy as np
 
 ctypedef struct MeshDataContainer:
     Vertex* vertices       # array of triangle vertices
@@ -14,6 +15,5 @@
 ctypedef struct Patch:
     float[8][3] v
     unsigned int geomID
-    long [:,:] indices
-    double [:,:] vertices
-    double [:,:] field_data
+    np.float64_t* vertices
+    np.float64_t* field_data

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -241,6 +241,8 @@
     '''
 
     cdef Patch* patches
+    cdef np.float64_t* vertices
+    cdef np.float64_t* field_data
     cdef unsigned int mesh
     # patches per element, vertices per element, and field points per 
     # element, respectively:
@@ -265,11 +267,22 @@
                                   np.ndarray field_data):
         cdef int i, j, ind, idim
         cdef int ne = indices_in.shape[0]
+        cdef int nv = vertices_in.shape[0]
         cdef int npatch = 6*ne;
 
         cdef unsigned int mesh = rtcgu.rtcNewUserGeometry(scene.scene_i, npatch)
         cdef np.ndarray[np.float64_t, ndim=2] element_vertices
-        cdef Patch* patches = <Patch*> malloc(npatch * sizeof(Patch));
+        cdef Patch* patches = <Patch*> malloc(npatch * sizeof(Patch))
+        self.vertices = <np.float64_t*> malloc(20 * ne * 3 * sizeof(np.float64_t))
+        self.field_data = <np.float64_t*> malloc(20 * ne * sizeof(np.float64_t))
+
+        for i in range(ne):
+            element_vertices = vertices_in[indices_in[i]]
+            for j in range(20):
+                self.field_data[i*20 + j] = field_data[i][j]
+                for k in range(3):
+                    self.vertices[i*20*3 + j*3 + k] = element_vertices[j][k]
+
         cdef Patch* patch
         for i in range(ne):  # for each element
             element_vertices = vertices_in[indices_in[i]]
@@ -280,9 +293,8 @@
                     ind = hex20_faces[j][k]
                     for idim in range(3):  # for each spatial dimension (yikes)
                         patch.v[k][idim] = element_vertices[ind][idim]
-                patch.indices = indices_in
-                patch.vertices = vertices_in
-                patch.field_data = field_data
+                patch.vertices = self.vertices + i*20*3
+                patch.field_data = self.field_data + i*20
 
         self.patches = patches
         self.mesh = mesh
@@ -295,3 +307,5 @@
 
     def __dealloc__(self):
         free(self.patches)
+        free(self.vertices)
+        free(self.field_data)

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -196,9 +196,6 @@
                        rtcr.RTCRay& ray) nogil:
     cdef int ray_id, elem_id, i
     cdef double val
-    cdef double[20] field_data
-    cdef int[20] element_indices
-    cdef double[60] vertices
     cdef double[3] position
     cdef float[3] pos
     cdef Patch* data
@@ -220,16 +217,10 @@
     for i in range(3):
         position[i] = <double> pos[i]
  
-    for i in range(20):
-        field_data[i] = patch.field_data[elem_id, i]
-        vertices[i*3    ] = patch.vertices[patch.indices[elem_id, i]][0]
-        vertices[i*3 + 1] = patch.vertices[patch.indices[elem_id, i]][1]
-        vertices[i*3 + 2] = patch.vertices[patch.indices[elem_id, i]][2]
-
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[3]
-    S2Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = S2Sampler.sample_at_unit_point(mapped_coord, field_data)
+    S2Sampler.map_real_to_unit(mapped_coord, patch.vertices, position)
+    val = S2Sampler.sample_at_unit_point(mapped_coord, patch.field_data)
     ray.time = val
 
     # we use ray.instID to pass back whether the ray is near the

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- a/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -36,7 +36,8 @@
 
     bvh = BVH(vertices, indices, field_data)
 
-    cam = Camera(Scene())
+    sc = Scene()
+    cam = Camera(sc)
     cam.set_position(np.array([8.0, 8.0, 8.0]))
     cam.focus = np.array([0.0, 0.0, 0.0])
     origins, direction = get_rays(cam)

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -462,7 +462,7 @@
         """
         if field == "all":
             self.x_log = log
-            for field in self.profiles[0].field_data.keys():
+            for field in list(self.profiles[0].field_data.keys()):
                 self.y_log[field] = log
         else:
             field, = self.profiles[0].data_source._determine_fields([field])
@@ -578,7 +578,7 @@
 
         """
         if field is 'all':
-            fields = self.axes.keys()
+            fields = list(self.axes.keys())
         else:
             fields = ensure_list(field)
         for profile in self.profiles:
@@ -971,7 +971,7 @@
         >>>  plot.annotate_text(1e-15, 5e4, "Hello YT")
 
         """
-        for f in self.data_source._determine_fields(self.plots.keys()):
+        for f in self.data_source._determine_fields(list(self.plots.keys())):
             if self.plots[f].figure is not None and text is not None:
                 self.plots[f].axes.text(xpos, ypos, text,
                                         fontproperties=self._font_properties,

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -24,6 +24,7 @@
     Lens
 import numpy as np
 from numbers import Number as numeric_type
+import weakref
 
 def _sanitize_camera_property_units(value, scene):
     if iterable(value):
@@ -126,7 +127,7 @@
             raise RuntimeError(
                 'The first argument passed to the Camera initializer is a '
                 '%s object, expected a %s object' % (type(scene), Scene))
-        self.scene = scene
+        self.scene = weakref.proxy(scene)
         self.lens = None
         self.north_vector = None
         self.normal_vector = None

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -52,6 +52,7 @@
     def mesh_render_image_func(filename_prefix):
         return im.write_image(filename_prefix)
 
+    mesh_render_image_func.__name__ = "func_{}".format(test_prefix)
     test = GenericImageTest(ds, mesh_render_image_func, decimals)
     test.prefix = test_prefix
     return test

diff -r e0838f66ad6c7d2c10d4454b20272b0a33e22117 -r 672764585671c7d379c13c3537eebe2dde7dfcbe yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -62,7 +62,7 @@
             rgba += (other.rgba * (1.0 - f)[:, None, :])
         else:
             b = self.z > other.z
-            rgba = np.empty(self.rgba.shape)
+            rgba = np.zeros(self.rgba.shape)
             rgba[f] = self.rgba[f]
             rgba[b] = other.rgba[b]
         z = np.min([self.z, other.z], axis=0)


https://bitbucket.org/yt_analysis/yt/commits/040184c871c8/
Changeset:   040184c871c8
Branch:      yt
User:        atmyers
Date:        2016-04-03 23:00:26+00:00
Summary:     A BVH version of MeshSampler.
Affected #:  4 files

diff -r 672764585671c7d379c13c3537eebe2dde7dfcbe -r 040184c871c896fd1d2889c1fe193f79396620af yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -25,6 +25,8 @@
     np.float64_t *center
     np.float64_t[:,:,:] image
     np.float64_t[:,:] zbuffer
+    np.int64_t[:,:] image_used
+    np.int64_t[:,:] mesh_lines
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     np.float64_t[:,:] camera_data   # position, width, unit_vec[0,2]
@@ -59,6 +61,8 @@
     cdef sampler_function *sampler
     cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
+    cdef public object aimage_used
+    cdef public object amesh_lines
     cdef void *supp_data
     cdef np.float64_t width[3]
     cdef public object lens_type

diff -r 672764585671c7d379c13c3537eebe2dde7dfcbe -r 040184c871c896fd1d2889c1fe193f79396620af yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -380,6 +380,8 @@
                   *args, **kwargs):
         self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
         cdef np.float64_t[:,:] zbuffer
+        cdef np.int64_t[:,:] image_used
+        cdef np.int64_t[:,:] mesh_lines
         cdef np.float64_t[:,:] camera_data
         cdef int i
 
@@ -390,6 +392,10 @@
         zbuffer = kwargs.pop("zbuffer", None)
         if zbuffer is None:
             zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+
+        image_used = np.zeros((image.shape[0], image.shape[1]), "int64")
+        mesh_lines = np.zeros((image.shape[0], image.shape[1]), "int64")
+
         self.lens_type = kwargs.pop("lens_type", None)
         if self.lens_type == "plane-parallel":
             self.extent_function = calculate_extent_plane_parallel
@@ -397,7 +403,7 @@
         else:
             if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
                not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
-                msg = "Bad lense shape / direction for %s\n" % (self.lens_type)
+                msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
                 msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
                     vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
                     vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
@@ -423,7 +429,9 @@
         self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
         self.image.y_vec = <np.float64_t *> y_vec.data
-        self.image.zbuffer = zbuffer
+        self.image.zbuffer = self.azbuffer = zbuffer
+        self.image.image_used = self.aimage_used = image_used
+        self.image.mesh_lines = self.amesh_lines = mesh_lines
         self.image.nv[0] = image.shape[0]
         self.image.nv[1] = image.shape[1]
         for i in range(4): self.image.bounds[i] = bounds[i]
@@ -498,6 +506,7 @@
         self.image.vp_dir = None
         self.image.zbuffer = None
         self.image.camera_data = None
+        self.image.image_used = None
         free(self.image)
 
 

diff -r 672764585671c7d379c13c3537eebe2dde7dfcbe -r 040184c871c896fd1d2889c1fe193f79396620af yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -25,7 +25,9 @@
     ImageContainer
 from cython.parallel import prange, parallel, threadid
 from yt.visualization.image_writer import apply_colormap
-from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray 
+from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray
+
+cdef np.float64_t INF = np.inf
 
 rtc.rtcInit(NULL)
 rtc.rtcSetErrorFunction(error_printer)
@@ -43,12 +45,9 @@
     def __dealloc__(self):
         rtcs.rtcDeleteScene(self.scene_i)
 
+
 cdef class MeshSampler(ImageSampler):
 
-    cdef public object image_used
-    cdef public object mesh_lines
-    cdef public object zbuffer
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -71,15 +70,10 @@
         cdef np.float64_t width[3]
         for i in range(3):
             width[i] = self.width[i]
-        cdef np.ndarray[np.float64_t, ndim=1] data
         cdef np.ndarray[np.int64_t, ndim=1] used
         nx = im.nv[0]
         ny = im.nv[1]
         size = nx * ny
-        used = np.empty(size, dtype="int64")
-        mesh = np.empty(size, dtype="int64")
-        data = np.empty(size, dtype="float64")
-        zbuffer = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
         v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
         v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
@@ -100,24 +94,16 @@
             ray.time = 0
             ray.Ng[0] = 1e37  # we use this to track the hit distance
             rtcs.rtcIntersect(scene.scene_i, ray)
-            data[j] = ray.time
-            used[j] = ray.primID
-            mesh[j] = ray.instID
-            zbuffer[j] = ray.tfar
-        self.aimage = data
-        self.image_used = used
-        self.mesh_lines = mesh
-        self.zbuffer = zbuffer
+            im.image[vi, vj, 0] = ray.time
+            im.image_used[vi, vj] = ray.primID
+            im.mesh_lines[vi, vj] = ray.instID
+            im.zbuffer[vi, vj] = ray.tfar
         free(v_pos)
         free(v_dir)
 
 
 cdef class BVHMeshSampler(ImageSampler):
 
-    cdef public object image_used
-    cdef public object mesh_lines
-    cdef public object zbuffer
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -144,10 +130,6 @@
         nx = im.nv[0]
         ny = im.nv[1]
         size = nx * ny
-        used = np.empty(size, dtype="int64")
-        mesh = np.empty(size, dtype="int64")
-        data = np.empty(size, dtype="float64")
-        zbuffer = np.empty(size, dtype="float64")
         cdef Ray* ray
         with nogil, parallel(num_threads = num_threads):
             ray = <Ray *> malloc(sizeof(Ray))
@@ -162,19 +144,15 @@
                     ray.origin[i] = v_pos[i]
                     ray.direction[i] = v_dir[i]
                     ray.inv_dir[i] = 1.0 / v_dir[i]
-                ray.t_far = np.inf
+                ray.t_far = INF
                 ray.t_near = 0.0
                 ray.data_val = 0
                 ray.elem_id = -1
                 bvh.intersect(ray)
-                data[j] = ray.data_val
-                used[j] = ray.elem_id
-                mesh[j] = ray.near_boundary
-                zbuffer[j] = ray.t_far
-        self.aimage = data
-        self.image_used = used
-        self.mesh_lines = mesh
-        self.zbuffer = zbuffer
-        free(v_pos)
-        free(v_dir)
-        free(ray)
+                im.image[vi, vj, 0] = ray.data_val
+                im.image_used[vi, vj] = ray.elem_id
+                im.mesh_lines[vi, vj] = ray.near_boundary
+                im.zbuffer[vi, vj] = ray.t_far
+            free(v_pos)
+            free(v_dir)
+            free(ray)

diff -r 672764585671c7d379c13c3537eebe2dde7dfcbe -r 040184c871c896fd1d2889c1fe193f79396620af yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -502,11 +502,10 @@
         mylog.debug("Done casting rays")
 
         self.finalize_image(camera)
-        self.data = self.sampler.aimage
         self.current_image = self.apply_colormap()
 
         zbuffer += ZBuffer(self.current_image.astype('float64'),
-                           self.sampler.zbuffer)
+                           self.sampler.azbuffer)
         zbuffer.rgba = ImageArray(zbuffer.rgba)
         self.zbuffer = zbuffer
         self.current_image = self.zbuffer.rgba
@@ -523,16 +522,13 @@
         # reshape data
         Nx = camera.resolution[0]
         Ny = camera.resolution[1]
-        sam.aimage = sam.aimage.reshape(Nx, Ny)
-        sam.image_used = sam.image_used.reshape(Nx, Ny)
-        sam.mesh_lines = sam.mesh_lines.reshape(Nx, Ny)
-        sam.zbuffer = sam.zbuffer.reshape(Nx, Ny)
+        self.data = sam.aimage[:,:,0].reshape(Nx, Ny)
 
         # rotate
-        sam.aimage = np.rot90(sam.aimage, k=2)
-        sam.image_used = np.rot90(sam.image_used, k=2)
-        sam.mesh_lines = np.rot90(sam.mesh_lines, k=2)
-        sam.zbuffer = np.rot90(sam.zbuffer, k=2)
+        self.data = np.rot90(self.data, k=2)
+        sam.aimage_used = np.rot90(sam.aimage_used, k=2)
+        sam.amesh_lines = np.rot90(sam.amesh_lines, k=2)
+        sam.azbuffer = np.rot90(sam.azbuffer, k=2)
 
     def annotate_mesh_lines(self, color=None, alpha=1.0):
         r"""
@@ -558,7 +554,7 @@
         if color is None:
             color = np.array([0, 0, 0, alpha])
 
-        locs = [self.sampler.mesh_lines == 1]
+        locs = [self.sampler.amesh_lines == 1]
 
         self.current_image[:, :, 0][locs] = color[0]
         self.current_image[:, :, 1][locs] = color[1]
@@ -592,7 +588,7 @@
                                color_bounds=self._color_bounds,
                                cmap_name=self._cmap)/255.
         alpha = image[:, :, 3]
-        alpha[self.sampler.image_used == -1] = 0.0
+        alpha[self.sampler.aimage_used == -1] = 0.0
         image[:, :, 3] = alpha        
         return image
 


https://bitbucket.org/yt_analysis/yt/commits/c502208b18d1/
Changeset:   c502208b18d1
Branch:      yt
User:        atmyers
Date:        2016-04-03 23:09:52+00:00
Summary:     a couple of renamings to support switching between ray-tracing engines.
Affected #:  3 files

diff -r 040184c871c896fd1d2889c1fe193f79396620af -r c502208b18d16b28d384f6c5ccd8ed68abc827ff yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,6 +1,6 @@
 """
-This file contains the MeshSampler class, which handles casting rays at a
-MeshSource using pyembree.
+This file contains the MeshSampler classes, which handles casting rays at a
+mesh source using either pyembree or the cython ray caster.
 
 
 """
@@ -46,7 +46,7 @@
         rtcs.rtcDeleteScene(self.scene_i)
 
 
-cdef class MeshSampler(ImageSampler):
+cdef class EmbreeMeshSampler(ImageSampler):
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 040184c871c896fd1d2889c1fe193f79396620af -r c502208b18d16b28d384f6c5ccd8ed68abc827ff yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -369,7 +369,7 @@
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        self.scene = mesh_traversal.YTEmbreeScene()
+        self.volume = mesh_traversal.YTEmbreeScene()
         self.build_mesh()
 
     def cmap():
@@ -438,7 +438,7 @@
         # low-order geometry. Right now, high-order geometry is only
         # implemented for 20-point hexes.
         if indices.shape[1] == 20:
-            self.mesh = mesh_construction.QuadraticElementMesh(self.scene,
+            self.mesh = mesh_construction.QuadraticElementMesh(self.volume,
                                                                vertices,
                                                                indices,
                                                                field_data)
@@ -458,7 +458,7 @@
                 field_data = field_data[:, 0:4]
                 indices = indices[:, 0:4]
 
-            self.mesh = mesh_construction.LinearElementMesh(self.scene,
+            self.mesh = mesh_construction.LinearElementMesh(self.volume,
                                                             vertices,
                                                             indices,
                                                             field_data)
@@ -498,7 +498,7 @@
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
-        self.sampler(self.scene)
+        self.sampler(self.volume)
         mylog.debug("Done casting rays")
 
         self.finalize_image(camera)

diff -r 040184c871c896fd1d2889c1fe193f79396620af -r c502208b18d16b28d384f6c5ccd8ed68abc827ff yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -27,7 +27,7 @@
         params['width'],
     )
     kwargs = {'lens_type': params['lens_type']}
-    sampler = mesh_traversal.MeshSampler(*args, **kwargs)
+    sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
     return sampler
 
 


https://bitbucket.org/yt_analysis/yt/commits/1a8309856c3e/
Changeset:   1a8309856c3e
Branch:      yt
User:        atmyers
Date:        2016-04-03 23:42:20+00:00
Summary:     enable MeshSources to optionally use the cython ray-tracer.
Affected #:  2 files

diff -r c502208b18d16b28d384f6c5ccd8ed68abc827ff -r 1a8309856c3ec60aa3bed1cf4c07c000cafeb62a yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -24,6 +24,7 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler, new_mesh_sampler, \
     new_interpolated_projection_sampler
+from yt.utilities.lib.bounding_volume_hierarchy import BVH
 from yt.visualization.image_writer import apply_colormap
 from yt.data_objects.image_array import ImageArray
 from .zbuffer_array import ZBuffer
@@ -353,8 +354,9 @@
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
         self.field = field
-        self.mesh = None
+        self.volume = None
         self.current_image = None
+        self.engine = 'engine'
 
         # default color map
         self._cmap = ytcfg.get("yt", "default_colormap")
@@ -369,8 +371,12 @@
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        self.volume = mesh_traversal.YTEmbreeScene()
-        self.build_mesh()
+        if self.engine == 'embree':
+            self.volume = mesh_traversal.YTEmbreeScene()
+            self.build_volume_embree()
+
+        elif self.engine == 'bvh':
+            self.build_volume_bvh()
 
     def cmap():
         '''
@@ -410,15 +416,15 @@
     def _validate(self):
         """Make sure that all dependencies have been met"""
         if self.data_source is None:
-            raise RuntimeError("Data source not initialized")
+            raise RuntimeError("Data source not initialized.")
 
-        if self.mesh is None:
-            raise RuntimeError("Mesh not initialized")
+        if self.volume is None:
+            raise RuntimeError("Volume not initialized.")
 
-    def build_mesh(self):
+    def build_volume_embree(self):
         """
 
-        This constructs the mesh that will be ray-traced.
+        This constructs the mesh that will be ray-traced by pyembree.
 
         """
         ftype, fname = self.field
@@ -463,6 +469,49 @@
                                                             indices,
                                                             field_data)
 
+    def build_volume_bvh(self):
+        """
+
+        This constructs the mesh that will be ray-traced.
+
+        """
+        ftype, fname = self.field
+        mesh_id = int(ftype[-1]) - 1
+        index = self.data_source.ds.index
+        offset = index.meshes[mesh_id]._index_offset
+        field_data = self.data_source[self.field].d  # strip units
+
+        vertices = index.meshes[mesh_id].connectivity_coords
+        indices = index.meshes[mesh_id].connectivity_indices - offset
+
+        if len(field_data.shape) == 1:
+            raise NotImplementedError("Element-centered fields are "
+                                      "only supported by Embree. ")
+
+        # Here, we decide whether to render based on high-order or 
+        # low-order geometry. Right now, high-order geometry is only
+        # supported by Embree.
+        if indices.shape[1] == 20:
+            # hexahedral
+            mylog.warning("20-node hexes not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:8]
+            indices = indices[:, 0:8]
+        elif indices.shape[1] == 27:
+            # hexahedral
+            mylog.warning("27-node hexes not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:8]
+            indices = indices[:, 0:8]
+        elif indices.shape[1] == 10:
+            # tetrahedral
+            mylog.warning("10-node tetrahedral elements not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:4]
+            indices = indices[:, 0:4]
+
+        self.volume = BVH(vertices, indices, field_data)
+
     def render(self, camera, zbuffer=None):
         """Renders an image using the provided camera
 
@@ -495,7 +544,7 @@
                               zbuffer.z.reshape(shape[:2]))
         self.zbuffer = zbuffer
 
-        self.sampler = new_mesh_sampler(camera, self)
+        self.sampler = new_mesh_sampler(camera, self, engine=self.engine)
 
         mylog.debug("Casting rays")
         self.sampler(self.volume)

diff -r c502208b18d16b28d384f6c5ccd8ed68abc827ff -r 1a8309856c3ec60aa3bed1cf4c07c000cafeb62a yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -14,7 +14,7 @@
     return data_source
 
 
-def new_mesh_sampler(camera, render_source):
+def new_mesh_sampler(camera, render_source, engine):
     params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     args = (
         np.atleast_3d(params['vp_pos']),
@@ -27,7 +27,10 @@
         params['width'],
     )
     kwargs = {'lens_type': params['lens_type']}
-    sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
+    if engine == 'embree':
+        sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
+    elif engine == 'bvh':
+        sampler = mesh_traversal.BVHMeshSampler(*args, **kwargs)
     return sampler
 
 


https://bitbucket.org/yt_analysis/yt/commits/788526b95863/
Changeset:   788526b95863
Branch:      yt
User:        atmyers
Date:        2016-04-03 23:53:57+00:00
Summary:     typo fix
Affected #:  1 file

diff -r 1a8309856c3ec60aa3bed1cf4c07c000cafeb62a -r 788526b95863868c9372f1964209c09001cc008e yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -356,7 +356,7 @@
         self.field = field
         self.volume = None
         self.current_image = None
-        self.engine = 'engine'
+        self.engine = 'embree'
 
         # default color map
         self._cmap = ytcfg.get("yt", "default_colormap")


https://bitbucket.org/yt_analysis/yt/commits/52262cbdce67/
Changeset:   52262cbdce67
Branch:      yt
User:        atmyers
Date:        2016-04-04 07:11:56+00:00
Summary:     make sure we free allocated memory in pixelization routines.
Affected #:  1 file

diff -r 788526b95863868c9372f1964209c09001cc008e -r 52262cbdce67159a84284c2bf8a820b3167cea94 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -20,6 +20,7 @@
 from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, imax, fabs
 from yt.utilities.exceptions import YTPixelizeError, \
     YTElementTypeNotRecognized
+from libc.stdlib cimport malloc, free
 from yt.utilities.lib.element_mappings cimport \
     ElementSampler, \
     P1Sampler3D, \
@@ -29,9 +30,6 @@
     Q1Sampler2D, \
     W1Sampler3D
 
-cdef extern from "stdlib.h":
-    # NOTE that size_t might not be int
-    void *alloca(int)
 
 cdef extern from "pixelization_constants.h":
     enum:
@@ -578,8 +576,7 @@
     cdef int nvertices = conn.shape[1]
     cdef int ndim = coords.shape[1]
     cdef int num_field_vals = field.shape[1]
-    cdef double* mapped_coord
-    cdef int num_mapped_coords
+    cdef double[4] mapped_coord
     cdef ElementSampler sampler
 
     # Pick the right sampler and allocate storage for the mapped coordinate
@@ -622,10 +619,8 @@
         raise RuntimeError
 
     # allocate temporary storage
-    num_mapped_coords = sampler.num_mapped_coords
-    mapped_coord = <double*> alloca(sizeof(double) * num_mapped_coords)
-    vertices = <np.float64_t *> alloca(ndim * sizeof(np.float64_t) * nvertices)
-    field_vals = <np.float64_t *> alloca(sizeof(np.float64_t) * num_field_vals)
+    vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
+    field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
 
     # fill the image bounds and pixel size informaton here
     for i in range(ndim):
@@ -696,4 +691,7 @@
                             img[pi, pj, pk] = 1.0
                         elif sampler.check_near_edge(mapped_coord, thresh, yax):
                             img[pi, pj, pk] = 1.0
+
+    free(vertices)
+    free(field_vals)
     return img


https://bitbucket.org/yt_analysis/yt/commits/99d790ed1760/
Changeset:   99d790ed1760
Branch:      yt
User:        atmyers
Date:        2016-04-04 07:12:26+00:00
Summary:     refactoring so that we can annotate mesh lines using both engines.
Affected #:  5 files

diff -r 52262cbdce67159a84284c2bf8a820b3167cea94 -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -246,11 +246,13 @@
         cdef np.float64_t* field_ptr         
         vertex_ptr = self.vertices + ray.elem_id*self.num_verts_per_elem*3
         field_ptr = self.field_data + ray.elem_id*self.num_verts_per_elem
-        ray.data_val = self.sampler.sample_at_real_point(vertex_ptr,
-                                                         field_ptr,
-                                                         position)
 
-        ray.near_boundary = -1
+        cdef np.float64_t[4] mapped_coord
+        self.sampler.map_real_to_unit(mapped_coord, vertex_ptr, position)
+        ray.data_val = self.sampler.sample_at_unit_point(mapped_coord,
+                                                         field_ptr)
+
+        ray.near_boundary = self.sampler.check_mesh_lines(mapped_coord)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 52262cbdce67159a84284c2bf8a820b3167cea94 -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -35,6 +35,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 cdef class P1Sampler2D(ElementSampler):
 
@@ -75,6 +77,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 # This typedef defines a function pointer that defines the system
 # of equations that will be solved by the NonlinearSolveSamplers.
@@ -170,6 +174,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 cdef class W1Sampler3D(NonlinearSolveSampler3D):
 
@@ -190,6 +196,9 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
+
 
 cdef class S2Sampler3D(NonlinearSolveSampler3D):
 
@@ -210,6 +219,9 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
+
 
 cdef class NonlinearSolveSampler2D(ElementSampler):
 

diff -r 52262cbdce67159a84284c2bf8a820b3167cea94 -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -93,6 +93,12 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef double sample_at_real_point(self,
                                      double* vertices,
                                      double* field_values,
@@ -263,6 +269,11 @@
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        pass
 
 cdef class NonlinearSolveSampler3D(ElementSampler):
 
@@ -371,7 +382,6 @@
             return 0
         return 1
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -384,6 +394,23 @@
         else:
             return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+            fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        else:
+            return -1
+
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -526,6 +553,22 @@
         else:
             return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+            fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        else:
+            return -1
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -707,7 +750,6 @@
             return 0
         return 1
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -724,6 +766,32 @@
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        cdef double r, s, t
+        cdef double thresh = 5.0e-2
+        r = mapped_coord[0]
+        s = mapped_coord[1]
+        t = mapped_coord[2]
+
+        cdef int near_edge_r, near_edge_s, near_edge_t
+        near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh)
+        near_edge_s = (s < thresh)
+        near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh
+        
+        # we use ray.instID to pass back whether the ray is near the
+        # element boundary or not (used to annotate mesh lines)
+        if (near_edge_r and near_edge_s):
+            return 1
+        elif (near_edge_r and near_edge_t):
+            return 1
+        elif (near_edge_s and near_edge_t):
+            return 1
+        else:
+            return -1
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 52262cbdce67159a84284c2bf8a820b3167cea94 -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -112,17 +112,8 @@
 
     # we use ray.instID to pass back whether the ray is near the
     # element boundary or not (used to annotate mesh lines)
-    if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-        fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    else:
-        ray.instID = -1
+    ray.instID = Q1Sampler.check_mesh_lines(mapped_coord)
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -165,28 +156,7 @@
     val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
 
-    cdef double r, s, t
-    cdef double thresh = 5.0e-2
-    r = mapped_coord[0]
-    s = mapped_coord[1]
-    t = mapped_coord[2]
-
-    cdef int near_edge_r, near_edge_s, near_edge_t
-    near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh)
-    near_edge_s = (s < thresh)
-    near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh
-    
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if (near_edge_r and near_edge_s):
-        ray.instID = 1
-    elif (near_edge_r and near_edge_t):
-        ray.instID = 1
-    elif (near_edge_s and near_edge_t):
-        ray.instID = 1
-    else:
-        ray.instID = -1
-
+    ray.instID = W1Sampler.check_mesh_lines(mapped_coord)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -223,20 +193,8 @@
     val = S2Sampler.sample_at_unit_point(mapped_coord, patch.field_data)
     ray.time = val
 
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-        fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    else:
-        ray.instID = -1
-
+    ray.instID = S2Sampler.check_mesh_lines(mapped_coord)
+    
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 52262cbdce67159a84284c2bf8a820b3167cea94 -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -131,7 +131,7 @@
         ny = im.nv[1]
         size = nx * ny
         cdef Ray* ray
-        with nogil, parallel(num_threads = num_threads):
+        with nogil, parallel():
             ray = <Ray *> malloc(sizeof(Ray))
             v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
             v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))


https://bitbucket.org/yt_analysis/yt/commits/b168bb218f14/
Changeset:   b168bb218f14
Branch:      yt
User:        atmyers
Date:        2016-04-04 17:33:50+00:00
Summary:     more refactoring.
Affected #:  2 files

diff -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 -r b168bb218f14615792c2d8e1a453963d1d7fc488 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -273,7 +273,33 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int check_mesh_lines(self, double* mapped_coord) nogil:
-        pass
+        cdef double u, v, w
+        cdef double thresh = 2.0e-2
+        if mapped_coord[0] == 0:
+            u = mapped_coord[1]
+            v = mapped_coord[2]
+            w = mapped_coord[3]
+        elif mapped_coord[1] == 0:
+            u = mapped_coord[2]
+            v = mapped_coord[3]
+            w = mapped_coord[0]
+        elif mapped_coord[2] == 0:
+            u = mapped_coord[1]
+            v = mapped_coord[3]
+            w = mapped_coord[0]
+        else:
+            u = mapped_coord[1]
+            v = mapped_coord[2]
+            w = mapped_coord[0]
+        if ((u < thresh) or 
+            (v < thresh) or 
+            (w < thresh) or
+            (fabs(u - 1) < thresh) or 
+            (fabs(v - 1) < thresh) or 
+            (fabs(w - 1) < thresh)):
+            return 1
+        return -1
+
 
 cdef class NonlinearSolveSampler3D(ElementSampler):
 

diff -r 99d790ed1760c0529a7e26d4c66918f5849c4a16 -r b168bb218f14615792c2d8e1a453963d1d7fc488 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -235,23 +235,8 @@
     P1Sampler.map_real_to_unit(mapped_coord, vertices, position)
     val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
-
-    cdef double u, v, w
-    cdef double thresh = 2.0e-2
-    u = ray.u
-    v = ray.v
-    w = 1.0 - u - v
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if ((u < thresh) or 
-        (v < thresh) or 
-        (w < thresh) or
-        (fabs(u - 1) < thresh) or 
-        (fabs(v - 1) < thresh) or 
-        (fabs(w - 1) < thresh)):
-        ray.instID = 1
-    else:
-        ray.instID = -1
+    
+    ray.instID = P1Sampler.check_mesh_lines(mapped_coord)
 
 
 @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/d3f8e67fd6a5/
Changeset:   d3f8e67fd6a5
Branch:      yt
User:        atmyers
Date:        2016-04-04 17:35:33+00:00
Summary:     raise Exception if invalid ray-tracing engine is selected.
Affected #:  1 file

diff -r b168bb218f14615792c2d8e1a453963d1d7fc488 -r d3f8e67fd6a53f938e93332c72e1df4cd21efef2 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -374,9 +374,11 @@
         if self.engine == 'embree':
             self.volume = mesh_traversal.YTEmbreeScene()
             self.build_volume_embree()
-
         elif self.engine == 'bvh':
             self.build_volume_bvh()
+        else:
+            raise NotImplementedError("Invalid ray-tracing engine selected. "
+                                      "Choices are 'embree' and 'bvh'.")
 
     def cmap():
         '''


https://bitbucket.org/yt_analysis/yt/commits/30fedffbd142/
Changeset:   30fedffbd142
Branch:      yt
User:        atmyers
Date:        2016-04-04 18:32:15+00:00
Summary:     enable element-centered fields to work with BVH.
Affected #:  3 files

diff -r d3f8e67fd6a53f938e93332c72e1df4cd21efef2 -r 30fedffbd142e6384222af1d224f059512a443e8 yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -45,6 +45,7 @@
     cdef np.int64_t num_tri
     cdef np.int64_t num_elem
     cdef np.int64_t num_verts_per_elem
+    cdef np.int64_t num_field_per_elem
     cdef ElementSampler sampler
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil

diff -r d3f8e67fd6a53f938e93332c72e1df4cd21efef2 -r 30fedffbd142e6384222af1d224f059512a443e8 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -124,6 +124,7 @@
 
         self.num_elem = indices.shape[0]
         self.num_verts_per_elem = indices.shape[1]
+        self.num_field_per_elem = field_data.shape[1]
 
         # We need to figure out what kind of elements we've been handed.
         cdef int[MAX_NUM_TRI][3] tri_array
@@ -142,20 +143,22 @@
         self.num_tri = self.num_tri_per_elem*self.num_elem
 
         # allocate storage
-        cdef np.int64_t size = self.num_verts_per_elem * self.num_elem
-        self.vertices = <np.float64_t*> malloc(size * 3 * sizeof(np.float64_t))
-        self.field_data = <np.float64_t*> malloc(size * sizeof(np.float64_t))
+        cdef np.int64_t v_size = self.num_verts_per_elem * self.num_elem * 3
+        self.vertices = <np.float64_t*> malloc(v_size * sizeof(np.float64_t))
+        cdef np.int64_t f_size = self.num_field_per_elem * self.num_elem
+        self.field_data = <np.float64_t*> malloc(f_size * sizeof(np.float64_t))
 
         # create data buffers
         cdef np.int64_t i, j, k
         cdef np.int64_t field_offset, vertex_offset
         for i in range(self.num_elem):
-            field_offset = i*self.num_verts_per_elem
             for j in range(self.num_verts_per_elem):
                 vertex_offset = i*self.num_verts_per_elem*3 + j*3
-                self.field_data[field_offset + j] = field_data[i][j]
                 for k in range(3):
                     self.vertices[vertex_offset + k] = vertices[indices[i,j]][k]
+            field_offset = i*self.num_field_per_elem
+            for j in range(self.num_field_per_elem):
+                self.field_data[field_offset + j] = field_data[i][j]                
 
         # fill our array of triangles
         cdef np.int64_t offset, tri_index
@@ -245,13 +248,15 @@
         cdef np.float64_t* vertex_ptr
         cdef np.float64_t* field_ptr         
         vertex_ptr = self.vertices + ray.elem_id*self.num_verts_per_elem*3
-        field_ptr = self.field_data + ray.elem_id*self.num_verts_per_elem
+        field_ptr = self.field_data + ray.elem_id*self.num_field_per_elem
 
         cdef np.float64_t[4] mapped_coord
         self.sampler.map_real_to_unit(mapped_coord, vertex_ptr, position)
-        ray.data_val = self.sampler.sample_at_unit_point(mapped_coord,
-                                                         field_ptr)
-
+        if self.num_field_per_elem == 1:
+            ray.data_val = field_ptr[0]
+        else:
+            ray.data_val = self.sampler.sample_at_unit_point(mapped_coord,
+                                                             field_ptr)
         ray.near_boundary = self.sampler.check_mesh_lines(mapped_coord)
 
     @cython.boundscheck(False)

diff -r d3f8e67fd6a53f938e93332c72e1df4cd21efef2 -r 30fedffbd142e6384222af1d224f059512a443e8 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -486,9 +486,9 @@
         vertices = index.meshes[mesh_id].connectivity_coords
         indices = index.meshes[mesh_id].connectivity_indices - offset
 
+        # if this is an element field, promote to 2D here
         if len(field_data.shape) == 1:
-            raise NotImplementedError("Element-centered fields are "
-                                      "only supported by Embree. ")
+            field_data = np.expand_dims(field_data, 1)
 
         # Here, we decide whether to render based on high-order or 
         # low-order geometry. Right now, high-order geometry is only


https://bitbucket.org/yt_analysis/yt/commits/628471eb2d03/
Changeset:   628471eb2d03
Branch:      yt
User:        atmyers
Date:        2016-04-04 20:05:48+00:00
Summary:     use the same initial 'far away' distance for both BVH and Embree.
Affected #:  1 file

diff -r 30fedffbd142e6384222af1d224f059512a443e8 -r 628471eb2d03f68de22e1ba8fae1ff5a992035e0 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -27,8 +27,6 @@
 from yt.visualization.image_writer import apply_colormap
 from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray
 
-cdef np.float64_t INF = np.inf
-
 rtc.rtcInit(NULL)
 rtc.rtcSetErrorFunction(error_printer)
 
@@ -144,7 +142,7 @@
                     ray.origin[i] = v_pos[i]
                     ray.direction[i] = v_dir[i]
                     ray.inv_dir[i] = 1.0 / v_dir[i]
-                ray.t_far = INF
+                ray.t_far = 1e37
                 ray.t_near = 0.0
                 ray.data_val = 0
                 ray.elem_id = -1


https://bitbucket.org/yt_analysis/yt/commits/71eeada8e267/
Changeset:   71eeada8e267
Branch:      yt
User:        atmyers
Date:        2016-04-04 22:52:21+00:00
Summary:     Allow mesh line annotation to work with element-centered fields in Embree mode.
Affected #:  4 files

diff -r 628471eb2d03f68de22e1ba8fae1ff5a992035e0 -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -11,6 +11,7 @@
     int* element_indices   # which vertices belong to which *element*
     int tpe                # the number of triangles per element
     int vpe                # the number of vertices per element
+    int fpe                # the number of field values per element
 
 ctypedef struct Patch:
     float[8][3] v

diff -r 628471eb2d03f68de22e1ba8fae1ff5a992035e0 -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -25,7 +25,6 @@
 from mesh_samplers cimport \
     sample_hex, \
     sample_tetra, \
-    sample_element, \
     sample_wedge
 from pyembree.rtcore cimport \
     Vertex, \
@@ -186,19 +185,18 @@
         datac.element_indices = self.element_indices
         datac.tpe = self.tpe
         datac.vpe = self.vpe
+        datac.fpe = self.fpe
         self.datac = datac
         
         rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene):
 
-        if self.fpe == 1:
-            self.filter_func = <rtcg.RTCFilterFunc> sample_element
-        elif self.fpe == 4:
+        if self.vpe == 4:
             self.filter_func = <rtcg.RTCFilterFunc> sample_tetra
-        elif self.fpe == 6:
+        elif self.vpe == 6:
             self.filter_func = <rtcg.RTCFilterFunc> sample_wedge
-        elif self.fpe == 8:
+        elif self.vpe == 8:
             self.filter_func = <rtcg.RTCFilterFunc> sample_hex
         else:
             raise NotImplementedError("Sampler type not implemented.")

diff -r 628471eb2d03f68de22e1ba8fae1ff5a992035e0 -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -13,6 +13,3 @@
 
 cdef void sample_hex20(void* userPtr,
                        rtcr.RTCRay& ray) nogil
-
-cdef void sample_element(void* userPtr,
-                         rtcr.RTCRay& ray) nogil

diff -r 628471eb2d03f68de22e1ba8fae1ff5a992035e0 -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -97,7 +97,9 @@
     
     for i in range(8):
         element_indices[i] = data.element_indices[elem_id*8+i]
-        field_data[i]      = data.field_data[elem_id*8+i]
+
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
 
     for i in range(8):
         vertices[i*3]     = data.vertices[element_indices[i]].x
@@ -107,7 +109,10 @@
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[3]
     Q1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
 
     # we use ray.instID to pass back whether the ray is near the
@@ -143,7 +148,9 @@
     
     for i in range(6):
         element_indices[i] = data.element_indices[elem_id*6+i]
-        field_data[i]      = data.field_data[elem_id*6+i]
+
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
 
     for i in range(6):
         vertices[i*3]     = data.vertices[element_indices[i]].x
@@ -153,9 +160,11 @@
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[3]
     W1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
-
     ray.instID = W1Sampler.check_mesh_lines(mapped_coord)
 
 @cython.boundscheck(False)
@@ -192,7 +201,6 @@
     S2Sampler.map_real_to_unit(mapped_coord, patch.vertices, position)
     val = S2Sampler.sample_at_unit_point(mapped_coord, patch.field_data)
     ray.time = val
-
     ray.instID = S2Sampler.check_mesh_lines(mapped_coord)
     
 
@@ -225,39 +233,19 @@
 
     for i in range(4):
         element_indices[i] = data.element_indices[elem_id*4+i]
-        field_data[i] = data.field_data[elem_id*4+i]
         vertices[i*3] = data.vertices[element_indices[i]].x
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
+
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[4]
     P1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
-    
     ray.instID = P1Sampler.check_mesh_lines(mapped_coord)
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void sample_element(void* userPtr,
-                         rtcr.RTCRay& ray) nogil:
-    cdef int ray_id, elem_id
-    cdef double val
-    cdef MeshDataContainer* data
-
-    data = <MeshDataContainer*> userPtr
-    ray_id = ray.primID
-    if ray_id == -1:
-        return
-
-    # ray_id records the id number of the hit according to
-    # embree, in which the primitives are triangles. Here,
-    # we convert this to the element id by dividing by the
-    # number of triangles per element.
-    elem_id = ray_id / data.tpe
-
-    val = data.field_data[elem_id]
-    ray.time = val


https://bitbucket.org/yt_analysis/yt/commits/5ada8f62d1d6/
Changeset:   5ada8f62d1d6
Branch:      yt
User:        atmyers
Date:        2016-04-07 01:44:07+00:00
Summary:     merging with tip
Affected #:  25 files

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -1,48 +1,91 @@
 .. _absorption_spectrum:
 
-Absorption Spectrum
-===================
+Creating Absorption Spectra
+===========================
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created
-by the (:ref:`light-ray-generator`).  For each element of the ray, column
-densities are calculated multiplying the number density within a grid cell
-with the path length of the ray through the cell.  Line profiles are
-generated using a voigt profile based on the temperature field.  The lines
-are then shifted according to the redshift recorded by the light ray tool
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to
-:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
+Absorption line spectra are spectra generated using bright background sources
+to illuminate tenuous foreground material and are primarily used in studies
+of the circumgalactic medium and intergalactic medium.  These spectra can
+be created using the
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+and
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+analysis modules.
 
-The spectrum generator will output a file containing the wavelength and
-normalized flux.  It will also output a text file listing all important lines.
+The 
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum` class
+and its workhorse method
+:meth:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum.make_spectrum`
+return two arrays, one with wavelengths, the other with the normalized
+flux values at each of the wavelength values.  It can also output a text file
+listing all important lines.
+
+For example, here is an absorption spectrum for the wavelength range from 900 
+to 1800 Angstroms made with a light ray extending from z = 0 to z = 0.4:
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms
-made with a light ray extending from z = 0 to z = 0.4.
+And a zoom-in on the 1425-1450 Angstrom window:
 
 .. image:: _images/spectrum_zoom.png
    :width: 500
 
-A zoom-in of the above spectrum.
+Method for Creating Absorption Spectra
+--------------------------------------
 
-Creating an Absorption Spectrum
--------------------------------
+Once a
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+has been created traversing a dataset using the :ref:`light-ray-generator`,
+a series of arrays store the various fields of the gas parcels (represented
+as cells) intersected along the ray.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+steps through each element of the
+:class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`'s
+arrays and calculates the column density for desired ion by multiplying its
+number density with the path length through the cell.  Using these column
+densities along with temperatures to calculate thermal broadening, voigt
+profiles are deposited on to a featureless background spectrum.  By default,
+the peculiar velocity of the gas is included as a doppler redshift in addition
+to any cosmological redshift of the data dump itself.
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the
-minimum and maximum wavelengths, and the number of wavelength bins.
+Subgrid Deposition
+^^^^^^^^^^^^^^^^^^
+
+For features not resolved (i.e. possessing narrower width than the spectral
+resolution),
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+performs subgrid deposition.  The subgrid deposition algorithm creates a number
+of smaller virtual bins, by default the width of the virtual bins is 1/10th
+the width of the spectral feature.  The Voigt profile is then deposited
+into these virtual bins where it is resolved, and then these virtual bins
+are numerically integrated back to the resolution of the original spectral bin
+size, yielding accurate equivalent widths values.
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+informs the user how many spectral features are deposited in this fashion.
+
+Tutorial on Creating an Absorption Spectrum
+-------------------------------------------
+
+Initializing `AbsorptionSpectrum` Class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To instantiate an
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`
+object, the arguments required are the
+minimum and maximum wavelengths (assumed to be in Angstroms), and the number
+of wavelength bins to span this range (including the endpoints)
 
 .. code-block:: python
 
   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
-  sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
+  sp = AbsorptionSpectrum(900.0, 1800.0, 10001)
 
 Adding Features to the Spectrum
--------------------------------
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Absorption lines and continuum features can then be added to the spectrum.
 To add a line, you must know some properties of the line: the rest wavelength,
@@ -67,8 +110,8 @@
 field from the ray data to use to calculate the column density.  The
 ``label_threshold`` keyword tells the spectrum generator to add all lines
 above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
-text line list.  If None is provided, as is the default, no lines of this
-type will be added to the text list.
+text line list output at the end.  If None is provided, as is the default,
+no lines of this type will be added to the text list.
 
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
@@ -86,7 +129,7 @@
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
--------------------
+^^^^^^^^^^^^^^^^^^^
 
 Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
@@ -95,12 +138,12 @@
 
   wavelength, flux = sp.make_spectrum('lightray.h5',
                                       output_file='spectrum.fits',
-                                      line_list_file='lines.txt',
-                                      use_peculiar_velocity=True)
+                                      line_list_file='lines.txt')
 
 A spectrum will be made using the specified ray data and the wavelength and
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to
-False, the lines will only be shifted according to the redshift.
+flux arrays will also be returned.  If you set the optional
+``use_peculiar_velocity`` keyword to False, the lines will not incorporate
+doppler redshifts to shift the deposition of the line features.
 
 Three output file formats are supported for writing out the spectrum: fits,
 hdf5, and ascii.  The file format used is based on the extension provided
@@ -112,15 +155,16 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.
-Spectrum generation is parallelized using a multi-level strategy where each
-absorption line is deposited by a different processor.  If the number of available
-processors is greater than the number of lines, then the deposition of
-individual lines will be divided over multiple processors.
+The `AbsorptionSpectrum` analysis module can be run in parallel simply by
+following the procedures laid out in :ref:`parallel-computation` for running
+yt scripts in parallel.  Spectrum generation is parallelized using a multi-level
+strategy where each absorption line is deposited by a different processor.
+If the number of available processors is greater than the number of lines,
+then the deposition of individual lines will be divided over multiple
+processors.
 
-Fitting an Absorption Spectrum
-------------------------------
+Fitting Absorption Spectra
+==========================
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -485,12 +485,12 @@
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require definition of which set of tests constitute to a given
 answer. Configuration for the integration server is stored in
-*tests/tests_2.7.yaml* in the main yt repository:
+*tests/tests.yaml* in the main yt repository:
 
 .. code-block:: yaml
 
    answer_tests:
-      local_artio_270:
+      local_artio_000:
          - yt/frontends/artio/tests/test_outputs.py
    # ...
    other_tests:
@@ -498,7 +498,7 @@
          - '-v'
          - '-s'
 
-Each element under *answer_tests* defines answer name (*local_artio_270* in above
+Each element under *answer_tests* defines answer name (*local_artio_000* in above
 snippet) and specifies a list of files/classes/methods that will be validated
 (*yt/frontends/artio/tests/test_outputs.py* in above snippet). On the testing
 server it is translated to:
@@ -506,7 +506,7 @@
 .. code-block:: bash
 
    $ nosetests --with-answer-testing --local --local-dir ... --answer-big-data \
-      --answer-name=local_artio_270 \
+      --answer-name=local_artio_000 \
       yt/frontends/artio/tests/test_outputs.py
 
 If the answer doesn't exist on the server yet, ``nosetests`` is run twice and
@@ -516,21 +516,21 @@
 ~~~~~~~~~~~~~~~~
 
 In order to regenerate answers for a particular set of tests it is sufficient to
-change the answer name in *tests/tests_2.7.yaml* e.g.:
+change the answer name in *tests/tests.yaml* e.g.:
 
 .. code-block:: diff
 
-   --- a/tests/tests_2.7.yaml
-   +++ b/tests/tests_2.7.yaml
+   --- a/tests/tests.yaml
+   +++ b/tests/tests.yaml
    @@ -25,7 +25,7 @@
         - yt/analysis_modules/halo_finding/tests/test_rockstar.py
         - yt/frontends/owls_subfind/tests/test_outputs.py
 
-   -  local_owls_270:
-   +  local_owls_271:
+   -  local_owls_000:
+   +  local_owls_001:
         - yt/frontends/owls/tests/test_outputs.py
 
-      local_pw_270:
+      local_pw_000:
 
 would regenerate answers for OWLS frontend.
 
@@ -538,20 +538,35 @@
 ~~~~~~~~~~~~~~~~~~~~~~~
 
 In order to add a new set of answer tests, it is sufficient to extend the
-*answer_tests* list in *tests/tests_2.7.yaml* e.g.:
+*answer_tests* list in *tests/tests.yaml* e.g.:
 
 .. code-block:: diff
 
-   --- a/tests/tests_2.7.yaml
-   +++ b/tests/tests_2.7.yaml
+   --- a/tests/tests.yaml
+   +++ b/tests/tests.yaml
    @@ -60,6 +60,10 @@
         - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
         - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
 
-   +  local_gdf_270:
+   +  local_gdf_000:
    +    - yt/frontends/gdf/tests/test_outputs.py
    +
    +
     other_tests:
       unittests:
 
+Restricting Python Versions for Answer Tests
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If for some reason a test can be run only for a specific version of python it is
+possible to indicate this by adding a ``[py2]`` or ``[py3]`` tag. For example:
+
+.. code-block:: yaml
+
+   answer_tests:
+      local_test_000:
+         - yt/test_A.py  # [py2]
+         - yt/test_B.py  # [py3]
+
+would result in ``test_A.py`` being run only for *python2* and ``test_B.py``
+being run only for *python3*.

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -244,6 +244,26 @@
 
 Option 1:
 
+Ensure that you have all build dependencies installed in your current
+conda environment:
+
+.. code-block:: bash
+
+  conda install cython mercurial sympy ipython h5py matplotlib
+
+.. note::
+  
+  If you are using a python3 environment, ``conda`` will not be able to install
+  *mercurial*, which works only with python2. You can circumvent this issue by
+  creating a dedicated python2 environment and symlinking *hg* in your current
+  environment:
+
+  .. code-block:: bash
+
+     export CONDA_DIR=$(python -c 'import sys; print(sys.executable.split("/bin/python")[0])')
+     conda create -y -n py27 python=2.7 mercurial
+     ln -s ${CONDA_DIR}/envs/py27/bin/hg ${CONDA_DIR}/bin
+
 Clone the yt repository with:
 
 .. code-block:: bash

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -3,33 +3,33 @@
 Colormaps
 =========
 
-There are several colormaps available for yt.  yt includes all of the 
-matplotlib colormaps as well for nearly all functions.  Individual 
-visualization functions usually allow you to specify a colormap with the 
+There are several colormaps available for yt.  yt includes all of the
+matplotlib colormaps as well for nearly all functions.  Individual
+visualization functions usually allow you to specify a colormap with the
 ``cmap`` flag.
 
 .. _install-palettable:
 
-Palettable and ColorBrewer2 
+Palettable and ColorBrewer2
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 While colormaps that employ a variety of colors often look attractive,
 they are not always the best choice to convey information to one's audience.
-There are numerous `articles <https://eagereyes.org/basics/rainbow-color-map>`_ 
-and 
-`presentations <http://pong.tamu.edu/~kthyng/presentations/visualization.pdf>`_ 
-that discuss how rainbow-based colormaps fail with regard to black-and-white 
+There are numerous `articles <https://eagereyes.org/basics/rainbow-color-map>`_
+and
+`presentations <http://pong.tamu.edu/~kthyng/presentations/visualization.pdf>`_
+that discuss how rainbow-based colormaps fail with regard to black-and-white
 reproductions, colorblind audience members, and confusing color ordering.
 Depending on the application, the consensus seems to be that gradients between
 one or two colors are the best way for the audience to extract information
 from one's figures.  Many such colormaps are found in palettable.
 
-If you have installed `palettable <http://jiffyclub.github.io/palettable/>`_ 
-(formerly brewer2mpl), you can also access the discrete colormaps available 
+If you have installed `palettable <http://jiffyclub.github.io/palettable/>`_
+(formerly brewer2mpl), you can also access the discrete colormaps available
 to that package including those from `colorbrewer <http://colorbrewer2.org>`_.
-Install `palettable <http://jiffyclub.github.io/palettable/>`_ with 
-``pip install palettable``.  To access these maps in yt, instead of supplying 
-the colormap name, specify a tuple of the form (name, type, number), for 
+Install `palettable <http://jiffyclub.github.io/palettable/>`_ with
+``pip install palettable``.  To access these maps in yt, instead of supplying
+the colormap name, specify a tuple of the form (name, type, number), for
 example ``('RdBu', 'Diverging', 9)``.  These discrete colormaps will
 not be interpolated, and can be useful for creating
 colorblind/printer/grayscale-friendly plots. For more information, visit
@@ -40,22 +40,22 @@
 Making and Viewing Custom Colormaps
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-yt can also accommodate custom colormaps using the 
-:func:`~yt.visualization.color_maps.make_colormap` function 
-These custom colormaps can be made to an arbitrary level of 
-complexity.  You can make these on the fly for each yt session, or you can 
-store them in your :ref:`plugin-file` for access to them in every future yt 
+yt can also accommodate custom colormaps using the
+:func:`~yt.visualization.color_maps.make_colormap` function
+These custom colormaps can be made to an arbitrary level of
+complexity.  You can make these on the fly for each yt session, or you can
+store them in your :ref:`plugin-file` for access to them in every future yt
 session.  The example below creates two custom colormaps, one that has
-three equally spaced bars of blue, white and red, and the other that 
-interpolates in increasing lengthen intervals from black to red, to green, 
-to blue.  These will be accessible for the rest of the yt session as 
-'french_flag' and 'weird'.  See 
-:func:`~yt.visualization.color_maps.make_colormap` and 
+three equally spaced bars of blue, white and red, and the other that
+interpolates in intervals of increasing length from black to red, to green,
+to blue.  These will be accessible for the rest of the yt session as
+'french_flag' and 'weird'.  See
+:func:`~yt.visualization.color_maps.make_colormap` and
 :func:`~yt.visualization.color_maps.show_colormaps` for more details.
 
 .. code-block:: python
 
-    yt.make_colormap([('blue', 20), ('white', 20), ('red', 20)], 
+    yt.make_colormap([('blue', 20), ('white', 20), ('red', 20)],
                      name='french_flag', interpolate=False)
     yt.make_colormap([('black', 5), ('red', 10), ('green', 20), ('blue', 0)],
                      name='weird', interpolate=True)
@@ -66,7 +66,7 @@
 
 This is a chart of all of the yt and matplotlib colormaps available.  In
 addition to each colormap displayed here, you can access its "reverse" by simply
-appending a ``"_r"`` to the end of the colormap name.  
+appending a ``"_r"`` to the end of the colormap name.
 
 .. image:: ../_images/all_colormaps.png
    :width: 512
@@ -80,7 +80,7 @@
 Displaying Colormaps Locally
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To display the most up to date colormaps locally, you can use the 
+To display the most up to date colormaps locally, you can use the
 :func:`~yt.visualization.color_maps.show_colormaps` function.  By default,
 you'll see every colormap available to you, but you can specify subsets
 of colormaps to display, either as just the ``yt_native`` colormaps, or
@@ -98,7 +98,7 @@
 
     import yt
     yt.show_colormaps(subset=['algae', 'kamae', 'spectral',
-                              'arbre', 'dusk', 'octarine', 'kelp'], 
+                              'arbre', 'dusk', 'octarine', 'kelp'],
                       filename="yt_native.png")
 
 Applying a Colormap to your Rendering
@@ -111,7 +111,7 @@
 
     yt.write_image(im, "output.png", cmap_name = 'jet')
 
-If you're using the Plot Window interface (e.g. SlicePlot, ProjectionPlot, 
+If you're using the Plot Window interface (e.g. SlicePlot, ProjectionPlot,
 etc.), it's even easier than that.  Simply create your rendering, and you
 can quickly swap the colormap on the fly after the fact with the ``set_cmap``
 callback:
@@ -127,16 +127,16 @@
     p.set_cmap(field="density", cmap='hot')
     p.save('proj_with_hot_cmap.png')
 
-For more information about the callbacks available to Plot Window objects, 
+For more information about the callbacks available to Plot Window objects,
 see :ref:`callbacks`.
 
 Examples of Each Colormap
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 To give the reader a better feel for how a colormap appears once it is applied
-to a dataset, below we provide a library of identical projections of an 
-isolated galaxy where only the colormap has changed.  They use the sample 
-dataset "IsolatedGalaxy" available at 
+to a dataset, below we provide a library of identical projections of an
+isolated galaxy where only the colormap has changed.  They use the sample
+dataset "IsolatedGalaxy" available at
 `http://yt-project.org/data <http://yt-project.org/data>`_.
 
 .. yt_colormaps:: cmap_images.py

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -138,6 +138,10 @@
         import hglib
     except ImportError:
         return None
-    with hglib.open(target_dir) as repo:
-        changeset = repo.identify(id=True, branch=True).strip().decode('utf8')
+    try:
+        with hglib.open(target_dir) as repo:
+            changeset = repo.identify(
+                id=True, branch=True).strip().decode('utf8')
+    except hglib.error.ServerError:
+        return None
     return changeset

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -7,7 +7,6 @@
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
-
 class NoseWorker(multiprocessing.Process):
 
     def __init__(self, task_queue, result_queue):
@@ -54,10 +53,19 @@
 
 
 def generate_tasks_input():
+    pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor)
+    if sys.version_info < (3, 0, 0):
+        DROP_TAG = "py3"
+    else:
+        DROP_TAG = "py2"
+
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
-    with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
-        tests = yaml.load(obj)
+    with open('tests/tests.yaml', 'r') as obj:
+        lines = obj.read()
+    data = '\n'.join([line for line in lines.split('\n')
+                      if DROP_TAG not in line])
+    tests = yaml.load(data)
 
     base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
@@ -66,9 +74,11 @@
     for test in list(tests["other_tests"].keys()):
         args.append([test] + tests["other_tests"][test])
     for answer in list(tests["answer_tests"].keys()):
-        argv = [answer]
+        if tests["answer_tests"][answer] is None:
+            continue
+        argv = ["{}_{}".format(pyver, answer)]
         argv += base_argv
-        argv.append('--answer-name=%s' % answer)
+        argv.append('--answer-name=%s' % argv[0])
         argv += tests["answer_tests"][answer]
         args.append(argv)
 

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 tests/tests.yaml
--- /dev/null
+++ b/tests/tests.yaml
@@ -0,0 +1,73 @@
+answer_tests:
+  local_artio_000:
+    - yt/frontends/artio/tests/test_outputs.py
+
+  local_athena_000:
+    - yt/frontends/athena
+
+  local_chombo_000:
+    - yt/frontends/chombo/tests/test_outputs.py
+
+  local_enzo_000:
+    - yt/frontends/enzo
+
+  local_fits_000:
+    - yt/frontends/fits/tests/test_outputs.py
+
+  local_flash_000:
+    - yt/frontends/flash/tests/test_outputs.py
+
+  local_gadget_000:
+    - yt/frontends/gadget/tests/test_outputs.py
+
+  local_gdf_000:
+    - yt/frontends/gdf/tests/test_outputs.py
+
+  local_halos_000:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py  # [py2]
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py  # [py2]
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
+    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
+  
+  local_owls_000:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_000:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_000:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_000:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
+
+  local_orion_000:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_000:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_000:
+    - yt/frontends/ytdata
+
+  local_absorption_spectrum_000:
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-answer_tests:
-  local_artio_270:
-    - yt/frontends/artio/tests/test_outputs.py
-
-  local_athena_270:
-    - yt/frontends/athena
-
-  local_chombo_271:
-    - yt/frontends/chombo/tests/test_outputs.py
-
-  local_enzo_270:
-    - yt/frontends/enzo
-
-  local_fits_270:
-    - yt/frontends/fits/tests/test_outputs.py
-
-  local_flash_270:
-    - yt/frontends/flash/tests/test_outputs.py
-
-  local_gadget_270:
-    - yt/frontends/gadget/tests/test_outputs.py
-
-  local_gdf_270:
-    - yt/frontends/gdf/tests/test_outputs.py
-
-  local_halos_270:
-    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-    - yt/frontends/owls_subfind/tests/test_outputs.py
-  
-  local_owls_270:
-    - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_271:
-    - yt/visualization/tests/test_plotwindow.py:test_attributes
-    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
-    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
-  local_tipsy_270:
-    - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_274:
-    - yt/analysis_modules/radmc3d_export
-    - yt/frontends/moab/tests/test_c5.py
-    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-    - yt/visualization/volume_rendering/tests/test_mesh_render.py
-
-  local_orion_270:
-    - yt/frontends/boxlib/tests/test_orion.py
-  
-  local_ramses_270:
-    - yt/frontends/ramses/tests/test_outputs.py
-  
-  local_ytdata_270:
-    - yt/frontends/ytdata
-
-  local_absorption_spectrum_271:
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
-
-other_tests:
-  unittests:
-     - '-v'
-  cookbook:
-     - '-v'
-     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 tests/tests_3.5.yaml
--- a/tests/tests_3.5.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
-answer_tests:
-  local_artio_350:
-    - yt/frontends/artio/tests/test_outputs.py
-
-  local_athena_350:
-    - yt/frontends/athena
-
-  local_chombo_350:
-    - yt/frontends/chombo/tests/test_outputs.py
-
-  local_enzo_350:
-    - yt/frontends/enzo
-
-  local_fits_350:
-    - yt/frontends/fits/tests/test_outputs.py
-
-  local_flash_350:
-    - yt/frontends/flash/tests/test_outputs.py
-
-  local_gadget_350:
-    - yt/frontends/gadget/tests/test_outputs.py
-
-  local_gdf_350:
-    - yt/frontends/gdf/tests/test_outputs.py
-
-  local_halos_350:
-    - yt/frontends/owls_subfind/tests/test_outputs.py
-    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5
-    - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42
-  
-  local_owls_350:
-    - yt/frontends/owls/tests/test_outputs.py
-  
-  local_pw_350:
-    - yt/visualization/tests/test_plotwindow.py:test_attributes
-    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
-    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
-    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
-  
-  local_tipsy_350:
-    - yt/frontends/tipsy/tests/test_outputs.py
-  
-  local_varia_351:
-    - yt/analysis_modules/radmc3d_export
-    - yt/frontends/moab/tests/test_c5.py
-    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-    - yt/visualization/volume_rendering/tests/test_mesh_render.py
-
-  local_orion_350:
-    - yt/frontends/boxlib/tests/test_orion.py
-  
-  local_ramses_350:
-    - yt/frontends/ramses/tests/test_outputs.py
-  
-  local_ytdata_350:
-    - yt/frontends/ytdata
-
-  local_absorption_spectrum_350:
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
-    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
-
-other_tests:
-  unittests:
-     - '-v'
-  cookbook:
-     - '-v'
-     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1432,6 +1432,9 @@
             center = self.ds.arr(center, 'code_length')
         if iterable(width):
             w, u = width
+            if isinstance(w, tuple) and isinstance(u, tuple):
+                height = u
+                w, u = w
             width = self.ds.quan(w, input_units = u)
         elif not isinstance(width, YTArray):
             width = self.ds.quan(width, 'code_length')

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -38,6 +38,13 @@
     add_volume_weighted_smoothed_field, \
     sph_whitelist_fields
 
+def tupleize(inp):
+    if isinstance(inp, tuple):
+        return inp
+    # prepending with a '?' ensures that the sort order is the same in py2 and
+    # py3, since names of field types shouldn't begin with punctuation
+    return ('?', inp, )
+
 class FieldInfoContainer(dict):
     """
     This is a generic field container.  It contains a list of potential derived
@@ -271,7 +278,7 @@
         self.ds.field_dependencies.update(deps)
         # Note we may have duplicated
         dfl = set(self.ds.derived_field_list).union(deps.keys())
-        self.ds.derived_field_list = list(sorted(dfl))
+        self.ds.derived_field_list = list(sorted(dfl, key=tupleize))
         return loaded, unavailable
 
     def add_output_field(self, name, **kwargs):
@@ -357,5 +364,5 @@
             deps[field] = fd
             mylog.debug("Succeeded with %s (needs %s)", field, fd.requested)
         dfl = set(self.ds.derived_field_list).union(deps.keys())
-        self.ds.derived_field_list = list(sorted(dfl))
+        self.ds.derived_field_list = list(sorted(dfl, key=tupleize))
         return deps, unavailable

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -493,10 +493,10 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
         theta = get_sph_theta(pos, normal)
         phi = get_sph_phi(pos, normal)
-        pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
         sphr = get_sph_r_component(vel, theta, phi, normal)
         return sphr
 
@@ -536,10 +536,10 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
+        pos = pos - np.reshape(center, (3, 1))
+        vel = vel - np.reshape(bv, (3, 1))
         theta = get_sph_theta(pos, normal)
         phi = get_sph_phi(pos, normal)
-        pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
         spht = get_sph_theta_component(vel, theta, phi, normal)
         return spht
 
@@ -664,9 +664,9 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
+        theta = get_cyl_theta(pos, normal)
         cylr = get_cyl_r_component(vel, theta, normal)
         return cylr
 
@@ -688,9 +688,9 @@
         bv = data.get_field_parameter("bulk_velocity")
         pos = data.ds.arr([data[ptype, spos % ax] for ax in "xyz"])
         vel = data.ds.arr([data[ptype, svel % ax] for ax in "xyz"])
-        theta = get_cyl_theta(pos, normal)
         pos = pos - np.reshape(center, (3, 1))
         vel = vel - np.reshape(bv, (3, 1))
+        theta = get_cyl_theta(pos, normal)
         cylt = get_cyl_theta_component(vel, theta, normal)
         return cylt
 

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -114,6 +114,24 @@
                        particle_type = particle_type,
                        units = unit_system["number_density"])
 
+def add_species_aliases(registry, ftype, alias_species, species):
+    """
+    This takes a field registry, a fluid type, and two species names.  
+    The first species name is one you wish to alias to an existing species
+    name.  For instance you might alias all "H_p0" fields to "H_" fields
+    to indicate that "H_" fields are really just neutral hydrogen fields.
+    This function registers field aliases for the density, number_density,
+    mass, and fraction fields between the two species given in the arguments.
+    """
+    registry.alias((ftype, "%s_density" % alias_species), 
+                   (ftype, "%s_density" % species))
+    registry.alias((ftype, "%s_fraction" % alias_species), 
+                   (ftype, "%s_fraction" % species))
+    registry.alias((ftype, "%s_number_density" % alias_species), 
+                   (ftype, "%s_number_density" % species))
+    registry.alias((ftype, "%s_mass" % alias_species), 
+                   (ftype, "%s_mass" % species))
+
 def add_nuclei_density_fields(registry, ftype,
                               particle_type = False):
     unit_system = registry.ds.unit_system
@@ -181,4 +199,10 @@
             # Skip it
             continue
         func(registry, ftype, species, particle_type)
+        # Adds aliases for all neutral species from their raw "MM_"
+        # species to "MM_p0_" species to be explicit.
+        # See YTEP-0003 for more details.
+        if (ChemicalFormula(species).charge == 0):
+            alias_species = "%s_p0" % species.split('_')[0]
+            add_species_aliases(registry, "gas", alias_species, species)
     add_nuclei_density_fields(registry, ftype, particle_type=particle_type)

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -301,6 +301,19 @@
     u2 = array_like_field(ad, 1., ("all", "particle_mass")).units
     assert u1 == u2
 
+def test_add_field_string():
+    ds = fake_random_ds(16)
+    ad = ds.all_data()
+
+    def density_alias(field, data):
+        return data['density']
+
+    ds.add_field('density_alias', function=density_alias, units='g/cm**3')
+
+    ad['density_alias']
+    assert ds.derived_field_list[0] == 'density_alias'
+
+
 if __name__ == "__main__":
     setup()
     for t in test_all_fields():
@@ -308,3 +321,4 @@
     test_add_deposited_particle_field()
     test_add_field_unit_semantics()
     test_array_like_field()
+    test_add_field_string()

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -151,12 +151,14 @@
             Na_code = data.ds.quan(Na, '1/code_mass')
             return data["flash","dens"]*data["flash","sumy"]*Na_code
         self.add_field(('flash','nion'), function=_nion, units="code_length**-3")
-        def _abar(field, data):
-            try:
-                return data["flash","abar"]
-            except:
-                return 1.0/data["flash","sumy"]
-        self.add_field(("flash","abar"), function=_abar, units="1")
+        
+        if ("flash", "abar") in self.field_list:
+            self.add_output_field(("flash", "abar"), units="1")
+        else:
+            def _abar(field, data):
+                return 1.0 / data["flash","sumy"]
+            self.add_field(("flash","abar"), function=_abar, units="1")
+
         def _number_density(fields,data):
             return (data["nele"]+data["nion"])
         self.add_field(("gas","number_density"), function=_number_density,

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -66,7 +66,7 @@
 from yt.data_objects.unstructured_mesh import \
     SemiStructuredMesh, \
     UnstructuredMesh
-from yt.extern.six import string_types, iteritems
+from yt.extern.six import string_types
 from .fields import \
     StreamFieldInfo
 
@@ -464,36 +464,46 @@
         ds.stream_handler.particle_count[gi] = npart
                                         
 def unitify_data(data):
-    if all([hasattr(val, 'units') for val in data.values()]):
-        new_data, field_units = {}, {}
-        for k, v in data.items():
-            field_units[k] = v.units
-            new_data[k] = v.copy().d
-        data = new_data
-    elif all([((not isinstance(val, np.ndarray)) and (len(val) == 2))
-             for val in data.values()]):
-        new_data, field_units = {}, {}
-        for field in data:
+    new_data, field_units = {}, {}
+    for field, val in data.items():
+        # val is a data array
+        if isinstance(val, np.ndarray):
+            # val is a YTArray
+            if hasattr(val, "units"):
+                field_units[field] = val.units
+                new_data[field] = val.copy().d
+            # val is a numpy array
+            else:
+                field_units[field] = ""
+                new_data[field] = val.copy()
+
+        # val is a tuple of (data, units)
+        elif isinstance(val, tuple) and len(val) == 2:
             try:
                 assert isinstance(field, (string_types, tuple)), \
                   "Field name is not a string!"
-                assert isinstance(data[field][0], np.ndarray), \
+                assert isinstance(val[0], np.ndarray), \
                   "Field data is not an ndarray!"
-                assert isinstance(data[field][1], string_types), \
+                assert isinstance(val[1], string_types), \
                   "Unit specification is not a string!"
-                field_units[field] = data[field][1]
-                new_data[field] = data[field][0]
+                field_units[field] = val[1]
+                new_data[field] = val[0]
             except AssertionError as e:
-                raise RuntimeError("The data dict appears to be invalid.\n" +
-                                   str(e))
-        data = new_data
-    elif all([iterable(val) for val in data.values()]):
-        field_units = {field:'' for field in data.keys()}
-        data = dict((field, np.asarray(val)) for field, val in iteritems(data))
-    else:
-        raise RuntimeError("The data dict appears to be invalid. "
-                           "The data dictionary must map from field "
-                           "names to (numpy array, unit spec) tuples. ")
+                raise RuntimeError(
+                    "The data dict appears to be invalid.\n" + str(e))
+
+        # val is a list of data to be turned into an array
+        elif iterable(val):
+            field_units[field] = ""
+            new_data[field] = np.asarray(val)
+
+        else:
+            raise RuntimeError("The data dict appears to be invalid. "
+                               "The data dictionary must map from field "
+                               "names to (numpy array, unit spec) tuples. ")
+
+    data = new_data
+
     # At this point, we have arrays for all our fields
     new_data = {}
     for field in data:

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/chemical_formulas.py
--- a/yt/utilities/chemical_formulas.py
+++ b/yt/utilities/chemical_formulas.py
@@ -29,6 +29,9 @@
                 charge = -int(ionization[1:])
             else:
                 raise NotImplementedError
+        elif self.formula_string.startswith('El'):
+            molecule = self.formula_string
+            charge = -1
         else:
             molecule = self.formula_string
             charge = 0

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1075,25 +1075,33 @@
             print("File must be a PNG file!")
             return 1
         image_data = base64.b64encode(open(filename, 'rb').read())
-        api_key = 'f62d550859558f28c4c214136bc797c7'
-        parameters = {'key':api_key, 'image':image_data, type:'base64',
-                      'caption': "",
+        api_key = 'e1977d9195fe39e'
+        headers = {'Authorization': 'Client-ID %s' % api_key}
+        parameters = {'image': image_data, type: 'base64',
+                      'name': filename,
                       'title': "%s uploaded by yt" % filename}
         data = urllib.parse.urlencode(parameters).encode('utf-8')
-        req = urllib.request.Request('http://api.imgur.com/2/upload.json', data)
+        req = urllib.request.Request('https://api.imgur.com/3/upload', data=data,
+                                     headers=headers)
         try:
             response = urllib.request.urlopen(req).read().decode()
         except urllib.error.HTTPError as e:
             print("ERROR", e)
             return {'uploaded':False}
         rv = json.loads(response)
-        if 'upload' in rv and 'links' in rv['upload']:
+        if 'data' in rv and 'link' in rv['data']:
+            delete_cmd = (
+                "curl -X DELETE -H 'Authorization: Client-ID {secret}'"
+                " https://api.imgur.com/3/image/{delete_hash}"
+            )
             print()
             print("Image successfully uploaded!  You can find it at:")
-            print("    %s" % (rv['upload']['links']['original']))
+            print("    %s" % (rv['data']['link']))
             print()
-            print("If you'd like to delete it, visit this page:")
-            print("    %s" % (rv['upload']['links']['delete_page']))
+            print("If you'd like to delete it, use the following")
+            print("    %s" % 
+                  delete_cmd.format(secret=api_key,
+                                    delete_hash=rv['data']['deletehash']))
             print()
         else:
             print()

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/lib/field_interpolation_tables.pxd
--- a/yt/utilities/lib/field_interpolation_tables.pxd
+++ b/yt/utilities/lib/field_interpolation_tables.pxd
@@ -16,6 +16,7 @@
 cimport cython
 cimport numpy as np
 from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, fabs
+from libc.stdlib cimport malloc
 
 DEF Nch = 4
 
@@ -26,6 +27,8 @@
     np.float64_t bounds[2]
     np.float64_t dbin
     np.float64_t idbin
+    np.float64_t *d0
+    np.float64_t *dy
     int field_id
     int weight_field_id
     int weight_table_id
@@ -41,12 +44,18 @@
 cdef inline void FIT_initialize_table(FieldInterpolationTable *fit, int nbins,
               np.float64_t *values, np.float64_t bounds1, np.float64_t bounds2,
               int field_id, int weight_field_id, int weight_table_id) nogil:
+    cdef int i
     fit.bounds[0] = bounds1; fit.bounds[1] = bounds2
     fit.nbins = nbins
     fit.dbin = (fit.bounds[1] - fit.bounds[0])/(fit.nbins-1)
     fit.idbin = 1.0/fit.dbin
     # Better not pull this out from under us, yo
     fit.values = values
+    fit.d0 = <np.float64_t *> malloc(sizeof(np.float64_t) * nbins)
+    fit.dy = <np.float64_t *> malloc(sizeof(np.float64_t) * nbins)
+    for i in range(nbins-1):
+        fit.d0[i] = fit.bounds[0] + i * fit.dbin
+        fit.dy[i] = (fit.values[i + 1] - fit.values[i]) * fit.idbin
     fit.field_id = field_id
     fit.weight_field_id = weight_field_id
     fit.weight_table_id = weight_table_id
@@ -56,18 +65,18 @@
 @cython.cdivision(True)
 cdef inline np.float64_t FIT_get_value(FieldInterpolationTable *fit,
                                        np.float64_t dvs[6]) nogil:
-    cdef np.float64_t bv, dy, dd
+    cdef np.float64_t dd, dout
     cdef int bin_id
     if dvs[fit.field_id] >= fit.bounds[1] or dvs[fit.field_id] <= fit.bounds[0]: return 0.0
     if not isnormal(dvs[fit.field_id]): return 0.0
     bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
     bin_id = iclip(bin_id, 0, fit.nbins-2)
-    dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
-    bv = fit.values[bin_id]
-    dy = fit.values[bin_id + 1] - bv
+
+    dd = dvs[fit.field_id] - fit.d0[bin_id] # x - x0
+    dout = fit.values[bin_id] + dd * fit.dy[bin_id]
     if fit.weight_field_id != -1:
-        return dvs[fit.weight_field_id] * (bv + dd*dy*fit.idbin)
-    return (bv + dd*dy*fit.idbin)
+        dout *= dvs[fit.weight_field_id]
+    return dout 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -853,9 +853,11 @@
             self.sampler = volume_render_stars_sampler
 
     def __dealloc__(self):
-        return
-        #free(self.vra.fits)
-        #free(self.vra)
+        for i in range(self.vra.n_fits):
+            free(self.vra.fits[i].d0)
+            free(self.vra.fits[i].dy)
+        free(self.vra.fits)
+        free(self.vra)
 
 cdef class LightSourceRenderSampler(ImageSampler):
     cdef VolumeRenderAccumulator *vra
@@ -914,11 +916,13 @@
         self.sampler = volume_render_gradient_sampler
 
     def __dealloc__(self):
-        return
-        #free(self.vra.fits)
-        #free(self.vra)
-        #free(self.light_dir)
-        #free(self.light_rgba)
+        for i in range(self.vra.n_fits):
+            free(self.vra.fits[i].d0)
+            free(self.vra.fits[i].dy)
+        free(self.vra.light_dir)
+        free(self.vra.light_rgba)
+        free(self.vra.fits)
+        free(self.vra)
 
 
 @cython.boundscheck(False)

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/lib/mesh_intersection.pyx
--- a/yt/utilities/lib/mesh_intersection.pyx
+++ b/yt/utilities/lib/mesh_intersection.pyx
@@ -21,6 +21,7 @@
 cimport cython
 from libc.math cimport fabs, fmin, fmax, sqrt
 from yt.utilities.lib.mesh_samplers cimport sample_hex20
+from vec3_ops cimport dot, subtract, cross, distance
 
 
 @cython.boundscheck(False)
@@ -80,30 +81,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef float dot(const float* a, 
-               const float* b,
-               size_t N) nogil:
-    cdef int i
-    cdef float rv = 0.0
-    for i in range(N):
-        rv += a[i]*b[i]
-    return rv
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef void cross(const float* a, 
-                const float* b,
-                float* c) nogil:
-    c[0] = a[1]*b[2] - a[2]*b[1]
-    c[1] = a[2]*b[0] - a[0]*b[2]
-    c[2] = a[0]*b[1] - a[1]*b[0]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
 cdef void patchBoundsFunc(Patch* patches, 
                           size_t item,
                           rtcg.RTCBounds* bounds_o) nogil:
@@ -146,7 +123,7 @@
 
     # first we compute the two planes that define the ray.
     cdef float[3] n, N1, N2
-    cdef float A = dot(ray.dir, ray.dir, 3)
+    cdef float A = dot(ray.dir, ray.dir)
     for i in range(3):
         n[i] = ray.dir[i] / A
 
@@ -160,16 +137,16 @@
         N1[2] =-n[1]
     cross(N1, n, N2)
 
-    cdef float d1 = -dot(N1, ray.org, 3)
-    cdef float d2 = -dot(N2, ray.org, 3)
+    cdef float d1 = -dot(N1, ray.org)
+    cdef float d2 = -dot(N2, ray.org)
 
     # the initial guess is set to zero
     cdef float u = 0.0
     cdef float v = 0.0
     cdef float[3] S
     patchSurfaceFunc(patch, u, v, S)
-    cdef float fu = dot(N1, S, 3) + d1
-    cdef float fv = dot(N2, S, 3) + d2
+    cdef float fu = dot(N1, S) + d1
+    cdef float fv = dot(N2, S) + d2
     cdef float err = fmax(fabs(fu), fabs(fv))
     
     # begin Newton iteration
@@ -183,10 +160,10 @@
         # compute the Jacobian
         patchSurfaceDerivU(patch, u, v, Su)
         patchSurfaceDerivV(patch, u, v, Sv)
-        J11 = dot(N1, Su, 3)
-        J12 = dot(N1, Sv, 3)
-        J21 = dot(N2, Su, 3)
-        J22 = dot(N2, Sv, 3)
+        J11 = dot(N1, Su)
+        J12 = dot(N1, Sv)
+        J21 = dot(N2, Su)
+        J22 = dot(N2, Sv)
         det = (J11*J22 - J12*J21)
         
         # update the u, v values
@@ -194,17 +171,14 @@
         v -= (-J21*fu + J11*fv) / det
         
         patchSurfaceFunc(patch, u, v, S)
-        fu = dot(N1, S, 3) + d1
-        fv = dot(N2, S, 3) + d2
+        fu = dot(N1, S) + d1
+        fv = dot(N2, S) + d2
 
         err = fmax(fabs(fu), fabs(fv))
         iterations += 1
 
     # t is the distance along the ray to this hit
-    cdef float t = 0.0
-    for i in range(3):
-        t += (S[i] - ray.org[i])**2
-    t = sqrt(t)
+    cdef float t = distance(S, ray.org)
 
     # only count this if it's the closest hit
     if (t < ray.tnear or t > ray.Ng[0]):

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -21,6 +21,7 @@
 from yt.utilities.exceptions import YTPixelizeError, \
     YTElementTypeNotRecognized
 from libc.stdlib cimport malloc, free
+from vec3_ops cimport dot, cross, subtract
 from yt.utilities.lib.element_mappings cimport \
     ElementSampler, \
     P1Sampler3D, \
@@ -518,17 +519,11 @@
         vi2a = faces[n][1][0]
         vi2b = faces[n][1][1]
         # Shared vertex is vi1a and vi2a
-        for i in range(3):
-            vec1[i] = vertices[vi1b][i] - vertices[vi1a][i]
-            vec2[i] = vertices[vi2b][i] - vertices[vi2a][i]
-            npoint[i] = point[i] - vertices[vi1b][i]
-        # Now the cross product of vec1 x vec2
-        cp_vec[0] = vec1[1] * vec2[2] - vec1[2] * vec2[1]
-        cp_vec[1] = vec1[2] * vec2[0] - vec1[0] * vec2[2]
-        cp_vec[2] = vec1[0] * vec2[1] - vec1[1] * vec2[0]
-        dp = 0.0
-        for j in range(3):
-            dp += cp_vec[j] * npoint[j]
+        subtract(vertices[vi1b], vertices[vi1a], vec1)
+        subtract(vertices[vi2b], vertices[vi2a], vec2)
+        subtract(point, vertices[vi1b], npoint)
+        cross(vec1, vec2, cp_vec)
+        dp = dot(cp_vec, npoint)
         if match == 0:
             if dp < 0:
                 signs[n] = -1

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/lib/vec3_ops.pxd
--- a/yt/utilities/lib/vec3_ops.pxd
+++ b/yt/utilities/lib/vec3_ops.pxd
@@ -1,22 +1,22 @@
 cimport cython 
-import numpy as np
-cimport numpy as np
+cimport cython.floating
 from libc.math cimport sqrt
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline np.float64_t dot(const np.float64_t a[3], 
-                             const np.float64_t b[3]) nogil:
+cdef inline cython.floating dot(const cython.floating[3] a, 
+                                const cython.floating[3] b) nogil:
     return a[0]*b[0] + a[1]*b[1] + a[2]*b[2] 
 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void cross(const np.float64_t a[3], 
-                       const np.float64_t b[3],
-                       np.float64_t c[3]) nogil:
+cdef inline void cross(const cython.floating[3] a,
+                       const cython.floating[3] b,
+                       cython.floating c[3]) nogil:
     c[0] = a[1]*b[2] - a[2]*b[1]
     c[1] = a[2]*b[0] - a[0]*b[2]
     c[2] = a[0]*b[1] - a[1]*b[0]
@@ -25,26 +25,36 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void subtract(const np.float64_t a[3], 
-                          const np.float64_t b[3],
-                          np.float64_t c[3]) nogil:
+cdef inline void subtract(const cython.floating[3] a, 
+                          const cython.floating[3] b,
+                          cython.floating c[3]) nogil:
     c[0] = a[0] - b[0]
     c[1] = a[1] - b[1]
     c[2] = a[2] - b[2]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void fma(const np.float64_t f,
-                     const np.float64_t a[3], 
-                     const np.float64_t b[3],
-                     np.float64_t c[3]) nogil:
+cdef inline cython.floating distance(const cython.floating[3] a,
+                                     const cython.floating[3] b) nogil:
+    return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2 +(a[2] - b[2])**2)
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void fma(const cython.floating f,
+                     const cython.floating[3] a, 
+                     const cython.floating[3] b,
+                     cython.floating[3] c) nogil:
     c[0] = f * a[0] + b[0]
     c[1] = f * a[1] + b[1]
     c[2] = f * a[2] + b[2]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline np.float64_t L2_norm(const np.float64_t a[3]) nogil:
+cdef inline cython.floating L2_norm(const cython.floating[3] a) nogil:
     return sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2])

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/utilities/logger.py
--- a/yt/utilities/logger.py
+++ b/yt/utilities/logger.py
@@ -54,36 +54,43 @@
 
 ytLogger = logging.getLogger("yt")
 
-yt_sh = logging.StreamHandler(stream=stream)
-# create formatter and add it to the handlers
-formatter = logging.Formatter(ufstring)
-yt_sh.setFormatter(formatter)
-# add the handler to the logger
-ytLogger.addHandler(yt_sh)
-ytLogger.setLevel(level)
-ytLogger.propagate = False
- 
 def disable_stream_logging():
-    ytLogger.removeHandler(ytLogger.handlers[0])
+    if len(ytLogger.handlers) > 0:
+        ytLogger.removeHandler(ytLogger.handlers[0])
     h = logging.NullHandler()
     ytLogger.addHandler(h)
 
-original_emitter = yt_sh.emit
-
 def colorize_logging():
     f = logging.Formatter(cfstring)
     ytLogger.handlers[0].setFormatter(f)
     yt_sh.emit = add_coloring_to_emit_ansi(yt_sh.emit)
 
 def uncolorize_logging():
-    f = logging.Formatter(ufstring)
-    ytLogger.handlers[0].setFormatter(f)
-    yt_sh.emit = original_emitter
-
-if ytcfg.getboolean("yt", "coloredlogs"):
-    colorize_logging()
+    try:
+        f = logging.Formatter(ufstring)
+        ytLogger.handlers[0].setFormatter(f)
+        yt_sh.emit = original_emitter
+    except NameError:
+        # yt_sh and original_emitter are not defined because
+        # suppressStreamLogging is True, so we continue since there is nothing
+        # to uncolorize
+        pass
 
 if ytcfg.getboolean("yt", "suppressStreamLogging"):
     disable_stream_logging()
+else:
+    yt_sh = logging.StreamHandler(stream=stream)
+    # create formatter and add it to the handlers
+    formatter = logging.Formatter(ufstring)
+    yt_sh.setFormatter(formatter)
+    # add the handler to the logger
+    ytLogger.addHandler(yt_sh)
+    ytLogger.setLevel(level)
+    ytLogger.propagate = False
+
+    original_emitter = yt_sh.emit
+
+    if ytcfg.getboolean("yt", "coloredlogs"):
+        colorize_logging()
 
 ytLogger.debug("Set log level to %s", level)

diff -r 71eeada8e2678c1c55fdf7aa11868c2e99272637 -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -165,7 +165,7 @@
 
         position : number, YTQuantity, iterable, or 3 element YTArray
             If a scalar, assumes that the position is the same in all three
-            coordinates. If an interable, must contain only scalars or
+            coordinates. If an iterable, must contain only scalars or
             (length, unit) tuples.
         '''
 
@@ -195,7 +195,7 @@
         width : number, YTQuantity, iterable, or 3 element YTArray
             The width of the volume rendering in the horizontal, vertical, and
             depth directions. If a scalar, assumes that the width is the same in
-            all three directions. If an interable, must contain only scalars or
+            all three directions. If an iterable, must contain only scalars or
             (length, unit) tuples.
         '''
 
@@ -223,7 +223,7 @@
         focus : number, YTQuantity, iterable, or 3 element YTArray
             The width of the volume rendering in the horizontal, vertical, and
             depth directions. If a scalar, assumes that the width is the same in
-            all three directions. If an interable, must contain only scalars or
+            all three directions. If an iterable, must contain only scalars or
             (length, unit) tuples.
         '''
 
@@ -360,7 +360,7 @@
         width : number, YTQuantity, iterable, or 3 element YTArray
             The width of the volume rendering in the horizontal, vertical, and
             depth directions. If a scalar, assumes that the width is the same in
-            all three directions. If an interable, must contain only scalars or
+            all three directions. If an iterable, must contain only scalars or
             (length, unit) tuples.
         """
         self.width = width
@@ -378,7 +378,7 @@
 
         width : number, YTQuantity, iterable, or 3 element YTArray
             If a scalar, assumes that the position is the same in all three
-            coordinates. If an interable, must contain only scalars or
+            coordinates. If an iterable, must contain only scalars or
             (length, unit) tuples.
 
         north_vector : array_like, optional
@@ -402,7 +402,7 @@
 
         focus : number, YTQuantity, iterable, or 3 element YTArray
             If a scalar, assumes that the focus is the same is all three
-            coordinates. If an interable, must contain only scalars or
+            coordinates. If an iterable, must contain only scalars or
             (length, unit) tuples.
 
         """
@@ -470,7 +470,7 @@
             occur.  Defaults to None, which sets rotation around
             `north_vector`
         rot_center  : array_like, optional
-            Specifiy the center around which rotation will occur. Defaults
+            Specify the center around which rotation will occur. Defaults
             to None, which sets rotation around the original camera position
             (i.e. the camera position does not change)
 
@@ -526,7 +526,7 @@
         theta : float, in radians
              Angle (in radians) by which to pitch the view.
         rot_center  : array_like, optional
-            Specifiy the center around which rotation will occur.
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -554,7 +554,7 @@
         theta : float, in radians
              Angle (in radians) by which to yaw the view.
         rot_center  : array_like, optional
-            Specifiy the center around which rotation will occur.
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -582,7 +582,7 @@
         theta : float, in radians
              Angle (in radians) by which to roll the view.
         rot_center  : array_like, optional
-            Specifiy the center around which rotation will occur.
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -617,7 +617,7 @@
             occur.  Defaults to None, which sets rotation around the
             original `north_vector`
         rot_center  : array_like, optional
-            Specifiy the center around which rotation will occur. Defaults
+            Specify the center around which rotation will occur. Defaults
             to None, which sets rotation around the original camera position
             (i.e. the camera position does not change)
 


https://bitbucket.org/yt_analysis/yt/commits/2350683ee8c9/
Changeset:   2350683ee8c9
Branch:      yt
User:        atmyers
Date:        2016-04-13 16:42:50+00:00
Summary:     remove duplicate import
Affected #:  1 file

diff -r 5ada8f62d1d6bfe88aa77d2be553c8364360f332 -r 2350683ee8c925c7ac8db99436e16291a05e2836 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -9,7 +9,6 @@
     ElementSampler, \
     Q1Sampler3D, \
     P1Sampler3D, \
-    Q1Sampler3D, \
     W1Sampler3D
 
 cdef ElementSampler Q1Sampler = Q1Sampler3D()


https://bitbucket.org/yt_analysis/yt/commits/248ccc5ef8b4/
Changeset:   248ccc5ef8b4
Branch:      yt
User:        ngoldbaum
Date:        2016-04-20 18:22:12+00:00
Summary:     Merged in atmyers/yt (pull request #2107)

Allow MeshSources to optionally use the Cython ray-tracer instead of Embree.
Affected #:  14 files

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -1,6 +1,7 @@
 cimport cython 
 import numpy as np
 cimport numpy as np
+from yt.utilities.lib.element_mappings cimport ElementSampler
 
 # ray data structure
 cdef struct Ray:
@@ -11,6 +12,7 @@
     np.float64_t t_near
     np.float64_t t_far
     np.int64_t elem_id
+    np.int64_t near_boundary
 
 # axis-aligned bounding box
 cdef struct BBox:
@@ -30,7 +32,6 @@
     np.float64_t p0[3]
     np.float64_t p1[3]
     np.float64_t p2[3]
-    np.float64_t d0, d1, d2
     np.int64_t elem_id
     np.float64_t centroid[3]
     BBox bbox
@@ -38,6 +39,14 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
+    cdef np.float64_t* vertices
+    cdef np.float64_t* field_data
+    cdef np.int64_t num_tri_per_elem
+    cdef np.int64_t num_tri
+    cdef np.int64_t num_elem
+    cdef np.int64_t num_verts_per_elem
+    cdef np.int64_t num_field_per_elem
+    cdef ElementSampler sampler
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -1,10 +1,19 @@
-cimport cython 
+cimport cython
 import numpy as np
 cimport numpy as np
 from libc.math cimport fabs
 from libc.stdlib cimport malloc, free
 from cython.parallel import parallel, prange
 from vec3_ops cimport dot, subtract, cross
+from yt.utilities.lib.element_mappings cimport \
+    ElementSampler, \
+    Q1Sampler3D, \
+    P1Sampler3D, \
+    W1Sampler3D
+
+cdef ElementSampler Q1Sampler = Q1Sampler3D()
+cdef ElementSampler P1Sampler = P1Sampler3D()
+cdef ElementSampler W1Sampler = W1Sampler3D()
 
 cdef extern from "platform_dep.h" nogil:
     double fmax(double x, double y)
@@ -13,7 +22,16 @@
 cdef extern from "mesh_construction.h":
     enum:
         MAX_NUM_TRI
+
+    int HEX_NV
+    int HEX_NT
+    int TETRA_NV
+    int TETRA_NT
+    int WEDGE_NV
+    int WEDGE_NT
     int triangulate_hex[MAX_NUM_TRI][3]
+    int triangulate_tetra[MAX_NUM_TRI][3]
+    int triangulate_wedge[MAX_NUM_TRI][3]
 
 # define some constants
 cdef np.float64_t DETERMINANT_EPS = 1.0e-10
@@ -60,7 +78,6 @@
 
     if(t > DETERMINANT_EPS and t < ray.t_far):
         ray.t_far = t
-        ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
         ray.elem_id = tri.elem_id
         return True
 
@@ -104,31 +121,62 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def __cinit__(self,
-                  np.float64_t[:, ::1] vertices,
-                  np.int64_t[:, ::1] indices,
-                  np.float64_t[:, ::1] field_data):
+                  np.float64_t[:, :] vertices,
+                  np.int64_t[:, :] indices,
+                  np.float64_t[:, :] field_data):
 
-        cdef np.int64_t num_elem = indices.shape[0]
-        cdef np.int64_t num_tri = 12*num_elem
+        self.num_elem = indices.shape[0]
+        self.num_verts_per_elem = indices.shape[1]
+        self.num_field_per_elem = field_data.shape[1]
+
+        # We need to figure out what kind of elements we've been handed.
+        cdef int[MAX_NUM_TRI][3] tri_array
+        if self.num_verts_per_elem == 8:
+            self.num_tri_per_elem = HEX_NT
+            tri_array = triangulate_hex
+            self.sampler = Q1Sampler
+        elif self.num_verts_per_elem == 6:
+            self.num_tri_per_elem = WEDGE_NT
+            tri_array = triangulate_wedge
+            self.sampler = W1Sampler
+        elif self.num_verts_per_elem == 4:
+            self.num_tri_per_elem = TETRA_NT
+            tri_array = triangulate_tetra
+            self.sampler = P1Sampler
+        self.num_tri = self.num_tri_per_elem*self.num_elem
+
+        # allocate storage
+        cdef np.int64_t v_size = self.num_verts_per_elem * self.num_elem * 3
+        self.vertices = <np.float64_t*> malloc(v_size * sizeof(np.float64_t))
+        cdef np.int64_t f_size = self.num_field_per_elem * self.num_elem
+        self.field_data = <np.float64_t*> malloc(f_size * sizeof(np.float64_t))
+
+        # create data buffers
+        cdef np.int64_t i, j, k
+        cdef np.int64_t field_offset, vertex_offset
+        for i in range(self.num_elem):
+            for j in range(self.num_verts_per_elem):
+                vertex_offset = i*self.num_verts_per_elem*3 + j*3
+                for k in range(3):
+                    self.vertices[vertex_offset + k] = vertices[indices[i,j]][k]
+            field_offset = i*self.num_field_per_elem
+            for j in range(self.num_field_per_elem):
+                self.field_data[field_offset + j] = field_data[i][j]                
 
         # fill our array of triangles
-        cdef np.int64_t i, j, k
         cdef np.int64_t offset, tri_index
         cdef np.int64_t v0, v1, v2
         cdef Triangle* tri
-        self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))
-        for i in range(num_elem):
-            offset = 12*i
-            for j in range(12):
+        self.triangles = <Triangle*> malloc(self.num_tri * sizeof(Triangle))
+        for i in range(self.num_elem):
+            offset = self.num_tri_per_elem*i
+            for j in range(self.num_tri_per_elem):
                 tri_index = offset + j
                 tri = &(self.triangles[tri_index])
                 tri.elem_id = i
-                v0 = indices[i][triangulate_hex[j][0]]
-                v1 = indices[i][triangulate_hex[j][1]]
-                v2 = indices[i][triangulate_hex[j][2]]
-                tri.d0 = field_data[i][triangulate_hex[j][0]]
-                tri.d1 = field_data[i][triangulate_hex[j][1]]
-                tri.d2 = field_data[i][triangulate_hex[j][2]]
+                v0 = indices[i][tri_array[j][0]]
+                v1 = indices[i][tri_array[j][1]]
+                v2 = indices[i][tri_array[j][2]]
                 for k in range(3):
                     tri.p0[k] = vertices[v0][k]
                     tri.p1[k] = vertices[v1][k]
@@ -137,7 +185,7 @@
                     tri.bbox.left_edge[k]  = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
                     tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
 
-        self.root = self._recursive_build(0, num_tri)
+        self.root = self._recursive_build(0, self.num_tri)
 
     cdef void _recursive_free(self, BVHNode* node) nogil:
         if node.end - node.begin > LEAF_SIZE:
@@ -148,6 +196,8 @@
     def __dealloc__(self):
         self._recursive_free(self.root)
         free(self.triangles)
+        free(self.field_data)
+        free(self.vertices)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -189,6 +239,28 @@
     @cython.cdivision(True)
     cdef void intersect(self, Ray* ray) nogil:
         self._recursive_intersect(ray, self.root)
+        
+        if ray.elem_id < 0:
+            return
+
+        cdef np.float64_t[3] position
+        cdef np.int64_t i
+        for i in range(3):
+            position[i] = ray.origin[i] + ray.t_far*ray.direction[i]
+            
+        cdef np.float64_t* vertex_ptr
+        cdef np.float64_t* field_ptr         
+        vertex_ptr = self.vertices + ray.elem_id*self.num_verts_per_elem*3
+        field_ptr = self.field_data + ray.elem_id*self.num_field_per_elem
+
+        cdef np.float64_t[4] mapped_coord
+        self.sampler.map_real_to_unit(mapped_coord, vertex_ptr, position)
+        if self.num_field_per_elem == 1:
+            ray.data_val = field_ptr[0]
+        else:
+            ray.data_val = self.sampler.sample_at_unit_point(mapped_coord,
+                                                             field_ptr)
+        ray.near_boundary = self.sampler.check_mesh_lines(mapped_coord)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -281,6 +353,7 @@
             ray.t_far = INF
             ray.t_near = 0.0
             ray.data_val = 0
+            ray.elem_id = -1
             bvh.intersect(ray)
             image[i] = ray.data_val
 

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -35,6 +35,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 cdef class P1Sampler2D(ElementSampler):
 
@@ -75,6 +77,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 # This typedef defines a function pointer that defines the system
 # of equations that will be solved by the NonlinearSolveSamplers.
@@ -170,6 +174,8 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
 
 cdef class W1Sampler3D(NonlinearSolveSampler3D):
 
@@ -190,6 +196,9 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
+
 
 cdef class S2Sampler3D(NonlinearSolveSampler3D):
 
@@ -210,6 +219,9 @@
                              double tolerance,
                              int direction) nogil
 
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil
+
+
 
 cdef class NonlinearSolveSampler2D(ElementSampler):
 

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -95,6 +95,12 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     cdef double sample_at_real_point(self,
                                      double* vertices,
                                      double* field_values,
@@ -265,6 +271,37 @@
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        cdef double u, v, w
+        cdef double thresh = 2.0e-2
+        if mapped_coord[0] == 0:
+            u = mapped_coord[1]
+            v = mapped_coord[2]
+            w = mapped_coord[3]
+        elif mapped_coord[1] == 0:
+            u = mapped_coord[2]
+            v = mapped_coord[3]
+            w = mapped_coord[0]
+        elif mapped_coord[2] == 0:
+            u = mapped_coord[1]
+            v = mapped_coord[3]
+            w = mapped_coord[0]
+        else:
+            u = mapped_coord[1]
+            v = mapped_coord[2]
+            w = mapped_coord[0]
+        if ((u < thresh) or 
+            (v < thresh) or 
+            (w < thresh) or
+            (fabs(u - 1) < thresh) or 
+            (fabs(v - 1) < thresh) or 
+            (fabs(w - 1) < thresh)):
+            return 1
+        return -1
+
 
 cdef class NonlinearSolveSampler3D(ElementSampler):
 
@@ -373,7 +410,6 @@
             return 0
         return 1
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -386,6 +422,23 @@
         else:
             return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+            fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        else:
+            return -1
+
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -528,6 +581,22 @@
         else:
             return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+            fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
+              fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
+            return 1
+        else:
+            return -1
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -709,7 +778,6 @@
             return 0
         return 1
 
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -726,6 +794,32 @@
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_mesh_lines(self, double* mapped_coord) nogil:
+        cdef double r, s, t
+        cdef double thresh = 5.0e-2
+        r = mapped_coord[0]
+        s = mapped_coord[1]
+        t = mapped_coord[2]
+
+        cdef int near_edge_r, near_edge_s, near_edge_t
+        near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh)
+        near_edge_s = (s < thresh)
+        near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh
+        
+        # return 1 if the mapped point lies near an element boundary
+        # (the caller stores this in ray.instID to annotate mesh lines)
+        if (near_edge_r and near_edge_s):
+            return 1
+        elif (near_edge_r and near_edge_t):
+            return 1
+        elif (near_edge_s and near_edge_t):
+            return 1
+        else:
+            return -1
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -25,6 +25,8 @@
     np.float64_t *center
     np.float64_t[:,:,:] image
     np.float64_t[:,:] zbuffer
+    np.int64_t[:,:] image_used
+    np.int64_t[:,:] mesh_lines
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     np.float64_t[:,:] camera_data   # position, width, unit_vec[0,2]
@@ -59,6 +61,8 @@
     cdef sampler_function *sampler
     cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
+    cdef public object aimage_used
+    cdef public object amesh_lines
     cdef void *supp_data
     cdef np.float64_t width[3]
     cdef public object lens_type

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -383,6 +383,8 @@
                   *args, **kwargs):
         self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
         cdef np.float64_t[:,:] zbuffer
+        cdef np.int64_t[:,:] image_used
+        cdef np.int64_t[:,:] mesh_lines
         cdef np.float64_t[:,:] camera_data
         cdef int i
 
@@ -393,6 +395,10 @@
         zbuffer = kwargs.pop("zbuffer", None)
         if zbuffer is None:
             zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+
+        image_used = np.zeros((image.shape[0], image.shape[1]), "int64")
+        mesh_lines = np.zeros((image.shape[0], image.shape[1]), "int64")
+
         self.lens_type = kwargs.pop("lens_type", None)
         if self.lens_type == "plane-parallel":
             self.extent_function = calculate_extent_plane_parallel
@@ -400,7 +406,7 @@
         else:
             if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
                not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
-                msg = "Bad lense shape / direction for %s\n" % (self.lens_type)
+                msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
                 msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
                     vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
                     vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
@@ -426,7 +432,9 @@
         self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
         self.image.y_vec = <np.float64_t *> y_vec.data
-        self.image.zbuffer = zbuffer
+        self.image.zbuffer = self.azbuffer = zbuffer
+        self.image.image_used = self.aimage_used = image_used
+        self.image.mesh_lines = self.amesh_lines = mesh_lines
         self.image.nv[0] = image.shape[0]
         self.image.nv[1] = image.shape[1]
         for i in range(4): self.image.bounds[i] = bounds[i]
@@ -501,6 +509,7 @@
         self.image.vp_dir = None
         self.image.zbuffer = None
         self.image.camera_data = None
+        self.image.image_used = None
         free(self.image)
 
 

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -11,6 +11,7 @@
     int* element_indices   # which vertices belong to which *element*
     int tpe                # the number of triangles per element
     int vpe                # the number of vertices per element
+    int fpe                # the number of field values per element
 
 ctypedef struct Patch:
     float[8][3] v

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -25,7 +25,6 @@
 from mesh_samplers cimport \
     sample_hex, \
     sample_tetra, \
-    sample_element, \
     sample_wedge
 from pyembree.rtcore cimport \
     Vertex, \
@@ -104,7 +103,7 @@
                  np.ndarray indices,
                  np.ndarray data):
 
-        # We need now to figure out what kind of elements we've been handed.
+        # We need to figure out what kind of elements we've been handed.
         if indices.shape[1] == 8:
             self.vpe = HEX_NV
             self.tpe = HEX_NT
@@ -186,19 +185,18 @@
         datac.element_indices = self.element_indices
         datac.tpe = self.tpe
         datac.vpe = self.vpe
+        datac.fpe = self.fpe
         self.datac = datac
         
         rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene):
 
-        if self.fpe == 1:
-            self.filter_func = <rtcg.RTCFilterFunc> sample_element
-        elif self.fpe == 4:
+        if self.vpe == 4:
             self.filter_func = <rtcg.RTCFilterFunc> sample_tetra
-        elif self.fpe == 6:
+        elif self.vpe == 6:
             self.filter_func = <rtcg.RTCFilterFunc> sample_wedge
-        elif self.fpe == 8:
+        elif self.vpe == 8:
             self.filter_func = <rtcg.RTCFilterFunc> sample_hex
         else:
             raise NotImplementedError("Sampler type not implemented.")

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -13,6 +13,3 @@
 
 cdef void sample_hex20(void* userPtr,
                        rtcr.RTCRay& ray) nogil
-
-cdef void sample_element(void* userPtr,
-                         rtcr.RTCRay& ray) nogil

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -97,7 +97,9 @@
     
     for i in range(8):
         element_indices[i] = data.element_indices[elem_id*8+i]
-        field_data[i]      = data.field_data[elem_id*8+i]
+
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
 
     for i in range(8):
         vertices[i*3]     = data.vertices[element_indices[i]].x
@@ -107,22 +109,16 @@
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[3]
     Q1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
 
     # we use ray.instID to pass back whether the ray is near the
     # element boundary or not (used to annotate mesh lines)
-    if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-        fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    else:
-        ray.instID = -1
+    ray.instID = Q1Sampler.check_mesh_lines(mapped_coord)
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -152,7 +148,9 @@
     
     for i in range(6):
         element_indices[i] = data.element_indices[elem_id*6+i]
-        field_data[i]      = data.field_data[elem_id*6+i]
+
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
 
     for i in range(6):
         vertices[i*3]     = data.vertices[element_indices[i]].x
@@ -162,31 +160,12 @@
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[3]
     W1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
-
-    cdef double r, s, t
-    cdef double thresh = 5.0e-2
-    r = mapped_coord[0]
-    s = mapped_coord[1]
-    t = mapped_coord[2]
-
-    cdef int near_edge_r, near_edge_s, near_edge_t
-    near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh)
-    near_edge_s = (s < thresh)
-    near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh
-    
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if (near_edge_r and near_edge_s):
-        ray.instID = 1
-    elif (near_edge_r and near_edge_t):
-        ray.instID = 1
-    elif (near_edge_s and near_edge_t):
-        ray.instID = 1
-    else:
-        ray.instID = -1
-
+    ray.instID = W1Sampler.check_mesh_lines(mapped_coord)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -222,21 +201,8 @@
     S2Sampler.map_real_to_unit(mapped_coord, patch.vertices, position)
     val = S2Sampler.sample_at_unit_point(mapped_coord, patch.field_data)
     ray.time = val
-
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-        fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and
-          fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):
-        ray.instID = 1
-    else:
-        ray.instID = -1
-
+    ray.instID = S2Sampler.check_mesh_lines(mapped_coord)
+    
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -267,54 +233,19 @@
 
     for i in range(4):
         element_indices[i] = data.element_indices[elem_id*4+i]
-        field_data[i] = data.field_data[elem_id*4+i]
         vertices[i*3] = data.vertices[element_indices[i]].x
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
+    for i in range(data.fpe):
+        field_data[i] = data.field_data[elem_id*data.fpe+i]
+
     # we use ray.time to pass the value of the field
     cdef double mapped_coord[4]
     P1Sampler.map_real_to_unit(mapped_coord, vertices, position)
-    val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)
+    if data.fpe == 1:
+        val = field_data[0]
+    else:
+        val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)
     ray.time = val
-
-    cdef double u, v, w
-    cdef double thresh = 2.0e-2
-    u = ray.u
-    v = ray.v
-    w = 1.0 - u - v
-    # we use ray.instID to pass back whether the ray is near the
-    # element boundary or not (used to annotate mesh lines)
-    if ((u < thresh) or 
-        (v < thresh) or 
-        (w < thresh) or
-        (fabs(u - 1) < thresh) or 
-        (fabs(v - 1) < thresh) or 
-        (fabs(w - 1) < thresh)):
-        ray.instID = 1
-    else:
-        ray.instID = -1
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef void sample_element(void* userPtr,
-                         rtcr.RTCRay& ray) nogil:
-    cdef int ray_id, elem_id
-    cdef double val
-    cdef MeshDataContainer* data
-
-    data = <MeshDataContainer*> userPtr
-    ray_id = ray.primID
-    if ray_id == -1:
-        return
-
-    # ray_id records the id number of the hit according to
-    # embree, in which the primitives are triangles. Here,
-    # we convert this to the element id by dividing by the
-    # number of triangles per element.
-    elem_id = ray_id / data.tpe
-
-    val = data.field_data[elem_id]
-    ray.time = val
+    ray.instID = P1Sampler.check_mesh_lines(mapped_coord)

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,6 +1,6 @@
 """
-This file contains the MeshSampler class, which handles casting rays at a
-MeshSource using pyembree.
+This file contains the MeshSampler classes, which handle casting rays at a
+mesh source using either pyembree or the cython ray caster.
 
 
 """
@@ -25,6 +25,7 @@
     ImageContainer
 from cython.parallel import prange, parallel, threadid
 from yt.visualization.image_writer import apply_colormap
+from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray
 
 rtc.rtcInit(NULL)
 rtc.rtcSetErrorFunction(error_printer)
@@ -42,11 +43,8 @@
     def __dealloc__(self):
         rtcs.rtcDeleteScene(self.scene_i)
 
-cdef class MeshSampler(ImageSampler):
 
-    cdef public object image_used
-    cdef public object mesh_lines
-    cdef public object zbuffer
+cdef class EmbreeMeshSampler(ImageSampler):
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -70,15 +68,10 @@
         cdef np.float64_t width[3]
         for i in range(3):
             width[i] = self.width[i]
-        cdef np.ndarray[np.float64_t, ndim=1] data
         cdef np.ndarray[np.int64_t, ndim=1] used
         nx = im.nv[0]
         ny = im.nv[1]
         size = nx * ny
-        used = np.empty(size, dtype="int64")
-        mesh = np.empty(size, dtype="int64")
-        data = np.empty(size, dtype="float64")
-        zbuffer = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
         v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
         v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
@@ -99,13 +92,65 @@
             ray.time = 0
             ray.Ng[0] = 1e37  # we use this to track the hit distance
             rtcs.rtcIntersect(scene.scene_i, ray)
-            data[j] = ray.time
-            used[j] = ray.primID
-            mesh[j] = ray.instID
-            zbuffer[j] = ray.tfar
-        self.aimage = data
-        self.image_used = used
-        self.mesh_lines = mesh
-        self.zbuffer = zbuffer
+            im.image[vi, vj, 0] = ray.time
+            im.image_used[vi, vj] = ray.primID
+            im.mesh_lines[vi, vj] = ray.instID
+            im.zbuffer[vi, vj] = ray.tfar
         free(v_pos)
         free(v_dir)
+
+
+cdef class BVHMeshSampler(ImageSampler):
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, 
+                 BVH bvh,
+                 int num_threads = 0):
+        '''
+
+        Cast a ray through the BVH for each pixel and fill in the
+        image, zbuffer, and mesh-line annotation buffers.
+
+        '''
+
+        cdef int vi, vj, i, j
+        cdef ImageContainer *im = self.image
+        cdef np.float64_t *v_pos
+        cdef np.float64_t *v_dir
+        cdef np.int64_t nx, ny, size
+        cdef np.float64_t width[3]
+        for i in range(3):
+            width[i] = self.width[i]
+        cdef np.ndarray[np.float64_t, ndim=1] data
+        cdef np.ndarray[np.int64_t, ndim=1] used
+        nx = im.nv[0]
+        ny = im.nv[1]
+        size = nx * ny
+        cdef Ray* ray
+        with nogil, parallel():
+            ray = <Ray *> malloc(sizeof(Ray))
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size):
+                vj = j % ny
+                vi = (j - vj) / ny
+                vj = vj
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(3):
+                    ray.origin[i] = v_pos[i]
+                    ray.direction[i] = v_dir[i]
+                    ray.inv_dir[i] = 1.0 / v_dir[i]
+                ray.t_far = 1e37
+                ray.t_near = 0.0
+                ray.data_val = 0
+                ray.elem_id = -1
+                bvh.intersect(ray)
+                im.image[vi, vj, 0] = ray.data_val
+                im.image_used[vi, vj] = ray.elem_id
+                im.mesh_lines[vi, vj] = ray.near_boundary
+                im.zbuffer[vi, vj] = ray.t_far
+            free(v_pos)
+            free(v_dir)
+            free(ray)

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -20,6 +20,7 @@
 from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, imax, fabs
 from yt.utilities.exceptions import YTPixelizeError, \
     YTElementTypeNotRecognized
+from libc.stdlib cimport malloc, free
 from vec3_ops cimport dot, cross, subtract
 from yt.utilities.lib.element_mappings cimport \
     ElementSampler, \
@@ -30,9 +31,6 @@
     Q1Sampler2D, \
     W1Sampler3D
 
-cdef extern from "stdlib.h":
-    # NOTE that size_t might not be int
-    void *alloca(int)
 
 cdef extern from "pixelization_constants.h":
     enum:
@@ -573,8 +571,7 @@
     cdef int nvertices = conn.shape[1]
     cdef int ndim = coords.shape[1]
     cdef int num_field_vals = field.shape[1]
-    cdef double* mapped_coord
-    cdef int num_mapped_coords
+    cdef double[4] mapped_coord
     cdef ElementSampler sampler
 
     # Pick the right sampler and allocate storage for the mapped coordinate
@@ -617,10 +614,8 @@
         raise RuntimeError
 
     # allocate temporary storage
-    num_mapped_coords = sampler.num_mapped_coords
-    mapped_coord = <double*> alloca(sizeof(double) * num_mapped_coords)
-    vertices = <np.float64_t *> alloca(ndim * sizeof(np.float64_t) * nvertices)
-    field_vals = <np.float64_t *> alloca(sizeof(np.float64_t) * num_field_vals)
+    vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
+    field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
 
     # fill the image bounds and pixel size information here
     for i in range(ndim):
@@ -691,4 +686,7 @@
                             img[pi, pj, pk] = 1.0
                         elif sampler.check_near_edge(mapped_coord, thresh, yax):
                             img[pi, pj, pk] = 1.0
+
+    free(vertices)
+    free(field_vals)
     return img

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -24,6 +24,7 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler, new_mesh_sampler, \
     new_interpolated_projection_sampler
+from yt.utilities.lib.bounding_volume_hierarchy import BVH
 from yt.visualization.image_writer import apply_colormap
 from yt.data_objects.image_array import ImageArray
 from .zbuffer_array import ZBuffer
@@ -353,8 +354,9 @@
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
         self.field = field
-        self.mesh = None
+        self.volume = None
         self.current_image = None
+        self.engine = 'embree'
 
         # default color map
         self._cmap = ytcfg.get("yt", "default_colormap")
@@ -369,8 +371,14 @@
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        self.scene = mesh_traversal.YTEmbreeScene()
-        self.build_mesh()
+        if self.engine == 'embree':
+            self.volume = mesh_traversal.YTEmbreeScene()
+            self.build_volume_embree()
+        elif self.engine == 'bvh':
+            self.build_volume_bvh()
+        else:
+            raise NotImplementedError("Invalid ray-tracing engine selected. "
+                                      "Choices are 'embree' and 'bvh'.")
 
     def cmap():
         '''
@@ -410,12 +418,60 @@
     def _validate(self):
         """Make sure that all dependencies have been met"""
         if self.data_source is None:
-            raise RuntimeError("Data source not initialized")
+            raise RuntimeError("Data source not initialized.")
 
-        if self.mesh is None:
-            raise RuntimeError("Mesh not initialized")
+        if self.volume is None:
+            raise RuntimeError("Volume not initialized.")
 
-    def build_mesh(self):
+    def build_volume_embree(self):
+        """
+
+        This constructs the mesh that will be ray-traced by pyembree.
+
+        """
+        ftype, fname = self.field
+        mesh_id = int(ftype[-1]) - 1
+        index = self.data_source.ds.index
+        offset = index.meshes[mesh_id]._index_offset
+        field_data = self.data_source[self.field].d  # strip units
+
+        vertices = index.meshes[mesh_id].connectivity_coords
+        indices = index.meshes[mesh_id].connectivity_indices - offset
+
+        # if this is an element field, promote to 2D here
+        if len(field_data.shape) == 1:
+            field_data = np.expand_dims(field_data, 1)
+
+        # Here, we decide whether to render based on high-order or 
+        # low-order geometry. Right now, high-order geometry is only
+        # implemented for 20-point hexes.
+        if indices.shape[1] == 20:
+            self.mesh = mesh_construction.QuadraticElementMesh(self.volume,
+                                                               vertices,
+                                                               indices,
+                                                               field_data)
+        else:
+            # if this is another type of higher-order element, we demote
+            # to 1st order here, for now.
+            if indices.shape[1] == 27:
+                # hexahedral
+                mylog.warning("27-node hexes not yet supported, " +
+                              "dropping to 1st order.")
+                field_data = field_data[:, 0:8]
+                indices = indices[:, 0:8]
+            elif indices.shape[1] == 10:
+                # tetrahedral
+                mylog.warning("10-node tetrahedral elements not yet supported, " +
+                              "dropping to 1st order.")
+                field_data = field_data[:, 0:4]
+                indices = indices[:, 0:4]
+
+            self.mesh = mesh_construction.LinearElementMesh(self.volume,
+                                                            vertices,
+                                                            indices,
+                                                            field_data)
+
+    def build_volume_bvh(self):
         """
 
         This constructs the mesh that will be ray-traced.
@@ -436,32 +492,27 @@
 
         # Here, we decide whether to render based on high-order or 
         # low-order geometry. Right now, high-order geometry is only
-        # implemented for 20-point hexes.
+        # supported by Embree.
         if indices.shape[1] == 20:
-            self.mesh = mesh_construction.QuadraticElementMesh(self.scene,
-                                                               vertices,
-                                                               indices,
-                                                               field_data)
-        else:
-            # if this is another type of higher-order element, we demote
-            # to 1st order here, for now.
-            if indices.shape[1] == 27:
-                # hexahedral
-                mylog.warning("27-node hexes not yet supported, " +
-                              "dropping to 1st order.")
-                field_data = field_data[:, 0:8]
-                indices = indices[:, 0:8]
-            elif indices.shape[1] == 10:
-                # tetrahedral
-                mylog.warning("10-node tetrahedral elements not yet supported, " +
-                              "dropping to 1st order.")
-                field_data = field_data[:, 0:4]
-                indices = indices[:, 0:4]
+            # hexahedral
+            mylog.warning("20-node hexes not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:8]
+            indices = indices[:, 0:8]
+        elif indices.shape[1] == 27:
+            # hexahedral
+            mylog.warning("27-node hexes not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:8]
+            indices = indices[:, 0:8]
+        elif indices.shape[1] == 10:
+            # tetrahedral
+            mylog.warning("10-node tetrahedral elements not yet supported, " +
+                          "dropping to 1st order.")
+            field_data = field_data[:, 0:4]
+            indices = indices[:, 0:4]
 
-            self.mesh = mesh_construction.LinearElementMesh(self.scene,
-                                                            vertices,
-                                                            indices,
-                                                            field_data)
+        self.volume = BVH(vertices, indices, field_data)
 
     def render(self, camera, zbuffer=None):
         """Renders an image using the provided camera
@@ -495,18 +546,17 @@
                               zbuffer.z.reshape(shape[:2]))
         self.zbuffer = zbuffer
 
-        self.sampler = new_mesh_sampler(camera, self)
+        self.sampler = new_mesh_sampler(camera, self, engine=self.engine)
 
         mylog.debug("Casting rays")
-        self.sampler(self.scene)
+        self.sampler(self.volume)
         mylog.debug("Done casting rays")
 
         self.finalize_image(camera)
-        self.data = self.sampler.aimage
         self.current_image = self.apply_colormap()
 
         zbuffer += ZBuffer(self.current_image.astype('float64'),
-                           self.sampler.zbuffer)
+                           self.sampler.azbuffer)
         zbuffer.rgba = ImageArray(zbuffer.rgba)
         self.zbuffer = zbuffer
         self.current_image = self.zbuffer.rgba
@@ -523,16 +573,13 @@
         # reshape data
         Nx = camera.resolution[0]
         Ny = camera.resolution[1]
-        sam.aimage = sam.aimage.reshape(Nx, Ny)
-        sam.image_used = sam.image_used.reshape(Nx, Ny)
-        sam.mesh_lines = sam.mesh_lines.reshape(Nx, Ny)
-        sam.zbuffer = sam.zbuffer.reshape(Nx, Ny)
+        self.data = sam.aimage[:,:,0].reshape(Nx, Ny)
 
         # rotate
-        sam.aimage = np.rot90(sam.aimage, k=2)
-        sam.image_used = np.rot90(sam.image_used, k=2)
-        sam.mesh_lines = np.rot90(sam.mesh_lines, k=2)
-        sam.zbuffer = np.rot90(sam.zbuffer, k=2)
+        self.data = np.rot90(self.data, k=2)
+        sam.aimage_used = np.rot90(sam.aimage_used, k=2)
+        sam.amesh_lines = np.rot90(sam.amesh_lines, k=2)
+        sam.azbuffer = np.rot90(sam.azbuffer, k=2)
 
     def annotate_mesh_lines(self, color=None, alpha=1.0):
         r"""
@@ -558,7 +605,7 @@
         if color is None:
             color = np.array([0, 0, 0, alpha])
 
-        locs = [self.sampler.mesh_lines == 1]
+        locs = [self.sampler.amesh_lines == 1]
 
         self.current_image[:, :, 0][locs] = color[0]
         self.current_image[:, :, 1][locs] = color[1]
@@ -592,7 +639,7 @@
                                color_bounds=self._color_bounds,
                                cmap_name=self._cmap)/255.
         alpha = image[:, :, 3]
-        alpha[self.sampler.image_used == -1] = 0.0
+        alpha[self.sampler.aimage_used == -1] = 0.0
         image[:, :, 3] = alpha        
         return image
 

diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -14,7 +14,7 @@
     return data_source
 
 
-def new_mesh_sampler(camera, render_source):
+def new_mesh_sampler(camera, render_source, engine):
     params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     args = (
         np.atleast_3d(params['vp_pos']),
@@ -27,7 +27,10 @@
         params['width'],
     )
     kwargs = {'lens_type': params['lens_type']}
-    sampler = mesh_traversal.MeshSampler(*args, **kwargs)
+    if engine == 'embree':
+        sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs)
+    elif engine == 'bvh':
+        sampler = mesh_traversal.BVHMeshSampler(*args, **kwargs)
     return sampler

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://lists.spacepope.org/pipermail/yt-svn-spacepope.org/attachments/20160420/374dd555/attachment-0001.htm>


More information about the yt-svn mailing list