[yt-svn] commit/yt: ngoldbaum: Merged in MatthewTurk/yt (pull request #1809)

commits-noreply@bitbucket.org
Mon Nov 2 11:58:35 PST 2015


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/f65cfa6e4a8a/
Changeset:   f65cfa6e4a8a
Branch:      yt
User:        ngoldbaum
Date:        2015-11-02 19:58:23+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1809)

Refactor lens vector computation
Affected #:  10 files

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -47,6 +47,7 @@
                 include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/oct_visitors.pxd",
                          "yt/geometry/grid_container.pxd",

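The setup.py hunk above only registers yt/utilities/lib/grid_traversal.pxd as a build
dependency, so extensions that cimport its declarations get recompiled when the shared
header changes. A minimal sketch of the same numpy.distutils pattern, with a hypothetical
package, extension, and source name:

    # Sketch of declaring a shared .pxd as a build dependency with
    # numpy.distutils, mirroring the pattern used in yt/geometry/setup.py.
    # The package, extension, and source names below are placeholders.
    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        config = Configuration('example_pkg', parent_package, top_path)
        config.add_extension(
            "example_module",
            ["example_pkg/example_module.pyx"],
            include_dirs=["yt/utilities/lib/"],
            libraries=["m"],
            # Changes to these .pxd files trigger a rebuild of this extension.
            depends=["yt/utilities/lib/fp_utils.pxd",
                     "yt/utilities/lib/grid_traversal.pxd"])
        return config
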
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -20,17 +20,14 @@
 cimport kdtree_utils
 
 cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
+    np.float64_t[:,:,:] vp_pos
+    np.float64_t[:,:,:] vp_dir
     np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
+    np.float64_t[:,:,:] image
+    np.float64_t[:,:] zbuffer
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
     np.float64_t *x_vec
     np.float64_t *y_vec
 
@@ -43,19 +40,29 @@
                 int index[3],
                 void *data) nogil
 
+ctypedef void calculate_extent_function(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil
+
+cdef calculate_extent_function calculate_extent_plane_parallel
+
+ctypedef void generate_vector_info_function(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
+
+cdef generate_vector_info_function generate_vector_info_plane_parallel
+cdef generate_vector_info_function generate_vector_info_null
 
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
-
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
-
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil
+    cdef public object lens_type
+    cdef calculate_extent_function *extent_function
+    cdef generate_vector_info_function *vector_function
 
     cdef void setup(self, PartitionedGrid pg)
 

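The grid_traversal.pxd changes above replace raw float pointers plus hand-maintained
stride arrays in ImageContainer with typed memoryviews, and declare function-pointer
types (calculate_extent_function, generate_vector_info_function) so a sampler can carry
lens-specific routines. The memoryview side of the change is easy to see with plain
NumPy; a small sketch, with an arbitrary (nx, ny, 3) array standing in for vp_pos:

    import numpy as np

    # A typed memoryview (like an ndarray) carries its own shape and strides,
    # which is why the vp_strides/im_strides/vd_strides bookkeeping could be
    # deleted.  The array below is illustrative only.
    vp_pos = np.arange(8 * 6 * 3, dtype="float64").reshape(8, 6, 3)
    vi, vj, i = 2, 4, 1

    # Old-style access: manual stride arithmetic against a flat pointer
    # (the removed vp_strides fields stored strides[i] / 8, i.e. in elements).
    flat = vp_pos.ravel()
    offset = (vp_pos.strides[0] * vi +
              vp_pos.strides[1] * vj +
              vp_pos.strides[2] * i) // vp_pos.itemsize
    # New-style access: plain multi-dimensional indexing, as in vp_pos[vi, vj, i].
    assert flat[offset] == vp_pos[vi, vj, i]
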
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -17,7 +17,7 @@
 cimport numpy as np
 cimport cython
 #cimport healpix_interface
-from libc.stdlib cimport malloc, free, abs
+from libc.stdlib cimport malloc, calloc, free, abs
 from libc.math cimport exp, floor, log2, \
     lrint, fabs, atan, atan2, asin, cos, sin, sqrt
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -174,6 +174,85 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void calculate_extent_plane_parallel(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    # We do this for all eight corners
+    cdef np.float64_t temp
+    cdef np.float64_t *edges[2]
+    cdef np.float64_t cx, cy
+    cdef np.float64_t extrema[4]
+    cdef int i, j, k
+    edges[0] = vc.left_edge
+    edges[1] = vc.right_edge
+    extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                # This should rotate it into the vector plane
+                temp  = edges[i][0] * image.x_vec[0]
+                temp += edges[j][1] * image.x_vec[1]
+                temp += edges[k][2] * image.x_vec[2]
+                if temp < extrema[0]: extrema[0] = temp
+                if temp > extrema[1]: extrema[1] = temp
+                temp  = edges[i][0] * image.y_vec[0]
+                temp += edges[j][1] * image.y_vec[1]
+                temp += edges[k][2] * image.y_vec[2]
+                if temp < extrema[2]: extrema[2] = temp
+                if temp > extrema[3]: extrema[3] = temp
+    cx = cy = 0.0
+    for i in range(3):
+        cx += image.center[i] * image.x_vec[i]
+        cy += image.center[i] * image.y_vec[i]
+    rv[0] = lrint((extrema[0] - cx - image.bounds[0])/image.pdx)
+    rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
+    rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
+    rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
+
+# We do this for a bunch of lenses.  Fallback is to grab them from the vector
+# info supplied.
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void calculate_extent_null(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    rv[0] = 0
+    rv[1] = image.nv[0]
+    rv[2] = 0
+    rv[3] = image.nv[1]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void generate_vector_info_plane_parallel(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    cdef np.float64_t px, py
+    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+    # atleast_3d will add to beginning and end
+    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,3,0]*py + im.vp_pos[0,9,0]
+    v_pos[1] = im.vp_pos[0,1,0]*px + im.vp_pos[0,4,0]*py + im.vp_pos[0,10,0]
+    v_pos[2] = im.vp_pos[0,2,0]*px + im.vp_pos[0,5,0]*py + im.vp_pos[0,11,0]
+    for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void generate_vector_info_null(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    for i in range(3):
+        # Here's a funny thing: we use vi here because our *image* will be
+        # flattened.  That means that im.nv will be a better one-d offset,
+        # since vp_pos has funny strides.
+        v_pos[i] = im.vp_pos[vi, vj, i]
+        v_dir[i] = im.vp_dir[vi, vj, i]
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
@@ -181,8 +260,8 @@
 
 cdef class ImageSampler:
     def __init__(self,
-                  np.ndarray vp_pos,
-                  np.ndarray vp_dir,
+                  np.float64_t[:,:,:] vp_pos,
+                  np.float64_t[:,:,:] vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
@@ -190,91 +269,49 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   *args, **kwargs):
-        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
-        cdef ImageContainer *imagec = self.image
-        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
+        cdef np.float64_t[:,:] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
+        if zbuffer is None:
+            zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+        self.lens_type = kwargs.pop("lens_type", None)
+        if self.lens_type == "plane-parallel":
+            self.extent_function = calculate_extent_plane_parallel
+            self.vector_function = generate_vector_info_plane_parallel
+        else:
+            if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
+               not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
+                print "Bad lense shape / direction for %s" % (self.lens_type)
+                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                    vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
+                    vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
+                raise RuntimeError
+            self.extent_function = calculate_extent_null
+            self.vector_function = generate_vector_info_null
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
-        # de-allocation from reference counts.
-        self.avp_pos = vp_pos
-        self.avp_dir = vp_dir
+        # de-allocation from reference counts.  Note that we do this to the
+        # "atleast_3d" versions.  Also, note that we re-assign the input
+        # arguments.
+        self.image.vp_pos = vp_pos
+        self.image.vp_dir = vp_dir
+        self.image.image = self.aimage = image
         self.acenter = center
-        self.aimage = image
+        self.image.center = <np.float64_t *> center.data
         self.ax_vec = x_vec
+        self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
-        self.azbuffer = zbuffer
-        imagec.vp_pos = <np.float64_t *> vp_pos.data
-        imagec.vp_dir = <np.float64_t *> vp_dir.data
-        imagec.center = <np.float64_t *> center.data
-        imagec.image = <np.float64_t *> image.data
-        imagec.x_vec = <np.float64_t *> x_vec.data
-        imagec.y_vec = <np.float64_t *> y_vec.data
-        imagec.zbuffer = NULL
-        if zbuffer is not None:
-            imagec.zbuffer = <np.float64_t *> zbuffer.data
-        imagec.nv[0] = image.shape[0]
-        imagec.nv[1] = image.shape[1]
-        for i in range(4): imagec.bounds[i] = bounds[i]
-        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
-        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
+        self.image.y_vec = <np.float64_t *> y_vec.data
+        self.image.zbuffer = zbuffer
+        self.image.nv[0] = image.shape[0]
+        self.image.nv[1] = image.shape[1]
+        for i in range(4): self.image.bounds[i] = bounds[i]
+        self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
+        self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
         for i in range(3):
-            imagec.vp_strides[i] = vp_pos.strides[i] / 8
-            imagec.im_strides[i] = image.strides[i] / 8
             self.width[i] = width[i]
 
-        if vp_dir.ndim > 1:
-            for i in range(3):
-                imagec.vd_strides[i] = vp_dir.strides[i] / 8
-        elif vp_pos.ndim == 1:
-            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
-        else:
-            raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv):
-        # Extrema need to be re-centered
-        cdef np.float64_t cx, cy
-        cdef ImageContainer *im = self.image
-        cdef int i
-        cx = cy = 0.0
-        for i in range(3):
-            cx += im.center[i] * im.x_vec[i]
-            cy += im.center[i] * im.y_vec[i]
-        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
-        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
-        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
-        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil:
-        # We do this for all eight corners
-        cdef np.float64_t temp
-        cdef np.float64_t *edges[2]
-        edges[0] = vc.left_edge
-        edges[1] = vc.right_edge
-        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
-        cdef int i, j, k
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    # This should rotate it into the vector plane
-                    temp  = edges[i][0] * self.image.x_vec[0]
-                    temp += edges[j][1] * self.image.x_vec[1]
-                    temp += edges[k][2] * self.image.x_vec[2]
-                    if temp < extrema[0]: extrema[0] = temp
-                    if temp > extrema[1]: extrema[1] = temp
-                    temp  = edges[i][0] * self.image.y_vec[0]
-                    temp += edges[j][1] * self.image.y_vec[1]
-                    temp += edges[k][2] * self.image.y_vec[2]
-                    if temp < extrema[2]: extrema[2] = temp
-                    if temp > extrema[3]: extrema[3] = temp
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -282,7 +319,7 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef int vi, vj, hit, i, j, k, ni, nj, nn, xi, yi
         cdef np.int64_t offset
         cdef np.int64_t iter[4]
         cdef VolumeContainer *vc = pg.container
@@ -292,83 +329,43 @@
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.float64_t rgba[6]
-        cdef np.float64_t extrema[4]
         cdef np.float64_t max_t
         hit = 0
         cdef np.int64_t nx, ny, size
-        if im.vd_strides[0] == -1:
-            self.calculate_extent(extrema, vc)
-            self.get_start_stop(extrema, iter)
-            iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
-            iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
-            iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
-            iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
-            nx = (iter[1] - iter[0])
-            ny = (iter[3] - iter[2])
-            size = nx * ny
-        else:
-            nx = im.nv[0]
-            ny = 1
-            iter[0] = iter[1] = iter[2] = iter[3] = 0
-            size = nx
+        self.extent_function(self.image, vc, iter)
+        iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+        iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+        iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+        iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
+        nx = (iter[1] - iter[0])
+        ny = (iter[3] - iter[2])
+        size = nx * ny
         cdef ImageAccumulator *idata
-        cdef np.float64_t px, py
         cdef np.float64_t width[3]
+        cdef int use_vec, max_i
         for i in range(3):
             width[i] = self.width[i]
-        if im.vd_strides[0] == -1:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="static",chunksize=1):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    # Dynamically calculate the position
-                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                    for i in range(Nch): idata.rgba[i] = im.image[i + offset]
-                    if im.zbuffer != NULL:
-                        max_t = im.zbuffer[im.nv[0] * vi + vj]
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + offset] = idata.rgba[i]
-                free(idata)
-                free(v_pos)
-        else:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                # If we do not have a simple image plane, we have to cast all
-                # our rays 
-                for j in prange(size, schedule="dynamic", chunksize=100):
-                    offset = j * 3
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                    if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
-                        continue
-                    # Note that for Nch != 3 we need a different offset into
-                    # the image object than for the vectors!
-                    for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
-                    if im.zbuffer != NULL:
-                        max_t = fclip(im.zbuffer[j], 0.0, 1.0)
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
-                free(v_dir)
-                free(idata)
-                free(v_pos)
+        with nogil, parallel(num_threads = num_threads):
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size, schedule="static", chunksize=100):
+                vj = j % ny
+                vi = (j - vj) / ny + iter[0]
+                vj = vj + iter[2]
+                # Dynamically calculate the position
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(Nch):
+                    idata.rgba[i] = im.image[vi, vj, i]
+                max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
+                walk_volume(vc, v_pos, v_dir, self.sampler,
+                            (<void *> idata), NULL, max_t)
+                for i in range(Nch):
+                    im.image[vi, vj, i] = idata.rgba[i]
+            free(idata)
+            free(v_pos)
+            free(v_dir)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):

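In the grid_traversal.pyx changes above, the per-lens logic is reduced to two hooks:
extent_function shrinks the pixel window to the part of the image a brick can touch, and
vector_function produces the ray origin and direction for one pixel. For the
plane-parallel lens, the extent is found by projecting all eight brick corners onto the
image x/y axes and converting the extrema to pixel indices. A NumPy sketch of that
geometry, with made-up edges, axes, and bounds:

    import numpy as np

    # Sketch of calculate_extent_plane_parallel: project the eight corners of
    # a brick onto the image axes and turn the extrema into a pixel window.
    # All numbers below are illustrative.
    left_edge = np.array([0.25, 0.25, 0.25])
    right_edge = np.array([0.75, 0.75, 0.75])
    x_vec = np.array([1.0, 0.0, 0.0])
    y_vec = np.array([0.0, 1.0, 0.0])
    center = np.array([0.5, 0.5, 0.5])
    bounds = (-0.5, 0.5, -0.5, 0.5)            # (xmin, xmax, ymin, ymax)
    nx = ny = 64
    pdx = (bounds[1] - bounds[0]) / nx
    pdy = (bounds[3] - bounds[2]) / ny

    corners = np.array([[x, y, z]
                        for x in (left_edge[0], right_edge[0])
                        for y in (left_edge[1], right_edge[1])
                        for z in (left_edge[2], right_edge[2])])
    px, py = corners.dot(x_vec), corners.dot(y_vec)
    cx, cy = center.dot(x_vec), center.dot(y_vec)

    i0 = int(round((px.min() - cx - bounds[0]) / pdx))
    i1 = i0 + int(round((px.max() - px.min()) / pdx))
    j0 = int(round((py.min() - cy - bounds[2]) / pdy))
    j1 = j0 + int(round((py.max() - py.min()) / pdy))
    print(i0, i1, j0, j1)                      # pixel window covering the brick

In the sampler itself the window is then padded by one pixel on each side and clipped to
the image size before the parallel loop over rays.
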
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -75,53 +75,25 @@
         size = nx * ny
         data = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
-        if im.vd_strides[0] == -1:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny
-                vj = vj
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = im.vp_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-        else:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = v_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-            free(v_dir)
+        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        for j in range(size):
+            vj = j % ny
+            vi = (j - vj) / ny
+            vj = vj
+            self.vector_function(im, vi, vj, width, v_dir, v_pos)
+            for i in range(3):
+                ray.org[i] = v_pos[i]
+                ray.dir[i] = v_dir[i]
+            ray.tnear = 0.0
+            ray.tfar = 1e37
+            ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.mask = -1
+            ray.time = 0
+            rtcs.rtcIntersect(scene.scene_i, ray)
+            data[j] = ray.time
+        self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+        free(v_pos)
+        free(v_dir)

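The mesh sampler above now shares the same per-pixel ray generation: a flattened loop
index j is decomposed into pixel coordinates (vi, vj) and handed to vector_function. The
decomposition is an integer divmod; a small sketch (in the grid sampler the result is
additionally offset by the window origin iter[0], iter[2]):

    # j = vi * ny + vj, so vj = j % ny and vi = (j - vj) // ny, i.e. divmod.
    ny = 48                                    # illustrative pixel count along the second image axis
    for j in (0, 1, ny, ny + 1, 5 * ny + 7):
        vi, vj = divmod(j, ny)
        assert j == vi * ny + vj
        print(j, "->", (vi, vj))
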
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -76,6 +76,7 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/utilities/lib/amr_kdtools.pxd",
                          "yt/utilities/lib/ContourFinding.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
@@ -178,7 +179,8 @@
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/mesh_traversal.pxd"])
+                             depends=["yt/utilities/lib/mesh_traversal.pxd",
+                                      "yt/utilities/lib/grid_traversal.pxd"])
         config.add_extension("mesh_samplers",
                              ["yt/utilities/lib/mesh_samplers.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -104,7 +104,7 @@
                  x_vec=camera.unit_vectors[0],
                  y_vec=camera.unit_vectors[1],
                  width=np.array(camera.width, dtype='float64'),
-                 image=image)
+                 image=image, lens_type="plane-parallel")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -144,7 +144,7 @@
 
     def new_image(self, camera):
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
+            np.zeros((camera.resolution[0], camera.resolution[1], 
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
@@ -204,8 +204,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="perspective")
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -266,8 +266,8 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
-                      4), dtype='float64', order='C'),
+            np.zeros((camera.resolution[0], camera.resolution[1], 4),
+                     dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
@@ -294,9 +294,9 @@
         vectors_comb = np.vstack([vectors_left, vectors_right])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params =\
             dict(vp_pos=positions_comb,
@@ -306,8 +306,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="stereo-perspective")
 
         return sampler_params
 
@@ -476,19 +476,19 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]**2, 1,
+            np.zeros((camera.resolution[0], camera.resolution[0],
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
         vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
-        vp.shape = (camera.resolution[0]**2, 1, 3)
+        vp.shape = (camera.resolution[0], camera.resolution[0], 3)
         vp = vp.dot(np.linalg.inv(self.rotation_matrix))
         vp *= self.radius
         uv = np.ones(3, dtype='float64')
-        positions = np.ones((camera.resolution[0]**2, 1, 3),
-                            dtype='float64') * camera.position
+        positions = np.ones((camera.resolution[0], camera.resolution[0], 3),
+            dtype='float64') * camera.position
 
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
@@ -503,8 +503,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="fisheye")
 
         return sampler_params
 
@@ -606,9 +606,9 @@
             image = self.new_image(camera)
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions,
@@ -618,7 +618,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type="spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -731,9 +732,9 @@
         vectors_comb = np.vstack([vectors, vectors])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions_comb,
@@ -743,7 +744,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type = "stereo-spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):

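With the lens.py changes above, every lens hands the sampler per-pixel arrays shaped
(nx, ny, 3) for vp_pos/vp_dir and (nx, ny, 4) for the image, plus a lens_type string,
instead of the old flattened (nx*ny, 1, k) layout. A hedged sketch of what a
non-plane-parallel lens now builds, with placeholder resolution, position, and direction
values:

    import numpy as np

    # Illustrative per-pixel layout for a non-plane-parallel lens; the
    # resolution, camera position, and ray directions are placeholders.
    nx, ny = 32, 24
    positions = np.ones((nx, ny, 3), dtype="float64") * np.array([0.5, 0.5, 0.5])
    vectors = np.zeros((nx, ny, 3), dtype="float64")
    vectors[..., 2] = 1.0                      # e.g. all rays along +z
    image = np.zeros((nx, ny, 4), dtype="float64", order="C")

    sampler_params = dict(
        vp_pos=positions,
        vp_dir=vectors,
        center=np.array([0.5, 0.5, 0.5]),
        bounds=(0.0, 1.0, 0.0, 1.0),
        x_vec=np.ones(3, dtype="float64"),     # dummies, as in the spherical lens
        y_vec=np.ones(3, dtype="float64"),
        width=np.zeros(3, dtype="float64"),
        image=image,
        lens_type="perspective")               # the new key read by ImageSampler
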
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -598,16 +598,16 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     star_trees = None
-    def get_sampler(self, args):
-        kwargs = {}
+    def get_sampler(self, args, kwargs):
         if self.star_trees is not None:
             kwargs = {'star_list': self.star_trees}
         if self.use_light:
@@ -781,8 +781,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1248,14 +1248,14 @@
         positions = self.ds.arr(positions, input_units="code_length")
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (self.resolution[0]*self.resolution[1],1,4)
+        image.shape = (self.resolution[0], self.resolution[1],4)
 
         args = (positions, vectors, self.back_center,
                 (0.0,1.0,0.0,1.0),
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'perspective'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -1430,7 +1430,7 @@
         if self._needs_tf:
             args += (self.transfer_function,)
         args += (self.sub_samples,)
-        return args
+        return args, {}
 
     def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
@@ -1492,8 +1492,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.volume.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1653,7 +1653,7 @@
                 image, uv, uv,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {}
 
 
     def finalize_image(self, image):
@@ -1799,8 +1799,8 @@
             mylog.debug('Working on: %i %i' % (self.imi, self.imj))
             self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
             image = self.new_image()
-            args = self.get_sampler_args(image)
-            sampler = self.get_sampler(args)
+            args, kwargs = self.get_sampler_args(image)
+            sampler = self.get_sampler(args, kwargs)
             image = self._render(double_check, num_threads, image, sampler)
             sto.id = self.imj*self.nimx + self.imi
             sto.result = image
@@ -2405,11 +2405,11 @@
         except AttributeError:
             pass
 
-    def get_sampler(self, args):
+    def get_sampler(self, args, kwargs):
         if self.interpolated:
-            sampler = InterpolatedProjectionSampler(*args)
+            sampler = InterpolatedProjectionSampler(*args, **kwargs)
         else:
-            sampler = ProjectionSampler(*args)
+            sampler = ProjectionSampler(*args, **kwargs)
         return sampler
 
     def initialize_source(self):
@@ -2420,12 +2420,13 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
             (-self.width[0]/2., self.width[0]/2.,
              -self.width[1]/2., self.width[1]/2.),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     def finalize_image(self,image):
         ds = self.ds
@@ -2506,9 +2507,9 @@
 
         image = self.new_image()
 
-        args = self.get_sampler_args(image)
+        args, kwargs = self.get_sampler_args(image)
 
-        sampler = self.get_sampler(args)
+        sampler = self.get_sampler(args, kwargs)
 
         self.initialize_source()
 
@@ -2559,7 +2560,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'spherical'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -2639,7 +2640,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'stereo-spherical'}
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, transparent=False):
@@ -2649,16 +2650,16 @@
 
         self.disparity_s = self.disparity
         image1 = self.new_image()
-        args1 = self.get_sampler_args(image1)
-        sampler1 = self.get_sampler(args1)
+        args1, kwargs1 = self.get_sampler_args(image1)
+        sampler1 = self.get_sampler(args1, kwargs1)
         self.initialize_source()
         image1 = self._render(double_check, num_threads,
                               image1, sampler1, '(Left) ')
 
         self.disparity_s = -self.disparity
         image2 = self.new_image()
-        args2 = self.get_sampler_args(image2)
-        sampler2 = self.get_sampler(args2)
+        args2, kwargs2 = self.get_sampler_args(image2)
+        sampler2 = self.get_sampler(args2, kwargs2)
         self.initialize_source()
         image2 = self._render(double_check, num_threads,
                               image2, sampler2, '(Right)')

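The old_camera.py changes above switch get_sampler_args to returning an (args, kwargs)
pair so that lens_type (and, where applicable, a zbuffer or star list) can reach the
sampler constructor, and get_sampler to accepting both. A minimal sketch of that calling
pattern, with a stand-in sampler class:

    # ExampleSampler stands in for ProjectionSampler / VolumeRenderSampler;
    # only the (args, kwargs) hand-off is being illustrated.
    class ExampleSampler(object):
        def __init__(self, *args, **kwargs):
            self.args = args
            self.lens_type = kwargs.get("lens_type")

    def get_sampler_args(image):
        args = (image,)                        # positional sampler arguments
        return args, {"lens_type": "plane-parallel"}

    def get_sampler(args, kwargs):
        return ExampleSampler(*args, **kwargs)

    args, kwargs = get_sampler_args(image=None)
    sampler = get_sampler(args, kwargs)
    assert sampler.lens_type == "plane-parallel"
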
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -17,17 +17,17 @@
 def new_mesh_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
-        params['image'],
+        np.atleast_3d(params['image']),
         params['x_vec'],
         params['y_vec'],
         params['width'],
     )
-
-    sampler = mesh_traversal.MeshSampler(*args)
+    kwargs = {'lens_type': params['lens_type']}
+    sampler = mesh_traversal.MeshSampler(*args, **kwargs)
     return sampler
 
 
@@ -37,8 +37,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -48,10 +48,12 @@
         params['transfer_function'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
         args[4][:] = render_source.zbuffer.rgba[:]
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
 
     sampler = VolumeRenderSampler(*args, **kwargs)
     return sampler
@@ -62,8 +64,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -72,10 +74,12 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = InterpolatedProjectionSampler(*args)
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
+    sampler = InterpolatedProjectionSampler(*args, **kwargs)
     return sampler
 
 
@@ -84,8 +88,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -94,10 +98,12 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = ProjectionSampler(*args)
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
+    sampler = ProjectionSampler(*args, **kwargs)
     return sampler
 
 

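The utils.py changes above wrap vp_pos and vp_dir in np.atleast_3d and always pass a
zbuffer (defaulting to all ones), because ImageContainer now expects 3-D and 2-D
memoryviews. For the plane-parallel camera, the 1-D length-12 vector of rotation matrix
plus back-center entries becomes a (1, 12, 1) array, which matches the vp_pos[0, k, 0]
indexing in generate_vector_info_plane_parallel. A small check, with arbitrary values:

    import numpy as np

    # atleast_3d adds one leading and one trailing axis to a 1-D array.
    rotp = np.arange(12, dtype="float64")      # placeholder values
    vp_pos = np.atleast_3d(rotp)
    print(vp_pos.shape)                        # (1, 12, 1)
    assert vp_pos[0, 9, 0] == rotp[9]

    # Default zbuffer: no occlusion anywhere, so every ray walks to max_t = 1.
    image = np.zeros((32, 24, 4), dtype="float64")
    zbuffer = np.ones(image.shape[:2], "float64")
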
diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -20,12 +20,12 @@
 from yt.utilities.exceptions import YTSceneFieldNotFound
 
 
-def create_scene(data_source, field=None):
-    r""" Set up a scene object with sensible defaults for use in volume 
+def create_scene(data_source, field=None, lens_type='plane-parallel'):
+    r""" Set up a scene object with sensible defaults for use in volume
     rendering.
 
     A helper function that creates a default camera view, transfer
-    function, and image size. Using these, it returns an instance 
+    function, and image size. Using these, it returns an instance
     of the Scene class, allowing one to further modify their rendering.
 
     This function is the same as volume_render() except it doesn't render
@@ -37,11 +37,16 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------
@@ -60,17 +65,18 @@
     if field is None:
         field = data_source.ds.default_field
         if field not in data_source.ds.derived_field_list:
-            raise YTSceneFieldNotFound("""Could not find field '%s' in %s. 
-                  Please specify a field in create_scene()""" % \
-                  (field, data_source.ds))
+            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
+                  Please specify a field in create_scene()""" % (field, data_source.ds))
         mylog.info('Setting default field to %s' % field.__repr__())
 
     vol = VolumeSource(data_source, field=field)
     sc.add_source(vol)
-    sc.camera = Camera(data_source)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
     return sc
 
-def volume_render(data_source, field=None, fname=None, sigma_clip=None):
+
+def volume_render(data_source, field=None, fname=None, sigma_clip=None,
+                  lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
     A helper function that creates a default camera view, transfer
@@ -84,10 +90,10 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename
@@ -97,6 +103,11 @@
         using a threshold based on sigma_clip multiplied by the standard
         deviation of the pixel values. Recommended values are between 2 and 6.
         Default: None
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------

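The volume_rendering.py changes above expose the lens selection through a lens_type
keyword on create_scene and volume_render, forwarded to the Camera. A usage sketch,
assuming the Scene render/save methods of the new volume rendering API; the dataset path
and field are placeholders:

    import yt
    from yt.visualization.volume_rendering.volume_rendering import create_scene

    # 'perspective' is one of the options named in the docstring
    # ('plane-parallel', 'perspective', 'fisheye').
    ds = yt.load("my_dataset/output_0001")     # hypothetical dataset
    sc = create_scene(ds, field=("gas", "density"), lens_type="perspective")
    sc.render()
    sc.save("render_perspective.png")
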
Repository URL: https://bitbucket.org/yt_analysis/yt/
