<html><body>
<p>1 new commit in yt:</p>
<p><a href="https://bitbucket.org/yt_analysis/yt/commits/248ccc5ef8b4/">https://bitbucket.org/yt_analysis/yt/commits/248ccc5ef8b4/</a> Changeset: 248ccc5ef8b4 Branch: yt User: ngoldbaum Date: 2016-04-20 18:22:12+00:00 Summary: Merged in atmyers/yt (pull request #2107)</p>
<p>Allow MeshSources to optionally use the Cython ray-tracer instead of Embree. Affected #: 14 files</p>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/bounding_volume_hierarchy.pxd --- a/yt/utilities/lib/bounding_volume_hierarchy.pxd +++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd @@ -1,6 +1,7 @@</p>
<pre>cimport cython
import numpy as np
cimport numpy as np</pre>
<p>+from yt.utilities.lib.element_mappings cimport ElementSampler</p>
<pre># ray data structure
cdef struct Ray:</pre>
<p>@@ -11,6 +12,7 @@</p>
<pre>np.float64_t t_near
np.float64_t t_far
np.int64_t elem_id</pre>
<p>+ np.int64_t near_boundary</p>
<pre># axis-aligned bounding box
cdef struct BBox:</pre>
<p>@@ -30,7 +32,6 @@</p>
<pre>np.float64_t p0[3]
np.float64_t p1[3]
np.float64_t p2[3]</pre>
<ul><li><p>np.float64_t d0, d1, d2 np.int64_t elem_id np.float64_t centroid[3] BBox bbox</p></li></ul>
<p>@@ -38,6 +39,14 @@</p>
<pre>cdef class BVH:
cdef BVHNode* root
cdef Triangle* triangles</pre>
<p>+ cdef np.float64_t* vertices + cdef np.float64_t* field_data + cdef np.int64_t num_tri_per_elem + cdef np.int64_t num_tri + cdef np.int64_t num_elem + cdef np.int64_t num_verts_per_elem + cdef np.int64_t num_field_per_elem + cdef ElementSampler sampler</p>
<pre> cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
np.int64_t ax, np.float64_t split) nogil
cdef void intersect(self, Ray* ray) nogil</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/bounding_volume_hierarchy.pyx --- a/yt/utilities/lib/bounding_volume_hierarchy.pyx +++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx @@ -1,10 +1,19 @@ -cimport cython +cimport cython</p>
<pre>import numpy as np
cimport numpy as np
from libc.math cimport fabs
from libc.stdlib cimport malloc, free
from cython.parallel import parallel, prange
from vec3_ops cimport dot, subtract, cross</pre>
<p>+from yt.utilities.lib.element_mappings cimport \ + ElementSampler, \ + Q1Sampler3D, \ + P1Sampler3D, \ + W1Sampler3D + +cdef ElementSampler Q1Sampler = Q1Sampler3D() +cdef ElementSampler P1Sampler = P1Sampler3D() +cdef ElementSampler W1Sampler = W1Sampler3D()</p>
<pre>cdef extern from "platform_dep.h" nogil:
double fmax(double x, double y)</pre>
<p>@@ -13,7 +22,16 @@</p>
<pre>cdef extern from "mesh_construction.h":
enum:
MAX_NUM_TRI</pre>
<p>+ + int HEX_NV + int HEX_NT + int TETRA_NV + int TETRA_NT + int WEDGE_NV + int WEDGE_NT</p>
<pre>int triangulate_hex[MAX_NUM_TRI][3]</pre>
<p>+ int triangulate_tetra[MAX_NUM_TRI][3] + int triangulate_wedge[MAX_NUM_TRI][3]</p>
<pre># define some constants
cdef np.float64_t DETERMINANT_EPS = 1.0e-10</pre>
<p>@@ -60,7 +78,6 @@</p>
<pre>if(t > DETERMINANT_EPS and t < ray.t_far):
ray.t_far = t</pre>
<ul><li><p>ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2 ray.elem_id = tri.elem_id return True</p></li></ul>
<p>@@ -104,31 +121,62 @@</p>
<pre>@cython.wraparound(False)
@cython.cdivision(True)
def __cinit__(self,</pre>
<ul><li><p>np.float64_t[:, ::1] vertices,</p></li>
<li><p>np.int64_t[:, ::1] indices,</p></li>
<li><p>np.float64_t[:, ::1] field_data):</p></li></ul>
<p>+ np.float64_t[:, :] vertices, + np.int64_t[:, :] indices, + np.float64_t[:, :] field_data):</p>
<ul><li><p>cdef np.int64_t num_elem = indices.shape[0]</p></li>
<li><p>cdef np.int64_t num_tri = 12*num_elem</p></li></ul>
<p>+ self.num_elem = indices.shape[0] + self.num_verts_per_elem = indices.shape[1] + self.num_field_per_elem = field_data.shape[1] + + # We need to figure out what kind of elements we've been handed. + cdef int[MAX_NUM_TRI][3] tri_array + if self.num_verts_per_elem == 8: + self.num_tri_per_elem = HEX_NT + tri_array = triangulate_hex + self.sampler = Q1Sampler + elif self.num_verts_per_elem == 6: + self.num_tri_per_elem = WEDGE_NT + tri_array = triangulate_wedge + self.sampler = W1Sampler + elif self.num_verts_per_elem == 4: + self.num_tri_per_elem = TETRA_NT + tri_array = triangulate_tetra + self.sampler = P1Sampler + self.num_tri = self.num_tri_per_elem*self.num_elem + + # allocate storage + cdef np.int64_t v_size = self.num_verts_per_elem * self.num_elem * 3 + self.vertices = <np.float64_t*> malloc(v_size * sizeof(np.float64_t)) + cdef np.int64_t f_size = self.num_field_per_elem * self.num_elem + self.field_data = <np.float64_t*> malloc(f_size * sizeof(np.float64_t)) + + # create data buffers + cdef np.int64_t i, j, k + cdef np.int64_t field_offset, vertex_offset + for i in range(self.num_elem): + for j in range(self.num_verts_per_elem): + vertex_offset = i*self.num_verts_per_elem*3 + j*3 + for k in range(3): + self.vertices[vertex_offset + k] = vertices[indices[i,j]][k] + field_offset = i*self.num_field_per_elem + for j in range(self.num_field_per_elem): + self.field_data[field_offset + j] = field_data[i][j]</p>
<pre># fill our array of triangles</pre>
<ul><li><p>cdef np.int64_t i, j, k cdef np.int64_t offset, tri_index cdef np.int64_t v0, v1, v2 cdef Triangle* tri</p></li>
<li><p>self.triangles = <Triangle*> malloc(num_tri * sizeof(Triangle))</p></li>
<li><p>for i in range(num_elem):</p></li>
<li><p>offset = 12*i</p></li>
<li><p>for j in range(12):</p></li></ul>
<p>+ self.triangles = <Triangle*> malloc(self.num_tri * sizeof(Triangle)) + for i in range(self.num_elem): + offset = self.num_tri_per_elem*i + for j in range(self.num_tri_per_elem):</p>
<pre>tri_index = offset + j
tri = &(self.triangles[tri_index])
tri.elem_id = i</pre>
<ul><li><p>v0 = indices[i][triangulate_hex[j][0]]</p></li>
<li><p>v1 = indices[i][triangulate_hex[j][1]]</p></li>
<li><p>v2 = indices[i][triangulate_hex[j][2]]</p></li>
<li><p>tri.d0 = field_data[i][triangulate_hex[j][0]]</p></li>
<li><p>tri.d1 = field_data[i][triangulate_hex[j][1]]</p></li>
<li><p>tri.d2 = field_data[i][triangulate_hex[j][2]]</p></li></ul>
<p>+ v0 = indices[i][tri_array[j][0]] + v1 = indices[i][tri_array[j][1]] + v2 = indices[i][tri_array[j][2]]</p>
<pre>for k in range(3):
tri.p0[k] = vertices[v0][k]
tri.p1[k] = vertices[v1][k]</pre>
<p>@@ -137,7 +185,7 @@</p>
<pre> tri.bbox.left_edge[k] = fmin(fmin(tri.p0[k], tri.p1[k]), tri.p2[k])
tri.bbox.right_edge[k] = fmax(fmax(tri.p0[k], tri.p1[k]), tri.p2[k])
</pre>
<ul><li><p>self.root = self._recursive_build(0, num_tri)</p></li></ul>
<p>+ self.root = self._recursive_build(0, self.num_tri)</p>
<pre>cdef void _recursive_free(self, BVHNode* node) nogil:
if node.end - node.begin > LEAF_SIZE:</pre>
<p>@@ -148,6 +196,8 @@</p>
<pre>def __dealloc__(self):
self._recursive_free(self.root)
free(self.triangles)</pre>
<p>+ free(self.field_data) + free(self.vertices)</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -189,6 +239,28 @@</p>
<pre>@cython.cdivision(True)
cdef void intersect(self, Ray* ray) nogil:
self._recursive_intersect(ray, self.root)</pre>
<p>+ + if ray.elem_id < 0: + return + + cdef np.float64_t[3] position + cdef np.int64_t i + for i in range(3): + position[i] = ray.origin[i] + ray.t_far*ray.direction[i] + + cdef np.float64_t* vertex_ptr + cdef np.float64_t* field_ptr + vertex_ptr = self.vertices + ray.elem_id*self.num_verts_per_elem*3 + field_ptr = self.field_data + ray.elem_id*self.num_field_per_elem + + cdef np.float64_t[4] mapped_coord + self.sampler.map_real_to_unit(mapped_coord, vertex_ptr, position) + if self.num_field_per_elem == 1: + ray.data_val = field_ptr[0] + else: + ray.data_val = self.sampler.sample_at_unit_point(mapped_coord, + field_ptr) + ray.near_boundary = self.sampler.check_mesh_lines(mapped_coord)</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -281,6 +353,7 @@</p>
<pre>ray.t_far = INF
ray.t_near = 0.0
ray.data_val = 0</pre>
<p>+ ray.elem_id = -1</p>
<pre> bvh.intersect(ray)
image[i] = ray.data_val
</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/element_mappings.pxd --- a/yt/utilities/lib/element_mappings.pxd +++ b/yt/utilities/lib/element_mappings.pxd @@ -35,6 +35,8 @@</p>
<pre> double tolerance,
int direction) nogil
</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil +</p>
<pre>cdef class P1Sampler2D(ElementSampler):
</pre>
<p>@@ -75,6 +77,8 @@</p>
<pre> double tolerance,
int direction) nogil
</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil +</p>
<pre># This typedef defines a function pointer that defines the system
# of equations that will be solved by the NonlinearSolveSamplers.</pre>
<p>@@ -170,6 +174,8 @@</p>
<pre> double tolerance,
int direction) nogil
</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil +</p>
<pre>cdef class W1Sampler3D(NonlinearSolveSampler3D):
</pre>
<p>@@ -190,6 +196,9 @@</p>
<pre> double tolerance,
int direction) nogil
</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil + +</p>
<pre>cdef class S2Sampler3D(NonlinearSolveSampler3D):
</pre>
<p>@@ -210,6 +219,9 @@</p>
<pre> double tolerance,
int direction) nogil
</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil + +</p>
<pre>cdef class NonlinearSolveSampler2D(ElementSampler):
</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/element_mappings.pyx --- a/yt/utilities/lib/element_mappings.pyx +++ b/yt/utilities/lib/element_mappings.pyx @@ -95,6 +95,12 @@</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)</pre>
<p>+ cdef int check_mesh_lines(self, double* mapped_coord) nogil: + pass + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True)</p>
<pre> cdef double sample_at_real_point(self,
double* vertices,
double* field_values,</pre>
<p>@@ -265,6 +271,37 @@</p>
<pre> return 1
return 0
</pre>
<p>+ @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int check_mesh_lines(self, double* mapped_coord) nogil: + cdef double u, v, w + cdef double thresh = 2.0e-2 + if mapped_coord[0] == 0: + u = mapped_coord[1] + v = mapped_coord[2] + w = mapped_coord[3] + elif mapped_coord[1] == 0: + u = mapped_coord[2] + v = mapped_coord[3] + w = mapped_coord[0] + elif mapped_coord[2] == 0: + u = mapped_coord[1] + v = mapped_coord[3] + w = mapped_coord[0] + else: + u = mapped_coord[1] + v = mapped_coord[2] + w = mapped_coord[0] + if ((u < thresh) or + (v < thresh) or + (w < thresh) or + (fabs(u - 1) < thresh) or + (fabs(v - 1) < thresh) or + (fabs(w - 1) < thresh)): + return 1 + return -1 +</p>
<pre>cdef class NonlinearSolveSampler3D(ElementSampler):
</pre>
<p>@@ -373,7 +410,6 @@</p>
<pre> return 0
return 1
</pre>
<p>-</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)</pre>
<p>@@ -386,6 +422,23 @@</p>
<pre> else:
return 0
</pre>
<p>+ @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int check_mesh_lines(self, double* mapped_coord) nogil: + if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1): + return 1 + elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1): + return 1 + elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1): + return 1 + else: + return -1 + +</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -528,6 +581,22 @@</p>
<pre> else:
return 0
</pre>
<p>+ @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int check_mesh_lines(self, double* mapped_coord) nogil: + if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1): + return 1 + elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1): + return 1 + elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and + fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1): + return 1 + else: + return -1 +</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -709,7 +778,6 @@</p>
<pre> return 0
return 1
</pre>
<p>-</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)</pre>
<p>@@ -726,6 +794,32 @@</p>
<pre> return 1
return 0
</pre>
<p>+ @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef int check_mesh_lines(self, double* mapped_coord) nogil: + cdef double r, s, t + cdef double thresh = 5.0e-2 + r = mapped_coord[0] + s = mapped_coord[1] + t = mapped_coord[2] + + cdef int near_edge_r, near_edge_s, near_edge_t + near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh) + near_edge_s = (s < thresh) + near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh + + # we use ray.instID to pass back whether the ray is near the + # element boundary or not (used to annotate mesh lines) + if (near_edge_r and near_edge_s): + return 1 + elif (near_edge_r and near_edge_t): + return 1 + elif (near_edge_s and near_edge_t): + return 1 + else: + return -1 +</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/grid_traversal.pxd --- a/yt/utilities/lib/grid_traversal.pxd +++ b/yt/utilities/lib/grid_traversal.pxd @@ -25,6 +25,8 @@</p>
<pre>np.float64_t *center
np.float64_t[:,:,:] image
np.float64_t[:,:] zbuffer</pre>
<p>+ np.int64_t[:,:] image_used + np.int64_t[:,:] mesh_lines</p>
<pre>np.float64_t pdx, pdy
np.float64_t bounds[4]
np.float64_t[:,:] camera_data # position, width, unit_vec[0,2]</pre>
<p>@@ -59,6 +61,8 @@</p>
<pre>cdef sampler_function *sampler
cdef public object acenter, aimage, ax_vec, ay_vec
cdef public object azbuffer</pre>
<p>+ cdef public object aimage_used + cdef public object amesh_lines</p>
<pre>cdef void *supp_data
cdef np.float64_t width[3]
cdef public object lens_type</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/grid_traversal.pyx --- a/yt/utilities/lib/grid_traversal.pyx +++ b/yt/utilities/lib/grid_traversal.pyx @@ -383,6 +383,8 @@</p>
<pre>*args, **kwargs):
self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
cdef np.float64_t[:,:] zbuffer</pre>
<p>+ cdef np.int64_t[:,:] image_used + cdef np.int64_t[:,:] mesh_lines</p>
<pre> cdef np.float64_t[:,:] camera_data
cdef int i
</pre>
<p>@@ -393,6 +395,10 @@</p>
<pre> zbuffer = kwargs.pop("zbuffer", None)
if zbuffer is None:
zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")</pre>
<p>+ + image_used = np.zeros((image.shape[0], image.shape[1]), "int64") + mesh_lines = np.zeros((image.shape[0], image.shape[1]), "int64") +</p>
<pre> self.lens_type = kwargs.pop("lens_type", None)
if self.lens_type == "plane-parallel":
self.extent_function = calculate_extent_plane_parallel</pre>
<p>@@ -400,7 +406,7 @@</p>
<pre> else:
if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):</pre>
<ul><li><p>msg = "Bad lense shape / direction for %s\n" % (self.lens_type)</p></li></ul>
<p>+ msg = "Bad lens shape / direction for %s\n" % (self.lens_type)</p>
<pre>msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
vp_pos.shape[1], vp_dir.shape[1], image.shape[1])</pre>
<p>@@ -426,7 +432,9 @@</p>
<pre>self.image.x_vec = <np.float64_t *> x_vec.data
self.ay_vec = y_vec
self.image.y_vec = <np.float64_t *> y_vec.data</pre>
<ul><li><p>self.image.zbuffer = zbuffer</p></li></ul>
<p>+ self.image.zbuffer = self.azbuffer = zbuffer + self.image.image_used = self.aimage_used = image_used + self.image.mesh_lines = self.amesh_lines = mesh_lines</p>
<pre>self.image.nv[0] = image.shape[0]
self.image.nv[1] = image.shape[1]
for i in range(4): self.image.bounds[i] = bounds[i]</pre>
<p>@@ -501,6 +509,7 @@</p>
<pre>self.image.vp_dir = None
self.image.zbuffer = None
self.image.camera_data = None</pre>
<p>+ self.image.image_used = None</p>
<pre> free(self.image)
</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_construction.pxd --- a/yt/utilities/lib/mesh_construction.pxd +++ b/yt/utilities/lib/mesh_construction.pxd @@ -11,6 +11,7 @@</p>
<pre>int* element_indices # which vertices belong to which *element*
int tpe # the number of triangles per element
int vpe # the number of vertices per element</pre>
<p>+ int fpe # the number of field values per element</p>
<pre>ctypedef struct Patch:
float[8][3] v</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_construction.pyx --- a/yt/utilities/lib/mesh_construction.pyx +++ b/yt/utilities/lib/mesh_construction.pyx @@ -25,7 +25,6 @@</p>
<pre>from mesh_samplers cimport \
sample_hex, \
sample_tetra, \</pre>
<ul><li><p>sample_element, \ sample_wedge</p></li></ul>
<pre>from pyembree.rtcore cimport \
Vertex, \</pre>
<p>@@ -104,7 +103,7 @@</p>
<pre> np.ndarray indices,
np.ndarray data):
</pre>
<ul><li><p># We need now to figure out what kind of elements we've been handed.</p></li></ul>
<p>+ # We need to figure out what kind of elements we've been handed.</p>
<pre> if indices.shape[1] == 8:
self.vpe = HEX_NV
self.tpe = HEX_NT</pre>
<p>@@ -186,19 +185,18 @@</p>
<pre>datac.element_indices = self.element_indices
datac.tpe = self.tpe
datac.vpe = self.vpe</pre>
<p>+ datac.fpe = self.fpe</p>
<pre> self.datac = datac
rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
cdef void _set_sampler_type(self, YTEmbreeScene scene):
</pre>
<ul><li><p>if self.fpe == 1:</p></li>
<li><p>self.filter_func = <rtcg.RTCFilterFunc> sample_element</p></li>
<li><p>elif self.fpe == 4:</p></li></ul>
<p>+ if self.vpe == 4:</p>
<pre>self.filter_func = <rtcg.RTCFilterFunc> sample_tetra</pre>
<ul><li><p>elif self.fpe == 6:</p></li></ul>
<p>+ elif self.vpe == 6:</p>
<pre>self.filter_func = <rtcg.RTCFilterFunc> sample_wedge</pre>
<ul><li><p>elif self.fpe == 8:</p></li></ul>
<p>+ elif self.vpe == 8:</p>
<pre>self.filter_func = <rtcg.RTCFilterFunc> sample_hex
else:
raise NotImplementedError("Sampler type not implemented.")</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_samplers.pxd --- a/yt/utilities/lib/mesh_samplers.pxd +++ b/yt/utilities/lib/mesh_samplers.pxd @@ -13,6 +13,3 @@</p>
<pre>cdef void sample_hex20(void* userPtr,
rtcr.RTCRay& ray) nogil</pre>
<p>- -cdef void sample_element(void* userPtr,</p>
<ul><li><p>rtcr.RTCRay& ray) nogil</p></li></ul>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_samplers.pyx --- a/yt/utilities/lib/mesh_samplers.pyx +++ b/yt/utilities/lib/mesh_samplers.pyx @@ -97,7 +97,9 @@</p>
<pre>for i in range(8):
element_indices[i] = data.element_indices[elem_id*8+i]</pre>
<ul><li><p>field_data[i] = data.field_data[elem_id*8+i]</p></li></ul>
<p>+ + for i in range(data.fpe): + field_data[i] = data.field_data[elem_id*data.fpe+i]</p>
<pre>for i in range(8):
vertices[i*3] = data.vertices[element_indices[i]].x</pre>
<p>@@ -107,22 +109,16 @@</p>
<pre># we use ray.time to pass the value of the field
cdef double mapped_coord[3]
Q1Sampler.map_real_to_unit(mapped_coord, vertices, position)</pre>
<ul><li><p>val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)</p></li></ul>
<p>+ if data.fpe == 1: + val = field_data[0] + else: + val = Q1Sampler.sample_at_unit_point(mapped_coord, field_data)</p>
<pre> ray.time = val
# we use ray.instID to pass back whether the ray is near the
# element boundary or not (used to annotate mesh lines)</pre>
<ul><li><p>if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>else:</p></li>
<li><p>ray.instID = -1</p></li></ul>
<p>+ ray.instID = Q1Sampler.check_mesh_lines(mapped_coord) +</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -152,7 +148,9 @@</p>
<pre>for i in range(6):
element_indices[i] = data.element_indices[elem_id*6+i]</pre>
<ul><li><p>field_data[i] = data.field_data[elem_id*6+i]</p></li></ul>
<p>+ + for i in range(data.fpe): + field_data[i] = data.field_data[elem_id*data.fpe+i]</p>
<pre>for i in range(6):
vertices[i*3] = data.vertices[element_indices[i]].x</pre>
<p>@@ -162,31 +160,12 @@</p>
<pre># we use ray.time to pass the value of the field
cdef double mapped_coord[3]
W1Sampler.map_real_to_unit(mapped_coord, vertices, position)</pre>
<ul><li><p>val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)</p></li></ul>
<p>+ if data.fpe == 1: + val = field_data[0] + else: + val = W1Sampler.sample_at_unit_point(mapped_coord, field_data)</p>
<pre>ray.time = val</pre>
<p>-</p>
<ul><li><p>cdef double r, s, t</p></li>
<li><p>cdef double thresh = 5.0e-2</p></li>
<li><p>r = mapped_coord[0]</p></li>
<li><p>s = mapped_coord[1]</p></li>
<li><p>t = mapped_coord[2]</p></li></ul>
<p>-</p>
<ul><li><p>cdef int near_edge_r, near_edge_s, near_edge_t</p></li>
<li><p>near_edge_r = (r < thresh) or (fabs(r + s - 1.0) < thresh)</p></li>
<li><p>near_edge_s = (s < thresh)</p></li>
<li><p>near_edge_t = fabs(fabs(mapped_coord[2]) - 1.0) < thresh</p></li></ul>
<p>-</p>
<ul><li><p># we use ray.instID to pass back whether the ray is near the</p></li>
<li><p># element boundary or not (used to annotate mesh lines)</p></li>
<li><p>if (near_edge_r and near_edge_s):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (near_edge_r and near_edge_t):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (near_edge_s and near_edge_t):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>else:</p></li>
<li><p>ray.instID = -1</p></li></ul>
<p>- + ray.instID = W1Sampler.check_mesh_lines(mapped_coord)</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -222,21 +201,8 @@</p>
<pre>S2Sampler.map_real_to_unit(mapped_coord, patch.vertices, position)
val = S2Sampler.sample_at_unit_point(mapped_coord, patch.field_data)
ray.time = val</pre>
<p>-</p>
<ul><li><p># we use ray.instID to pass back whether the ray is near the</p></li>
<li><p># element boundary or not (used to annotate mesh lines)</p></li>
<li><p>if (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (fabs(fabs(mapped_coord[0]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>elif (fabs(fabs(mapped_coord[1]) - 1.0) < 1e-1 and</p></li>
<li><p>fabs(fabs(mapped_coord[2]) - 1.0) < 1e-1):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>else:</p></li>
<li><p>ray.instID = -1</p></li></ul>
<p>- + ray.instID = S2Sampler.check_mesh_lines(mapped_coord) +</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -267,54 +233,19 @@</p>
<pre>for i in range(4):
element_indices[i] = data.element_indices[elem_id*4+i]</pre>
<ul><li><p>field_data[i] = data.field_data[elem_id*4+i] vertices[i*3] = data.vertices[element_indices[i]].x vertices[i*3 + 1] = data.vertices[element_indices[i]].y vertices[i*3 + 2] = data.vertices[element_indices[i]].z</p></li></ul>
<p>+ for i in range(data.fpe): + field_data[i] = data.field_data[elem_id*data.fpe+i] +</p>
<pre># we use ray.time to pass the value of the field
cdef double mapped_coord[4]
P1Sampler.map_real_to_unit(mapped_coord, vertices, position)</pre>
<ul><li><p>val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)</p></li></ul>
<p>+ if data.fpe == 1: + val = field_data[0] + else: + val = P1Sampler.sample_at_unit_point(mapped_coord, field_data)</p>
<pre>ray.time = val</pre>
<p>-</p>
<ul><li><p>cdef double u, v, w</p></li>
<li><p>cdef double thresh = 2.0e-2</p></li>
<li><p>u = ray.u</p></li>
<li><p>v = ray.v</p></li>
<li><p>w = 1.0 - u - v</p></li>
<li><p># we use ray.instID to pass back whether the ray is near the</p></li>
<li><p># element boundary or not (used to annotate mesh lines)</p></li>
<li><p>if ((u < thresh) or</p></li>
<li><p>(v < thresh) or</p></li>
<li><p>(w < thresh) or</p></li>
<li><p>(fabs(u - 1) < thresh) or</p></li>
<li><p>(fabs(v - 1) < thresh) or</p></li>
<li><p>(fabs(w - 1) < thresh)):</p></li>
<li><p>ray.instID = 1</p></li>
<li><p>else:</p></li>
<li><p>ray.instID = -1</p></li></ul>
<p>- - -@cython.boundscheck(False) -@cython.wraparound(False) -@cython.cdivision(True) -cdef void sample_element(void* userPtr,</p>
<ul><li><p>rtcr.RTCRay& ray) nogil:</p></li>
<li><p>cdef int ray_id, elem_id</p></li>
<li><p>cdef double val</p></li>
<li><p>cdef MeshDataContainer* data</p></li></ul>
<p>-</p>
<ul><li><p>data = <MeshDataContainer*> userPtr</p></li>
<li><p>ray_id = ray.primID</p></li>
<li><p>if ray_id == -1:</p></li>
<li><p>return</p></li></ul>
<p>-</p>
<ul><li><p># ray_id records the id number of the hit according to</p></li>
<li><p># embree, in which the primitives are triangles. Here,</p></li>
<li><p># we convert this to the element id by dividing by the</p></li>
<li><p># number of triangles per element.</p></li>
<li><p>elem_id = ray_id / data.tpe</p></li></ul>
<p>-</p>
<ul><li><p>val = data.field_data[elem_id]</p></li>
<li><p>ray.time = val</p></li></ul>
<p>+ ray.instID = P1Sampler.check_mesh_lines(mapped_coord)</p>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/mesh_traversal.pyx --- a/yt/utilities/lib/mesh_traversal.pyx +++ b/yt/utilities/lib/mesh_traversal.pyx @@ -1,6 +1,6 @@</p>
<pre>"""</pre>
<p>-This file contains the MeshSampler class, which handles casting rays at a -MeshSource using pyembree. +This file contains the MeshSampler classes, which handles casting rays at a +mesh source using either pyembree or the cython ray caster.</p>
<pre>"""</pre>
<p>@@ -25,6 +25,7 @@</p>
<pre> ImageContainer
from cython.parallel import prange, parallel, threadid
from yt.visualization.image_writer import apply_colormap</pre>
<p>+from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray</p>
<pre>rtc.rtcInit(NULL)
rtc.rtcSetErrorFunction(error_printer)</pre>
<p>@@ -42,11 +43,8 @@</p>
<pre> def __dealloc__(self):
rtcs.rtcDeleteScene(self.scene_i)
</pre>
<p>-cdef class MeshSampler(ImageSampler):</p>
<ul><li><p>cdef public object image_used</p></li>
<li><p>cdef public object mesh_lines</p></li>
<li><p>cdef public object zbuffer</p></li></ul>
<p>+cdef class EmbreeMeshSampler(ImageSampler):</p>
<pre>@cython.boundscheck(False)
@cython.wraparound(False)</pre>
<p>@@ -70,15 +68,10 @@</p>
<pre> cdef np.float64_t width[3]
for i in range(3):
width[i] = self.width[i]</pre>
<ul><li><p>cdef np.ndarray[np.float64_t, ndim=1] data cdef np.ndarray[np.int64_t, ndim=1] used nx = im.nv[0] ny = im.nv[1] size = nx * ny</p></li>
<li><p>used = np.empty(size, dtype="int64")</p></li>
<li><p>mesh = np.empty(size, dtype="int64")</p></li>
<li><p>data = np.empty(size, dtype="float64")</p></li>
<li><p>zbuffer = np.empty(size, dtype="float64") cdef rtcr.RTCRay ray v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t)) v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))</p></li></ul>
<p>@@ -99,13 +92,65 @@</p>
<pre>ray.time = 0
ray.Ng[0] = 1e37 # we use this to track the hit distance
rtcs.rtcIntersect(scene.scene_i, ray)</pre>
<ul><li><p>data[j] = ray.time</p></li>
<li><p>used[j] = ray.primID</p></li>
<li><p>mesh[j] = ray.instID</p></li>
<li><p>zbuffer[j] = ray.tfar</p></li>
<li><p>self.aimage = data</p></li>
<li><p>self.image_used = used</p></li>
<li><p>self.mesh_lines = mesh</p></li>
<li><p>self.zbuffer = zbuffer</p></li></ul>
<p>+ im.image[vi, vj, 0] = ray.time + im.image_used[vi, vj] = ray.primID + im.mesh_lines[vi, vj] = ray.instID + im.zbuffer[vi, vj] = ray.tfar</p>
<pre>free(v_pos)
free(v_dir)</pre>
<p>+ + +cdef class BVHMeshSampler(ImageSampler): + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def __call__(self, + BVH bvh, + int num_threads = 0): + ''' + + This function is supposed to cast the rays and return the + image. + + ''' + + cdef int vi, vj, i, j + cdef ImageContainer *im = self.image + cdef np.float64_t *v_pos + cdef np.float64_t *v_dir + cdef np.int64_t nx, ny, size + cdef np.float64_t width[3] + for i in range(3): + width[i] = self.width[i] + cdef np.ndarray[np.float64_t, ndim=1] data + cdef np.ndarray[np.int64_t, ndim=1] used + nx = im.nv[0] + ny = im.nv[1] + size = nx * ny + cdef Ray* ray + with nogil, parallel(): + ray = <Ray *> malloc(sizeof(Ray)) + v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t)) + v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t)) + for j in prange(size): + vj = j % ny + vi = (j - vj) / ny + vj = vj + self.vector_function(im, vi, vj, width, v_dir, v_pos) + for i in range(3): + ray.origin[i] = v_pos[i] + ray.direction[i] = v_dir[i] + ray.inv_dir[i] = 1.0 / v_dir[i] + ray.t_far = 1e37 + ray.t_near = 0.0 + ray.data_val = 0 + ray.elem_id = -1 + bvh.intersect(ray) + im.image[vi, vj, 0] = ray.data_val + im.image_used[vi, vj] = ray.elem_id + im.mesh_lines[vi, vj] = ray.near_boundary + im.zbuffer[vi, vj] = ray.t_far + free(v_pos) + free(v_dir) + free(ray)</p>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/utilities/lib/pixelization_routines.pyx --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -20,6 +20,7 @@</p>
<pre>from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, imax, fabs
from yt.utilities.exceptions import YTPixelizeError, \
YTElementTypeNotRecognized</pre>
<p>+from libc.stdlib cimport malloc, free</p>
<pre>from vec3_ops cimport dot, cross, subtract
from yt.utilities.lib.element_mappings cimport \
ElementSampler, \</pre>
<p>@@ -30,9 +31,6 @@</p>
<pre> Q1Sampler2D, \
W1Sampler3D
</pre>
<p>-cdef extern from “stdlib.h”:</p>
<ul><li><p># NOTE that size_t might not be int</p></li>
<li><p>void *alloca(int)</p></li></ul>
<pre>cdef extern from "pixelization_constants.h":
enum:</pre>
<p>@@ -573,8 +571,7 @@</p>
<pre>cdef int nvertices = conn.shape[1]
cdef int ndim = coords.shape[1]
cdef int num_field_vals = field.shape[1]</pre>
<ul><li><p>cdef double* mapped_coord</p></li>
<li><p>cdef int num_mapped_coords</p></li></ul>
<p>+ cdef double[4] mapped_coord</p>
<pre> cdef ElementSampler sampler
# Pick the right sampler and allocate storage for the mapped coordinate</pre>
<p>@@ -617,10 +614,8 @@</p>
<pre> raise RuntimeError
# allocate temporary storage</pre>
<ul><li><p>num_mapped_coords = sampler.num_mapped_coords</p></li>
<li><p>mapped_coord = <double*> alloca(sizeof(double) * num_mapped_coords)</p></li>
<li><p>vertices = <np.float64_t *> alloca(ndim * sizeof(np.float64_t) * nvertices)</p></li>
<li><p>field_vals = <np.float64_t *> alloca(sizeof(np.float64_t) * num_field_vals)</p></li></ul>
<p>+ vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices) + field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)</p>
<pre># fill the image bounds and pixel size informaton here
for i in range(ndim):</pre>
<p>@@ -691,4 +686,7 @@</p>
<pre> img[pi, pj, pk] = 1.0
elif sampler.check_near_edge(mapped_coord, thresh, yax):
img[pi, pj, pk] = 1.0</pre>
<p>+ + free(vertices) + free(field_vals)</p>
<pre>return img</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/visualization/volume_rendering/render_source.py --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -24,6 +24,7 @@</p>
<pre>from .utils import new_volume_render_sampler, data_source_or_all, \
get_corners, new_projection_sampler, new_mesh_sampler, \
new_interpolated_projection_sampler</pre>
<p>+from yt.utilities.lib.bounding_volume_hierarchy import BVH</p>
<pre>from yt.visualization.image_writer import apply_colormap
from yt.data_objects.image_array import ImageArray
from .zbuffer_array import ZBuffer</pre>
<p>@@ -353,8 +354,9 @@</p>
<pre>self.data_source = data_source_or_all(data_source)
field = self.data_source._determine_fields(field)[0]
self.field = field</pre>
<ul><li><p>self.mesh = None</p></li></ul>
<p>+ self.volume = None</p>
<pre>self.current_image = None</pre>
<p>+ self.engine = 'embree'</p>
<pre># default color map
self._cmap = ytcfg.get("yt", "default_colormap")</pre>
<p>@@ -369,8 +371,14 @@</p>
<pre> assert(self.field is not None)
assert(self.data_source is not None)
</pre>
<ul><li><p>self.scene = mesh_traversal.YTEmbreeScene()</p></li>
<li><p>self.build_mesh()</p></li></ul>
<p>+ if self.engine == 'embree': + self.volume = mesh_traversal.YTEmbreeScene() + self.build_volume_embree() + elif self.engine == 'bvh': + self.build_volume_bvh() + else: + raise NotImplementedError("Invalid ray-tracing engine selected. " + "Choices are 'embree' and 'bvh'.")</p>
<pre>def cmap():
'''</pre>
<p>@@ -410,12 +418,60 @@</p>
<pre>def _validate(self):
"""Make sure that all dependencies have been met"""
if self.data_source is None:</pre>
<ul><li><p>raise RuntimeError("Data source not initialized")</p></li></ul>
<p>+ raise RuntimeError("Data source not initialized.")</p>
<ul><li><p>if self.mesh is None:</p></li>
<li><p>raise RuntimeError("Mesh not initialized")</p></li></ul>
<p>+ if self.volume is None: + raise RuntimeError("Volume not initialized.")</p>
<ul><li><p>def build_mesh(self):</p></li></ul>
<p>+ def build_volume_embree(self): + """ + + This constructs the mesh that will be ray-traced by pyembree. + + """ + ftype, fname = self.field + mesh_id = int(ftype[-1]) - 1 + index = self.data_source.ds.index + offset = index.meshes[mesh_id]._index_offset + field_data = self.data_source[self.field].d # strip units + + vertices = index.meshes[mesh_id].connectivity_coords + indices = index.meshes[mesh_id].connectivity_indices - offset + + # if this is an element field, promote to 2D here + if len(field_data.shape) == 1: + field_data = np.expand_dims(field_data, 1) + + # Here, we decide whether to render based on high-order or + # low-order geometry. Right now, high-order geometry is only + # implemented for 20-point hexes. + if indices.shape[1] == 20: + self.mesh = mesh_construction.QuadraticElementMesh(self.volume, + vertices, + indices, + field_data) + else: + # if this is another type of higher-order element, we demote + # to 1st order here, for now. + if indices.shape[1] == 27: + # hexahedral + mylog.warning("27-node hexes not yet supported, " + + "dropping to 1st order.") + field_data = field_data[:, 0:8] + indices = indices[:, 0:8] + elif indices.shape[1] == 10: + # tetrahedral + mylog.warning("10-node tetrahedral elements not yet supported, " + + "dropping to 1st order.") + field_data = field_data[:, 0:4] + indices = indices[:, 0:4] + + self.mesh = mesh_construction.LinearElementMesh(self.volume, + vertices, + indices, + field_data) + + def build_volume_bvh(self):</p>
<pre> """
This constructs the mesh that will be ray-traced.</pre>
<p>@@ -436,32 +492,27 @@</p>
<pre># Here, we decide whether to render based on high-order or
# low-order geometry. Right now, high-order geometry is only</pre>
<ul><li><p># implemented for 20-point hexes.</p></li></ul>
<p>+ # supported by Embree.</p>
<pre>if indices.shape[1] == 20:</pre>
<ul><li><p>self.mesh = mesh_construction.QuadraticElementMesh(self.scene,</p></li>
<li><p>vertices,</p></li>
<li><p>indices,</p></li>
<li><p>field_data)</p></li>
<li><p>else:</p></li>
<li><p># if this is another type of higher-order element, we demote</p></li>
<li><p># to 1st order here, for now.</p></li>
<li><p>if indices.shape[1] == 27:</p></li>
<li><p># hexahedral</p></li>
<li><p>mylog.warning("27-node hexes not yet supported, " +</p></li>
<li><p>"dropping to 1st order.")</p></li>
<li><p>field_data = field_data[:, 0:8]</p></li>
<li><p>indices = indices[:, 0:8]</p></li>
<li><p>elif indices.shape[1] == 10:</p></li>
<li><p># tetrahedral</p></li>
<li><p>mylog.warning("10-node tetrahedral elements not yet supported, " +</p></li>
<li><p>"dropping to 1st order.")</p></li>
<li><p>field_data = field_data[:, 0:4]</p></li>
<li><p>indices = indices[:, 0:4]</p></li></ul>
<p>+ # hexahedral + mylog.warning("20-node hexes not yet supported, " + + "dropping to 1st order.") + field_data = field_data[:, 0:8] + indices = indices[:, 0:8] + elif indices.shape[1] == 27: + # hexahedral + mylog.warning("27-node hexes not yet supported, " + + "dropping to 1st order.") + field_data = field_data[:, 0:8] + indices = indices[:, 0:8] + elif indices.shape[1] == 10: + # tetrahedral + mylog.warning("10-node tetrahedral elements not yet supported, " + + "dropping to 1st order.") + field_data = field_data[:, 0:4] + indices = indices[:, 0:4]</p>
<ul><li><p>self.mesh = mesh_construction.LinearElementMesh(self.scene,</p></li>
<li><p>vertices,</p></li>
<li><p>indices,</p></li>
<li><p>field_data)</p></li></ul>
<p>+ self.volume = BVH(vertices, indices, field_data)</p>
<pre>def render(self, camera, zbuffer=None):
"""Renders an image using the provided camera</pre>
<p>@@ -495,18 +546,17 @@</p>
<pre> zbuffer.z.reshape(shape[:2]))
self.zbuffer = zbuffer
</pre>
<ul><li><p>self.sampler = new_mesh_sampler(camera, self)</p></li></ul>
<p>+ self.sampler = new_mesh_sampler(camera, self, engine=self.engine)</p>
<pre>mylog.debug("Casting rays")</pre>
<ul><li><p>self.sampler(self.scene)</p></li></ul>
<p>+ self.sampler(self.volume)</p>
<pre> mylog.debug("Done casting rays")
self.finalize_image(camera)</pre>
<ul><li><p>self.data = self.sampler.aimage self.current_image = self.apply_colormap()</p>
<p>zbuffer += ZBuffer(self.current_image.astype('float64'),</p></li>
<li><p>self.sampler.zbuffer)</p></li></ul>
<p>+ self.sampler.azbuffer)</p>
<pre>zbuffer.rgba = ImageArray(zbuffer.rgba)
self.zbuffer = zbuffer
self.current_image = self.zbuffer.rgba</pre>
<p>@@ -523,16 +573,13 @@</p>
<pre># reshape data
Nx = camera.resolution[0]
Ny = camera.resolution[1]</pre>
<ul><li><p>sam.aimage = sam.aimage.reshape(Nx, Ny)</p></li>
<li><p>sam.image_used = sam.image_used.reshape(Nx, Ny)</p></li>
<li><p>sam.mesh_lines = sam.mesh_lines.reshape(Nx, Ny)</p></li>
<li><p>sam.zbuffer = sam.zbuffer.reshape(Nx, Ny)</p></li></ul>
<p>+ self.data = sam.aimage[:,:,0].reshape(Nx, Ny)</p>
<pre># rotate</pre>
<ul><li><p>sam.aimage = np.rot90(sam.aimage, k=2)</p></li>
<li><p>sam.image_used = np.rot90(sam.image_used, k=2)</p></li>
<li><p>sam.mesh_lines = np.rot90(sam.mesh_lines, k=2)</p></li>
<li><p>sam.zbuffer = np.rot90(sam.zbuffer, k=2)</p></li></ul>
<p>+ self.data = np.rot90(self.data, k=2) + sam.aimage_used = np.rot90(sam.aimage_used, k=2) + sam.amesh_lines = np.rot90(sam.amesh_lines, k=2) + sam.azbuffer = np.rot90(sam.azbuffer, k=2)</p>
<pre>def annotate_mesh_lines(self, color=None, alpha=1.0):
r"""</pre>
<p>@@ -558,7 +605,7 @@</p>
<pre> if color is None:
color = np.array([0, 0, 0, alpha])
</pre>
<ul><li><p>locs = [self.sampler.mesh_lines == 1]</p></li></ul>
<p>+ locs = [self.sampler.amesh_lines == 1]</p>
<pre>self.current_image[:, :, 0][locs] = color[0]
self.current_image[:, :, 1][locs] = color[1]</pre>
<p>@@ -592,7 +639,7 @@</p>
<pre>color_bounds=self._color_bounds,
cmap_name=self._cmap)/255.
alpha = image[:, :, 3]</pre>
<ul><li><p>alpha[self.sampler.image_used == -1] = 0.0</p></li></ul>
<p>+ alpha[self.sampler.aimage_used == -1] = 0.0</p>
<pre> image[:, :, 3] = alpha
return image
</pre>
<p>diff -r 5d118c06fa7eecc494eff5d231ebf3c47d9d4ab9 -r 248ccc5ef8b46d83682344d249c8de1c964759ab yt/visualization/volume_rendering/utils.py --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -14,7 +14,7 @@</p>
<pre> return data_source
</pre>
<p>-def new_mesh_sampler(camera, render_source): +def new_mesh_sampler(camera, render_source, engine):</p>
<pre>params = ensure_code_unit_params(camera._get_sampler_params(render_source))
args = (
np.atleast_3d(params['vp_pos']),</pre>
<p>@@ -27,7 +27,10 @@</p>
<pre> params['width'],
)
kwargs = {'lens_type': params['lens_type']}</pre>
<ul><li><p>sampler = mesh_traversal.MeshSampler(*args, **kwargs)</p></li></ul>
<p>+ if engine == 'embree': + sampler = mesh_traversal.EmbreeMeshSampler(*args, **kwargs) + elif engine == 'bvh': + sampler = mesh_traversal.BVHMeshSampler(*args, **kwargs)</p>
<pre>return sampler</pre>
<p>Repository URL: <a href="https://bitbucket.org/yt_analysis/yt/">https://bitbucket.org/yt_analysis/yt/</a></p>
<p>—</p>
<p>This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.</p>
<img src="http://link.bitbucket.org/wf/open?upn=ll4ctv0L-2ByeRZFC1LslHcg6aJmnQ70VruLbmeLQr27CN1VoQR-2Fngck51NnrYmMvfw7LYRiF77PIW8uYwXM6Cri1WwjKSw5THUGy7Tu6Kz6A-2Fl1IiuQKmozdU2bKgN2jD-2FXjWLZD-2Fr8QyvEr021yCM6rCHRlJ-2BesKiqsRK9lLrLKNLdsR1bntmZVQr1NVLWokeEkOtZ3OB-2BOZ9g-2BlHuVftnIBy1zT7lHRogZ3l5ofXd8-3D" alt="" width="1" height="1" border="0" style="height:1px !important;width:1px !important;border-width:0 !important;margin-top:0 !important;margin-bottom:0 !important;margin-right:0 !important;margin-left:0 !important;padding-top:0 !important;padding-bottom:0 !important;padding-right:0 !important;padding-left:0 !important;"/>
</body></html>