[yt-svn] commit/yt: 148 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Aug 27 09:33:40 PDT 2015


148 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2409e3708f68/
Changeset:   2409e3708f68
Branch:      yt
User:        MatthewTurk
Date:        2015-04-24 09:53:41+00:00
Summary:     Implement load_unstructured_mesh.
Affected #:  1 file

diff -r 69eac52e4aa5f1593a739c70e23cb07e09cd4b8a -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1618,3 +1618,132 @@
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
+def load_unstructured_mesh(data, connectivity, coordinates,
+                         length_unit = None, bbox=None, sim_time=0.0,
+                         mass_unit = None, time_unit = None,
+                         velocity_unit = None, magnetic_unit = None,
+                         periodicity=(False, False, False),
+                         geometry = "cartesian"):
+    r"""Load an unstructured mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow unstructured mesh data to be loaded directly into
+    yt and analyzed as would any others.  Not all functionality for
+    visualization will be present, and some analysis functions may not yet have
+    been implemented.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names. 
+        Note that the data in the numpy arrays should define the cell-averaged
+    value of the quantity in the mesh cells, although this will change
+        with subsequent generations of unstructured mesh support.  If a dict is
+        supplied, this will be assumed to be the only mesh.
+    connectivity : list of array_like or array_like
+        This is the connectivity array for the meshes; this should either be a
+        list where each element in the list is a numpy array or a single numpy
+        array.  Each array in the list can have different connectivity length
+        and should be of shape (N,M) where N is the number of elements and M is
+        the connectivity length.
+    coordinates : array_like
+        This should be of size (L,3) where L is the number of vertices
+        indicated in the connectivity matrix.
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of the length unit.
+    sim_time : float, optional
+        The simulation time in seconds
+    mass_unit : string
+        Unit to use for masses.  Defaults to unitless.
+    time_unit : string
+        Unit to use for times.  Defaults to unitless.
+    velocity_unit : string
+        Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string
+        Unit to use for magnetic fields. Defaults to unitless.
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+    geometry : string or tuple
+        "cartesian", "cylindrical", "polar", "spherical", "geographic" or
+        "spectral_cube".  Optionally, a tuple can be provided to specify the
+        axis ordering -- for instance, to specify that the axis ordering should
+        be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
+        can be done for other coordinates, for instance: 
+        ("spherical", ("theta", "phi", "r")).
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([ [coords[:,i].min(), coords[:,i].max()]
+                          for i in range(3)], "float64")
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    field_units, data = unitify_data(data)
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    # Simple check for axis length correctness
+    if 0 and len(data) > 0:
+        fn = list(sorted(data))[0]
+        array_values = data[fn]
+        if array_values.size != connectivity.shape[0]:
+            mylog.error("Dimensions of array must be one fewer than the" +
+                        " coordinate set.")
+            raise RuntimeError
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    if length_unit is None:
+        length_unit = 'code_length'
+    if mass_unit is None:
+        mass_unit = 'code_mass'
+    if time_unit is None:
+        time_unit = 'code_time'
+    if velocity_unit is None:
+        velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        field_units,
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "UnstructuredMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    sds = StreamUnstructuredMeshDataset(handler, geometry = geometry)
+
+    return sds
+


https://bitbucket.org/yt_analysis/yt/commits/33a8ea060018/
Changeset:   33a8ea060018
Branch:      yt
User:        MatthewTurk
Date:        2015-04-24 10:02:49+00:00
Summary:     Merging with hex_plot PR
Affected #:  7 files

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -133,7 +133,7 @@
 from yt.frontends.stream.api import \
     load_uniform_grid, load_amr_grids, \
     load_particles, load_hexahedral_mesh, load_octree, \
-    hexahedral_connectivity
+    hexahedral_connectivity, load_unstructured_mesh
 
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -24,7 +24,8 @@
       load_hexahedral_mesh, \
       hexahedral_connectivity, \
       load_octree, \
-      refine_amr
+      refine_amr, \
+      load_unstructured_mesh
 
 from .fields import \
       StreamFieldInfo

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1681,7 +1681,8 @@
     domain_dimensions = np.ones(3, "int32") * 2
     nprocs = 1
     if bbox is None:
-        bbox = np.array([ [coords[:,i].min(), coords[:,i].max()]
+        bbox = np.array([ [coordinates[:,i].min(),
+                           coordinates[:,i].max()]
                           for i in range(3)], "float64")
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/utilities/lib/pixelization_constants.c
--- /dev/null
+++ b/yt/utilities/lib/pixelization_constants.c
@@ -0,0 +1,137 @@
+/*******************************************************************************
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+*******************************************************************************/
+//
+// Some Cython versions don't like module-level constants, so we'll put them
+// here.
+//
+
+#include "pixelization_constants.h"
+
+/*
+ Six faces, two vectors for each, two indices for each vector.  The function
+ below unrolls how these are defined.  Some info can be found at:
+ http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%20Element%20Modeling/elem_lib_topics.16.8.html
+ This is [6][2][2] in shape.
+ Here are the faces and their four edges each:
+ F1    1   2   3   4
+ F2    5   6   7   8
+ F3    1   10  5   9
+ F4    2   11  6   10
+ F5    3   12  7   11
+ F6    4   9   8   12
+
+ The edges are then defined by:
+ E1    1 2
+ E2    2 6
+ E3    6 5
+ E4    5 1
+ E5    4 3
+ E6    3 7
+ E7    7 8
+ E8    8 4
+ E9    1 4
+ E10   2 3
+ E11   6 7
+ E12   5 8
+ Now we unroll these here ...
+ */
+const npy_uint8 hex_face_defs[MAX_NUM_FACES][2][2] = {
+   /* Note that the first of each pair is the shared vertex */
+   {{1, 0}, {1, 5}},
+   {{2, 3}, {2, 6}},
+   {{1, 0}, {1, 2}},
+   {{5, 1}, {5, 6}},
+   {{4, 5}, {4, 7}},
+   {{0, 4}, {0, 3}},
+
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}}
+};
+
+/* http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%2520Element%2520Modeling/elem_lib_topics.16.6.html
+ 
+  F1    1   2   3
+  F2    1   5   4
+  F3    2   6   5
+  F4    3   4   6
+ 
+  The edges are then defined by:
+  E1    1   2
+  E2    2   3
+  E3    3   1
+  E4    1   4
+  E5    2   4
+  E6    3   4
+*/
+
+const npy_uint8 tetra_face_defs[MAX_NUM_FACES][2][2] = {
+   {{1, 0}, {1, 2}},
+   {{1, 0}, {1, 3}},
+   {{2, 1}, {2, 3}},
+   {{3, 0}, {3, 2}},
+
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}}
+};
+
+/* http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%2520Element%2520Modeling/elem_lib_topics.16.7.html
+  F1    1   2   3   *
+  F2    4   5   6   *
+  F3    1   8   4   7
+  F4    2   9   5   8
+  F5    3   7   6   9
+ 
+  The edges are then defined by:
+  E1    2   1
+  E2    1   3
+  E3    3   2
+  E4    5   4
+  E5    4   6
+  E6    6   5
+  E7    2   5
+  E8    1   4
+  E9    3   6
+*/
+
+const npy_uint8 wedge_face_defs[MAX_NUM_FACES][2][2] = {
+   {{0, 1}, {0, 2}},
+   {{3, 4}, {3, 5}},
+   {{0, 1}, {0, 3}},
+   {{2, 0}, {2, 5}},
+   {{1, 2}, {1, 4}},
+
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}},
+   {{-1, -1}, {-1 -1}}
+};

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/utilities/lib/pixelization_constants.h
--- /dev/null
+++ b/yt/utilities/lib/pixelization_constants.h
@@ -0,0 +1,33 @@
+/*******************************************************************************
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+*******************************************************************************/
+//
+// Some Cython versions don't like module-level constants, so we'll put them
+// here.
+//
+
+#include "Python.h"
+
+#include <stdio.h>
+#include <math.h>
+#include <signal.h>
+#include <ctype.h>
+
+#include "numpy/ndarrayobject.h"
+
+#define MAX_NUM_FACES 16
+
+#define HEX_IND     0
+#define HEX_NF      6
+#define TETRA_IND   1
+#define TETRA_NF    4
+#define WEDGE_IND   2
+#define WEDGE_NF    5
+
+extern const npy_uint8 hex_face_defs[MAX_NUM_FACES][2][2];
+extern const npy_uint8 tetra_face_defs[MAX_NUM_FACES][2][2];
+extern const npy_uint8 wedge_face_defs[MAX_NUM_FACES][2][2];

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -22,6 +22,21 @@
     # NOTE that size_t might not be int
     void *alloca(int)
 
+cdef extern from "pixelization_constants.h":
+    enum:
+        MAX_NUM_FACES
+
+    int HEX_IND
+    int HEX_NF
+    np.uint8_t hex_face_defs[MAX_NUM_FACES][2][2]
+
+    int TETRA_IND
+    int TETRA_NF
+    np.uint8_t tetra_face_defs[MAX_NUM_FACES][2][2]
+
+    int WEDGE_IND
+    int WEDGE_NF
+    np.uint8_t wedge_face_defs[MAX_NUM_FACES][2][2]
 
 @cython.cdivision(True)
 @cython.boundscheck(False)
@@ -214,103 +229,6 @@
                 img[i, j] = field[fi]
     return img
 
-# Six faces, two vectors for each, two indices for each vector.  The function
-# below unrolls how these are defined.  Some info can be found at:
-# http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%20Element%20Modeling/elem_lib_topics.16.8.html
-# This is [6][2][2] in shape.
-# Here are the faces and their four edges each:
-# F1    1   2   3   4
-# F2    5   6   7   8
-# F3    1   10  5   9
-# F4    2   11  6   10
-# F5    3   12  7   11
-# F6    4   9   8   12
-#
-# The edges are then defined by:
-# E1    1 2
-# E2    2 6
-# E3    6 5
-# E4    5 1
-# E5    4 3
-# E6    3 7
-# E7    7 8
-# E8    8 4
-# E9    1 4
-# E10   2 3
-# E11   6 7
-# E12   5 8
-# Now we unroll these here ...
-cdef np.uint8_t ***hex_face_defs = [
-   # Note that the first of each pair is the shared vertex
-   [[1, 0], [1, 5]],
-   [[2, 3], [2, 6]],
-   [[1, 0], [1, 2]],
-   [[5, 1], [5, 6]],
-   [[4, 5], [4, 7]],
-   [[0, 4], [0, 3]],
-]
-
-# http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%2520Element%2520Modeling/elem_lib_topics.16.6.html
-#
-# F1    1   2   3
-# F2    1   5   4
-# F3    2   6   5
-# F4    3   4   6
-#
-# The edges are then defined by:
-# E1    1   2
-# E2    2   3
-# E3    3   1
-# E4    1   4
-# E5    2   4
-# E6    3   4
-cdef np.uint8_t ***tetra_face_defs = [
-   # Like above, first is shared vertex
-   [[1, 0], [1, 2]],
-   [[1, 0], [1, 3]],
-   [[2, 1], [2, 3]],
-   [[3, 0], [3, 2]]
-]
-
-# http://www.mscsoftware.com/training_videos/patran/Reverb_help/index.html#page/Finite%2520Element%2520Modeling/elem_lib_topics.16.7.html
-# F1    1   2   3   *
-# F2    4   5   6   *
-# F3    1   8   4   7
-# F4    2   9   5   8
-# F5    3   7   6   9
-#
-# The edges are then defined by:
-# E1    2   1
-# E2    1   3
-# E3    3   2
-# E4    5   4
-# E5    4   6
-# E6    6   5
-# E7    2   5
-# E8    1   4
-# E9    3   6
-cdef np.uint8_t ***wedge_face_defs = [
-   # As always, first is shared vertex
-   [[0, 1], [0, 2]],
-   [[3, 4], [3, 5]],
-   [[0, 1], [0, 3]],
-   [[2, 0], [2, 5]],
-   [[1, 2], [1, 4]],
-]
-
-cdef np.uint8_t ****face_defs = [
-  hex_face_defs,
-  tetra_face_defs,
-  wedge_face_defs
-]
-
-DEF HEX_IND = 0
-DEF HEX_NF = 6
-DEF TETRA_IND = 1
-DEF TETRA_NF = 4
-DEF WEDGE_IND = 2
-DEF WEDGE_NF = 5
-
 # This function accepts a set of vertices (for a polyhedron) that are
 # assumed to be in order for bottom, then top, in the same clockwise or
 # counterclockwise direction (i.e., like points 1-8 in Figure 4 of the ExodusII
@@ -329,15 +247,15 @@
     # So, let's compute these vectors.  See above where these are written out
     # for ease of use.
     cdef np.float64_t vec1[3], vec2[3], cp_vec[3], dp, npoint[3]
-    cdef np.uint8_t ***faces, nf
+    cdef np.uint8_t faces[MAX_NUM_FACES][2][2], nf
     if nvertices == 4:
-        faces = face_defs[TETRA_IND]
+        faces = tetra_face_defs
         nf = TETRA_NF
     elif nvertices == 6:
-        faces = face_defs[WEDGE_IND]
+        faces = wedge_face_defs
         nf = WEDGE_NF
     elif nvertices == 8:
-        faces = face_defs[HEX_IND]
+        faces = hex_face_defs
         nf = HEX_NF
     else:
         return -1

diff -r 2409e3708f687900cd3ee962cc64e5d2e53b1f8a -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -100,8 +100,11 @@
                 ["yt/utilities/lib/misc_utilities.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("pixelization_routines", 
-                ["yt/utilities/lib/pixelization_routines.pyx"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+                ["yt/utilities/lib/pixelization_routines.pyx",
+                 "yt/utilities/lib/pixelization_constants.c"],
+               include_dirs=["yt/utilities/lib/"],
+                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+                                  "yt/utilities/lib/pixelization_constants.h"])
     config.add_extension("Octree", 
                 ["yt/utilities/lib/Octree.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/8fe1af70a640/
Changeset:   8fe1af70a640
Branch:      yt
User:        MatthewTurk
Date:        2015-04-29 19:21:43+00:00
Summary:     Can now load unstructured mesh.
Affected #:  1 file

diff -r 33a8ea060018f8b368d6ea7463d48b6379c49e02 -r 8fe1af70a64043cb9a978161c8035d0df1135617 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1587,8 +1587,8 @@
     _index_offset = 0
 
     def __init__(self, *args, **kwargs):
+        self._connectivity_length = self.connectivity_indices.shape[1]
         super(StreamUnstructuredMesh, self).__init__(*args, **kwargs)
-        self._connectivity_length = self.connectivity_indices.shape[1]
 
 
 class StreamUnstructuredIndex(UnstructuredIndex):
@@ -1602,7 +1602,7 @@
         connec = ensure_list(self.stream_handler.fields.pop("connectivity"))
         self.meshes = [StreamUnstructuredMesh(
           i, self.index_filename, c1, c2, self)
-          for i, (c1, c2) in enumerate(zip(coords, connec))]
+          for i, (c1, c2) in enumerate(zip(connec, coords))]
 
     def _setup_data_io(self):
         if self.stream_handler.io is not None:
@@ -1614,7 +1614,7 @@
         self.field_list = list(set(self.stream_handler.get_fields()))
 
 class StreamUnstructuredMeshDataset(StreamDataset):
-    _index_class = StreamUnstructuredMesh
+    _index_class = StreamUnstructuredIndex
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
@@ -1680,6 +1680,8 @@
 
     domain_dimensions = np.ones(3, "int32") * 2
     nprocs = 1
+    data = ensure_list(data)
+    connectivity = ensure_list(connectivity)
     if bbox is None:
         bbox = np.array([ [coordinates[:,i].min(),
                            coordinates[:,i].max()]
@@ -1688,11 +1690,17 @@
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
-    field_units, data = unitify_data(data)
+    field_units = {}
+    new_data = []
+    particle_types = {}
+    for d in data:
+        _f_unit, _data = unitify_data(d)
+        field_units.update(_f_unit)
+        new_data.append(_data)
+        particle_types.update(set_particle_types(d))
+    data = new_data
     sfh = StreamDictFieldHandler()
     
-    particle_types = set_particle_types(data)
-    
     sfh.update({'connectivity': connectivity,
                 'coordinates': coordinates,
                 0: data})


https://bitbucket.org/yt_analysis/yt/commits/98b63de4dc33/
Changeset:   98b63de4dc33
Branch:      yt
User:        MatthewTurk
Date:        2015-04-30 09:54:27+00:00
Summary:     Fix some lingering issues with unstructured, refactor fcoords.
Affected #:  3 files

diff -r 8fe1af70a64043cb9a978161c8035d0df1135617 -r 98b63de4dc33bcfd4a33301df86aceb479535d43 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -46,7 +46,10 @@
         # This is where we set up the connectivity information
         self.connectivity_indices = connectivity_indices
         if connectivity_indices.shape[1] != self._connectivity_length:
-            raise RuntimeError
+            if self._connectivity_length == -1:
+                self._connectivity_length = connectivity_indices.shape[1]
+            else:
+                raise RuntimeError
         self.connectivity_coords = connectivity_coords
         self.ds = index.dataset
         self._index = index
@@ -90,8 +93,14 @@
     def _generate_container_field(self, field):
         raise NotImplementedError
 
-    def select_fcoords(self, dobj):
-        raise NotImplementedError
+    def select_fcoords(self, dobj = None):
+        # This computes centroids!
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,3), dtype='float64')
+        centers = fill_fcoords(self.connectivity_coords,
+                               self.connectivity_indices,
+                               self._index_offset)
+        return centers[mask, :]
 
     def select_fwidth(self, dobj):
         raise NotImplementedError
@@ -161,14 +170,6 @@
         elif field == "dz":
             return self._current_chunk.fwidth[:,2]
 
-    def select_fcoords(self, dobj = None):
-        mask = self._get_selector_mask(dobj.selector)
-        if mask is None: return np.empty((0,3), dtype='float64')
-        centers = fill_fcoords(self.connectivity_coords,
-                               self.connectivity_indices,
-                               self._index_offset)
-        return centers[mask, :]
-
     def select_fwidth(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')

diff -r 8fe1af70a64043cb9a978161c8035d0df1135617 -r 98b63de4dc33bcfd4a33301df86aceb479535d43 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1587,8 +1587,8 @@
     _index_offset = 0
 
     def __init__(self, *args, **kwargs):
+        super(StreamUnstructuredMesh, self).__init__(*args, **kwargs)
         self._connectivity_length = self.connectivity_indices.shape[1]
-        super(StreamUnstructuredMesh, self).__init__(*args, **kwargs)
 
 
 class StreamUnstructuredIndex(UnstructuredIndex):
@@ -1691,19 +1691,16 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     field_units = {}
-    new_data = []
     particle_types = {}
-    for d in data:
-        _f_unit, _data = unitify_data(d)
-        field_units.update(_f_unit)
-        new_data.append(_data)
-        particle_types.update(set_particle_types(d))
-    data = new_data
     sfh = StreamDictFieldHandler()
     
     sfh.update({'connectivity': connectivity,
-                'coordinates': coordinates,
-                0: data})
+                'coordinates': coordinates})
+    for i, d in enumerate(data):
+        _f_unit, _data = unitify_data(d)
+        field_units.update(_f_unit)
+        sfh[i] = _data
+        particle_types.update(set_particle_types(d))
     # Simple check for axis length correctness
     if 0 and len(data) > 0:
         fn = list(sorted(data))[0]

diff -r 8fe1af70a64043cb9a978161c8035d0df1135617 -r 98b63de4dc33bcfd4a33301df86aceb479535d43 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -255,3 +255,10 @@
                         subset.domain_id - subset._domain_offset][field]
                 subset.fill(field_vals, rv, selector, ind)
         return rv
+
+class IOHandlerStreamUnstructured(BaseIOHandler):
+    _dataset_type = "stream_unstructured"
+
+    def __init__(self, ds):
+        self.fields = ds.stream_handler.fields
+        super(IOHandlerStreamUnstructured, self).__init__(ds)


https://bitbucket.org/yt_analysis/yt/commits/b7bb33f4d7a3/
Changeset:   b7bb33f4d7a3
Branch:      yt
User:        MatthewTurk
Date:        2015-04-30 10:00:42+00:00
Summary:     Use bounding box to compute mesh cell mask.
Affected #:  1 file

diff -r 98b63de4dc33bcfd4a33301df86aceb479535d43 -r b7bb33f4d7a3ed0fae6b75acb4bc97288df578b1 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -152,6 +152,18 @@
         mask = selector.select_points(x,y,z, 0.0)
         return mask
 
+    def _get_selector_mask(self, selector):
+        if hash(selector) == self._last_selector_id:
+            mask = self._last_mask
+        else:
+            self._last_mask = mask = selector.fill_mesh_cell_mask(self)
+            self._last_selector_id = hash(selector)
+            if mask is None:
+                self._last_count = 0
+            else:
+                self._last_count = mask.sum()
+        return mask
+
 class SemiStructuredMesh(UnstructuredMesh):
     _connectivity_length = 8
     _type_name = 'semi_structured_mesh'
@@ -190,15 +202,3 @@
         dt, t = dobj.selector.get_dt_mesh(self, mask.sum(), self._index_offset)
         return dt, t
 
-    def _get_selector_mask(self, selector):
-        if hash(selector) == self._last_selector_id:
-            mask = self._last_mask
-        else:
-            self._last_mask = mask = selector.fill_mesh_cell_mask(self)
-            self._last_selector_id = hash(selector)
-            if mask is None:
-                self._last_count = 0
-            else:
-                self._last_count = mask.sum()
-        return mask
-


https://bitbucket.org/yt_analysis/yt/commits/a979d844d6ab/
Changeset:   a979d844d6ab
Branch:      yt
User:        MatthewTurk
Date:        2015-04-30 10:44:14+00:00
Summary:     Remove restriction on nv == 8 for mesh filling by bbox.
Affected #:  1 file

diff -r b7bb33f4d7a3ed0fae6b75acb4bc97288df578b1 -r a979d844d6ab319cd46fc7a1483688f60d8aba6d yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -387,8 +387,6 @@
         cdef int npoints, nv = mesh._connectivity_length
         cdef int total = 0
         cdef int offset = mesh._index_offset
-        if nv != 8:
-            raise RuntimeError
         coords = _ensure_code(mesh.connectivity_coords)
         indices = mesh.connectivity_indices
         npoints = indices.shape[0]


https://bitbucket.org/yt_analysis/yt/commits/5d790a7b7d58/
Changeset:   5d790a7b7d58
Branch:      yt
User:        MatthewTurk
Date:        2015-04-30 10:44:30+00:00
Summary:     Enable fluid reading for unstructured mesh.
Affected #:  2 files

diff -r a979d844d6ab319cd46fc7a1483688f60d8aba6d -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -135,7 +135,7 @@
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
         if count == 0: return 0
-        dest[offset:offset+count] = source.flat[mask]
+        dest[offset:offset+count] = source[mask,...]
         return count
 
     def count(self, selector):

diff -r a979d844d6ab319cd46fc7a1483688f60d8aba6d -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -262,3 +262,25 @@
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
         super(IOHandlerStreamUnstructured, self).__init__(ds)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        chunk = chunks[0]
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            rv[field] = np.empty(size, dtype="float64")
+        ngrids = sum(len(chunk.objs) for chunk in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [fname for ftype, fname in fields], ngrids)
+        for field in fields:
+            ind = 0
+            ftype, fname = field
+            for chunk in chunks:
+                for g in chunk.objs:
+                    ds = self.fields[g.mesh_id].get(field, None)
+                    if ds is None:
+                        ds = self.fields[g.mesh_id][fname]
+                    ind += g.select(selector, ds, rv[field], ind) # caches
+        return rv
+


https://bitbucket.org/yt_analysis/yt/commits/b4ad46522ac0/
Changeset:   b4ad46522ac0
Branch:      yt
User:        MatthewTurk
Date:        2015-05-04 10:23:13+00:00
Summary:     Add vertex selection; hardcode to 8 vertices.
Affected #:  7 files

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -826,6 +826,13 @@
             self.index._identify_base_chunk(self)
         return self._current_chunk.fwidth
 
+    @property
+    def fcoords_vertex(self):
+        if self._current_chunk is None:
+            self.index._identify_base_chunk(self)
+        return self._current_chunk.fcoords_vertex
+
+
 class YTSelectionContainer0D(YTSelectionContainer):
     _spatial = False
     _dimensionality = 0

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -164,6 +164,13 @@
                 self._last_count = mask.sum()
         return mask
 
+    def select_fcoords_vertex(self, dobj = None):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,self._connectivity_length,3), dtype='float64')
+        vertices = self.connectivity_coords[
+                self.connectivity_indices - 1]
+        return vertices[mask, :, :]
+
 class SemiStructuredMesh(UnstructuredMesh):
     _connectivity_length = 8
     _type_name = 'semi_structured_mesh'

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -235,6 +235,13 @@
         return self.ds.arr(fc, input_units = "code_length")
 
     @property
+    def fcoords_vertex(self):
+        fc = np.random.random((self.nd, self.nd, self.nd, 8, 3))
+        if self.flat:
+            fc.shape = (self.nd*self.nd*self.nd, 8, 3)
+        return self.ds.arr(fc, input_units = "code_length")
+
+    @property
     def icoords(self):
         ic = np.mgrid[0:self.nd-1:self.nd*1j,
                       0:self.nd-1:self.nd*1j,

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -258,6 +258,7 @@
 
 class IOHandlerStreamUnstructured(BaseIOHandler):
     _dataset_type = "stream_unstructured"
+    _node_types = ("diffused", "convected")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
@@ -269,7 +270,10 @@
         rv = {}
         for field in fields:
             ftype, fname = field
-            rv[field] = np.empty(size, dtype="float64")
+            if fname in self._node_types:
+                rv[field] = np.empty((size, 8), dtype="float64")
+            else:
+                rv[field] = np.empty(size, dtype="float64")
         ngrids = sum(len(chunk.objs) for chunk in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [fname for ftype, fname in fields], ngrids)

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -18,8 +18,10 @@
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
-    _get_coord_fields
+    _get_coord_fields, \
+    _get_vert_fields
 import yt.visualization._MPL as _MPL
+from yt.fields.derived_field import NullFunc
 
 class CartesianCoordinateHandler(CoordinateHandler):
 
@@ -38,6 +40,10 @@
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")
+            f3 = _get_vert_fields(axi)
+            registry.add_field(("index", "vertex_%s" % ax), function = f3,
+                               display_field = False,
+                               units = "code_length")
         def _cell_volume(field, data):
             rv  = data["index", "dx"].copy(order='K')
             rv *= data["index", "dy"]

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -45,6 +45,12 @@
         return data._reshape_vals(rv)
     return _dds, _coords
 
+def _get_vert_fields(axi, units = "code_length"):
+    def _vert(field, data):
+        rv = data.ds.arr(data.fcoords_vertex[...,axi].copy(), units)
+        return rv
+    return _vert
+
 def validate_iterable_width(width, ds, unit=None):
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
         validate_width_tuple(width[0])

diff -r 5d790a7b7d58110c27ecc6064136c13e9a3e8dd3 -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -402,6 +402,20 @@
             ind += gt.size
         return cdt
 
+    @cached_property
+    def fcoords_vertex(self):
+        ci = np.empty((self.data_size, 8, 3), dtype='float64')
+        ci = YTArray(ci, input_units = "code_length",
+                     registry = self.dobj.ds.unit_registry)
+        if self.data_size == 0: return ci
+        ind = 0
+        for obj in self.objs:
+            c = obj.select_fcoords_vertex(self.dobj)
+            if c.shape[0] == 0: continue
+            ci[ind:ind+c.shape[0], :, :] = c
+            ind += c.shape[0]
+        return ci
+
 class ChunkDataCache(object):
     def __init__(self, base_iter, preload_fields, geometry_handler,
                  max_length = 256):


https://bitbucket.org/yt_analysis/yt/commits/96208a52266c/
Changeset:   96208a52266c
Branch:      yt
User:        MatthewTurk
Date:        2015-05-16 01:45:43+00:00
Summary:     Adding conditional inclusion of pyembree in new mesh_traversal module.
Affected #:  3 files

diff -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b -r 96208a52266c8a7e08ae2219d22525cea009be8a yt/utilities/lib/mesh_traversal.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -0,0 +1,3 @@
+cimport rtcore
+cimport rtcore_scene
+cimport rtcore_ray

diff -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b -r 96208a52266c8a7e08ae2219d22525cea009be8a yt/utilities/lib/mesh_traversal.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -0,0 +1,2 @@
+def hello_world():
+    print "Hello!"

diff -r b4ad46522ac096f1ba3eafbd394a67bf8c46e78b -r 96208a52266c8a7e08ae2219d22525cea009be8a yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 from __future__ import print_function
+import pkg_resources
 import setuptools
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
@@ -43,6 +44,13 @@
 
     return exit_code == 0
 
+def check_for_pyembree():
+    try:
+        fn = pkg_resources.resource_filename("pyembree", "rtcore.pxd")
+    except ImportError:
+        return None
+    return os.path.dirname(fn)
+
 def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('lib',parent_package,top_path)
@@ -155,6 +163,13 @@
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    include_dirs = check_for_pyembree()
+    if include_dirs is not None:
+        config.add_extension("mesh_traversal",
+                             ["yt/utilities/lib/mesh_traversal.pyx"],
+                             include_dirs=["yt/utilities/lib", include_dirs],
+                             libraries=["m"], language="c++",
+                             depends=["yt/utilities/lib/mesh_traversal.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


https://bitbucket.org/yt_analysis/yt/commits/dd3ef0c68dc2/
Changeset:   dd3ef0c68dc2
Branch:      yt
User:        atmyers
Date:        2015-06-02 18:30:21+00:00
Summary:     merging in the scene refactor stuff
Affected #:  148 files

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -8,7 +8,7 @@
 
 .. note::
    All of these scripts are located in the mercurial repository at
-   http://hg.yt-project.org/cookbook/
+   http://bitbucket.org/yt_analysis/cookbook/
 
 """
 footer = """ """
@@ -23,7 +23,7 @@
     recipes = cStringIO.StringIO()
 recipes.write(header)
 
-url = "here: http://hg.yt-project.org/cookbook/raw/tip/%s ."
+url = "here: http://bitbucket.org/yt_analysis/cookbook/raw/tip/%s ."
 
 def cond_output(f, v):
     if not v:
@@ -31,7 +31,7 @@
     return True
 
 repo = hg.repository(uii, "../cookbook/")
-commands.pull(uii, repo, "http://hg.yt-project.org/cookbook/")
+commands.pull(uii, repo, "http://bitbucket.org/yt_analysis/cookbook/")
 ctx = repo["tip"]
 for file in ctx:
     if not file.startswith("recipes/"): continue

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -503,7 +503,9 @@
     BUILD_ARGS=""
     case $LIB in
         *h5py*)
-            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            pushd $LIB &> /dev/null
+            ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            popd &> /dev/null
             ;;
         *numpy*)
             if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
@@ -595,54 +597,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.20.2'
+CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.8'
+PYTHON='Python-2.7.9'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.3.1'
-HDF5='hdf5-1.8.14'
-IPYTHON='ipython-2.2.0'
+FREETYPE_VER='freetype-2.4.12' 
+H5PY='h5py-2.5.0'
+HDF5='hdf5-1.8.14' 
+IPYTHON='ipython-2.4.1'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.4.0'
-MERCURIAL='mercurial-3.1'
-NOSE='nose-1.3.4'
-NUMPY='numpy-1.8.2'
-PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-14.3.1'
+MATPLOTLIB='matplotlib-1.4.3'
+MERCURIAL='mercurial-3.4'
+NOSE='nose-1.3.6'
+NUMPY='numpy-1.9.2'
+PYTHON_HGLIB='python-hglib-1.6'
+PYZMQ='pyzmq-14.5.0'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.14.0'
+SCIPY='scipy-0.15.1'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.5'
-TORNADO='tornado-4.0.1'
-ZEROMQ='zeromq-4.0.4'
+SYMPY='sympy-0.7.6'
+TORNADO='tornado-4.0.2'
+ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
+echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
+echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160  Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
+echo '4a83f9ae1855a7fad90133b327d426201c8ccfd2e7fbe9f39b2d61a2eee2f3ebe2ea02cf80f3d4e1ad659f8e790c173df8cc99b87d0b7ce63d34aa88cfdc7939  h5py-2.5.0.tar.gz' > h5py-2.5.0.tar.gz.sha512
 echo '4073fba510ccadaba41db0939f909613c9cb52ba8fb6c1062fc9118edc601394c75e102310be1af4077d07c9b327e6bbb1a6359939a7268dc140382d0c1e0199  hdf5-1.8.14.tar.gz' > hdf5-1.8.14.tar.gz.sha512
-echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
+echo 'a9cffc08ba10c47b0371b05664e55eee0562a30ef0d4bbafae79e52e5b9727906c45840c0918122c06c5672ac65e6eb381399f103e1a836aca003eda81b2acde  ipython-2.4.1.tar.gz' > ipython-2.4.1.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
-echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
-echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
-echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
-echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
-echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
+echo '51b0f58b2618b47b653e17e4f6b6a1215d3a3b0f1331ce3555cc7435e365d9c75693f289ce12fe3bf8f69fd57b663e545f0f1c2c94e81eaa661cac0689e125f5  matplotlib-1.4.3.tar.gz' > matplotlib-1.4.3.tar.gz.sha512
+echo 'a61b0d4cf528136991243bb23ac972c11c50ab5681d09f8b2d12cf7d37d3a9d76262f7fe6e7a1834bf6d03e8dc0ebbd9231da982e049e09830341dabefe5d064  mercurial-3.4.tar.gz' > mercurial-3.4.tar.gz.sha512
+echo 'd0cede08dc33a8ac0af0f18063e57f31b615f06e911edb5ca264575174d8f4adb4338448968c403811d9dcc60f38ade3164662d6c7b69b499f56f0984bb6283c  nose-1.3.6.tar.gz' > nose-1.3.6.tar.gz.sha512
+echo '70470ebb9afef5dfd0c83ceb7a9d5f1b7a072b1a9b54b04f04f5ed50fbaedd5b4906bd500472268d478f94df9e749a88698b1ff30f2d80258e7f3fec040617d9  numpy-1.9.2.tar.gz' > numpy-1.9.2.tar.gz.sha512
+echo 'bfd10455e74e30df568c4c4827140fb6cc29893b0e062ce1764bd52852ec7487a70a0f5ea53c3fca7886f5d36365c9f4db52b8c93cad35fb67beeb44a2d56f2d  python-hglib-1.6.tar.gz' > python-hglib-1.6.tar.gz.sha512
+echo '20164f7b05c308e0f089c07fc46b1c522094f3ac136f2e0bba84f19cb63dfd36152a2465df723dd4d93c6fbd2de4f0d94c160e2bbc353a92cfd680eb03cbdc87  pyzmq-14.5.0.tar.gz' > pyzmq-14.5.0.tar.gz.sha512
+echo 'fff4412d850c431a1b4e6ee3b17958ee5ab3beb81e6cb8a8e7d56d368751eaa8781d7c3e69d932dc002d718fddc66a72098acfe74cfe29ec80b24e6736317275  scipy-0.15.1.tar.gz' > scipy-0.15.1.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
-echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
-echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
+echo 'ce0f1a17ac01eb48aec31fc0ad431d9d7ed9907f0e8584a6d79d0ffe6864fe62e203fe3f2a3c3e4e3d485809750ce07507a6488e776a388a7a9a713110882fcf  sympy-0.7.6.tar.gz' > sympy-0.7.6.tar.gz.sha512
+echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
+echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -946,8 +948,8 @@
 fi
 
 do_setup_py $IPYTHON
+do_setup_py $CYTHON
 do_setup_py $H5PY
-do_setup_py $CYTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -31,7 +31,7 @@
 `our members website. <http://yt-project.org/members.html>`_
 
 For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://hg.yt-project.org/yt/src/yt/CREDITS>`_ file.  
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
 For a more detailed breakdown of contributions made by individual users, see our 
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -99,30 +99,35 @@
 mask out everything except the particles with which you are concerned.
 
 Creating a particle filter takes a few steps.  You must first define a 
-function which accepts a geometric object (e.g. all_data, sphere, etc.)
+function which accepts a data object (e.g. all_data, sphere, etc.)
 as its argument.  It uses the fields and information in this geometric
-object in order to produce some sort of conditional mask that is then returned.
-Here is the function to filter only the particles with `particle_type` (i.e. 
-field = `('all', 'particle_type')` equal to 2. (This is the case with
-Enzo star particles.)
+object in order to produce some sort of conditional mask that is then returned
+to create a new particle type.
+
+Here is a particle filter to create a new ``star`` particle type.  For Enzo
+simulations, stars have ``particle_type`` set to 2, so our filter will select
+only the particles with ``particle_type`` (i.e.  field = ``('all',
+'particle_type')`` equal to 2.
 
 .. code-block:: python
 
-    def Stars(pfilter, data):
-        filter = data[("all", "particle_type")] == 2
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
+    def stars(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter
 
-The particle_filter must now be defined to incorporate this function.  It takes
-a few arguments: a name for the filter, our filter function, and the fields
-that it requires in a dataset in order to work (in this case, it requires
-the ('all', 'particle_type') field.
+The :func:`~yt.data_objects.particle_filters.particle_filter` decorator takes a
+few options.  You must specify the names of the particle fields that are
+required in order to define the filter --- in this case the ``particle_type``
+field.  Additionally, you must specify the particle type to be filtered --- in
+this case we filter all the particles in the dataset by specifying the ``all``
+particle type.
 
-.. code-block:: python
+In addition, you may specify a name for the newly defined particle type.  If no
+name is specified, the name for the particle type will be inferred from the name
+of the filter definition --- in this case the inferred name will be ``stars``.
 
-    from yt.data_objects.particle_filters import add_particle_filter
-    add_particle_filter("stars", function=Stars, filtered_type='all', requires=["particle_type"])
-
-And lastly, the filter must be applied to our dataset of choice.  Note that this 
+Lastly, the filter must be applied to our dataset of choice.  Note that this 
 filter can be added to as many datasets as we wish.  It will only actually
 create new filtered fields if the dataset has the required fields, though.
 
@@ -133,10 +138,27 @@
     ds.add_particle_filter('stars')
 
 And that's it!  We can now access all of the ('stars', field) fields from 
-our dataset `ds` and treat them as any other particle field.  In addition,
-it created some `deposit` fields, where the particles were deposited on to
+our dataset ``ds`` and treat them as any other particle field.  In addition,
+it created some ``deposit`` fields, where the particles were deposited on to
 the grid as mesh fields.
 
+As an alternative syntax, you can also define a new particle filter via the
+:func:`~yt.data_objects.particle_filter.add_particle_filter` function.  
+
+.. code-block:: python
+
+
+    def Stars(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 2
+        return filter
+
+    add_particle_filter("stars", function=Stars, filtered_type='all',
+                        requires=["particle_type"])
+
+This is equivalent to our use of the ``particle_filter`` decorator above.  The
+choice to use either the ``particle_filter`` decorator or the
+``add_particle_filter`` function is a purely stylistic choice.
+
 .. notebook:: particle_filter.ipynb
 
 .. _particle-unions:
@@ -172,7 +194,7 @@
 
 Creating geometric objects for a dataset provides a means for filtering
 a field based on spatial location.  The most commonly used of these are
-spheres, regions (3D prisms), ellipsoids, disks, and rays.  The `all_data`
+spheres, regions (3D prisms), ellipsoids, disks, and rays.  The ``all_data``
 object which gets used throughout this documentation section is an example of 
 a geometric object, but it defaults to including all the data in the dataset
 volume.  To see all of the geometric objects available, see 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -79,10 +79,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.data_objects.particle_filters import add_particle_filter\n",
+      "yt.add_particle_filter(\"young_stars\", function=young_stars, filtered_type='Stars', requires=[\"creation_time\"])\n",
       "\n",
-      "add_particle_filter(\"young_stars\", function=young_stars, filtered_type='Stars', requires=[\"creation_time\"])\n",
-      "add_particle_filter(\"old_stars\", function=old_stars, filtered_type='Stars', requires=[\"creation_time\"])"
+      "yt.add_particle_filter(\"old_stars\", function=old_stars, filtered_type='Stars', requires=[\"creation_time\"])"
      ],
      "language": "python",
      "metadata": {},

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:c7cfb2db456d127bb633b7eee7ad6fe14290aa622ac62694c7840d80137afaba"
+  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -236,7 +236,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "q1 = yt.YTArray(1.0,\"C\") # Coulombs\n",
+      "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
       "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
       "\n",
       "print \"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions\n",
@@ -247,21 +247,14 @@
      "outputs": []
     },
     {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Under the hood, the `yt` units system has a translation layer that converts between these two systems, without any further effort required. For example:"
-     ]
-    },
-    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.utilities.physical_constants import elementary_charge\n",
+      "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
+      "B2 = yt.YTArray(1.0,\"gauss\") # gauss\n",
       "\n",
-      "print elementary_charge\n",
-      "elementary_charge_C = elementary_charge.in_units(\"C\")\n",
-      "print elementary_charge_C"
+      "print \"units =\", B1.in_mks().units, \", dims =\", B1.units.dimensions\n",
+      "print \"units =\", B2.in_cgs().units, \", dims =\", B2.units.dimensions"
      ],
      "language": "python",
      "metadata": {},
@@ -271,13 +264,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The electromagnetic unit translations `yt` understands are:\n",
-      "\n",
-      "* Charge: 1 coulomb (C) $\\leftrightarrow$ 0.1c electrostatic unit (esu, Fr)\n",
-      "* Current: 1 ampere (A, C/s) $\\leftrightarrow$ 0.1c statampere (statA, esu/s, Fr) \n",
-      "* Magnetic Field: 1 tesla (T) $\\leftrightarrow 10^4$ gauss (G)\n",
-      "\n",
-      "where \"Fr\" is the franklin, an alternative name for the electrostatic unit, and c is the speed of light. "
+      "To convert between these two systems, use [Unit Equivalencies](unit_equivalencies.html)."
      ]
     },
     {

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:b62d83c168828afa81bcf0603bb37d3183f2a810258f25963254ffb24a0acd82"
+  "signature": "sha256:f0bbee67b429d3fde768568adb475908cbbe04c428cafb5a45cd01d6b0de1745"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -34,6 +34,7 @@
      "collapsed": false,
      "input": [
       "import yt\n",
+      "from yt import YTQuantity\n",
       "import numpy as np\n",
       "\n",
       "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
@@ -56,7 +57,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
+      "Most equivalencies can go in both directions, without any information required other than the unit you want to convert to (this is not the case for the electromagnetic equivalencies, which we'll discuss later):"
      ]
     },
     {
@@ -130,6 +131,114 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Electromagnetic Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Special, one-way equivalencies exist for converting between electromagnetic units in the cgs and SI unit systems. These exist since in the cgs system, electromagnetic units are comprised of the base units of seconds, grams and centimeters, whereas in the SI system Ampere is a base unit. For example, the dimensions of charge are completely different in the two systems:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "Q1 = YTQuantity(1.0,\"C\")\n",
+      "Q2 = YTQuantity(1.0,\"esu\")\n",
+      "print \"Q1 dims =\", Q1.units.dimensions\n",
+      "print \"Q2 dims =\", Q2.units.dimensions\n",
+      "print \"Q1 base units =\", Q1.in_mks()\n",
+      "print \"Q2 base units =\", Q2.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To convert from a cgs unit to an SI unit, use the \"SI\" equivalency:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+      "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
+      "print qp\n",
+      "print qp_SI"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To convert from an SI unit to a cgs unit, use the \"CGS\" equivalency:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "B = YTQuantity(1.0,\"T\") # magnetic field in Tesla\n",
+      "print B, B.to_equivalent(\"gauss\",\"CGS\") # convert to Gauss"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies exist between the SI and cgs dimensions of charge, current, magnetic field, electric potential, and resistance. As a neat example, we can convert current in Amperes and resistance in Ohms to their cgs equivalents, and then use them to calculate the \"Joule heating\" of a conductor with resistance $R$ and current $I$:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "I = YTQuantity(1.0,\"A\")\n",
+      "I_cgs = I.to_equivalent(\"statA\",\"CGS\")\n",
+      "R = YTQuantity(1.0,\"ohm\")\n",
+      "R_cgs = R.to_equivalent(\"statohm\",\"CGS\")\n",
+      "P = I**2*R\n",
+      "P_cgs = I_cgs**2*R_cgs"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The dimensions of current and resistance in the two systems are completely different, but the formula gives us the power dissipated dimensions of energy per time, so the dimensions and the result should be the same, which we can check:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print P_cgs.units.dimensions == P.units.dimensions\n",
+      "print P.in_units(\"W\"), P_cgs.in_units(\"W\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Determining Valid Equivalencies"
      ]
     },

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -4,6 +4,13 @@
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
+# Currently this cookbook is flawed in that the data that is covered by the
+# higher resolution data gets masked during the rendering.  This should be
+# fixed by changing either the data source or the code in
+# yt/utilities/amr_kdtree.py where data is being masked for the partitioned
+# grid.  Right now the quick fix is to create a data_collection, but this
+# will only work for patch based simulations that have ds.index.grids.
+
 # We begin by loading up yt, and importing the AMRKDTree
 import numpy as np
 
@@ -12,58 +19,53 @@
 
 # Load up a dataset and define the kdtree
 ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-kd = AMRKDTree(ds)
+im, sc = yt.volume_render(ds, 'density', fname='v0.png')
+sc.camera.set_width(ds.arr(100, 'kpc'))
+render_source = sc.get_source(0)
+kd=render_source.volume
 
 # Print out specifics of KD Tree
 print("Total volume of all bricks = %i" % kd.count_volume())
 print("Total number of cells = %i" % kd.count_cells())
 
-# Define a camera and take an volume rendering.
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
-                  tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
-
-# This rendering is okay, but lets say I'd like to improve it, and I don't want
-# to spend the time rendering the high resolution data.  What we can do is
-# generate a low resolution version of the AMRKDTree and pass that in to the
-# camera.  We do this by specifying a maximum refinement level of 6.
-
-kd_low_res = AMRKDTree(ds, max_level=6)
+new_source = ds.all_data()
+new_source.max_level=3
+kd_low_res = AMRKDTree(ds, data_source=new_source)
 print(kd_low_res.count_volume())
 print(kd_low_res.count_cells())
 
 # Now we pass this in as the volume to our camera, and render the snapshot
 # again.
 
-cam.volume = kd_low_res
-cam.snapshot("v4.png", clip_ratio=6.0)
+render_source.set_volume(kd_low_res)
+render_source.set_fields('density')
+sc.render("v1.png")
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
 
+tf = render_source.transfer_function
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+sc.render("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-cam.snapshot("v4.png", clip_ratio=6.0)
-
-# That seemed to pick out som interesting structures.  Now let's bump up the
-# opacity.
-
+sc.render("v3.png", clip_ratio=6.0)
+#
+## That seemed to pick out some interesting structures.  Now let's bump up the
+## opacity.
+#
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-cam.snapshot("v3.png", clip_ratio=6.0)
-
-# This looks pretty good, now lets go back to the full resolution AMRKDTree
-
-cam.volume = kd
-cam.snapshot("v4.png", clip_ratio=6.0)
+sc.render("v4.png", clip_ratio=6.0)
+#
+## This looks pretty good, now lets go back to the full resolution AMRKDTree
+#
+render_source.set_volume(kd)
+sc.render("v5.png", clip_ratio=6.0)
 
 # This looks great!

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,40 +3,26 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-
-# Set up transfer function
-tf = yt.ColorTransferFunction((np.log10(mi), np.log10(ma)))
-tf.add_layers(6, w=0.05)
-
-# Set up camera paramters
-c = [0.5, 0.5, 0.5]  # Center
-L = [1, 1, 1]  # Normal Vector
-W = 1.0  # Width
-Nvec = 512  # Pixels on a side
-
-# Specify a north vector, which helps with rotations.
-north_vector = [0., 0., 1.]
+im, sc = yt.volume_render(ds)
+cam = sc.camera
+cam.resolution = (512, 512)
+cam.set_width(ds.domain_width/20.0)
 
 # Find the maximum density location, store it in max_c
 v, max_c = ds.find_max('density')
 
-# Initialize the Camera
-cam = ds.camera(c, L, W, (Nvec, Nvec), tf, north_vector=north_vector)
 frame = 0
-
-# Do a rotation over 5 frames
-for i, snapshot in enumerate(cam.rotation(np.pi, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
-    frame += 1
-
 # Move to the maximum density location over 5 frames
-for i, snapshot in enumerate(cam.move_to(max_c, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.move_to(max_c, 5):
+    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
-for i, snapshot in enumerate(cam.zoomin(10.0, 5, clip_ratio=8.0)):
-    snapshot.write_png('camera_movement_%04i.png' % frame)
+for _ in cam.zoomin(10.0, 5):
+    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
     frame += 1
+
+# Do a rotation over 5 frames
+for _ in cam.rotation(np.pi, 5):
+    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
+    frame += 1

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -2,27 +2,14 @@
 # volume renderings, to pngs with varying backgrounds.
 
 # First we use the simple_volume_rendering.py recipe from above to generate
-# a standard volume rendering.  The only difference is that we use 
-# grey_opacity=True with our TransferFunction, as the colored background 
-# functionality requires images with an opacity between 0 and 1. 
-
-# We have removed all the comments from the volume rendering recipe for 
-# brevity here, but consult the recipe for more details.
+# a standard volume rendering.
 
 import yt
 import numpy as np
 
 ds = yt.load("Enzo_64/DD0043/data0043")
-ad = ds.all_data()
-mi, ma = ad.quantities.extrema("density")
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)), grey_opacity=True)
-tf.add_layers(5, w=0.02, colormap="spectral")
-c = [0.5, 0.5, 0.5]
-L = [0.5, 0.2, 0.7]
-W = 1.0
-Npixels = 512
-cam = ds.camera(c, L, W, Npixels, tf)
-im = cam.snapshot("original.png" % ds, clip_ratio=8.0)
+im, sc = yt.volume_render(ds, 'density')
+im.write_png("original.png", clip_ratio=8.0)
 
 # Our image array can now be transformed to include different background
 # colors.  By default, the background color is black.  The following

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -11,7 +11,7 @@
 # objects, you could set it the way you would a cutting plane -- but for this
 # dataset, we'll just choose an off-axis value at random.  This gets normalized
 # automatically.
-L = [0.5, 0.4, 0.7]
+L = [1.0, 0.0, 0.0]
 
 # Our "width" is the width of the image plane as well as the depth.
 # The first element is the left to right width, the second is the
@@ -26,7 +26,7 @@
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
 # slighly lower quality image.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc= yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name
 # relating to what our dataset is called.

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/offaxis_projection_colorbar.py
--- a/doc/source/cookbook/offaxis_projection_colorbar.py
+++ b/doc/source/cookbook/offaxis_projection_colorbar.py
@@ -32,7 +32,7 @@
 # Also note that we set the field which we want to project as "density", but
 # really we could use any arbitrary field like "temperature", "metallicity"
 # or whatever.
-image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
+image, sc = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Image is now an NxN array representing the intensities of the various pixels.
 # And now, we call our direct image saver.  We save the log of the result.

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -3,44 +3,46 @@
 
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# We start by building a transfer function, and initializing a camera.
+# We start by building a default volume rendering scene 
 
-tf = yt.ColorTransferFunction((-30, -22))
-cam = ds.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256, tf)
+im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", clip_ratio=6.0)
 
-# Now let's add some isocontours, and take a snapshot.
-
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
-cam.snapshot("v1.png", clip_ratio=6.0)
+sc.camera.set_width(ds.arr(0.1,'code_length'))
+tf = sc.get_source(0).transfer_function 
+tf.clear()
+tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
+        alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
+im = sc.render("v1.png", clip_ratio=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
+tf = sc.get_source(0).transfer_function 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-cam.snapshot("v2.png", clip_ratio=6.0)
+im = sc.render("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-cam.snapshot("v3.png", clip_ratio=6.0)
+im = sc.render("v3.png", clip_ratio=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v4.png", clip_ratio=6.0)
+im = sc.render("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v5.png", clip_ratio=6.0)
+im = sc.render("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -48,13 +50,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-cam.snapshot("v6.png", clip_ratio=6.0)
+im = sc.render("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-cam.snapshot("v7.png", clip_ratio=6.0)
+im = sc.render("v7.png", clip_ratio=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/particle_one_color_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_one_color_plot.py
@@ -0,0 +1,13 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', color='b')
+
+# zoom in a little bit
+p.set_width(500, 'kpc')
+
+#save result
+p.save()

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/particle_xvz_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_xvz_plot.py
@@ -0,0 +1,15 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_velocity_z', ['particle_mass'])
+
+# pick some appropriate units
+p.set_unit('particle_position_x', 'Mpc')
+p.set_unit('particle_velocity_z', 'km/s')
+p.set_unit('particle_mass', 'Msun')
+
+# save result
+p.save()

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/particle_xy_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_xy_plot.py
@@ -0,0 +1,14 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', 'particle_mass', width=(0.5, 0.5))
+
+# pick some appropriate units
+p.set_axes_unit('kpc')
+p.set_unit('particle_mass', 'Msun')
+
+#save result
+p.save()

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,61 +1,22 @@
 import yt
 import numpy as np
+from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
+im, sc = yt.volume_render(ds, ('gas','density'))
+sc.get_source(0).transfer_function.grey_opacity=True
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+sc.annotate_domain(ds)
+im = sc.render()
+im.write_png("%s_vr_domain.png" % ds)
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
-
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
-
-# Add three guassians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# gist_stern colormap.
-tf.add_layers(3, w=0.02, colormap="gist_stern")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-im = cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
-
-# Add the domain edges, with an alpha blending of 0.3:
-nim = cam.draw_domain(im, alpha=0.3)
-nim.write_png('%s_vr_domain.png' % ds)
-
-# Add the grids, colored by the grid level with the algae colormap
-nim = cam.draw_grids(im, alpha=0.3, cmap='algae')
-nim.write_png('%s_vr_grids.png' % ds)
+sc.annotate_grids(ds)
+im = sc.render()
+im.write_png("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
-cam.draw_coordinate_vectors(nim)
-nim.write_png("%s_vr_vectors.png" % ds)
+sc.annotate_axes()
+im = sc.render()
+im.write_png("%s_vr_coords.png" % ds)

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -124,6 +124,41 @@
 
 .. yt_cookbook:: simple_off_axis_projection.py
 
+.. _cookbook-simple-particle-plot:
+
+Simple Particle Plot
+~~~~~~~~~~~~~~~~~~~~
+
+You can also use yt to make particle-only plots. This script shows how to
+plot all the particle x and y positions in a dataset, using the particle mass
+to set the color scale.
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_xy_plot.py
+
+.. _cookbook-non-spatial-particle-plot:
+
+Non-spatial Particle Plots
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You are not limited to plotting spatial fields on the x and y axes. This
+example shows how to plot the particle x-coordinates versus their z-velocities,
+again using the particle mass to set the colorbar. 
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_xvz_plot.py
+
+.. _cookbook-single-color-particle-plot:
+
+Single-color Particle Plots
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you don't want to display a third field on the color bar axis, simply pass
+in a color string instead of a particle field.
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_one_color_plot.py
+
 .. _cookbook-simple_volume_rendering:
 
 Simple Volume Rendering

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -4,45 +4,25 @@
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
-# Create a data container (like a sphere or region) that
-# represents the entire domain.
-ad = ds.all_data()
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+#im, sc = yt.volume_render(ds, fname="%s_volume_rendered.png" % ds, clip_ratio=8.0)
 
-# Get the minimum and maximum densities.
-mi, ma = ad.quantities.extrema("density")
+# You can easily specify a different field
+im, sc = yt.volume_render(ds, field=('gas','density'), fname="%s_density_volume_rendered.png" % ds, clip_ratio=8.0)
 
-# Create a transfer function to map field values to colors.
-# We bump up our minimum to cut out some of the background fluid
-tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))
+# Now increase the resolution
+sc.camera.resolution = (512, 512)
+im = sc.render(fname='big.png', clip_ratio=8.0)
 
-# Add three guassians, evenly spaced between the min and
-# max specified above with widths of 0.02 and using the
-# gist_stern colormap.
-tf.add_layers(5, w=0.02, colormap="spectral")
-
-# Choose a center for the render.
-c = [0.5, 0.5, 0.5]
-
-# Choose a vector representing the viewing direction.
-L = [0.5, 0.2, 0.7]
-
-# Set the width of the image.
-# Decreasing or increasing this value
-# results in a zoom in or out.
-W = 1.0
-
-# The number of pixels along one side of the image.
-# The final image will have Npixel^2 pixels.
-Npixels = 512
-
-# Create a camera object.
-# This object creates the images and
-# can be moved and rotated.
-cam = ds.camera(c, L, W, Npixels, tf)
-
-# Create a snapshot.
-# The return value of this function could also be accepted, modified (or saved
-# for later manipulation) and then put written out using write_bitmap.
-# clip_ratio applies a maximum to the function, which is set to that value
-# times the .std() of the array.
-cam.snapshot("%s_volume_rendered.png" % ds, clip_ratio=8.0)
+# Now modify the transfer function
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+# Clear the transfer function
+render_source.transfer_function.clear()
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+        np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+        np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+        scale=30.0, colormap='RdBu_r')
+im = sc.render(fname='new_tf.png', clip_ratio=None)

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -4,7 +4,7 @@
 import pylab
 
 import yt
-import yt.visualization.volume_rendering.api as vr
+import yt.visualization.volume_rendering.old_camera as vr
 
 ds = yt.load("maestro_subCh_plt00248")
 
@@ -17,11 +17,11 @@
 # centered on these with width sigma        
 vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
 sigma = 2.e5
-        
+
 mi, ma = min(vals), max(vals)
 
 # Instantiate the ColorTransferfunction.
-tf =  vr.ColorTransferFunction((mi, ma))
+tf =  yt.ColorTransferFunction((mi, ma))
 
 for v in vals:
     tf.sample_colormap(v, sigma**2, colormap="coolwarm")
@@ -69,7 +69,7 @@
 
 # tell the camera to use our figure
 cam._render_figure = f
-    
+
 # save annotated -- this added the transfer function values, 
 # and the clear_fig=False ensures it writes onto our existing figure.
 cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -5,9 +5,9 @@
 
 .. note:: If you already know how to use version control and are comfortable
    with handling it yourself, the quickest way to contribute to yt is to `fork
-   us on BitBucket <http://hg.yt-project.org/yt/fork>`_, `make your changes
+   us on BitBucket <http://bitbucket.org/yt_analysis/yt/fork>`_, `make your changes
    <http://mercurial.selenic.com/>`_, and issue a `pull request
-   <http://hg.yt-project.org/yt/pull-requests>`_.  The rest of this document is just an
+   <http://bitbucket.org/yt_analysis/yt/pull-requests>`_.  The rest of this document is just an
    explanation of how to do that.
 
 yt is a community project!
@@ -354,7 +354,7 @@
 --------------------------------------
 
 yt is hosted on BitBucket, and you can see all of the yt repositories at
-http://hg.yt-project.org/.  With the yt installation script you should have a
+http://bitbucket.org/yt_analysis/.  With the yt installation script you should have a
 copy of Mercurial for checking out pieces of code.  Make sure you have followed
 the steps above for bootstrapping your development (to assure you have a
 bitbucket account, etc.)

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -211,7 +211,7 @@
 If you have gone through all of the above steps, and you're still encountering 
 problems, then you have found a bug.  
 To submit a bug report, you can either directly create one through the
-BitBucket `web interface <http://hg.yt-project.org/yt/issues/new>`_,
+BitBucket `web interface <http://bitbucket.org/yt_analysis/yt/issues/new>`_,
 or you can use the command line ``yt bugreport`` to interactively create one.
 Alternatively, email the ``yt-users`` mailing list and we will construct a new
 ticket in your stead.  Remember to include the information

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -67,7 +67,7 @@
 
 .. code-block:: bash
 
-  wget http://hg.yt-project.org/yt/raw/stable/doc/install_script.sh
+  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
 .. _installing-yt:
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -30,6 +30,16 @@
    ~yt.visualization.profile_plotter.PhasePlot
    ~yt.visualization.profile_plotter.PhasePlotMPL
 
+Particle Plots
+^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.particle_plots.ParticleProjectionPlot
+   ~yt.visualization.particle_plots.ParticlePhasePlot
+   ~yt.visualization.particle_plots.ParticlePlot
+
 Fixed Resolution Pixelization
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -37,6 +47,7 @@
    :toctree: generated/
 
    ~yt.visualization.fixed_resolution.FixedResolutionBuffer
+   ~yt.visualization.fixed_resolution.ParticleImageBuffer
    ~yt.visualization.fixed_resolution.CylindricalFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer
@@ -408,6 +419,7 @@
    ~yt.data_objects.profiles.Profile1D
    ~yt.data_objects.profiles.Profile2D
    ~yt.data_objects.profiles.Profile3D
+   ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
 .. _halo_analysis_ref:
@@ -491,6 +503,16 @@
    ~yt.fields.field_info_container.FieldInfoContainer.add_field
    ~yt.data_objects.static_output.Dataset.add_field
 
+
+Particle Filters
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.data_objects.particle_filters.add_particle_filter
+   ~yt.data_objects.particle_filters.particle_filter
+
 Image Handling
 --------------
 
@@ -570,9 +592,12 @@
 .. autosummary::
    :toctree: generated/
 
+   ~yt.visualization.volume_rendering.volume_rendering.volume_render
+   ~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection
+   ~yt.visualization.volume_rendering.scene.Scene
    ~yt.visualization.volume_rendering.camera.Camera
-   ~yt.visualization.volume_rendering.camera.off_axis_projection
-   ~yt.visualization.volume_rendering.camera.allsky_projection
+   ~yt.visualization.volume_rendering.lens.Lens
+   ~yt.visualization.volume_rendering.render_source.RenderSource
 
 These objects set up the way the image looks:
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -8,7 +8,7 @@
 Contributors
 ------------
 
-The `CREDITS file <http://hg.yt-project.org/yt/src/yt/CREDITS>`_ contains the
+The `CREDITS file <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ contains the
 most up-to-date list of everyone who has contributed to the yt source code.
 
 Version 3.1

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -327,7 +327,7 @@
    W = [0.02, 0.02, 0.02]
    c = [0.5, 0.5, 0.5]
    N = 512
-   image = yt.off_axis_projection(ds, c, L, W, N, "density")
+   image, sc = yt.off_axis_projection(ds, c, L, W, N, "density")
    yt.write_image(np.log10(image), "%s_offaxis_projection.png" % ds)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
@@ -979,6 +979,206 @@
 be set independently for each axis by setting ``accumulation`` to a list of
 ``True``/ ``-True`` /``False`` values.
 
+.. _particle-plots:
+
+Particle Plots
+--------------
+
+Slice and projection plots both provide a callback for over-plotting particle
+positions onto gas fields. However, sometimes you want to plot the particle 
+quantities by themselves, perhaps because the gas fields are not relevant to 
+your point, or perhaps because your dataset doesn't contain any gas fields 
+in the first place. Additionally, you may want to plot your particles with a 
+third field, such as particle mass or age,  mapped to a colorbar. 
+:class:`~yt.visualization.particle_plots.ParticlePlot` provides a convenient 
+way to do this in yt. 
+
+The easiest way to make a :class:`~yt.visualization.particle_plots.ParticlePlot` 
+is to use the convenience routine. This has the syntax:
+
+.. code-block:: python
+
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y')
+   p.save()
+
+Here, ``ds`` is a dataset we've previously opened. The commands create a particle
+plot that shows the x and y positions of all the particles in ``ds`` and save the 
+result to a file on the disk. The type of plot returned depends on the fields you 
+pass in; in this case, ``p`` will be an :class:`~yt.visualization.particle_plots.ParticleProjectionPlot`, 
+because the fields are aligned to the coordinate system of the simulation. 
+
+Most of the callbacks that work for slice and projection plots also work for
+:class:`~yt.visualization.particle_plots.ParticleProjectionPlot`.
+For instance, we can zoom in:
+
+.. code-block:: python
+   
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y')
+   p.zoom(10)
+   p.save('zoom')
+
+change the width:
+
+.. code-block:: python
+
+   p.set_width((500, 'kpc'))
+
+or change the axis units:
+
+.. code-block:: python
+
+   p.set_unit('particle_position_x', 'Mpc')
+
+Here is a full example that shows the simplest way to use 
+:class:`~yt.visualization.particle_plots.ParticlePlot`:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y')
+   p.save()
+
+In the above examples, we are simply splatting particle x and y positions onto 
+a plot using some color. We can also supply an additional particle field, and map
+that to a colorbar. For instance:
+
+.. code-block:: python
+
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', 
+                           'particle_mass', width=(0.5, 0.5))
+   p.set_unit('particle_mass', 'Msun')
+   p.save()
+
+will create a plot with the particle mass used to set the colorbar. 
+Specifically, :class:`~yt.visualization.particle_plots.ParticlePlot` 
+shows the total ``z_field`` for all the particles in each pixel on the 
+colorbar axis; to plot average quantities instead, one can supply a 
+``weight_field`` argument. 
+
+Here is a complete example that uses the ``particle_mass`` field
+to set the colorbar and shows off some of the modification functions for 
+:class:`~yt.visualization.particle_plots.ParticleProjectionPlot`:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', 
+                       'particle_mass', width=(0.5, 0.5))
+   p.set_unit('particle_mass', 'Msun')
+   p.zoom(32)
+   p.annotate_title('Zoomed-in Particle Plot')
+   p.save()
+
+If the fields passed in to :class:`~yt.visualization.particle_plots.ParticlePlot` 
+do not correspond to a valid :class:`~yt.visualization.particle_plots.ParticleProjectionPlot`, 
+a :class:`~yt.visualization.particle_plots.ParticlePhasePlot` will be returned instead.
+:class:`~yt.visualization.particle_plots.ParticlePhasePlot` is used to plot arbitrary particle 
+fields against each other, and does not support some of the callbacks available in 
+:class:`~yt.visualization.particle_plots.ParticleProjectionPlot` -
+for instance, :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.pan` and 
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.zoom` don't make much sense when one of your axes is a position
+and the other is a velocity. The modification functions defined for :class:`~yt.visualization.profile_plotter.PhasePlot` 
+should all work, however.
+
+Here is an example of making a :class:`~yt.visualization.particle_plots.ParticlePhasePlot` 
+of ``particle_position_x`` versus ``particle_velocity_z``, with the ``particle_mass`` on the colorbar:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+   p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_velocity_z', ['particle_mass'])
+   p.set_unit('particle_position_x', 'Mpc')
+   p.set_unit('particle_velocity_z', 'km/s')
+   p.set_unit('particle_mass', 'Msun')
+   p.save()
+
+and here is one with the particle x and y velocities on the plot axes:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+   p = yt.ParticlePlot(ds, 'particle_velocity_x', 'particle_velocity_y', 'particle_mass')
+   p.set_unit('particle_velocity_x', 'km/s')
+   p.set_unit('particle_velocity_y', 'km/s')
+   p.set_unit('particle_mass', 'Msun')
+   p.set_ylim(-400, 400)
+   p.set_xlim(-400, 400)
+   p.save()
+
+If you want more control over the details of the :class:`~yt.visualization.particle_plots.ParticleProjectionPlot` or 
+:class:`~yt.visualization.particle_plots.ParticlePhasePlot`, you can always use these classes directly. For instance, 
+here is an example of using the ``depth`` argument to :class:`~yt.visualization.particle_plots.ParticleProjectionPlot`
+to only plot the particles that live in a thin slice around the center of the
+domain:
+
+.. python-script::
+
+   import yt
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+   p = yt.ParticleProjectionPlot(ds, 2, ['particle_mass'], width=(0.5, 0.5), depth=0.01)
+   p.set_unit('particle_mass', 'Msun')
+   p.save()
+
+and here is an example of using the ``data_source`` argument to :class:`~yt.visualization.particle_plots.ParticlePhasePlot`
+to only consider the particles that lie within a 50 kpc sphere around the domain center:
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+   my_sphere = ds.sphere("c", (50.0, "kpc"))
+
+   p = yt.ParticlePhasePlot(my_sphere, "particle_velocity_x", "particle_velocity_y",
+                            "particle_mass")
+   p.set_unit('particle_velocity_x', 'km/s')
+   p.set_unit('particle_velocity_y', 'km/s')
+   p.set_unit('particle_mass', 'Msun')
+   p.set_ylim(-400, 400)
+   p.set_xlim(-400, 400)
+
+   p.save()
+
+Finally, with 1D and 2D Profiles, you can create a :class:`~yt.data_objects.profiles.ParticleProfile`
+object separately using the :func:`~yt.data_objects.profiles.create_profile` function, and then use it to
+create a :class:`~yt.visualization.particle_plots.ParticlePhasePlot` object using the 
+:meth:`~yt.visualization.particle_plots.ParticlePhasePlot.from_profile` method. In this example,
+we have also used the ``weight_field`` argument to compute the average ``particle_mass`` in each
+pixel, instead of the total:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+   ad = ds.all_data()
+
+   profile = yt.create_profile(ad, ['particle_velocity_x', 'particle_velocity_y'], ['particle_mass'], 
+                               n_bins=800, weight_field='particle_ones')
+
+   p = yt.ParticlePhasePlot.from_profile(profile)
+   p.set_unit('particle_velocity_x', 'km/s')
+   p.set_unit('particle_velocity_y', 'km/s')
+   p.set_unit('particle_mass', 'Msun')
+   p.set_ylim(-400, 400)
+   p.set_xlim(-400, 400)
+   p.save()
+
+Under the hood, the :class:`~yt.data_objects.profiles.ParticleProfile` class works a lot like a 
+:class:`~yt.data_objects.profiles.Profile2D` object, except that instead of just binning the 
+particle field, you can also use higher-order deposition functions like the cloud-in-cell 
+interpolant to spread out the particle quantities over a few cells in the profile. The 
+:func:`~yt.data_objects.profiles.create_profile` will automatically detect when all the fields
+you pass in are particle fields, and return a :class:`~yt.data_objects.profiles.ParticleProfile`
+if that is the case. For a complete description of the :class:`~yt.data_objects.profiles.ParticleProfile`
+class please consult the reference documentation.
+
 .. _interactive-plotting:
 
 Interactive Plotting

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -22,6 +22,52 @@
 The volume renderer is also threaded using OpenMP.  Many of the commands
 (including `snapshot`) will accept a `num_threads` option.
 
+Scene Interface
+===============
+
+Tutorial
+--------
+
+The scene interface is the product of a refactor to the volume rendering
+framework, and is meant to provide a more modular interface for creating
+renderings of arbitrary data sources. As such, manual composition of a 
+scene can require a bit more work, but we will also provide several helper
+functions that attempt to create satisfactory default volume renderings.
+
+Here is a working example for rendering the IsolatedGalaxy dataset.
+
+.. python-script::
+
+  import yt
+  # load the data
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  # volume render the 'density' field, and save the resulting image
+  im, sc = yt.volume_rendering(ds, 'density', fname='test_rendering.png')
+
+  # im is the image that was generated.
+  # sc is an instance of a Scene object, which allows you to further refine
+  # your renderings.
+
+When the volume_rendering function is called, first an empty 'Scene' object is
+created. Next, a 'VolumeSource' object is created, which decomposes the grids
+into an AMRKDTree to provide back-to-front rendering of fixed-resolution blocks
+of data.  When the VolumeSource object is created, by default it will create a
+transfer function based on the extrema of the field that you are rendering. The
+transfer function describes how to 'transfer' data values to color and
+brightness.
+
+Next, a Camera object is created, which by default also creates a default,
+plane-parallel, Lens object. The analog to a real camera is intentional.
+A camera can take a picture of a scene from a particular point in time and
+space.  However, you can swap in different lenses like, for example, a fisheye
+lens. Once the camera is added to the scene object, we call the main method of
+the Scene class, 'render'. When called, the scene will loop through all of the
+RenderSource objects that have been added, and integrate the radiative transfer
+equation through the volume. Finally, the image and scene object is returned to
+the user.
+
+Camera Interface
+================
+
 Tutorial
 --------
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 vr_refactor_todo.markdown
--- /dev/null
+++ b/vr_refactor_todo.markdown
@@ -0,0 +1,30 @@
+Todo
+----
+
+Known Issues:
+
+* ~~FRB Off-axis projections are broken I think. Currently should raise not-implemented error.~~
+* Parallelism
+  * Need to write parallel z-buffer reduce.
+  * Need to verify brick ordering
+* Alpha blending level for opaque sources such as grid lines/domains/etc may
+  not currently be ideal. Difficult to get it right when the transparent VRs
+  have wildly different levels. One approach would be to normalize the transfer
+  function such that the integral of the TF multiplied by the depth of the 
+  rendering is equal to 1. With grey opacity on, all of these things get a bit
+  easier, in my opinion
+
+Documentation:
+
+* ~~Scene~~
+* ~~Camera~~
+* Lens
+* Narrative
+  * Have started, but more work to do. Replaced at least the tutorial
+    rendering, which saves a number of lines!
+* Cookbooks
+  * All relevant cookbooks have been updated
+* Parallelism
+* OpaqueSource
+* RenderSource
+* Narrative Developer Documentation

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -120,9 +120,10 @@
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    DatasetSeries, \
-    ImageArray, particle_filter, create_profile, \
-    Profile1D, Profile2D, Profile3D
+    DatasetSeries, ImageArray, \
+    particle_filter, add_particle_filter, \
+    create_profile, Profile1D, Profile2D, Profile3D, \
+    ParticleProfile
 
 # For backwards compatibility
 TimeSeriesData = deprecated_class(DatasetSeries)
@@ -148,11 +149,15 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, add_cmap, make_colormap, ProfilePlot, PhasePlot
+    show_colormaps, add_cmap, make_colormap, \
+    ProfilePlot, PhasePlot, ParticlePhasePlot, \
+    ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
 
 from yt.visualization.volume_rendering.api import \
-    off_axis_projection, ColorTransferFunction, \
-    TransferFunctionHelper, TransferFunction, MultiVariateTransferFunction
+    volume_render, ColorTransferFunction, TransferFunction, \
+    off_axis_projection
+#    TransferFunctionHelper, MultiVariateTransferFunction
+#    off_axis_projection
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_objects, enable_parallelism, communication_system

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -27,15 +27,13 @@
 from yt.funcs import *
 from yt.utilities.physical_constants import mp, kboltz
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_objects
+     parallel_objects
 from yt.units.yt_array import uconcatenate
 
 n_kT = 10000
 kT_min = 8.08e-2
 kT_max = 50.
 
-comm = communication_system.communicators[-1]
-
 class PhotonModel(object):
 
     def __init__(self):

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -15,7 +15,7 @@
 from yt.utilities.orientation import Orientation
 from yt.utilities.fits_image import FITSImageBuffer, sanitize_fits_unit, \
     create_sky_wcs
-from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.visualization.volume_rendering.off_axis_projection import off_axis_projection
 from yt.funcs import get_pbar
 from yt.utilities.physical_constants import clight, mh
 import yt.units.dimensions as ytdims
@@ -33,7 +33,7 @@
         def _v_los(field, data):
             return data.ds.arr(data["zeros"], "cm/s")
     elif isinstance(normal, string_types):
-        def _v_los(field, data): 
+        def _v_los(field, data):
             return -data["velocity_%s" % normal]
     else:
         orient = Orientation(normal)
@@ -45,16 +45,16 @@
             return -vz
     return _v_los
 
-fits_info = {"velocity":("m/s","VELOCITY","v"),
-             "frequency":("Hz","FREQUENCY","f"),
-             "energy":("eV","ENERGY","E"),
-             "wavelength":("angstrom","WAVELENG","lambda")}
+fits_info = {"velocity":("m/s","VOPT","v"),
+             "frequency":("Hz","FREQ","f"),
+             "energy":("eV","ENER","E"),
+             "wavelength":("angstrom","WAVE","lambda")}
 
 class PPVCube(object):
-    def __init__(self, ds, normal, field, velocity_bounds, center="c", 
+    def __init__(self, ds, normal, field, velocity_bounds, center="c",
                  width=(1.0,"unitary"), dims=100, thermal_broad=False,
                  atomic_weight=56., depth=(1.0,"unitary"), depth_res=256,
-                 method="integrate", no_shifting=False,
+                 method="integrate", weight_field=None, no_shifting=False,
                  north_vector=None, no_ghost=True):
         r""" Initialize a PPVCube object.
 
@@ -70,7 +70,7 @@
             The field to project.
         velocity_bounds : tuple
             A 4-tuple of (vmin, vmax, nbins, units) for the velocity bounds to
-            integrate over. 
+            integrate over.
         center : A sequence of floats, a string, or a tuple.
             The coordinate of the center of the image. If set to 'c', 'center' or
             left blank, the plot is centered on the middle of the domain. If set to
@@ -84,10 +84,10 @@
         width : float, tuple, or YTQuantity.
             The width of the projection. A float will assume the width is in code units.
             A (value, unit) tuple or YTQuantity allows for the units of the width to be
-            specified. Implies width = height, e.g. the aspect ratio of the PPVCube's 
+            specified. Implies width = height, e.g. the aspect ratio of the PPVCube's
             spatial dimensions is 1.
         dims : integer, optional
-            The spatial resolution of the cube. Implies nx = ny, e.g. the 
+            The spatial resolution of the cube. Implies nx = ny, e.g. the
             aspect ratio of the PPVCube's spatial dimensions is 1.
         atomic_weight : float, optional
             Set this value to the atomic weight of the particle that is emitting the line
@@ -102,12 +102,14 @@
             Set the projection method to be used.
             "integrate" : line of sight integration over the line element.
             "sum" : straight summation over the line of sight.
+        weight_field : string, optional
+            The name of the weighting field.  Set to None for no weight.
         no_shifting : boolean, optional
             If set, no shifting due to velocity will occur but only thermal broadening.
             Should not be set when *thermal_broad* is False, otherwise nothing happens!
         north_vector : a sequence of floats
-            A vector defining the 'up' direction. This option sets the orientation of 
-            the plane of projection. If not set, an arbitrary grid-aligned north_vector 
+            A vector defining the 'up' direction. This option sets the orientation of
+            the plane of projection. If not set, an arbitrary grid-aligned north_vector
             is chosen. Ignored in the case of on-axis cubes.
         no_ghost: bool, optional
             Optimization option for off-axis cases. If True, homogenized bricks will
@@ -173,7 +175,7 @@
         _intensity = self.create_intensity()
         self.ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
 
-        if method == "integrate":
+        if method == "integrate" and weight_field is None:
             self.proj_units = str(ds.quan(1.0, self.field_units+"*cm").units)
         elif method == "sum":
             self.proj_units = self.field_units
@@ -183,15 +185,16 @@
         for sto, i in parallel_objects(range(self.nv), storage=storage):
             self.current_v = self.vmid_cgs[i]
             if isinstance(normal, string_types):
-                prj = ds.proj("intensity", ds.coordinates.axis_id[normal], method=method)
+                prj = ds.proj("intensity", ds.coordinates.axis_id[normal], method=method,
+                              weight_field=weight_field)
                 buf = prj.to_frb(width, self.nx, center=self.center)["intensity"]
             else:
-                buf = off_axis_projection(ds, self.center, normal, width,
+                buf, sc = off_axis_projection(ds, self.center, normal, width,
                                           (self.nx, self.ny, depth_res), "intensity",
                                           north_vector=north_vector, no_ghost=no_ghost,
-                                          method=method).swapaxes(0,1)
+                                          method=method, weight=weight_field)
             sto.result_id = i
-            sto.result = buf
+            sto.result = buf.swapaxes(0,1)
             pbar.update(i)
         pbar.finish()
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -21,7 +21,7 @@
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
 from yt.units.yt_array import YTQuantity
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.visualization.volume_rendering.api import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
 from yt import units
@@ -221,16 +221,16 @@
         self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
         setup_sunyaev_zeldovich_fields(self.ds)
 
-        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
-        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
-        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
-        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
-        omega1  = omega1/(Te*Te) - 1.
+        dens   = off_axis_projection(self.ds, ctr, L, w, nx, "density")[0]
+        Te     = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")[0]/dens
+        bpar   = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par"[0])/dens
+        omega1 = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")[0]/dens
+        omega1 = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
-            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
-            sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
+            bperp2 = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")[0]/dens
+            sigma1 = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")[0]/dens
+            sigma1 = sigma1/Te - bpar
+            kappa1 = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")[0]/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,7 +61,8 @@
     answer_tests_url = 'http://answers.yt-project.org/{1}_{2}',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
-    ignore_invalid_unit_operation_errors = 'False'
+    ignore_invalid_unit_operation_errors = 'False',
+    chunk_size = '1000'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,10 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import glob
-import numpy as np
-import os, os.path, inspect, types
-from functools import wraps
+import os, os.path, types
 
 # Named imports
 from yt.funcs import *
@@ -25,6 +22,7 @@
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):
     """
@@ -77,8 +75,13 @@
     for n, c in output_type_registry.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)
+
+    # convert to classes
+    candidates = [output_type_registry[c] for c in candidates]
+    # Find only the lowest subclasses, i.e. most specialised front ends
+    candidates = find_lowest_subclasses(candidates)
     if len(candidates) == 1:
-        return output_type_registry[candidates[0]](*args, **kwargs)
+        return candidates[0](*args, **kwargs)
     if len(candidates) == 0:
         if ytcfg.get("yt", "enzo_db") != '' \
            and len(args) == 1 \
@@ -91,6 +94,7 @@
                 return output_type_registry[n](fn)
         mylog.error("Couldn't figure out output type for %s", args[0])
         raise YTOutputNotIdentified(args, kwargs)
+
     mylog.error("Multiple output type candidates for %s:", args[0])
     for c in candidates:
         mylog.error("    Possible: %s", c)
@@ -140,11 +144,11 @@
         valid_file = True
     else:
         valid_file = False
-        
+
     if not valid_file:
-        raise YTOutputNotIdentified((parameter_filename, simulation_type), 
+        raise YTOutputNotIdentified((parameter_filename, simulation_type),
                                     dict(find_outputs=find_outputs))
-    
-    return simulation_time_series_registry[simulation_type](parameter_filename, 
+
+    return simulation_time_series_registry[simulation_type](parameter_filename,
                                                             find_outputs=find_outputs)
 

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -35,7 +35,8 @@
     create_profile, \
     Profile1D, \
     Profile2D, \
-    Profile3D
+    Profile3D, \
+    ParticleProfile
 
 from .time_series import \
     DatasetSeries, \
@@ -54,4 +55,5 @@
     ImageArray
 
 from .particle_filters import \
-    particle_filter
+    particle_filter, \
+    add_particle_filter

diff -r 96208a52266c8a7e08ae2219d22525cea009be8a -r dd3ef0c68dc2ffb63480484f94148690faf94669 yt/data_objects/particle_filters.py
--- a/yt/data_objects/particle_filters.py
+++ b/yt/data_objects/particle_filters.py
@@ -16,7 +16,9 @@
 
 import numpy as np
 import copy
+
 from contextlib import contextmanager
+from functools import wraps
 
 from yt.fields.field_info_container import \
     NullFunc, TranslationFunc
@@ -76,13 +78,111 @@
             new_fi._function = TranslationFunc(old_fi.name)
         return new_fi
 
-def add_particle_filter(name, function, requires = None, filtered_type = "all"):
-    if requires is None: requires = []
+
+def add_particle_filter(name, function, requires=None, filtered_type="all"):
+    r"""Create a new particle filter in the global namespace of filters
+
+    A particle filter is a short name that corresponds to an algorithm for
+    filtering a set of particles into a subset.  This is useful for creating new
+    particle types based on a cut on a particle field, such as particle mass, ID
+    or type.
+
+    .. note::
+       Alternatively, you can make use of the
+       :func:`~yt.data_objects.particle_filters.particle_filter` decorator to
+       define a new particle filter.
+
+    Parameters
+    ----------
+    name : string
+        The name of the particle filter.  New particle fields with particle type
+        set by this name will be added to any dataset that enables this particle
+        filter.
+    function : reference to a function
+        The function that defines the particle filter.  The function should
+        accept two arguments: a reference to a particle filter object and a
+        reference to an abstract yt data object.  See the example below.
+    requires : a list of field names
+        A list of field names required by the particle filter definition.
+    filtered_type : string
+        The name of the particle type to be filtered.
+
+    Example
+    -------
+
+    >>> import yt
+
+    >>> def _stars(pfilter, data):
+    ...     return data[(pfilter.filtered_type, 'particle_type')] == 2
+
+    >>> yt.add_particle_filter("stars", function=_stars, filtered_type='all',
+    ...                        requires=["particle_type"])
+
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> ds.add_particle_filter('stars')
+    >>> ad = ds.all_data()
+    >>> print (ad['stars', 'particle_mass'])
+    [  1.68243760e+38   1.65690882e+38   1.65813321e+38 ...,   2.04238266e+38
+       2.04523901e+38   2.04770938e+38] g
+
+    """
+    if requires is None:
+        requires = []
     filter = ParticleFilter(name, function, requires, filtered_type)
     filter_registry[name].append(filter)
 
-def particle_filter(name, requires = None, filtered_type = "all"):
-    def _pfilter(func):
-        add_particle_filter(name, func, requires, filtered_type)
-        return func
-    return _pfilter
+
+def particle_filter(name=None, requires=None, filtered_type='all'):
+    r"""A decorator that adds a new particle filter
+
+    A particle filter is a short name that corresponds to an algorithm for
+    filtering a set of particles into a subset.  This is useful for creating new
+    particle types based on a cut on a particle field, such as particle mass, ID
+    or type.
+
+    .. note::
+       Alternatively, you can make use of the
+       :func:`~yt.data_objects.particle_filters.add_particle_filter` function
+       to define a new particle filter using a more declarative syntax.
+
+    Parameters
+    ----------
+    name : string
+        The name of the particle filter.  New particle fields with particle type
+        set by this name will be added to any dataset that enables this particle
+        filter.  If not set, the name will be inferred from the name of the
+        filter function.
+    function : reference to a function
+        The function that defines the particle filter.  The function should
+        accept two arguments: a reference to a particle filter object and a
+        reference to an abstract yt data object.  See the example below.
+    requires : a list of field names
+        A list of field names required by the particle filter definition.
+    filtered_type : string
+        The name of the particle type to be filtered.
+
+    Example
+    -------
+
+    >>> import yt
+
+    >>> # define a filter named "stars"
+    >>> @yt.particle_filter(requires=["particle_type"], filtered_type='all')
+    >>> def stars(pfilter, data):
+    ...     return data[(pfilter.filtered_type, 'particle_type')] == 2
+
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> ds.add_particle_filter('stars')
+    >>> ad = ds.all_data()
+    >>> print (ad['stars', 'particle_mass'])
+    [  1.68243760e+38   1.65690882e+38   1.65813321e+38 ...,   2.04238266e+38
+       2.04523901e+38   2.04770938e+38] g
+
+    """
+    def wrapper(function):
+        if name is None:
+            used_name = function.__name__
+        else:
+            used_name = name
+        return add_particle_filter(used_name, function, requires, filtered_type)
+    return wrapper

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/f69fd8320681/
Changeset:   f69fd8320681
Branch:      yt
User:        atmyers
Date:        2015-06-02 18:49:53+00:00
Summary:     initial skeleton for MeshSource
Affected #:  1 file

diff -r dd3ef0c68dc2ffb63480484f94148690faf94669 -r f69fd8320681deb977216401e6aceeb6ccfbf2bc yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -223,6 +223,113 @@
         return disp
 
 
+class MeshSource(RenderSource):
+
+    """docstring for MeshSource"""
+    _image = None
+    data_source = None
+
+    def __init__(self, data_source, field, auto=True):
+        r"""Initialize a new unstructured source for rendering.
+
+        A :class:`MeshSource` provides the framework to volume render
+        unstructured mesh data.
+
+        Parameters
+        ----------
+        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+            This is the source to be rendered, which can be any arbitrary yt
+            data object or dataset.
+        fields : string
+            The name of the field to be rendered.
+        auto: bool, optional
+            If True, will build a default PolygonMesh based on the data.
+
+        Examples
+        --------
+        >>> source = MeshSource(ds, ('all', 'convected')
+
+        """
+        super(MeshSource, self).__init__()
+        self.data_source = data_source_or_all(data_source)
+        field = self.data_source._determine_fields(field)[0]
+        self.field = field
+        self.volume = None
+        self.current_image = None
+        self.double_check = False
+        self.num_threads = 0
+        self.num_samples = 10
+        self.sampler_type = 'volume-render'
+
+        # Error checking
+        assert(self.field is not None)
+        assert(self.data_source is not None)
+
+        if auto:
+            self.build_defaults()
+
+    def build_defaults(self):
+        self.build_default_volume()
+
+    def _validate(self):
+        """Make sure that all dependencies have been met"""
+        if self.data_source is None:
+            raise RuntimeError("Data source not initialized")
+
+        if self.volume is None:
+            raise RuntimeError("Volume not initialized")
+
+    def build_default_volume(self):
+        self.volume = PolygonMesh(self.data_source.pf,
+                                  data_source=self.data_source)
+        log_fields = [self.data_source.pf.field_info[self.field].take_log]
+        mylog.debug('Log Fields:' + str(log_fields))
+        self.volume.set_fields([self.field], log_fields, True)
+
+    def set_volume(self, volume):
+        assert(isinstance(volume, PolygonMesh))
+        del self.volume
+        self.volume = volume
+
+    def set_field(self, field, no_ghost=True):
+        field = self.data_source._determine_fields(field)[0]
+        log_field = self.data_source.pf.field_info[field].take_log
+        self.volume.set_fields(field, [log_field], no_ghost)
+        self.field = field
+
+    def set_fields(self, fields, no_ghost=True):
+        fields = self.data_source._determine_fields(fields)
+        log_fields = [self.data_source.ds.field_info[f].take_log
+                      for f in fields]
+        self.volume.set_fields(fields, log_fields, no_ghost)
+        self.field = fields
+
+    def set_sampler(self, camera):
+        """docstring for add_sampler"""
+        if self.sampler_type == 'surface':
+            sampler = new_volume_render_sampler(camera, self)
+        elif self.sampler_type == 'projection':
+            sampler = new_projection_sampler(camera, self)
+        else:
+            NotImplementedError("%s not implemented yet" % self.sampler_type)
+        self.sampler = sampler
+        assert(self.sampler is not None)
+
+    def render(self, camera, zbuffer=None):
+        raise NotImplementedError
+
+    def finalize_image(self, camera, image):
+        image = self.volume.reduce_tree_images(image,
+                                               camera.lens.viewpoint)
+        if self.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image
+
+    def __repr__(self):
+        disp = "<Mesh Source>:%s " % str(self.data_source)
+        return disp
+
+
 class PointsSource(OpaqueSource):
 
     """Add set of opaque points to a scene."""


https://bitbucket.org/yt_analysis/yt/commits/3b3324f45db7/
Changeset:   3b3324f45db7
Branch:      yt
User:        atmyers
Date:        2015-06-02 18:50:41+00:00
Summary:     fix typo in VolumeSource example
Affected #:  1 file

diff -r f69fd8320681deb977216401e6aceeb6ccfbf2bc -r 3b3324f45db7f563fb538bc129791c0f3716a57b yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -88,7 +88,7 @@
 
         Examples
         --------
-        >>> source = RenderSource(ds, 'density')
+        >>> source = VolumeSource(ds, 'density')
 
         """
         super(VolumeSource, self).__init__()


https://bitbucket.org/yt_analysis/yt/commits/2141613b73cb/
Changeset:   2141613b73cb
Branch:      yt
User:        atmyers
Date:        2015-06-02 23:49:47+00:00
Summary:     moving some more of the FEM render code in
Affected #:  5 files

diff -r 3b3324f45db7f563fb538bc129791c0f3716a57b -r 2141613b73cbc6b72b0539edf2f36fc36d587011 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -25,6 +25,7 @@
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
     FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
+#from mesh_traversal import EmbreeVolume
 
 from cython.parallel import prange, parallel, threadid
 
@@ -816,6 +817,14 @@
         #free(self.light_dir)
         #free(self.light_rgba)
 
+cdef class MeshSampler(ImageSampler):
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, scene, int num_threads = 0):
+        pass
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 3b3324f45db7f563fb538bc129791c0f3716a57b -r 2141613b73cbc6b72b0539edf2f36fc36d587011 yt/utilities/lib/mesh_traversal.pxd
--- a/yt/utilities/lib/mesh_traversal.pxd
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -1,3 +1,7 @@
-cimport rtcore
-cimport rtcore_scene
-cimport rtcore_ray
+cimport cython
+cimport numpy as np
+import numpy as np
+cimport pyembree.rtcore_scene as rtcs
+
+cdef class EmbreeVolume:
+    cdef rtcs.RTCScene scene_i

diff -r 3b3324f45db7f563fb538bc129791c0f3716a57b -r 2141613b73cbc6b72b0539edf2f36fc36d587011 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,2 +1,23 @@
-def hello_world():
-    print "Hello!"
+cimport cython
+cimport numpy as np
+import numpy as np
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+cimport pyembree.rtcore_geometry as rtcg
+cimport pyembree.rtcore_scene as rtcs
+
+cdef void error_printer(const rtc.RTCError code, const char *_str):
+    print "ERROR CAUGHT IN EMBREE"
+    rtc.print_error(code)
+    print "ERROR MESSAGE:", _str
+
+
+cdef class EmbreeVolume:
+
+    def __init__(self):
+        rtc.rtcInit(NULL)
+        rtc.rtcSetErrorFunction(error_printer)
+        self.scene_i = rtcs.rtcNewScene(rtcs.RTC_SCENE_STATIC, rtcs.RTC_INTERSECT1)
+
+    def __dealloc__(self):
+        rtcs.rtcDeleteScene(self.scene_i)

diff -r 3b3324f45db7f563fb538bc129791c0f3716a57b -r 2141613b73cbc6b72b0539edf2f36fc36d587011 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -165,10 +165,15 @@
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     include_dirs = check_for_pyembree()
     if include_dirs is not None:
+        config.add_extension("mesh_construction",
+                             ["yt/utilities/lib/mesh_construction.pyx"],
+                             include_dirs=["yt/utilities/lib", include_dirs],
+                             libraries=["embree"], language="c++",
+                             depends=[])
         config.add_extension("mesh_traversal",
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
-                             libraries=["m"], language="c++",
+                             libraries=["embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
     config.add_subpackage("tests")
 

diff -r 3b3324f45db7f563fb538bc129791c0f3716a57b -r 2141613b73cbc6b72b0539edf2f36fc36d587011 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -22,6 +22,10 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler
 from yt.visualization.image_writer import apply_colormap
+#from yt.utilities.lib.mesh_traversal.pyx import \
+#    EmbreeVolume
+from yt.utilities.lib.mesh_construction import \
+    ElementMesh
 
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \
@@ -280,16 +284,27 @@
             raise RuntimeError("Volume not initialized")
 
     def build_default_volume(self):
-        self.volume = PolygonMesh(self.data_source.pf,
-                                  data_source=self.data_source)
+
+        mesh = self.ds.index.meshes[0]
+        vertices = mesh.connectivity_coords
+        indices = mesh.connectivity_indices
+        sampler_type = 'surface'
+        field_data = 0
+
+#        self.scene = EmbreeVolume()
+        self.scene = 0
+
+        self.volume = ElementMesh(self.scene,
+                                  vertices,
+                                  indices,
+                                  field_data,
+                                  sampler_type)
+
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
         mylog.debug('Log Fields:' + str(log_fields))
-        self.volume.set_fields([self.field], log_fields, True)
 
     def set_volume(self, volume):
-        assert(isinstance(volume, PolygonMesh))
-        del self.volume
-        self.volume = volume
+        pass
 
     def set_field(self, field, no_ghost=True):
         field = self.data_source._determine_fields(field)[0]
@@ -316,7 +331,16 @@
         assert(self.sampler is not None)
 
     def render(self, camera, zbuffer=None):
-        raise NotImplementedError
+
+        self.set_sampler(camera)
+
+        mylog.debug("Using sampler %s" % self.sampler)
+        self.sampler(scene, num_threads=self.num_threads)
+        mylog.debug("Done casting rays")
+
+        self.current_image = self.finalize_image(camera, self.sampler.aimage)
+
+        return self.current_image
 
     def finalize_image(self, camera, image):
         image = self.volume.reduce_tree_images(image,


https://bitbucket.org/yt_analysis/yt/commits/58258a78924e/
Changeset:   58258a78924e
Branch:      yt
User:        atmyers
Date:        2015-06-03 00:28:57+00:00
Summary:     make sure to actually link against embree
Affected #:  1 file

diff -r dd3ef0c68dc2ffb63480484f94148690faf94669 -r 58258a78924ea8ff5099ee267fce244de96e842a yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -168,7 +168,7 @@
         config.add_extension("mesh_traversal",
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
-                             libraries=["m"], language="c++",
+                             libraries=["embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
     config.add_subpackage("tests")
 


https://bitbucket.org/yt_analysis/yt/commits/9dfe9f9e03f4/
Changeset:   9dfe9f9e03f4
Branch:      yt
User:        atmyers
Date:        2015-06-03 00:29:11+00:00
Summary:     adding a skeleton for the EmbreeVolume
Affected #:  3 files

diff -r 58258a78924ea8ff5099ee267fce244de96e842a -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e yt/utilities/lib/mesh_traversal.pxd
--- a/yt/utilities/lib/mesh_traversal.pxd
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -1,3 +1,6 @@
-cimport rtcore
-cimport rtcore_scene
-cimport rtcore_ray
+cimport pyembree.rtcore
+cimport pyembree.rtcore_scene as rtcs
+cimport pyembree.rtcore_ray
+
+cdef class EmbreeVolume:
+    cdef rtcs.RTCScene scene_i

diff -r 58258a78924ea8ff5099ee267fce244de96e842a -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,2 +1,25 @@
+cimport cython
+cimport numpy as np
+import numpy as np
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+cimport pyembree.rtcore_geometry as rtcg
+cimport pyembree.rtcore_scene as rtcs
+
 def hello_world():
     print "Hello!"
+
+cdef void error_printer(const rtc.RTCError code, const char *_str):
+    print "ERROR CAUGHT IN EMBREE"
+    rtc.print_error(code)
+    print "ERROR MESSAGE:", _str
+
+cdef class EmbreeVolume:
+
+    def __init__(self):
+        rtc.rtcInit(NULL)
+        rtc.rtcSetErrorFunction(error_printer)
+        self.scene_i = rtcs.rtcNewScene(rtcs.RTC_SCENE_STATIC, rtcs.RTC_INTERSECT1)
+
+    def __dealloc__(self):
+        rtcs.rtcDeleteScene(self.scene_i)

diff -r 58258a78924ea8ff5099ee267fce244de96e842a -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -22,6 +22,7 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler
 from yt.visualization.image_writer import apply_colormap
+from yt.utilities.lib.mesh_traversal import EmbreeVolume
 
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \


https://bitbucket.org/yt_analysis/yt/commits/b593e7bc1fd2/
Changeset:   b593e7bc1fd2
Branch:      yt
User:        atmyers
Date:        2015-06-03 04:51:10+00:00
Summary:     getting a MeshSampler class to compile
Affected #:  3 files

diff -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e -r b593e7bc1fd27da7e3c0b8af581e7d0b7bf1639e yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -19,6 +19,46 @@
 cimport cython
 cimport kdtree_utils
 
+cdef struct ImageContainer:
+    np.float64_t *vp_pos
+    np.float64_t *vp_dir
+    np.float64_t *center
+    np.float64_t *image
+    np.float64_t *zbuffer
+    np.float64_t pdx, pdy
+    np.float64_t bounds[4]
+    int nv[2]
+    int vp_strides[3]
+    int im_strides[3]
+    int vd_strides[3]
+    np.float64_t *x_vec
+    np.float64_t *y_vec
+
+ctypedef void sampler_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data) nogil
+
+
+cdef class ImageSampler:
+    cdef ImageContainer *image
+    cdef sampler_function *sampler
+    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object azbuffer
+    cdef void *supp_data
+    cdef np.float64_t width[3]
+
+    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
+
+    cdef void calculate_extent(self, np.float64_t extrema[4],
+                               VolumeContainer *vc) nogil
+
+    cdef void setup(self, PartitionedGrid pg)
+
 cdef struct VolumeContainer:
     int n_fields
     np.float64_t **data

diff -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e -r b593e7bc1fd27da7e3c0b8af581e7d0b7bf1639e yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -30,15 +30,6 @@
 
 DEF Nch = 4
 
-ctypedef void sampler_function(
-                VolumeContainer *vc,
-                np.float64_t v_pos[3],
-                np.float64_t v_dir[3],
-                np.float64_t enter_t,
-                np.float64_t exit_t,
-                int index[3],
-                void *data) nogil
-
 cdef class PartitionedGrid:
 
     @cython.boundscheck(False)
@@ -183,32 +174,12 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
-cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
-    np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
-    np.float64_t pdx, pdy
-    np.float64_t bounds[4]
-    int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
-    np.float64_t *x_vec
-    np.float64_t *y_vec
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
     void *supp_data
 
 cdef class ImageSampler:
-    cdef ImageContainer *image
-    cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
-    cdef public object azbuffer
-    cdef void *supp_data
-    cdef np.float64_t width[3]
     def __init__(self,
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,

diff -r 9dfe9f9e03f4e17fa7ae439736e406c4b0ccd56e -r b593e7bc1fd27da7e3c0b8af581e7d0b7bf1639e yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -5,9 +5,7 @@
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_scene as rtcs
-
-def hello_world():
-    print "Hello!"
+from grid_traversal cimport ImageSampler
 
 cdef void error_printer(const rtc.RTCError code, const char *_str):
     print "ERROR CAUGHT IN EMBREE"
@@ -23,3 +21,6 @@
 
     def __dealloc__(self):
         rtcs.rtcDeleteScene(self.scene_i)
+
+cdef class MeshSampler(ImageSampler):
+    pass


https://bitbucket.org/yt_analysis/yt/commits/883deccc7a05/
Changeset:   883deccc7a05
Branch:      yt
User:        atmyers
Date:        2015-06-03 05:02:35+00:00
Summary:     hooking up the Mesh Sampler
Affected #:  2 files

diff -r b593e7bc1fd27da7e3c0b8af581e7d0b7bf1639e -r 883deccc7a05c440d20952e1b6854665a4fae38d yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -23,4 +23,16 @@
         rtcs.rtcDeleteScene(self.scene_i)
 
 cdef class MeshSampler(ImageSampler):
-    pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, EmbreeVolume volume, int num_threads = 0):
+        '''
+
+        This function is supposed to cast the rays and return the
+        image.
+
+        '''
+
+        pass

diff -r b593e7bc1fd27da7e3c0b8af581e7d0b7bf1639e -r 883deccc7a05c440d20952e1b6854665a4fae38d yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -22,7 +22,8 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler
 from yt.visualization.image_writer import apply_colormap
-from yt.utilities.lib.mesh_traversal import EmbreeVolume
+from yt.utilities.lib.mesh_traversal import EmbreeVolume, \
+    MeshSampler
 
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \


https://bitbucket.org/yt_analysis/yt/commits/7af013f2df58/
Changeset:   7af013f2df58
Branch:      yt
User:        atmyers
Date:        2015-06-03 05:06:52+00:00
Summary:     merging
Affected #:  5 files

diff -r 883deccc7a05c440d20952e1b6854665a4fae38d -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -25,6 +25,7 @@
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
     FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
+#from mesh_traversal import EmbreeVolume
 
 from cython.parallel import prange, parallel, threadid
 
@@ -787,6 +788,14 @@
         #free(self.light_dir)
         #free(self.light_rgba)
 
+cdef class MeshSampler(ImageSampler):
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, scene, int num_threads = 0):
+        pass
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 883deccc7a05c440d20952e1b6854665a4fae38d -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 yt/utilities/lib/mesh_traversal.pxd
--- a/yt/utilities/lib/mesh_traversal.pxd
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -4,3 +4,4 @@
 
 cdef class EmbreeVolume:
     cdef rtcs.RTCScene scene_i
+

diff -r 883deccc7a05c440d20952e1b6854665a4fae38d -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -165,6 +165,11 @@
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     include_dirs = check_for_pyembree()
     if include_dirs is not None:
+        config.add_extension("mesh_construction",
+                             ["yt/utilities/lib/mesh_construction.pyx"],
+                             include_dirs=["yt/utilities/lib", include_dirs],
+                             libraries=["embree"], language="c++",
+                             depends=[])
         config.add_extension("mesh_traversal",
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],

diff -r 883deccc7a05c440d20952e1b6854665a4fae38d -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -24,6 +24,8 @@
 from yt.visualization.image_writer import apply_colormap
 from yt.utilities.lib.mesh_traversal import EmbreeVolume, \
     MeshSampler
+from yt.utilities.lib.mesh_construction import \
+    ElementMesh
 
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \
@@ -90,7 +92,7 @@
 
         Examples
         --------
-        >>> source = RenderSource(ds, 'density')
+        >>> source = VolumeSource(ds, 'density')
 
         """
         super(VolumeSource, self).__init__()
@@ -225,6 +227,133 @@
         return disp
 
 
+class MeshSource(RenderSource):
+
+    """docstring for MeshSource"""
+    _image = None
+    data_source = None
+
+    def __init__(self, data_source, field, auto=True):
+        r"""Initialize a new unstructured source for rendering.
+
+        A :class:`MeshSource` provides the framework to volume render
+        unstructured mesh data.
+
+        Parameters
+        ----------
+        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+            This is the source to be rendered, which can be any arbitrary yt
+            data object or dataset.
+        fields : string
+            The name of the field to be rendered.
+        auto: bool, optional
+            If True, will build a default PolygonMesh based on the data.
+
+        Examples
+        --------
+        >>> source = MeshSource(ds, ('all', 'convected')
+
+        """
+        super(MeshSource, self).__init__()
+        self.data_source = data_source_or_all(data_source)
+        field = self.data_source._determine_fields(field)[0]
+        self.field = field
+        self.volume = None
+        self.current_image = None
+        self.double_check = False
+        self.num_threads = 0
+        self.num_samples = 10
+        self.sampler_type = 'volume-render'
+
+        # Error checking
+        assert(self.field is not None)
+        assert(self.data_source is not None)
+
+        if auto:
+            self.build_defaults()
+
+    def build_defaults(self):
+        self.build_default_volume()
+
+    def _validate(self):
+        """Make sure that all dependencies have been met"""
+        if self.data_source is None:
+            raise RuntimeError("Data source not initialized")
+
+        if self.volume is None:
+            raise RuntimeError("Volume not initialized")
+
+    def build_default_volume(self):
+
+        mesh = self.ds.index.meshes[0]
+        vertices = mesh.connectivity_coords
+        indices = mesh.connectivity_indices
+        sampler_type = 'surface'
+        field_data = 0
+
+#        self.scene = EmbreeVolume()
+        self.scene = 0
+
+        self.volume = ElementMesh(self.scene,
+                                  vertices,
+                                  indices,
+                                  field_data,
+                                  sampler_type)
+
+        log_fields = [self.data_source.pf.field_info[self.field].take_log]
+        mylog.debug('Log Fields:' + str(log_fields))
+
+    def set_volume(self, volume):
+        pass
+
+    def set_field(self, field, no_ghost=True):
+        field = self.data_source._determine_fields(field)[0]
+        log_field = self.data_source.pf.field_info[field].take_log
+        self.volume.set_fields(field, [log_field], no_ghost)
+        self.field = field
+
+    def set_fields(self, fields, no_ghost=True):
+        fields = self.data_source._determine_fields(fields)
+        log_fields = [self.data_source.ds.field_info[f].take_log
+                      for f in fields]
+        self.volume.set_fields(fields, log_fields, no_ghost)
+        self.field = fields
+
+    def set_sampler(self, camera):
+        """docstring for add_sampler"""
+        if self.sampler_type == 'surface':
+            sampler = new_volume_render_sampler(camera, self)
+        elif self.sampler_type == 'projection':
+            sampler = new_projection_sampler(camera, self)
+        else:
+            NotImplementedError("%s not implemented yet" % self.sampler_type)
+        self.sampler = sampler
+        assert(self.sampler is not None)
+
+    def render(self, camera, zbuffer=None):
+
+        self.set_sampler(camera)
+
+        mylog.debug("Using sampler %s" % self.sampler)
+        self.sampler(scene, num_threads=self.num_threads)
+        mylog.debug("Done casting rays")
+
+        self.current_image = self.finalize_image(camera, self.sampler.aimage)
+
+        return self.current_image
+
+    def finalize_image(self, camera, image):
+        image = self.volume.reduce_tree_images(image,
+                                               camera.lens.viewpoint)
+        if self.transfer_function.grey_opacity is False:
+            image[:, :, 3] = 1.0
+        return image
+
+    def __repr__(self):
+        disp = "<Mesh Source>:%s " % str(self.data_source)
+        return disp
+
+
 class PointsSource(OpaqueSource):
 
     """Add set of opaque points to a scene."""


https://bitbucket.org/yt_analysis/yt/commits/9e26e248ede1/
Changeset:   9e26e248ede1
Branch:      yt
User:        atmyers
Date:        2015-06-03 05:23:33+00:00
Summary:     raise an exception instead of calling exit
Affected #:  2 files

diff -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 -r 9e26e248ede15d60de20c23abb319242f7de962a yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -168,12 +168,12 @@
         config.add_extension("mesh_construction",
                              ["yt/utilities/lib/mesh_construction.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
-                             libraries=["embree"], language="c++",
+                             libraries=["m", "embree"], language="c++",
                              depends=[])
         config.add_extension("mesh_traversal",
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
-                             libraries=["embree"], language="c++",
+                             libraries=["m", "embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
     config.add_subpackage("tests")
 

diff -r 7af013f2df58a4c1cb93ae75226cf8aa5dd82760 -r 9e26e248ede15d60de20c23abb319242f7de962a yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -291,14 +291,14 @@
         sampler_type = 'surface'
         field_data = 0
 
-#        self.scene = EmbreeVolume()
-        self.scene = 0
+        # self.scene = EmbreeVolume()
+        # self.scene = 0
 
-        self.volume = ElementMesh(self.scene,
-                                  vertices,
-                                  indices,
-                                  field_data,
-                                  sampler_type)
+        # self.volume = ElementMesh(self.scene,
+        #                           vertices,
+        #                           indices,
+        #                           field_data,
+        #                           sampler_type)
 
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
         mylog.debug('Log Fields:' + str(log_fields))


https://bitbucket.org/yt_analysis/yt/commits/58370227ee34/
Changeset:   58370227ee34
Branch:      yt
User:        atmyers
Date:        2015-06-03 05:45:14+00:00
Summary:     build up the Element Mesh
Affected #:  1 file

diff -r 9e26e248ede15d60de20c23abb319242f7de962a -r 58370227ee34d117d015a1fdf72c1534151c8993 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -22,6 +22,7 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler
 from yt.visualization.image_writer import apply_colormap
+
 from yt.utilities.lib.mesh_traversal import EmbreeVolume, \
     MeshSampler
 from yt.utilities.lib.mesh_construction import \
@@ -251,7 +252,7 @@
 
         Examples
         --------
-        >>> source = MeshSource(ds, ('all', 'convected')
+        >>> source = MeshSource(ds, ('all', 'convected'))
 
         """
         super(MeshSource, self).__init__()
@@ -262,8 +263,7 @@
         self.current_image = None
         self.double_check = False
         self.num_threads = 0
-        self.num_samples = 10
-        self.sampler_type = 'volume-render'
+        self.sampler_type = 'surface'
 
         # Error checking
         assert(self.field is not None)
@@ -285,20 +285,17 @@
 
     def build_default_volume(self):
 
-        mesh = self.ds.index.meshes[0]
-        vertices = mesh.connectivity_coords
-        indices = mesh.connectivity_indices
-        sampler_type = 'surface'
-        field_data = 0
+        field_data = self.data_source[self.field]
+        vertices = self.data_source.ds.index.meshes[0].connectivity_coords
+        indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
-        # self.scene = EmbreeVolume()
-        # self.scene = 0
+        self.scene = EmbreeVolume()
 
-        # self.volume = ElementMesh(self.scene,
-        #                           vertices,
-        #                           indices,
-        #                           field_data,
-        #                           sampler_type)
+        self.volume = ElementMesh(self.scene,
+                                  vertices,
+                                  indices,
+                                  field_data,
+                                  self.sampler_type)
 
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
         mylog.debug('Log Fields:' + str(log_fields))


https://bitbucket.org/yt_analysis/yt/commits/d2fdca4da644/
Changeset:   d2fdca4da644
Branch:      yt
User:        atmyers
Date:        2015-06-03 22:19:27+00:00
Summary:     not pretty, but functional surface rendering
Affected #:  5 files

diff -r 58370227ee34d117d015a1fdf72c1534151c8993 -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -70,6 +70,7 @@
     np.float64_t idds[3]
     int dims[3]
 
+
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object source_mask

diff -r 58370227ee34d117d015a1fdf72c1534151c8993 -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -25,7 +25,6 @@
     FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
     FIT_eval_transfer_with_light
 from fixed_interpolator cimport *
-#from mesh_traversal import EmbreeVolume
 
 from cython.parallel import prange, parallel, threadid
 
@@ -788,14 +787,6 @@
         #free(self.light_dir)
         #free(self.light_rgba)
 
-cdef class MeshSampler(ImageSampler):
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def __call__(self, scene, int num_threads = 0):
-        pass
-
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

diff -r 58370227ee34d117d015a1fdf72c1534151c8993 -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,11 +1,15 @@
 cimport cython
 cimport numpy as np
 import numpy as np
+from libc.stdlib cimport malloc, free
 cimport pyembree.rtcore as rtc
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_scene as rtcs
-from grid_traversal cimport ImageSampler
+from grid_traversal cimport ImageSampler, \
+    ImageContainer
+from cython.parallel import prange, parallel, threadid
+
 
 cdef void error_printer(const rtc.RTCError code, const char *_str):
     print "ERROR CAUGHT IN EMBREE"
@@ -35,4 +39,73 @@
 
         '''
 
-        pass
+        rtcs.rtcCommit(volume.scene_i)
+        # This routine will iterate over all of the vectors and cast each in
+        # turn.  Might benefit from a more sophisticated intersection check,
+        # like http://courses.csusm.edu/cs697exz/ray_box.htm
+        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef np.int64_t offset
+        cdef np.int64_t iter[4]
+        cdef ImageContainer *im = self.image
+        cdef np.float64_t *v_pos
+        cdef np.float64_t *v_dir
+        cdef np.float64_t rgba[6]
+        cdef np.float64_t extrema[4]
+        cdef np.float64_t max_t
+        hit = 0
+        cdef np.int64_t nx, ny, size
+        cdef np.float64_t px, py
+        cdef np.float64_t width[3]
+        for i in range(3):
+            width[i] = self.width[i]
+        cdef np.ndarray[np.float64_t, ndim=1] data
+        nx = im.nv[0]
+        ny = im.nv[1]
+        size = nx * ny
+        data = np.empty(size, dtype="float64")
+        cdef rtcr.RTCRay ray
+        cdef int vd_i = 0
+        cdef int vd_step = 1
+        if im.vd_strides[0] == -1:
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in range(size):
+                vj = j % ny
+                vi = (j - vj) / ny
+                vj = vj
+                # Dynamically calculate the position
+                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                for i in range(3):
+                    ray.org[i] = v_pos[i]
+                    ray.dir[i] = im.vp_dir[i]
+                ray.tnear = 0.0
+                ray.tfar = 1e37
+                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.mask = -1
+                ray.time = 0
+                vd_i += vd_step
+                rtcs.rtcIntersect(volume.scene_i, ray)
+                data[j] = ray.time
+            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+            free(v_pos)
+        else:
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            # If we do not have a simple image plane, we have to cast all
+            # our rays 
+            for j in range(size):
+                offset = j * 3
+                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
+                    continue
+
+            free(v_dir)
+            free(v_pos)
+        return hit

diff -r 58370227ee34d117d015a1fdf72c1534151c8993 -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -20,7 +20,7 @@
 from .transfer_functions import TransferFunction, \
     ProjectionTransferFunction, ColorTransferFunction
 from .utils import new_volume_render_sampler, data_source_or_all, \
-    get_corners, new_projection_sampler
+    get_corners, new_projection_sampler, new_mesh_render_sampler
 from yt.visualization.image_writer import apply_colormap
 
 from yt.utilities.lib.mesh_traversal import EmbreeVolume, \
@@ -319,9 +319,7 @@
     def set_sampler(self, camera):
         """docstring for add_sampler"""
         if self.sampler_type == 'surface':
-            sampler = new_volume_render_sampler(camera, self)
-        elif self.sampler_type == 'projection':
-            sampler = new_projection_sampler(camera, self)
+            sampler = new_mesh_render_sampler(camera, self)
         else:
             NotImplementedError("%s not implemented yet" % self.sampler_type)
         self.sampler = sampler
@@ -332,20 +330,13 @@
         self.set_sampler(camera)
 
         mylog.debug("Using sampler %s" % self.sampler)
-        self.sampler(scene, num_threads=self.num_threads)
+        self.sampler(self.scene, num_threads=self.num_threads)
         mylog.debug("Done casting rays")
 
-        self.current_image = self.finalize_image(camera, self.sampler.aimage)
+        self.current_image = self.sampler.aimage
 
         return self.current_image
 
-    def finalize_image(self, camera, image):
-        image = self.volume.reduce_tree_images(image,
-                                               camera.lens.viewpoint)
-        if self.transfer_function.grey_opacity is False:
-            image[:, :, 3] = 1.0
-        return image
-
     def __repr__(self):
         disp = "<Mesh Source>:%s " % str(self.data_source)
         return disp

diff -r 58370227ee34d117d015a1fdf72c1534151c8993 -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -2,6 +2,8 @@
 from yt.data_objects.static_output import Dataset
 from yt.utilities.lib.grid_traversal import \
     VolumeRenderSampler, InterpolatedProjectionSampler, ProjectionSampler
+from yt.utilities.lib.mesh_traversal import \
+    MeshSampler
 
 
 def data_source_or_all(data_source):
@@ -10,10 +12,26 @@
     return data_source
 
 
+def new_mesh_render_sampler(camera, render_source):
+    params = camera._get_sampler_params(render_source)
+    args = (
+        params['vp_pos'],
+        params['vp_dir'],
+        params['center'],
+        params['bounds'],
+        params['image'],
+        params['x_vec'],
+        params['y_vec'],
+        params['width'],
+    )
+
+    sampler = MeshSampler(*args)
+    return sampler
+
+
 def new_volume_render_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     params.update(transfer_function=render_source.transfer_function)
-    params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
         params['vp_pos'],


https://bitbucket.org/yt_analysis/yt/commits/95cb21c27196/
Changeset:   95cb21c27196
Branch:      yt
User:        atmyers
Date:        2015-06-03 22:20:03+00:00
Summary:     checking in the mesh construction code
Affected #:  2 files

diff -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 -r 95cb21c27196a1427539671765be74362301c375 yt/utilities/lib/mesh_construction.h
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.h
@@ -0,0 +1,21 @@
+// This array is used to triangulate the hexahedral mesh elements
+// Each element has six faces with two triangles each.
+// The vertex ordering convention is assumed to follow that used
+// here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
+// Note that this is the case for Exodus II data.
+int triangulate_hex[12][3] = {
+  {0, 1, 2}, {0, 2, 3}, // Face is 0 1 2 3 
+  {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
+  {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
+  {1, 2, 6}, {1, 6, 5}, // Face is 1 2 6 5
+  {0, 3, 7}, {0, 7, 4}, // Face is 0 3 7 4
+  {3, 2, 6}, {3, 6, 7}  // Face is 3 2 6 7
+};
+
+// Similarly, this is used to triangulate the tetrahedral cells
+int triangulate_tetra[4][3] = {
+  {0, 1, 2}, 
+  {0, 1, 3},
+  {0, 2, 3},
+  {1, 2, 3}
+};

diff -r d2fdca4da644c7a7d64eb1cdd3ba8da37dbc3462 -r 95cb21c27196a1427539671765be74362301c375 yt/utilities/lib/mesh_construction.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -0,0 +1,326 @@
+cimport numpy as np
+cimport pyembree.rtcore as rtc 
+from mesh_traversal cimport EmbreeVolume
+cimport pyembree.rtcore_geometry as rtcg
+cimport pyembree.rtcore_ray as rtcr
+cimport pyembree.rtcore_geometry_user as rtcgu
+from pyembree.rtcore cimport \
+    Vertex, \
+    Triangle, \
+    Vec3f
+from libc.stdlib cimport malloc, free
+import numpy as np
+
+cdef extern from "mesh_construction.h":
+    int triangulate_hex[12][3]
+    int triangulate_tetra[4][3]
+
+
+cdef double get_value_trilinear(void* userPtr,
+                                rtcr.RTCRay& ray):
+    cdef int ray_id
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef Vec3f* data
+
+    data = <Vec3f*> userPtr
+    ray_id = ray.primID
+
+    u = ray.u
+    v = ray.v
+
+    d0 = data[ray_id].x
+    d1 = data[ray_id].y
+    d2 = data[ray_id].z
+
+    return d0*(1.0 - u - v) + d1*u + d2*v
+
+
+cdef void maximum_intensity(void* userPtr, 
+                            rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = max(ray.time, val)
+    ray.geomID = -1  # reject hit
+
+
+cdef void sample_surface(void* userPtr, 
+                         rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = val
+
+
+cdef class TriangleMesh:
+    r'''
+
+    This class constructs a polygon mesh with triangular elements and 
+    adds it to the scene. 
+
+    Parameters
+    ----------
+
+    scene : EmbreeScene
+        This is the scene to which the constructed polygons will be
+        added.
+    vertices : a np.ndarray of floats. 
+        This specifies the x, y, and z coordinates of the vertices in 
+        the polygon mesh. This should either have the shape 
+        (num_triangles, 3, 3), or the shape (num_vertices, 3), depending
+        on the value of the `indices` parameter.
+    indices : either None, or a np.ndarray of ints
+        If None, then vertices must have the shape (num_triangles, 3, 3).
+        In this case, `vertices` specifies the coordinates of each
+        vertex of each triangle in the mesh, with vertices being 
+        duplicated if they are shared between triangles. For example,
+        if indices is None, then vertices[2][1][0] should give you 
+        the x-coordinate of the 2nd vertex of the 3rd triangle.
+        If indices is a np.ndarray, then it must have the shape
+        (num_triangles, 3), and `vertices` must have the shape
+        (num_vertices, 3). In this case, indices[2][1] tells you 
+        the index of the 2nd vertex of the 3rd triangle in `indices`,
+        while vertices[5][2] tells you the z-coordinate of the 6th
+        vertex in the mesh. Note that the indexing is assumed to be
+        zero-based. In this setup, vertices can be shared between
+        triangles, and the number of vertices can be less than 3 times
+        the number of triangles.
+            
+    '''
+
+    cdef Vertex* vertices
+    cdef Triangle* indices
+    cdef unsigned int mesh
+    cdef Vec3f* field_data
+    cdef rtcg.RTCFilterFunc filter_func
+
+    def __init__(self, EmbreeVolume scene,
+                 np.ndarray vertices,
+                 np.ndarray indices = None):
+
+        if indices is None:
+            self._build_from_flat(scene, vertices)
+        else:
+            self._build_from_indices(scene, vertices, indices)
+
+    cdef void _build_from_flat(self, EmbreeVolume scene, 
+                               np.ndarray tri_vertices):
+        cdef int i, j
+        cdef int nt = tri_vertices.shape[0]
+        # In this scheme, we don't share any vertices.  This leads to cracks,
+        # but also means we have exactly three times as many vertices as
+        # triangles.
+        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nt*3, 1) 
+        
+        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
+                        rtcg.RTC_VERTEX_BUFFER)
+
+        for i in range(nt):
+            for j in range(3):
+                vertices[i*3 + j].x = tri_vertices[i,j,0]
+                vertices[i*3 + j].y = tri_vertices[i,j,1]
+                vertices[i*3 + j].z = tri_vertices[i,j,2]
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+
+        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
+                        mesh, rtcg.RTC_INDEX_BUFFER)
+        for i in range(nt):
+            triangles[i].v0 = i*3 + 0
+            triangles[i].v1 = i*3 + 1
+            triangles[i].v2 = i*3 + 2
+
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+        self.vertices = vertices
+        self.indices = triangles
+        self.mesh = mesh
+
+    cdef void _build_from_indices(self, EmbreeVolume scene,
+                                  np.ndarray tri_vertices,
+                                  np.ndarray tri_indices):
+        cdef int i
+        cdef int nv = tri_vertices.shape[0]
+        cdef int nt = tri_indices.shape[0]
+
+        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+
+        # set up vertex and triangle arrays. In this case, we just read
+        # them directly from the inputs
+        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
+                                                    rtcg.RTC_VERTEX_BUFFER)
+
+        for i in range(nv):
+                vertices[i].x = tri_vertices[i, 0]
+                vertices[i].y = tri_vertices[i, 1]
+                vertices[i].z = tri_vertices[i, 2]
+
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+
+        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
+                        mesh, rtcg.RTC_INDEX_BUFFER)
+
+        for i in range(nt):
+            triangles[i].v0 = tri_indices[i][0]
+            triangles[i].v1 = tri_indices[i][1]
+            triangles[i].v2 = tri_indices[i][2]
+
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+
+        self.vertices = vertices
+        self.indices = triangles
+        self.mesh = mesh
+
+
+cdef class ElementMesh(TriangleMesh):
+    r'''
+
+    Currently, we handle non-triangular mesh types by converting them 
+    to triangular meshes. This class performs this transformation.
+    Currently, this is implemented for hexahedral and tetrahedral
+    meshes.
+
+    Parameters
+    ----------
+
+    scene : EmbreeScene
+        This is the scene to which the constructed polygons will be
+        added.
+    vertices : a np.ndarray of floats. 
+        This specifies the x, y, and z coordinates of the vertices in 
+        the polygon mesh. This should have the shape
+        (num_vertices, 3). For example, vertices[2][1] should give the 
+        y-coordinate of the 3rd vertex in the mesh.
+    indices : a np.ndarray of ints
+        This should either have the shape (num_elements, 4) or 
+        (num_elements, 8) for tetrahedral and hexahedral meshes, 
+        respectively. For tetrahedral meshes, each element will 
+        be represented by four triangles in the scene. For hex meshes,
+        each element will be represented by 12 triangles, 2 for each 
+        face. For hex meshes, we assume that the node ordering is as
+        defined here: 
+        http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
+            
+    '''
+
+    def __init__(self, EmbreeVolume scene,
+                 np.ndarray vertices, 
+                 np.ndarray indices,
+                 np.ndarray data,
+                 sample_type):
+        # We need now to figure out if we've been handed quads or tetrahedra.
+        # If it's quads, we can build the mesh slightly differently.
+        # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
+
+        if sample_type == 'surface':
+            self.filter_func = <rtcg.RTCFilterFunc> &sample_surface
+        elif sample_type == 'maximum':
+            self.filter_func = <rtcg.RTCFilterFunc> &maximum_intensity
+        else:
+            print "Error - sampler type not implemented."
+            raise NotImplementedError
+        if indices.shape[1] == 8:
+            self._build_from_quads(scene, vertices, indices, data)
+        elif indices.shape[1] == 4:
+            self._build_from_triangles(scene, vertices, indices, data)
+        else:
+            raise NotImplementedError
+
+
+    cdef void _build_from_quads(self, EmbreeVolume scene,
+                                np.ndarray quad_vertices,
+                                np.ndarray quad_indices,
+                                np.ndarray data):
+
+        cdef int i, j
+        cdef int nv = quad_vertices.shape[0]
+        cdef int ne = quad_indices.shape[0]
+
+        # There are six faces for every quad.  Each of those will be divided
+        # into two triangles.
+        cdef int nt = 6*2*ne
+
+        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+
+        # first just copy over the vertices
+        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
+                        rtcg.RTC_VERTEX_BUFFER)
+
+        for i in range(nv):
+            vertices[i].x = quad_vertices[i, 0]
+            vertices[i].y = quad_vertices[i, 1]
+            vertices[i].z = quad_vertices[i, 2]
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+
+        # now build up the triangles
+        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
+                        mesh, rtcg.RTC_INDEX_BUFFER)
+
+        for i in range(ne):
+            for j in range(12):
+                triangles[12*i+j].v0 = quad_indices[i][triangulate_hex[j][0]]
+                triangles[12*i+j].v1 = quad_indices[i][triangulate_hex[j][1]]
+                triangles[12*i+j].v2 = quad_indices[i][triangulate_hex[j][2]]
+
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+
+        cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
+
+        for i in range(ne):
+            for j in range(12):
+                field_data[12*i+j].x = data[i][triangulate_hex[j][0]]
+                field_data[12*i+j].y = data[i][triangulate_hex[j][1]]
+                field_data[12*i+j].z = data[i][triangulate_hex[j][2]]
+
+        rtcg.rtcSetUserData(scene.scene_i, mesh, field_data)
+
+        self.field_data = field_data
+        self.vertices = vertices
+        self.indices = triangles
+        self.mesh = mesh
+
+        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
+                                              mesh,
+                                              self.filter_func)
+
+    cdef void _build_from_triangles(self, EmbreeVolume scene,
+                                    np.ndarray tetra_vertices, 
+                                    np.ndarray tetra_indices,
+                                    np.ndarray data):
+
+        cdef int i, j
+        cdef int nv = tetra_vertices.shape[0]
+        cdef int ne = tetra_indices.shape[0]
+
+        # There are four triangle faces for each tetrahedron.
+        cdef int nt = 4*ne
+
+        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+
+        # Just copy over the vertices
+        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
+                        rtcg.RTC_VERTEX_BUFFER)
+
+        for i in range(nv):
+                vertices[i].x = tetra_vertices[i, 0]
+                vertices[i].y = tetra_vertices[i, 1]
+                vertices[i].z = tetra_vertices[i, 2]
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+
+        # Now build up the triangles
+        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
+                        mesh, rtcg.RTC_INDEX_BUFFER)
+        for i in range(ne):
+            for j in range(4):
+                triangles[4*i+j].v0 = tetra_indices[i][triangulate_tetra[j][0]]
+                triangles[4*i+j].v1 = tetra_indices[i][triangulate_tetra[j][1]]
+                triangles[4*i+j].v2 = tetra_indices[i][triangulate_tetra[j][2]]
+
+        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+        self.vertices = vertices
+        self.indices = triangles
+        self.mesh = mesh
+
+    def __dealloc__(self):
+        free(self.field_data)


https://bitbucket.org/yt_analysis/yt/commits/b0fea2c56833/
Changeset:   b0fea2c56833
Branch:      yt
User:        atmyers
Date:        2015-06-03 22:35:32+00:00
Summary:     putting the exodus II reader function into utilities
Affected #:  1 file

diff -r 95cb21c27196a1427539671765be74362301c375 -r b0fea2c5683326748a71d7ec2ba010d55b93c8fd yt/utilities/exodusII_reader.py
--- /dev/null
+++ b/yt/utilities/exodusII_reader.py
@@ -0,0 +1,41 @@
+import string
+from itertools import takewhile
+from netCDF4 import Dataset
+import numpy as np
+
+
+def sanitize_string(s):
+    s = "".join(_ for _ in takewhile(lambda a: a in string.printable, s))
+    return s
+
+
+def get_data(fn):
+    f = Dataset(fn)
+    fvars = f.variables
+    # Is this correct?
+    etypes = fvars["eb_status"][:]
+    nelem = etypes.shape[0]
+    varnames = [sanitize_string(v.tostring()) for v in
+                fvars["name_elem_var"][:]]
+    nodnames = [sanitize_string(v.tostring()) for v in
+                fvars["name_nod_var"][:]]
+    coord = np.array([fvars["coord%s" % ax][:]
+                     for ax in 'xyz']).transpose().copy()
+    coords = []
+    connects = []
+    data = []
+    for i in range(nelem):
+        connects.append(fvars["connect%s" % (i+1)][:].astype("i8"))
+        ci = connects[-1]
+        coords.append(coord)  # Same for all
+        vals = {}
+        for j, v in enumerate(varnames):
+            values = fvars["vals_elem_var%seb%s" % (j+1, i+1)][:]
+            vals['gas', v] = values.astype("f8")[-1, :]
+        for j, v in enumerate(nodnames):
+            # We want just for this set of nodes all the node variables
+            # Use (ci - 1) to get these values
+            values = fvars["vals_nod_var%s" % (j+1)][:]
+            vals['gas', v] = values.astype("f8")[-1, ci - 1, ...]
+        data.append(vals)
+    return coords, connects, data


https://bitbucket.org/yt_analysis/yt/commits/ba534d03b7a6/
Changeset:   ba534d03b7a6
Branch:      yt
User:        atmyers
Date:        2015-06-05 21:31:04+00:00
Summary:     fixing an inconsistency in the camera
Affected #:  2 files

diff -r b0fea2c5683326748a71d7ec2ba010d55b93c8fd -r ba534d03b7a694894731a9f9abba1d23cf8c06c3 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -80,7 +80,6 @@
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))
         east_vector /= np.sqrt(np.dot(east_vector, east_vector))
         self.normal_vector = normal_vector
+        self.north_vector = north_vector
         self.unit_vectors = YTArray([east_vector, north_vector, normal_vector], "")
         self.inv_mat = np.linalg.pinv(self.unit_vectors)
-
-

diff -r b0fea2c5683326748a71d7ec2ba010d55b93c8fd -r ba534d03b7a694894731a9f9abba1d23cf8c06c3 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -197,9 +197,9 @@
         self.switch_orientation()
 
     def set_position(self, position, north_vector=None):
-          self.position = position
-          self.switch_orientation(normal_vector=self.focus - self.position,
-                                  north_vector=north_vector)
+        self.position = position
+        self.switch_orientation(normal_vector=self.focus - self.position,
+                                north_vector=north_vector)
 
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""


https://bitbucket.org/yt_analysis/yt/commits/63cdb0a891f5/
Changeset:   63cdb0a891f5
Branch:      yt
User:        atmyers
Date:        2015-06-09 06:37:52+00:00
Summary:     these things should be done on import, not every time an EmbreeVolume is initialized
Affected #:  1 file

diff -r ba534d03b7a694894731a9f9abba1d23cf8c06c3 -r 63cdb0a891f561d86279618a8ae96a3f3a5e36fe yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -10,6 +10,8 @@
     ImageContainer
 from cython.parallel import prange, parallel, threadid
 
+rtc.rtcInit(NULL)
+rtc.rtcSetErrorFunction(error_printer)
 
 cdef void error_printer(const rtc.RTCError code, const char *_str):
     print "ERROR CAUGHT IN EMBREE"
@@ -19,8 +21,6 @@
 cdef class EmbreeVolume:
 
     def __init__(self):
-        rtc.rtcInit(NULL)
-        rtc.rtcSetErrorFunction(error_printer)
         self.scene_i = rtcs.rtcNewScene(rtcs.RTC_SCENE_STATIC, rtcs.RTC_INTERSECT1)
 
     def __dealloc__(self):


https://bitbucket.org/yt_analysis/yt/commits/e48326d0689b/
Changeset:   e48326d0689b
Branch:      yt
User:        atmyers
Date:        2015-06-09 06:49:13+00:00
Summary:     a slight renaming
Affected #:  4 files

diff -r 63cdb0a891f561d86279618a8ae96a3f3a5e36fe -r e48326d0689b55d42eeef6867ccec9c31de71e98 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -1,6 +1,6 @@
 cimport numpy as np
 cimport pyembree.rtcore as rtc 
-from mesh_traversal cimport EmbreeVolume
+from mesh_traversal cimport YTEmbreeScene
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
@@ -60,7 +60,7 @@
     Parameters
     ----------
 
-    scene : EmbreeScene
+    scene : YTEmbreeScene
         This is the scene to which the constructed polygons will be
         added.
     vertices : a np.ndarray of floats. 
@@ -93,7 +93,7 @@
     cdef Vec3f* field_data
     cdef rtcg.RTCFilterFunc filter_func
 
-    def __init__(self, EmbreeVolume scene,
+    def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices,
                  np.ndarray indices = None):
 
@@ -102,7 +102,7 @@
         else:
             self._build_from_indices(scene, vertices, indices)
 
-    cdef void _build_from_flat(self, EmbreeVolume scene, 
+    cdef void _build_from_flat(self, YTEmbreeScene scene, 
                                np.ndarray tri_vertices):
         cdef int i, j
         cdef int nt = tri_vertices.shape[0]
@@ -134,7 +134,7 @@
         self.indices = triangles
         self.mesh = mesh
 
-    cdef void _build_from_indices(self, EmbreeVolume scene,
+    cdef void _build_from_indices(self, YTEmbreeScene scene,
                                   np.ndarray tri_vertices,
                                   np.ndarray tri_indices):
         cdef int i
@@ -202,7 +202,7 @@
             
     '''
 
-    def __init__(self, EmbreeVolume scene,
+    def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices, 
                  np.ndarray indices,
                  np.ndarray data,
@@ -226,7 +226,7 @@
             raise NotImplementedError
 
 
-    cdef void _build_from_quads(self, EmbreeVolume scene,
+    cdef void _build_from_quads(self, YTEmbreeScene scene,
                                 np.ndarray quad_vertices,
                                 np.ndarray quad_indices,
                                 np.ndarray data):
@@ -283,7 +283,7 @@
                                               mesh,
                                               self.filter_func)
 
-    cdef void _build_from_triangles(self, EmbreeVolume scene,
+    cdef void _build_from_triangles(self, YTEmbreeScene scene,
                                     np.ndarray tetra_vertices, 
                                     np.ndarray tetra_indices,
                                     np.ndarray data):

diff -r 63cdb0a891f561d86279618a8ae96a3f3a5e36fe -r e48326d0689b55d42eeef6867ccec9c31de71e98 yt/utilities/lib/mesh_traversal.pxd
--- a/yt/utilities/lib/mesh_traversal.pxd
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -2,6 +2,6 @@
 cimport pyembree.rtcore_scene as rtcs
 cimport pyembree.rtcore_ray
 
-cdef class EmbreeVolume:
+cdef class YTEmbreeScene:
     cdef rtcs.RTCScene scene_i
 

diff -r 63cdb0a891f561d86279618a8ae96a3f3a5e36fe -r e48326d0689b55d42eeef6867ccec9c31de71e98 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -18,7 +18,7 @@
     rtc.print_error(code)
     print "ERROR MESSAGE:", _str
 
-cdef class EmbreeVolume:
+cdef class YTEmbreeScene:
 
     def __init__(self):
         self.scene_i = rtcs.rtcNewScene(rtcs.RTC_SCENE_STATIC, rtcs.RTC_INTERSECT1)
@@ -31,7 +31,7 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def __call__(self, EmbreeVolume volume, int num_threads = 0):
+    def __call__(self, YTEmbreeScene scene, int num_threads = 0):
         '''
 
         This function is supposed to cast the rays and return the
@@ -39,7 +39,7 @@
 
         '''
 
-        rtcs.rtcCommit(volume.scene_i)
+        rtcs.rtcCommit(scene.scene_i)
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
@@ -90,7 +90,7 @@
                 ray.mask = -1
                 ray.time = 0
                 vd_i += vd_step
-                rtcs.rtcIntersect(volume.scene_i, ray)
+                rtcs.rtcIntersect(scene.scene_i, ray)
                 data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)

diff -r 63cdb0a891f561d86279618a8ae96a3f3a5e36fe -r e48326d0689b55d42eeef6867ccec9c31de71e98 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -23,7 +23,7 @@
     get_corners, new_projection_sampler, new_mesh_render_sampler
 from yt.visualization.image_writer import apply_colormap
 
-from yt.utilities.lib.mesh_traversal import EmbreeVolume, \
+from yt.utilities.lib.mesh_traversal import YTEmbreeScene, \
     MeshSampler
 from yt.utilities.lib.mesh_construction import \
     ElementMesh
@@ -289,7 +289,7 @@
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
-        self.scene = EmbreeVolume()
+        self.scene = YTEmbreeScene()
 
         self.volume = ElementMesh(self.scene,
                                   vertices,


https://bitbucket.org/yt_analysis/yt/commits/c3484fa2c9c9/
Changeset:   c3484fa2c9c9
Branch:      yt
User:        atmyers
Date:        2015-06-09 07:08:06+00:00
Summary:     implement the case where we have a more complex set of rays
Affected #:  1 file

diff -r e48326d0689b55d42eeef6867ccec9c31de71e98 -r c3484fa2c9c952496c544cd507bf6d96a843fb00 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -40,9 +40,6 @@
         '''
 
         rtcs.rtcCommit(scene.scene_i)
-        # This routine will iterate over all of the vectors and cast each in
-        # turn.  Might benefit from a more sophisticated intersection check,
-        # like http://courses.csusm.edu/cs697exz/ray_box.htm
         cdef int vi, vj, hit, i, j, ni, nj, nn
         cdef np.int64_t offset
         cdef np.int64_t iter[4]
@@ -78,7 +75,6 @@
                 v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
                 v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
                 v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
                 for i in range(3):
                     ray.org[i] = v_pos[i]
                     ray.dir[i] = im.vp_dir[i]
@@ -103,9 +99,21 @@
                 offset = j * 3
                 for i in range(3): v_pos[i] = im.vp_pos[i + offset]
                 for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
-                    continue
+                for i in range(3):
+                    ray.org[i] = v_pos[i]
+                    ray.dir[i] = v_dir[i]
+                ray.tnear = 0.0
+                ray.tfar = 1e37
+                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.mask = -1
+                ray.time = 0
+                vd_i += vd_step
+                rtcs.rtcIntersect(scene.scene_i, ray)
+                data[j] = ray.time
+            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+            free(v_pos)
+            free(v_dir)
 
-            free(v_dir)
-            free(v_pos)
         return hit


https://bitbucket.org/yt_analysis/yt/commits/2cfd1b65f6db/
Changeset:   2cfd1b65f6db
Branch:      yt
User:        atmyers
Date:        2015-06-10 02:32:49+00:00
Summary:     this is much more efficient it seems
Affected #:  1 file

diff -r c3484fa2c9c952496c544cd507bf6d96a843fb00 -r 2cfd1b65f6db77b25949981c33cb6cfe1d22c3ef yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -285,7 +285,7 @@
 
     def build_default_volume(self):
 
-        field_data = self.data_source[self.field]
+        field_data = self.data_source.ds.index.io.fields[0][self.field]
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 


https://bitbucket.org/yt_analysis/yt/commits/5a8005a42bfb/
Changeset:   5a8005a42bfb
Branch:      yt
User:        atmyers
Date:        2015-06-10 05:07:57+00:00
Summary:     actually, this is a way to speed up the renderer, so that we preserve the option of having an arbitrary data source
Affected #:  1 file

diff -r 2cfd1b65f6db77b25949981c33cb6cfe1d22c3ef -r 5a8005a42bfb8b7d3453cda0bab01a3e63e2ded0 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -285,7 +285,7 @@
 
     def build_default_volume(self):
 
-        field_data = self.data_source.ds.index.io.fields[0][self.field]
+        field_data = self.data_source[self.field]
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
@@ -294,7 +294,7 @@
         self.volume = ElementMesh(self.scene,
                                   vertices,
                                   indices,
-                                  field_data,
+                                  field_data.d,
                                   self.sampler_type)
 
         log_fields = [self.data_source.pf.field_info[self.field].take_log]


https://bitbucket.org/yt_analysis/yt/commits/43f060c32492/
Changeset:   43f060c32492
Branch:      yt
User:        atmyers
Date:        2015-06-10 23:11:03+00:00
Summary:     moving the embree sampler functions to a different file
Affected #:  4 files

diff -r 5a8005a42bfb8b7d3453cda0bab01a3e63e2ded0 -r 43f060c32492da8e5f48f8ab9cfffbdd05670a9f yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -4,6 +4,9 @@
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
+from utilities.sampler_functions cimport \
+    maximum_intensity, \
+    sample_surface
 from pyembree.rtcore cimport \
     Vertex, \
     Triangle, \
@@ -16,41 +19,6 @@
     int triangulate_tetra[4][3]
 
 
-cdef double get_value_trilinear(void* userPtr,
-                                rtcr.RTCRay& ray):
-    cdef int ray_id
-    cdef double u, v, val
-    cdef double d0, d1, d2
-    cdef Vec3f* data
-
-    data = <Vec3f*> userPtr
-    ray_id = ray.primID
-
-    u = ray.u
-    v = ray.v
-
-    d0 = data[ray_id].x
-    d1 = data[ray_id].y
-    d2 = data[ray_id].z
-
-    return d0*(1.0 - u - v) + d1*u + d2*v
-
-
-cdef void maximum_intensity(void* userPtr, 
-                            rtcr.RTCRay& ray):
-
-    cdef double val = get_value_trilinear(userPtr, ray)
-    ray.time = max(ray.time, val)
-    ray.geomID = -1  # reject hit
-
-
-cdef void sample_surface(void* userPtr, 
-                         rtcr.RTCRay& ray):
-
-    cdef double val = get_value_trilinear(userPtr, ray)
-    ray.time = val
-
-
 cdef class TriangleMesh:
     r'''
 
@@ -212,9 +180,9 @@
         # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
 
         if sample_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> &sample_surface
+            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
         elif sample_type == 'maximum':
-            self.filter_func = <rtcg.RTCFilterFunc> &maximum_intensity
+            self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
         else:
             print "Error - sampler type not implemented."
             raise NotImplementedError

diff -r 5a8005a42bfb8b7d3453cda0bab01a3e63e2ded0 -r 43f060c32492da8e5f48f8ab9cfffbdd05670a9f yt/utilities/lib/sampler_functions.pxd
--- /dev/null
+++ b/yt/utilities/lib/sampler_functions.pxd
@@ -0,0 +1,13 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f
+cimport cython
+
+
+cdef double get_value_trilinear(void* userPtr,
+                                rtcr.RTCRay& ray)
+
+cdef void maximum_intensity(void* userPtr, 
+                            rtcr.RTCRay& ray)
+
+cdef void sample_surface(void* userPtr, rtcr.RTCRay& ray)

diff -r 5a8005a42bfb8b7d3453cda0bab01a3e63e2ded0 -r 43f060c32492da8e5f48f8ab9cfffbdd05670a9f yt/utilities/lib/sampler_functions.pyx
--- /dev/null
+++ b/yt/utilities/lib/sampler_functions.pyx
@@ -0,0 +1,40 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f
+cimport cython
+
+
+cdef double get_value_trilinear(void* userPtr,
+                                rtcr.RTCRay& ray):
+    cdef int ray_id
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef Vec3f* data
+
+    data = <Vec3f*> userPtr
+    ray_id = ray.primID
+
+    u = ray.u
+    v = ray.v
+
+    d0 = data[ray_id].x
+    d1 = data[ray_id].y
+    d2 = data[ray_id].z
+
+    return d0*(1.0 - u - v) + d1*u + d2*v
+
+
+cdef void maximum_intensity(void* userPtr, 
+                            rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = max(ray.time, val)
+    ray.geomID = -1  # reject hit
+
+
+cdef void sample_surface(void* userPtr, 
+                         rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = val
+

diff -r 5a8005a42bfb8b7d3453cda0bab01a3e63e2ded0 -r 43f060c32492da8e5f48f8ab9cfffbdd05670a9f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -175,6 +175,11 @@
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
+        config.add_extension("sampler_functions",
+                             ["yt/utilities/lib/sampler_functions.pyx"],
+                             include_dirs=["yt/utilities/lib", include_dirs],
+                             libraries=["m", "embree"], language="c++",
+                             depends=["yt/utilities/lib/sampler_functions.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


https://bitbucket.org/yt_analysis/yt/commits/1ecba3164b96/
Changeset:   1ecba3164b96
Branch:      yt
User:        atmyers
Date:        2015-06-10 23:46:15+00:00
Summary:     cleaning up some imports
Affected #:  1 file

diff -r 43f060c32492da8e5f48f8ab9cfffbdd05670a9f -r 1ecba3164b9610b160fa78689b057b997e022107 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -23,10 +23,8 @@
     get_corners, new_projection_sampler, new_mesh_render_sampler
 from yt.visualization.image_writer import apply_colormap
 
-from yt.utilities.lib.mesh_traversal import YTEmbreeScene, \
-    MeshSampler
-from yt.utilities.lib.mesh_construction import \
-    ElementMesh
+from yt.utilities.lib.mesh_traversal import YTEmbreeScene
+from yt.utilities.lib.mesh_construction import ElementMesh
 
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \


https://bitbucket.org/yt_analysis/yt/commits/d7dc2d447f26/
Changeset:   d7dc2d447f26
Branch:      yt
User:        atmyers
Date:        2015-06-11 00:16:53+00:00
Summary:     cleaning up some of the rendering code
Affected #:  2 files

diff -r 1ecba3164b9610b160fa78689b057b997e022107 -r d7dc2d447f2671ef2aa48c17d978f5017c38915b yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -40,16 +40,11 @@
         '''
 
         rtcs.rtcCommit(scene.scene_i)
-        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef int vi, vj, i, j, ni, nj, nn
         cdef np.int64_t offset
-        cdef np.int64_t iter[4]
         cdef ImageContainer *im = self.image
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
-        cdef np.float64_t rgba[6]
-        cdef np.float64_t extrema[4]
-        cdef np.float64_t max_t
-        hit = 0
         cdef np.int64_t nx, ny, size
         cdef np.float64_t px, py
         cdef np.float64_t width[3]
@@ -61,8 +56,6 @@
         size = nx * ny
         data = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
-        cdef int vd_i = 0
-        cdef int vd_step = 1
         if im.vd_strides[0] == -1:
             v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
             for j in range(size):
@@ -85,7 +78,6 @@
                 ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
                 ray.mask = -1
                 ray.time = 0
-                vd_i += vd_step
                 rtcs.rtcIntersect(scene.scene_i, ray)
                 data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
@@ -109,11 +101,8 @@
                 ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
                 ray.mask = -1
                 ray.time = 0
-                vd_i += vd_step
                 rtcs.rtcIntersect(scene.scene_i, ray)
                 data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)
             free(v_dir)
-
-        return hit

diff -r 1ecba3164b9610b160fa78689b057b997e022107 -r d7dc2d447f2671ef2aa48c17d978f5017c38915b yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -328,7 +328,7 @@
         self.set_sampler(camera)
 
         mylog.debug("Using sampler %s" % self.sampler)
-        self.sampler(self.scene, num_threads=self.num_threads)
+        self.sampler(self.scene)
         mylog.debug("Done casting rays")
 
         self.current_image = self.sampler.aimage


https://bitbucket.org/yt_analysis/yt/commits/3ff6542589d4/
Changeset:   3ff6542589d4
Branch:      yt
User:        atmyers
Date:        2015-06-13 01:36:07+00:00
Summary:     adding a maximum intensity sampler option
Affected #:  2 files

diff -r d7dc2d447f2671ef2aa48c17d978f5017c38915b -r 3ff6542589d42b4960da7385b799db17d9baa8eb yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -20,7 +20,8 @@
 from .transfer_functions import TransferFunction, \
     ProjectionTransferFunction, ColorTransferFunction
 from .utils import new_volume_render_sampler, data_source_or_all, \
-    get_corners, new_projection_sampler, new_mesh_render_sampler
+    get_corners, new_projection_sampler, new_mesh_surface_sampler, \
+    new_mesh_maximum_sampler
 from yt.visualization.image_writer import apply_colormap
 
 from yt.utilities.lib.mesh_traversal import YTEmbreeScene
@@ -299,7 +300,9 @@
         mylog.debug('Log Fields:' + str(log_fields))
 
     def set_volume(self, volume):
-        pass
+        assert(isinstance(volume, ElementMesh))
+        del self.volume
+        self.volume = volume
 
     def set_field(self, field, no_ghost=True):
         field = self.data_source._determine_fields(field)[0]
@@ -317,7 +320,9 @@
     def set_sampler(self, camera):
         """docstring for add_sampler"""
         if self.sampler_type == 'surface':
-            sampler = new_mesh_render_sampler(camera, self)
+            sampler = new_mesh_surface_sampler(camera, self)
+        if self.sampler_type == 'maximum':
+            sampler = new_mesh_maximum_sampler(camera, self)
         else:
             NotImplementedError("%s not implemented yet" % self.sampler_type)
         self.sampler = sampler

diff -r d7dc2d447f2671ef2aa48c17d978f5017c38915b -r 3ff6542589d42b4960da7385b799db17d9baa8eb yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -12,7 +12,24 @@
     return data_source
 
 
-def new_mesh_render_sampler(camera, render_source):
+def new_mesh_surface_sampler(camera, render_source):
+    params = camera._get_sampler_params(render_source)
+    args = (
+        params['vp_pos'],
+        params['vp_dir'],
+        params['center'],
+        params['bounds'],
+        params['image'],
+        params['x_vec'],
+        params['y_vec'],
+        params['width'],
+    )
+
+    sampler = MeshSampler(*args)
+    return sampler
+
+
+def new_mesh_maximum_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     args = (
         params['vp_pos'],


https://bitbucket.org/yt_analysis/yt/commits/6554f58d0fe3/
Changeset:   6554f58d0fe3
Branch:      yt
User:        atmyers
Date:        2015-06-13 02:41:25+00:00
Summary:     put setting the filter feedback function into a helper routine
Affected #:  1 file

diff -r 3ff6542589d42b4960da7385b799db17d9baa8eb -r 6554f58d0fe3c9f6b8202c9237c1726c93f01b9e yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -174,18 +174,11 @@
                  np.ndarray vertices, 
                  np.ndarray indices,
                  np.ndarray data,
-                 sample_type):
+                 sampler_type):
         # We need now to figure out if we've been handed quads or tetrahedra.
         # If it's quads, we can build the mesh slightly differently.
         # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
 
-        if sample_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
-        elif sample_type == 'maximum':
-            self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
-        else:
-            print "Error - sampler type not implemented."
-            raise NotImplementedError
         if indices.shape[1] == 8:
             self._build_from_quads(scene, vertices, indices, data)
         elif indices.shape[1] == 4:
@@ -193,6 +186,7 @@
         else:
             raise NotImplementedError
 
+        self._set_sampler_type(scene, sampler_type)
 
     cdef void _build_from_quads(self, YTEmbreeScene scene,
                                 np.ndarray quad_vertices,
@@ -290,5 +284,18 @@
         self.indices = triangles
         self.mesh = mesh
 
+    def _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
+        if sampler_type == 'surface':
+            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
+        elif sampler_type == 'maximum':
+            self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
+        else:
+            print "Error - sampler type not implemented."
+            raise NotImplementedError
+
+        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
+                                              self.mesh,
+                                              self.filter_func)
+
     def __dealloc__(self):
         free(self.field_data)


https://bitbucket.org/yt_analysis/yt/commits/11186236dea6/
Changeset:   11186236dea6
Branch:      yt
User:        atmyers
Date:        2015-06-13 02:42:18+00:00
Summary:     removing the two different sampler functions, only one is needed
Affected #:  1 file

diff -r 6554f58d0fe3c9f6b8202c9237c1726c93f01b9e -r 11186236dea67ca87afec0add03f67225d5f8af7 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -12,24 +12,7 @@
     return data_source
 
 
-def new_mesh_surface_sampler(camera, render_source):
-    params = camera._get_sampler_params(render_source)
-    args = (
-        params['vp_pos'],
-        params['vp_dir'],
-        params['center'],
-        params['bounds'],
-        params['image'],
-        params['x_vec'],
-        params['y_vec'],
-        params['width'],
-    )
-
-    sampler = MeshSampler(*args)
-    return sampler
-
-
-def new_mesh_maximum_sampler(camera, render_source):
+def new_mesh_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     args = (
         params['vp_pos'],


https://bitbucket.org/yt_analysis/yt/commits/c183bf97c64f/
Changeset:   c183bf97c64f
Branch:      yt
User:        atmyers
Date:        2015-06-13 02:42:52+00:00
Summary:     cleaning up some of the unused methods for MeshSource
Affected #:  1 file

diff -r 11186236dea67ca87afec0add03f67225d5f8af7 -r c183bf97c64f7edcc126da524deaeff86529cf34 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -20,8 +20,7 @@
 from .transfer_functions import TransferFunction, \
     ProjectionTransferFunction, ColorTransferFunction
 from .utils import new_volume_render_sampler, data_source_or_all, \
-    get_corners, new_projection_sampler, new_mesh_surface_sampler, \
-    new_mesh_maximum_sampler
+    get_corners, new_projection_sampler, new_mesh_sampler
 from yt.visualization.image_writer import apply_colormap
 
 from yt.utilities.lib.mesh_traversal import YTEmbreeScene
@@ -233,7 +232,7 @@
     _image = None
     data_source = None
 
-    def __init__(self, data_source, field, auto=True):
+    def __init__(self, data_source, field, sampler_type='surface'):
         r"""Initialize a new unstructured source for rendering.
 
         A :class:`MeshSource` provides the framework to volume render
@@ -246,8 +245,13 @@
             data object or dataset.
         fields : string
             The name of the field to be rendered.
-        auto: bool, optional
-            If True, will build a default PolygonMesh based on the data.
+        sampler_type : string, either 'surface' or 'maximum'
+            The type of volume rendering to use for this MeshSource.
+            If 'surface', each ray will return the value of the field
+            at the point at which it intersects the surface mesh.
+            If 'maximum', each ray will return the largest value of
+            any vertex on any element that the ray intersects.
+            Default is 'surface'.
 
         Examples
         --------
@@ -258,81 +262,47 @@
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
         self.field = field
-        self.volume = None
+        self.mesh = None
         self.current_image = None
-        self.double_check = False
-        self.num_threads = 0
-        self.sampler_type = 'surface'
+        self.sampler_type = sampler_type
 
         # Error checking
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        if auto:
-            self.build_defaults()
-
-    def build_defaults(self):
-        self.build_default_volume()
+        self.build_data_structures()
 
     def _validate(self):
         """Make sure that all dependencies have been met"""
         if self.data_source is None:
             raise RuntimeError("Data source not initialized")
 
-        if self.volume is None:
-            raise RuntimeError("Volume not initialized")
+        if self.mesh is None:
+            raise RuntimeError("Mesh not initialized")
 
-    def build_default_volume(self):
+    def build_data_structures(self):
 
         field_data = self.data_source[self.field]
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
+
+        # convert the indices to zero-based indexing
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
         self.scene = YTEmbreeScene()
 
-        self.volume = ElementMesh(self.scene,
-                                  vertices,
-                                  indices,
-                                  field_data.d,
-                                  self.sampler_type)
+        mylog.debug("Using field %s with sampler_type %s" % (self.field,
+                                                             self.sampler_type))
+        self.mesh = ElementMesh(self.scene,
+                                vertices,
+                                indices,
+                                field_data.d,
+                                self.sampler_type)
 
-        log_fields = [self.data_source.pf.field_info[self.field].take_log]
-        mylog.debug('Log Fields:' + str(log_fields))
+    def render(self, camera):
 
-    def set_volume(self, volume):
-        assert(isinstance(volume, ElementMesh))
-        del self.volume
-        self.volume = volume
+        self.sampler = new_mesh_sampler(camera, self)
 
-    def set_field(self, field, no_ghost=True):
-        field = self.data_source._determine_fields(field)[0]
-        log_field = self.data_source.pf.field_info[field].take_log
-        self.volume.set_fields(field, [log_field], no_ghost)
-        self.field = field
-
-    def set_fields(self, fields, no_ghost=True):
-        fields = self.data_source._determine_fields(fields)
-        log_fields = [self.data_source.ds.field_info[f].take_log
-                      for f in fields]
-        self.volume.set_fields(fields, log_fields, no_ghost)
-        self.field = fields
-
-    def set_sampler(self, camera):
-        """docstring for add_sampler"""
-        if self.sampler_type == 'surface':
-            sampler = new_mesh_surface_sampler(camera, self)
-        if self.sampler_type == 'maximum':
-            sampler = new_mesh_maximum_sampler(camera, self)
-        else:
-            NotImplementedError("%s not implemented yet" % self.sampler_type)
-        self.sampler = sampler
-        assert(self.sampler is not None)
-
-    def render(self, camera, zbuffer=None):
-
-        self.set_sampler(camera)
-
-        mylog.debug("Using sampler %s" % self.sampler)
+        mylog.debug("Casting rays")
         self.sampler(self.scene)
         mylog.debug("Done casting rays")
 


https://bitbucket.org/yt_analysis/yt/commits/0069234c8ad6/
Changeset:   0069234c8ad6
Branch:      yt
User:        atmyers
Date:        2015-06-13 05:37:04+00:00
Summary:     some refactoring of mesh_construction
Affected #:  7 files

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/filter_feedback_functions.pxd
--- /dev/null
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -0,0 +1,13 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f
+cimport cython
+
+
+cdef double get_value_trilinear(void* userPtr,
+                                rtcr.RTCRay& ray)
+
+cdef void maximum_intensity(void* userPtr, 
+                            rtcr.RTCRay& ray)
+
+cdef void sample_surface(void* userPtr, rtcr.RTCRay& ray)

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/filter_feedback_functions.pyx
--- /dev/null
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -0,0 +1,40 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f
+cimport cython
+
+
+cdef double get_value_trilinear(void* userPtr,
+                                rtcr.RTCRay& ray):
+    cdef int ray_id
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef Vec3f* data
+
+    data = <Vec3f*> userPtr
+    ray_id = ray.primID
+
+    u = ray.u
+    v = ray.v
+
+    d0 = data[ray_id].x
+    d1 = data[ray_id].y
+    d2 = data[ray_id].z
+
+    return d0*(1.0 - u - v) + d1*u + d2*v
+
+
+cdef void maximum_intensity(void* userPtr, 
+                            rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = max(ray.time, val)
+    ray.geomID = -1  # reject hit
+
+
+cdef void sample_surface(void* userPtr, 
+                         rtcr.RTCRay& ray):
+
+    cdef double val = get_value_trilinear(userPtr, ray)
+    ray.time = val
+

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -1,9 +1,11 @@
+#define MAX_NUM_TRI 12
+
 // This array is used to triangulate the hexahedral mesh elements
 // Each element has six faces with two triangles each.
 // The vertex ordering convention is assumed to follow that used
 // here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
 // Note that this is the case for Exodus II data.
-int triangulate_hex[12][3] = {
+int triangulate_hex[MAX_NUM_TRI][3] = {
   {0, 1, 2}, {0, 2, 3}, // Face is 0 1 2 3 
   {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
   {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
@@ -13,9 +15,18 @@
 };
 
 // Similarly, this is used to triangulate the tetrahedral cells
-int triangulate_tetra[4][3] = {
+int triangulate_tetra[MAX_NUM_TRI][3] = {
   {0, 1, 2}, 
   {0, 1, 3},
   {0, 2, 3},
-  {1, 2, 3}
+  {1, 2, 3},
+
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1}
 };

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -4,7 +4,7 @@
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
-from utilities.sampler_functions cimport \
+from filter_feedback_functions cimport \
     maximum_intensity, \
     sample_surface
 from pyembree.rtcore cimport \
@@ -15,8 +15,11 @@
 import numpy as np
 
 cdef extern from "mesh_construction.h":
-    int triangulate_hex[12][3]
-    int triangulate_tetra[4][3]
+    enum:
+        MAX_NUM_TRI
+
+    int triangulate_hex[MAX_NUM_TRI][3]
+    int triangulate_tetra[MAX_NUM_TRI][3]
 
 
 cdef class TriangleMesh:
@@ -60,6 +63,8 @@
     cdef unsigned int mesh
     cdef Vec3f* field_data
     cdef rtcg.RTCFilterFunc filter_func
+    cdef int tpe
+    cdef int[MAX_NUM_TRI][3] tris
 
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices,
@@ -180,26 +185,29 @@
         # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
 
         if indices.shape[1] == 8:
-            self._build_from_quads(scene, vertices, indices, data)
+            self.tpe = 12
+            self.tris = triangulate_hex
         elif indices.shape[1] == 4:
-            self._build_from_triangles(scene, vertices, indices, data)
+            self.tpe = 4
+            self.tris = triangulate_tetra
         else:
             raise NotImplementedError
 
+        self._build_from_indices(scene, vertices, indices)
+        self.field_data = NULL
+        self._set_field_data(scene, data)
         self._set_sampler_type(scene, sampler_type)
 
-    cdef void _build_from_quads(self, YTEmbreeScene scene,
-                                np.ndarray quad_vertices,
-                                np.ndarray quad_indices,
-                                np.ndarray data):
-
-        cdef int i, j
-        cdef int nv = quad_vertices.shape[0]
-        cdef int ne = quad_indices.shape[0]
+    cdef void _build_from_indices(self, YTEmbreeScene scene,
+                                  np.ndarray vertices_in,
+                                  np.ndarray indices_in):
+        cdef int i, j, ind
+        cdef int nv = vertices_in.shape[0]
+        cdef int ne = indices_in.shape[0]
 
         # There are six faces for every quad.  Each of those will be divided
         # into two triangles.
-        cdef int nt = 6*2*ne
+        cdef int nt = self.tpe*ne
 
         cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
                     rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
@@ -209,9 +217,9 @@
                         rtcg.RTC_VERTEX_BUFFER)
 
         for i in range(nv):
-            vertices[i].x = quad_vertices[i, 0]
-            vertices[i].y = quad_vertices[i, 1]
-            vertices[i].z = quad_vertices[i, 2]
+            vertices[i].x = vertices_in[i, 0]
+            vertices[i].y = vertices_in[i, 1]
+            vertices[i].z = vertices_in[i, 2]
         rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
 
         # now build up the triangles
@@ -219,72 +227,37 @@
                         mesh, rtcg.RTC_INDEX_BUFFER)
 
         for i in range(ne):
-            for j in range(12):
-                triangles[12*i+j].v0 = quad_indices[i][triangulate_hex[j][0]]
-                triangles[12*i+j].v1 = quad_indices[i][triangulate_hex[j][1]]
-                triangles[12*i+j].v2 = quad_indices[i][triangulate_hex[j][2]]
+            for j in range(self.tpe):
+                ind = self.tpe*i+j
+                triangles[ind].v0 = indices_in[i][self.tris[j][0]]
+                triangles[ind].v1 = indices_in[i][self.tris[j][1]]
+                triangles[ind].v2 = indices_in[i][self.tris[j][2]]
 
         rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
 
-        cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
-
-        for i in range(ne):
-            for j in range(12):
-                field_data[12*i+j].x = data[i][triangulate_hex[j][0]]
-                field_data[12*i+j].y = data[i][triangulate_hex[j][1]]
-                field_data[12*i+j].z = data[i][triangulate_hex[j][2]]
-
-        rtcg.rtcSetUserData(scene.scene_i, mesh, field_data)
-
-        self.field_data = field_data
         self.vertices = vertices
         self.indices = triangles
         self.mesh = mesh
 
-        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
-                                              mesh,
-                                              self.filter_func)
+    cdef void _set_field_data(self, YTEmbreeScene scene,
+                              np.ndarray data_in):
 
-    cdef void _build_from_triangles(self, YTEmbreeScene scene,
-                                    np.ndarray tetra_vertices, 
-                                    np.ndarray tetra_indices,
-                                    np.ndarray data):
+        cdef int ne = data_in.shape[0]
+        cdef int nt = self.tpe*ne
+        cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
 
-        cdef int i, j
-        cdef int nv = tetra_vertices.shape[0]
-        cdef int ne = tetra_indices.shape[0]
+        for i in range(ne):
+            for j in range(self.tpe):
+                ind = self.tpe*i+j
+                field_data[ind].x = data_in[i][self.tris[j][0]]
+                field_data[ind].y = data_in[i][self.tris[j][1]]
+                field_data[ind].z = data_in[i][self.tris[j][2]]
 
-        # There are four triangle faces for each tetrahedron.
-        cdef int nt = 4*ne
+        rtcg.rtcSetUserData(scene.scene_i, self.mesh, field_data)
 
-        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+        self.field_data = field_data
 
-        # Just copy over the vertices
-        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
-                        rtcg.RTC_VERTEX_BUFFER)
-
-        for i in range(nv):
-                vertices[i].x = tetra_vertices[i, 0]
-                vertices[i].y = tetra_vertices[i, 1]
-                vertices[i].z = tetra_vertices[i, 2]
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
-
-        # Now build up the triangles
-        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
-                        mesh, rtcg.RTC_INDEX_BUFFER)
-        for i in range(ne):
-            for j in range(4):
-                triangles[4*i+j].v0 = tetra_indices[i][triangulate_tetra[j][0]]
-                triangles[4*i+j].v1 = tetra_indices[i][triangulate_tetra[j][1]]
-                triangles[4*i+j].v2 = tetra_indices[i][triangulate_tetra[j][2]]
-
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
-        self.vertices = vertices
-        self.indices = triangles
-        self.mesh = mesh
-
-    def _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
+    cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
             self.filter_func = <rtcg.RTCFilterFunc> sample_surface
         elif sampler_type == 'maximum':
@@ -298,4 +271,5 @@
                                               self.filter_func)
 
     def __dealloc__(self):
-        free(self.field_data)
+        if self.field_data is not NULL:
+            free(self.field_data)

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/sampler_functions.pxd
--- a/yt/utilities/lib/sampler_functions.pxd
+++ /dev/null
@@ -1,13 +0,0 @@
-cimport pyembree.rtcore as rtc
-cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f
-cimport cython
-
-
-cdef double get_value_trilinear(void* userPtr,
-                                rtcr.RTCRay& ray)
-
-cdef void maximum_intensity(void* userPtr, 
-                            rtcr.RTCRay& ray)
-
-cdef void sample_surface(void* userPtr, rtcr.RTCRay& ray)

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/sampler_functions.pyx
--- a/yt/utilities/lib/sampler_functions.pyx
+++ /dev/null
@@ -1,40 +0,0 @@
-cimport pyembree.rtcore as rtc
-cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f
-cimport cython
-
-
-cdef double get_value_trilinear(void* userPtr,
-                                rtcr.RTCRay& ray):
-    cdef int ray_id
-    cdef double u, v, val
-    cdef double d0, d1, d2
-    cdef Vec3f* data
-
-    data = <Vec3f*> userPtr
-    ray_id = ray.primID
-
-    u = ray.u
-    v = ray.v
-
-    d0 = data[ray_id].x
-    d1 = data[ray_id].y
-    d2 = data[ray_id].z
-
-    return d0*(1.0 - u - v) + d1*u + d2*v
-
-
-cdef void maximum_intensity(void* userPtr, 
-                            rtcr.RTCRay& ray):
-
-    cdef double val = get_value_trilinear(userPtr, ray)
-    ray.time = max(ray.time, val)
-    ray.geomID = -1  # reject hit
-
-
-cdef void sample_surface(void* userPtr, 
-                         rtcr.RTCRay& ray):
-
-    cdef double val = get_value_trilinear(userPtr, ray)
-    ray.time = val
-

diff -r c183bf97c64f7edcc126da524deaeff86529cf34 -r 0069234c8ad61732e23b082927a677539959f6ed yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -175,11 +175,11 @@
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
-        config.add_extension("sampler_functions",
-                             ["yt/utilities/lib/sampler_functions.pyx"],
+        config.add_extension("filter_feedback_functions",
+                             ["yt/utilities/lib/filter_feedback_functions.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/sampler_functions.pxd"])
+                             depends=["yt/utilities/lib/filter_feedback_functions.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


https://bitbucket.org/yt_analysis/yt/commits/cfd909dda84c/
Changeset:   cfd909dda84c
Branch:      yt
User:        atmyers
Date:        2015-06-13 05:45:14+00:00
Summary:     a couple of renamings
Affected #:  2 files

diff -r 0069234c8ad61732e23b082927a677539959f6ed -r cfd909dda84c91b0c790392d4f1daf20bd0f154e yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -1,4 +1,6 @@
 #define MAX_NUM_TRI 12
+#define HEX_NT 12
+#define TETRA_NT 4
 
 // This array is used to triangulate the hexahedral mesh elements
 // Each element has six faces with two triangles each.

diff -r 0069234c8ad61732e23b082927a677539959f6ed -r cfd909dda84c91b0c790392d4f1daf20bd0f154e yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -18,6 +18,8 @@
     enum:
         MAX_NUM_TRI
 
+    int HEX_NT
+    int TETRA_NT
     int triangulate_hex[MAX_NUM_TRI][3]
     int triangulate_tetra[MAX_NUM_TRI][3]
 
@@ -64,7 +66,7 @@
     cdef Vec3f* field_data
     cdef rtcg.RTCFilterFunc filter_func
     cdef int tpe
-    cdef int[MAX_NUM_TRI][3] tris
+    cdef int[MAX_NUM_TRI][3] tri_array
 
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices,
@@ -185,11 +187,11 @@
         # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
 
         if indices.shape[1] == 8:
-            self.tpe = 12
-            self.tris = triangulate_hex
+            self.tpe = HEX_NT
+            self.tri_array = triangulate_hex
         elif indices.shape[1] == 4:
-            self.tpe = 4
-            self.tris = triangulate_tetra
+            self.tpe = TETRA_NT
+            self.tri_array = triangulate_tetra
         else:
             raise NotImplementedError
 
@@ -229,9 +231,9 @@
         for i in range(ne):
             for j in range(self.tpe):
                 ind = self.tpe*i+j
-                triangles[ind].v0 = indices_in[i][self.tris[j][0]]
-                triangles[ind].v1 = indices_in[i][self.tris[j][1]]
-                triangles[ind].v2 = indices_in[i][self.tris[j][2]]
+                triangles[ind].v0 = indices_in[i][self.tri_array[j][0]]
+                triangles[ind].v1 = indices_in[i][self.tri_array[j][1]]
+                triangles[ind].v2 = indices_in[i][self.tri_array[j][2]]
 
         rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
 
@@ -249,9 +251,9 @@
         for i in range(ne):
             for j in range(self.tpe):
                 ind = self.tpe*i+j
-                field_data[ind].x = data_in[i][self.tris[j][0]]
-                field_data[ind].y = data_in[i][self.tris[j][1]]
-                field_data[ind].z = data_in[i][self.tris[j][2]]
+                field_data[ind].x = data_in[i][self.tri_array[j][0]]
+                field_data[ind].y = data_in[i][self.tri_array[j][1]]
+                field_data[ind].z = data_in[i][self.tri_array[j][2]]
 
         rtcg.rtcSetUserData(scene.scene_i, self.mesh, field_data)
 


https://bitbucket.org/yt_analysis/yt/commits/9fb3fcfca893/
Changeset:   9fb3fcfca893
Branch:      yt
User:        atmyers
Date:        2015-06-26 01:07:48+00:00
Summary:     another renaming in the MeshSource
Affected #:  3 files

diff -r cfd909dda84c91b0c790392d4f1daf20bd0f154e -r 9fb3fcfca89331735dc2c9b54f592825d386ff14 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -50,7 +50,7 @@
 from yt.geometry.oct_container import \
     OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
-           UnstructuredIndex
+    UnstructuredIndex
 from yt.data_objects.static_output import \
     Dataset
 from yt.utilities.logger import ytLogger as mylog
@@ -69,8 +69,8 @@
 from yt.utilities.flagging_methods import \
     FlaggingGrid
 from yt.data_objects.unstructured_mesh import \
-           SemiStructuredMesh, \
-           UnstructuredMesh
+    SemiStructuredMesh, \
+    UnstructuredMesh
 from yt.extern.six import string_types, iteritems
 from .fields import \
     StreamFieldInfo
@@ -1693,7 +1693,7 @@
     field_units = {}
     particle_types = {}
     sfh = StreamDictFieldHandler()
-    
+
     sfh.update({'connectivity': connectivity,
                 'coordinates': coordinates})
     for i, d in enumerate(data):

diff -r cfd909dda84c91b0c790392d4f1daf20bd0f154e -r 9fb3fcfca89331735dc2c9b54f592825d386ff14 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -37,4 +37,3 @@
 
     cdef double val = get_value_trilinear(userPtr, ray)
     ray.time = val
-

diff -r cfd909dda84c91b0c790392d4f1daf20bd0f154e -r 9fb3fcfca89331735dc2c9b54f592825d386ff14 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -270,7 +270,9 @@
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        self.build_data_structures()
+        self.scene = YTEmbreeScene()
+
+        self.build_mesh()
 
     def _validate(self):
         """Make sure that all dependencies have been met"""
@@ -280,7 +282,7 @@
         if self.mesh is None:
             raise RuntimeError("Mesh not initialized")
 
-    def build_data_structures(self):
+    def build_mesh(self):
 
         field_data = self.data_source[self.field]
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
@@ -288,10 +290,8 @@
         # convert the indices to zero-based indexing
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
-        self.scene = YTEmbreeScene()
-
-        mylog.debug("Using field %s with sampler_type %s" % (self.field,
-                                                             self.sampler_type))
+        mylog.debug("Using field %s and sampler_type %s" % (self.field,
+                                                            self.sampler_type))
         self.mesh = ElementMesh(self.scene,
                                 vertices,
                                 indices,


https://bitbucket.org/yt_analysis/yt/commits/c6a129901ef9/
Changeset:   c6a129901ef9
Branch:      yt
User:        atmyers
Date:        2015-06-26 17:51:55+00:00
Summary:     adding the element mapping code
Affected #:  2 files

diff -r 9fb3fcfca89331735dc2c9b54f592825d386ff14 -r c6a129901ef97e27212201bc667c73c3b278c04f yt/utilities/lib/element_mappings.pyx
--- /dev/null
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -0,0 +1,203 @@
+"""
+This file contains coordinate mappings between physical coordinates and those
+defined on unit elements, for doing intracell interpolation on finite element
+data.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from scipy.optimize import fsolve
+cimport numpy as np
+cimport cython
+
+DTYPE = np.float64
+ctypedef np.float64_t DTYPE_t
+
+
+class P1Mapping2D:
+    def map_real_to_unit(self, physical_coord, vertices):
+    
+        x = physical_coord[0]
+        y = physical_coord[1]
+
+        x1 = vertices[0, 0]
+        y1 = vertices[0, 1]
+
+        x2 = vertices[1, 0]
+        y2 = vertices[1, 1]
+
+        x3 = vertices[2, 0]
+        y3 = vertices[2, 1]
+    
+        A = np.array([[1, x, y], [1, x1, y1], [1, x3, y3]])
+        B = np.array([[1, x2, y2], [1, x1, y1], [1, x3, y3]])
+        u = np.linalg.det(A) / np.linalg.det(B)
+
+        C = np.array([[1, x, y], [1, x1, y1], [1, x2, y2]])
+        D = np.array([[1, x3, y3], [1, x1, y1], [1, x2, y2]])
+        v = np.linalg.det(C) / np.linalg.det(D)
+    
+        return np.array([u, v])
+
+
+class P1Mapping3D:
+    def map_real_to_unit(self, physical_coord, vertices):
+    
+        x = physical_coord[0]
+        y = physical_coord[1]
+        z = physical_coord[2]
+
+        x1 = vertices[0, 0]
+        y1 = vertices[0, 1]
+        z1 = vertices[0, 2]
+
+        x2 = vertices[1, 0]
+        y2 = vertices[1, 1]
+        z2 = vertices[1, 2]
+    
+        x3 = vertices[2, 0]
+        y3 = vertices[2, 1]
+        z3 = vertices[2, 2]
+    
+        x4 = vertices[3, 0]
+        y4 = vertices[3, 1]
+        z4 = vertices[3, 2]
+    
+        b = np.array([x, y, z, 1])
+        A = np.array([[x1, x2, x3, x4],
+                      [y1, y2, y3, y4],
+                      [z1, z2, z3, z4],
+                      [1,  1,  1,  1] ])
+    
+        c = np.linalg.solve(A, b)
+    
+        return c
+
+
+class Q1Mapping2D:
+    def map_real_to_unit(self, physical_coord, vertices):
+    
+        # initial guess for the Newton solve
+        x0 = np.array([0.0, 0.0])
+        x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
+        return x
+
+    def _f(x, v, phys_x):
+        f1 = v[0][0]*(1-x[0])*(1-x[1]) + \
+             v[1][0]*(1+x[0])*(1-x[1]) + \
+             v[2][0]*(1-x[0])*(1+x[1]) + \
+             v[3][0]*(1+x[0])*(1+x[1]) - 4.0*phys_x[0]
+        f2 = v[0][1]*(1-x[0])*(1-x[1]) + \
+             v[1][1]*(1+x[0])*(1-x[1]) + \
+             v[2][1]*(1-x[0])*(1+x[1]) + \
+             v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
+        return np.array([f1, f2])
+
+    def _J(x, v, phys_x):
+        f11 = -(1-x[1])*v[0][0] + \
+               (1-x[1])*v[1][0] - \
+               (1+x[1])*v[2][0] + \
+               (1+x[1])*v[3][0]
+        f12 = -(1-x[0])*v[0][0] - \
+               (1+x[0])*v[1][0] + \
+               (1-x[0])*v[2][0] + \
+               (1+x[0])*v[3][0]
+        f21 = -(1-x[1])*v[0][1] + \
+               (1-x[1])*v[1][1] - \
+               (1+x[1])*v[2][1] + \
+               (1+x[1])*v[3][1]
+        f22 = -(1-x[0])*v[0][1] - \
+               (1+x[0])*v[1][1] + \
+               (1-x[0])*v[2][1] + \
+               (1+x[0])*v[3][1]
+        return np.array([[f11, f12], [f21, f22]])
+
+
+class Q1Mapping3D:
+
+    def map_real_to_unit(self, physical_coord, vertices):
+    
+        # initial guess for the Newton solve
+        x0 = np.array([0.0, 0.0])
+        x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
+        return x
+
+    def _f(x, v, phys_x):
+        f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+             v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+             v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+             v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+             v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+             v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+             v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+             v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
+        f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+             v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+             v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+             v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+             v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+             v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+             v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+             v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
+        f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+             v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+             v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+             v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+             v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+             v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+             v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+             v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
+        return np.array([f0, f1, f2])
+
+    def _J(x, v, phys_x):
+    
+        f00 = -(1-x[1])*(1-x[2])*v[0][0] + (1-x[1])*(1-x[2])*v[1][0] - \
+               (1+x[1])*(1-x[2])*v[2][0] + (1+x[1])*(1-x[2])*v[3][0] - \
+               (1-x[1])*(1+x[2])*v[4][0] + (1-x[1])*(1+x[2])*v[5][0] - \
+               (1+x[1])*(1+x[2])*v[6][0] + (1+x[1])*(1+x[2])*v[7][0]
+        f01 = -(1-x[0])*(1-x[2])*v[0][0] - (1+x[0])*(1-x[2])*v[1][0] + \
+               (1-x[0])*(1-x[2])*v[2][0] + (1+x[0])*(1-x[2])*v[3][0] - \
+               (1-x[0])*(1+x[2])*v[4][0] - (1+x[0])*(1+x[2])*v[5][0] + \
+               (1-x[0])*(1+x[2])*v[6][0] + (1+x[0])*(1+x[2])*v[7][0]
+        f02 = -(1-x[0])*(1-x[1])*v[0][0] - (1+x[0])*(1-x[1])*v[1][0] - \
+               (1-x[0])*(1+x[1])*v[2][0] - (1+x[0])*(1+x[1])*v[3][0] + \
+               (1-x[0])*(1-x[1])*v[4][0] + (1+x[0])*(1-x[1])*v[5][0] + \
+               (1-x[0])*(1+x[1])*v[6][0] + (1+x[0])*(1+x[1])*v[7][0]
+        
+
+        f10 = -(1-x[1])*(1-x[2])*v[0][1] + (1-x[1])*(1-x[2])*v[1][1] - \
+               (1+x[1])*(1-x[2])*v[2][1] + (1+x[1])*(1-x[2])*v[3][1] - \
+               (1-x[1])*(1+x[2])*v[4][1] + (1-x[1])*(1+x[2])*v[5][1] - \
+               (1+x[1])*(1+x[2])*v[6][1] + (1+x[1])*(1+x[2])*v[7][1]
+        f11 = -(1-x[0])*(1-x[2])*v[0][1] - (1+x[0])*(1-x[2])*v[1][1] + \
+               (1-x[0])*(1-x[2])*v[2][1] + (1+x[0])*(1-x[2])*v[3][1] - \
+               (1-x[0])*(1+x[2])*v[4][1] - (1+x[0])*(1+x[2])*v[5][1] + \
+               (1-x[0])*(1+x[2])*v[6][1] + (1+x[0])*(1+x[2])*v[7][1]
+        f12 = -(1-x[0])*(1-x[1])*v[0][1] - (1+x[0])*(1-x[1])*v[1][1] - \
+               (1-x[0])*(1+x[1])*v[2][1] - (1+x[0])*(1+x[1])*v[3][1] + \
+               (1-x[0])*(1-x[1])*v[4][1] + (1+x[0])*(1-x[1])*v[5][1] + \
+               (1-x[0])*(1+x[1])*v[6][1] + (1+x[0])*(1+x[1])*v[7][1]
+        
+        f20 = -(1-x[1])*(1-x[2])*v[0][2] + (1-x[1])*(1-x[2])*v[1][2] - \
+               (1+x[1])*(1-x[2])*v[2][2] + (1+x[1])*(1-x[2])*v[3][2] - \
+               (1-x[1])*(1+x[2])*v[4][2] + (1-x[1])*(1+x[2])*v[5][2] - \
+               (1+x[1])*(1+x[2])*v[6][2] + (1+x[1])*(1+x[2])*v[7][2]
+        f21 = -(1-x[0])*(1-x[2])*v[0][2] - (1+x[0])*(1-x[2])*v[1][2] + \
+               (1-x[0])*(1-x[2])*v[2][2] + (1+x[0])*(1-x[2])*v[3][2] - \
+               (1-x[0])*(1+x[2])*v[4][2] - (1+x[0])*(1+x[2])*v[5][2] + \
+               (1-x[0])*(1+x[2])*v[6][2] + (1+x[0])*(1+x[2])*v[7][2]
+        f22 = -(1-x[0])*(1-x[1])*v[0][2] - (1+x[0])*(1-x[1])*v[1][2] - \
+               (1-x[0])*(1+x[1])*v[2][2] - (1+x[0])*(1+x[1])*v[3][2] + \
+               (1-x[0])*(1-x[1])*v[4][2] + (1+x[0])*(1-x[1])*v[5][2] + \
+               (1-x[0])*(1+x[1])*v[6][2] + (1+x[0])*(1+x[1])*v[7][2]
+
+        return np.array([[f00, f01, f02], [f10, f11, f12], [f20, f21, f22]])

diff -r 9fb3fcfca89331735dc2c9b54f592825d386ff14 -r c6a129901ef97e27212201bc667c73c3b278c04f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -158,6 +158,8 @@
           )
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
+    config.add_extension("element_mappings",
+                         ["yt/utilities/lib/element_mappings.pyx"])
     config.add_extension("ragged_arrays",
                          ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("amr_kdtools", 


https://bitbucket.org/yt_analysis/yt/commits/5e34830981dd/
Changeset:   5e34830981dd
Branch:      yt
User:        atmyers
Date:        2015-06-26 17:55:45+00:00
Summary:     add file for testing element mappings
Affected #:  1 file

diff -r c6a129901ef97e27212201bc667c73c3b278c04f -r 5e34830981ddab981f924f7cf82edc3600f00b24 yt/utilities/lib/tests/test_element_mappings.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+from yt.testing import *
+from yt.utilities.lib.element_mappings import \
+    P1Mapping2D, \
+    P1Mapping3D, \
+    Q1Mapping2D, \
+    Q1Mapping3D
+
+
+def setup():
+    pass
+
+
+def test_sample():
+    pass


https://bitbucket.org/yt_analysis/yt/commits/d8a055a16ba8/
Changeset:   d8a055a16ba8
Branch:      yt
User:        atmyers
Date:        2015-06-26 18:14:48+00:00
Summary:     filling in some tests
Affected #:  1 file

diff -r 5e34830981ddab981f924f7cf82edc3600f00b24 -r d8a055a16ba8f284e62f98c2d92474edc5353e9f yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -12,5 +12,40 @@
     pass
 
 
-def test_sample():
+def test_P1Mapping2D():
+    NV = 3
+    NDIM = 2
+    vertices = np.empty((NV, NDIM))
+
+    vertices[0, 0] = 0.1
+    vertices[0, 1] = 0.2
+
+    vertices[1, 0] = 0.6
+    vertices[1, 1] = 0.3
+
+    vertices[2, 0] = 0.2
+    vertices[2, 1] = 0.7
+
+    physical_x[0] = 0.4
+    physical_x[1] = 0.4
+
+    field_values = np.empty(NV)
+    field_values[0] = 1.0
+    field_values[1] = 2.0
+    field_values[2] = 3.0
+
+    physical_x = np.empty(NDIM)
+    for i in range(NV):
+        physical_x = vertices[i]
+
+
+def test_P1Mapping3D():
     pass
+
+
+def test_Q1Mapping2D():
+    pass
+
+
+def test_Q1Mapping3D():
+    pass


https://bitbucket.org/yt_analysis/yt/commits/2886ca8700fa/
Changeset:   2886ca8700fa
Branch:      yt
User:        atmyers
Date:        2015-06-26 19:18:38+00:00
Summary:     implementing samplers for the triangular / tetrahedral elements
Affected #:  2 files

diff -r d8a055a16ba8f284e62f98c2d92474edc5353e9f -r 2886ca8700fa9148f4cc6b17b3cd750119d03d20 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -1,7 +1,7 @@
 """
 This file contains coordinate mappings between physical coordinates and those
-defined on unit elements, for doing intracell interpolation on finite element
-data.
+defined on unit elements, as well as doing the corresponding intracell 
+interpolation on finite element data.
 
 
 """
@@ -23,8 +23,10 @@
 ctypedef np.float64_t DTYPE_t
 
 
-class P1Mapping2D:
-    def map_real_to_unit(self, physical_coord, vertices):
+class P1Sampler2D:
+
+    @staticmethod
+    def map_real_to_unit(physical_coord, vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -48,9 +50,20 @@
     
         return np.array([u, v])
 
+    @staticmethod
+    def sample_at_unit_point(coord, vals):
+        return vals[0]*(1 - coord[0] - coord[1]) + \
+            vals[1]*coord[0] + vals[2]*coord[1]
 
-class P1Mapping3D:
-    def map_real_to_unit(self, physical_coord, vertices):
+    @classmethod
+    def sample_at_real_point(cls, coord, vertices, vals):
+        mapped_coord = cls.map_real_to_unit(coord, vertices)
+        return cls.sample_at_unit_point(coord, vals)
+
+class P1Sampler3D:
+
+    @staticmethod
+    def map_real_to_unit(physical_coord, vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -82,8 +95,19 @@
     
         return c
 
+    @staticmethod
+    def sample_at_unit_point(coord, vals):
+        return vals[0]*coord[0] + vals[1]*coord[1] + \
+            vals[2]*coord[2] + vals[3]*coord[3]
 
-class Q1Mapping2D:
+    @classmethod
+    def sample_at_real_point(cls, coord, vertices, vals):
+        mapped_coord = cls.map_real_to_unit(coord, vertices)
+        return cls.sample_at_unit_point(coord, vals)
+
+
+class Q1Sampler2D:
+
     def map_real_to_unit(self, physical_coord, vertices):
     
         # initial guess for the Newton solve
@@ -91,6 +115,7 @@
         x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
         return x
 
+    @staticmethod
     def _f(x, v, phys_x):
         f1 = v[0][0]*(1-x[0])*(1-x[1]) + \
              v[1][0]*(1+x[0])*(1-x[1]) + \
@@ -102,6 +127,7 @@
              v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
         return np.array([f1, f2])
 
+    @staticmethod
     def _J(x, v, phys_x):
         f11 = -(1-x[1])*v[0][0] + \
                (1-x[1])*v[1][0] - \
@@ -122,15 +148,15 @@
         return np.array([[f11, f12], [f21, f22]])
 
 
-class Q1Mapping3D:
+class Q1Sampler3D:
 
     def map_real_to_unit(self, physical_coord, vertices):
     
-        # initial guess for the Newton solve
-        x0 = np.array([0.0, 0.0])
+        x0 = np.array([0.0, 0.0, 0.0])  # initial guess
         x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
         return x
 
+    @staticmethod
     def _f(x, v, phys_x):
         f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
              v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
@@ -158,6 +184,7 @@
              v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
         return np.array([f0, f1, f2])
 
+    @staticmethod
     def _J(x, v, phys_x):
     
         f00 = -(1-x[1])*(1-x[2])*v[0][0] + (1-x[1])*(1-x[2])*v[1][0] - \

diff -r d8a055a16ba8f284e62f98c2d92474edc5353e9f -r 2886ca8700fa9148f4cc6b17b3cd750119d03d20 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -2,17 +2,17 @@
 
 from yt.testing import *
 from yt.utilities.lib.element_mappings import \
-    P1Mapping2D, \
-    P1Mapping3D, \
-    Q1Mapping2D, \
-    Q1Mapping3D
+    P1Sampler2D, \
+    P1Sampler3D, \
+    Q1Sampler2D, \
+    Q1Sampler3D
 
 
 def setup():
     pass
 
 
-def test_P1Mapping2D():
+def test_P1Sampler2D():
     NV = 3
     NDIM = 2
     vertices = np.empty((NV, NDIM))
@@ -26,9 +26,6 @@
     vertices[2, 0] = 0.2
     vertices[2, 1] = 0.7
 
-    physical_x[0] = 0.4
-    physical_x[1] = 0.4
-
     field_values = np.empty(NV)
     field_values[0] = 1.0
     field_values[1] = 2.0
@@ -37,15 +34,51 @@
     physical_x = np.empty(NDIM)
     for i in range(NV):
         physical_x = vertices[i]
+        sampler = P1Sampler2D()
+        x = sampler.map_real_to_unit(physical_x, vertices)
+        val = P1Sampler2D.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val)
 
 
-def test_P1Mapping3D():
+def test_P1Sampler3D():
+    NV = 4
+    NDIM = 3
+    vertices = np.empty((NV, NDIM))
+
+    vertices[0, 0] = 0.1
+    vertices[0, 1] = 0.1
+    vertices[0, 2] = 0.1
+
+    vertices[1, 0] = 0.6
+    vertices[1, 1] = 0.3
+    vertices[1, 2] = 0.2
+
+    vertices[2, 0] = 0.2
+    vertices[2, 1] = 0.7
+    vertices[2, 2] = 0.2
+
+    vertices[3, 0] = 0.4
+    vertices[3, 1] = 0.4
+    vertices[3, 2] = 0.7
+
+    field_values = np.empty(NV)
+    field_values[0] = 1.0
+    field_values[1] = 2.0
+    field_values[2] = 3.0
+    field_values[3] = 4.0
+
+    physical_x = np.empty(NDIM)
+    for i in range(NV):
+        physical_x = vertices[i]
+        sampler = P1Sampler3D()
+        x = sampler.map_real_to_unit(physical_x, vertices)
+        val = P1Sampler3D.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val)
+
+
+def test_Q1Sampler2D():
     pass
 
 
-def test_Q1Mapping2D():
+def test_Q1Sampler3D():
     pass
-
-
-def test_Q1Mapping3D():
-    pass


https://bitbucket.org/yt_analysis/yt/commits/78e88adfde5d/
Changeset:   78e88adfde5d
Branch:      yt
User:        atmyers
Date:        2015-06-26 19:36:09+00:00
Summary:     tests and implementations of the Q1 mappings and samplers. Really slow, but they work.
Affected #:  2 files

diff -r 2886ca8700fa9148f4cc6b17b3cd750119d03d20 -r 78e88adfde5d3e75cb18fe02090df28407c31f01 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -147,6 +147,18 @@
                (1+x[0])*v[3][1]
         return np.array([[f11, f12], [f21, f22]])
 
+    @staticmethod
+    def sample_at_unit_point(coord, vals):
+        x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
+            vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
+            vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
+            vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
+        return 0.25*x
+
+    @classmethod
+    def sample_at_real_point(cls, coord, vertices, vals):
+        mapped_coord = cls.map_real_to_unit(coord, vertices)
+        return cls.sample_at_unit_point(coord, vals)
 
 class Q1Sampler3D:
 
@@ -228,3 +240,20 @@
                (1-x[0])*(1+x[1])*v[6][2] + (1+x[0])*(1+x[1])*v[7][2]
 
         return np.array([[f00, f01, f02], [f10, f11, f12], [f20, f21, f22]])
+
+    @staticmethod
+    def sample_at_unit_point(coord, vals):
+        x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+            vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+            vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+            vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+            vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+            vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+            vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
+            vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
+        return 0.125*x
+
+    @classmethod
+    def sample_at_real_point(cls, coord, vertices, vals):
+        mapped_coord = cls.map_real_to_unit(coord, vertices)
+        return cls.sample_at_unit_point(coord, vals)

diff -r 2886ca8700fa9148f4cc6b17b3cd750119d03d20 -r 78e88adfde5d3e75cb18fe02090df28407c31f01 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -77,8 +77,57 @@
 
 
 def test_Q1Sampler2D():
-    pass
+    NV = 4
+    NDIM = 2
+    vertices = np.empty((NV, NDIM))
+
+    vertices[0, 0] = 0.1
+    vertices[0, 1] = 0.2
+
+    vertices[1, 0] = 0.6
+    vertices[1, 1] = 0.3
+
+    vertices[2, 0] = 0.2
+    vertices[2, 1] = 0.7
+
+    vertices[3, 0] = 0.7
+    vertices[3, 1] = 0.9
+
+    field_values = np.empty(NV)
+    field_values[0] = 1.0
+    field_values[1] = 2.0
+    field_values[2] = 3.0
+    field_values[3] = 4.0
+
+    physical_x = np.empty(NDIM)
+    for i in range(NV):
+        physical_x = vertices[i]
+        sampler = Q1Sampler2D()
+        x = sampler.map_real_to_unit(physical_x, vertices)
+        val = Q1Sampler2D.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val)
 
 
 def test_Q1Sampler3D():
-    pass
+    NV = 8
+    NDIM = 3
+
+    vertices = np.array([[2.00657905, 0.6888599,  1.4375],
+                         [1.8658198,  1.00973171, 1.4375],
+                         [1.97881594, 1.07088163, 1.4375],
+                         [2.12808879, 0.73057381, 1.4375],
+                         [2.00657905, 0.6888599,  1.2],
+                         [1.8658198,  1.00973171, 1.2],
+                         [1.97881594, 1.07088163, 1.2],
+                         [2.12808879, 0.73057381, 1.2]])
+
+    field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
+                             0.54464296, 0.54464149, 0.5446415, 0.54464296])
+
+    physical_x = np.empty(NDIM)
+    for i in range(NV):
+        physical_x = vertices[i]
+        sampler = Q1Sampler3D()
+        x = sampler.map_real_to_unit(physical_x, vertices)
+        val = Q1Sampler3D.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val)


https://bitbucket.org/yt_analysis/yt/commits/0ef90aff5dab/
Changeset:   0ef90aff5dab
Branch:      yt
User:        atmyers
Date:        2015-06-26 19:44:13+00:00
Summary:     put common method into a base class
Affected #:  1 file

diff -r 78e88adfde5d3e75cb18fe02090df28407c31f01 -r 0ef90aff5dab77f1d54c3c59ea8c14de23102c5d yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -23,10 +23,26 @@
 ctypedef np.float64_t DTYPE_t
 
 
-class P1Sampler2D:
+class Sampler:
+
+    @classmethod
+    def map_real_to_unit(cls, physical_coord, vertices):
+        raise NotImplementedError
 
     @staticmethod
-    def map_real_to_unit(physical_coord, vertices):
+    def sample_at_unit_point(coord, vals):
+        raise NotImplementedError
+
+    @classmethod
+    def sample_at_real_point(cls, coord, vertices, vals):
+        mapped_coord = cls.map_real_to_unit(coord, vertices)
+        return cls.sample_at_unit_point(coord, vals)
+    
+
+class P1Sampler2D(Sampler):
+
+    @classmethod
+    def map_real_to_unit(cls, physical_coord, vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -55,15 +71,10 @@
         return vals[0]*(1 - coord[0] - coord[1]) + \
             vals[1]*coord[0] + vals[2]*coord[1]
 
-    @classmethod
-    def sample_at_real_point(cls, coord, vertices, vals):
-        mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(coord, vals)
-
 class P1Sampler3D:
 
-    @staticmethod
-    def map_real_to_unit(physical_coord, vertices):
+    @classmethod
+    def map_real_to_unit(cls, physical_coord, vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -100,19 +111,15 @@
         return vals[0]*coord[0] + vals[1]*coord[1] + \
             vals[2]*coord[2] + vals[3]*coord[3]
 
-    @classmethod
-    def sample_at_real_point(cls, coord, vertices, vals):
-        mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(coord, vals)
-
 
 class Q1Sampler2D:
 
-    def map_real_to_unit(self, physical_coord, vertices):
+    @classmethod
+    def map_real_to_unit(cls, physical_coord, vertices):
     
         # initial guess for the Newton solve
         x0 = np.array([0.0, 0.0])
-        x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
+        x = fsolve(cls._f, x0, args=(vertices, physical_coord), fprime=cls._J)
         return x
 
     @staticmethod
@@ -155,17 +162,14 @@
             vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
         return 0.25*x
 
-    @classmethod
-    def sample_at_real_point(cls, coord, vertices, vals):
-        mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(coord, vals)
 
 class Q1Sampler3D:
 
-    def map_real_to_unit(self, physical_coord, vertices):
+    @classmethod
+    def map_real_to_unit(cls, physical_coord, vertices):
     
         x0 = np.array([0.0, 0.0, 0.0])  # initial guess
-        x = fsolve(self._f, x0, args=(vertices, physical_coord), fprime=self._J)
+        x = fsolve(cls._f, x0, args=(vertices, physical_coord), fprime=cls._J)
         return x
 
     @staticmethod
@@ -252,8 +256,3 @@
             vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
             vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
         return 0.125*x
-
-    @classmethod
-    def sample_at_real_point(cls, coord, vertices, vals):
-        mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(coord, vals)


https://bitbucket.org/yt_analysis/yt/commits/832557ccaf4a/
Changeset:   832557ccaf4a
Branch:      yt
User:        atmyers
Date:        2015-06-26 19:58:26+00:00
Summary:     simplify the tests a bit
Affected #:  1 file

diff -r 0ef90aff5dab77f1d54c3c59ea8c14de23102c5d -r 832557ccaf4a47621c153b9fd30c77a6460fcc06 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -1,6 +1,6 @@
 import numpy as np
 
-from yt.testing import *
+from yt.testing import assert_almost_equal
 from yt.utilities.lib.element_mappings import \
     P1Sampler2D, \
     P1Sampler3D, \
@@ -15,21 +15,12 @@
 def test_P1Sampler2D():
     NV = 3
     NDIM = 2
-    vertices = np.empty((NV, NDIM))
 
-    vertices[0, 0] = 0.1
-    vertices[0, 1] = 0.2
+    vertices = np.array([[0.1,  0.2],
+                         [0.6,  0.3],
+                         [0.2,  0.7]])
 
-    vertices[1, 0] = 0.6
-    vertices[1, 1] = 0.3
-
-    vertices[2, 0] = 0.2
-    vertices[2, 1] = 0.7
-
-    field_values = np.empty(NV)
-    field_values[0] = 1.0
-    field_values[1] = 2.0
-    field_values[2] = 3.0
+    field_values = np.array([1.0, 2.0, 3.0])
 
     physical_x = np.empty(NDIM)
     for i in range(NV):
@@ -43,29 +34,13 @@
 def test_P1Sampler3D():
     NV = 4
     NDIM = 3
-    vertices = np.empty((NV, NDIM))
 
-    vertices[0, 0] = 0.1
-    vertices[0, 1] = 0.1
-    vertices[0, 2] = 0.1
+    vertices = np.array([[0.1,  0.1,  0.1],
+                         [0.6,  0.3,  0.2],
+                         [0.2,  0.7,  0.2],
+                         [0.4,  0.4,  0.7]])
 
-    vertices[1, 0] = 0.6
-    vertices[1, 1] = 0.3
-    vertices[1, 2] = 0.2
-
-    vertices[2, 0] = 0.2
-    vertices[2, 1] = 0.7
-    vertices[2, 2] = 0.2
-
-    vertices[3, 0] = 0.4
-    vertices[3, 1] = 0.4
-    vertices[3, 2] = 0.7
-
-    field_values = np.empty(NV)
-    field_values[0] = 1.0
-    field_values[1] = 2.0
-    field_values[2] = 3.0
-    field_values[3] = 4.0
+    field_values = np.array([1.0, 2.0, 3.0, 4.0])
 
     physical_x = np.empty(NDIM)
     for i in range(NV):
@@ -79,25 +54,13 @@
 def test_Q1Sampler2D():
     NV = 4
     NDIM = 2
-    vertices = np.empty((NV, NDIM))
 
-    vertices[0, 0] = 0.1
-    vertices[0, 1] = 0.2
+    vertices = np.array([[0.1,  0.2],
+                         [0.6,  0.3],
+                         [0.2,  0.7],
+                         [0.7,  0.9]])
 
-    vertices[1, 0] = 0.6
-    vertices[1, 1] = 0.3
-
-    vertices[2, 0] = 0.2
-    vertices[2, 1] = 0.7
-
-    vertices[3, 0] = 0.7
-    vertices[3, 1] = 0.9
-
-    field_values = np.empty(NV)
-    field_values[0] = 1.0
-    field_values[1] = 2.0
-    field_values[2] = 3.0
-    field_values[3] = 4.0
+    field_values = np.array([1.0, 2.0, 3.0, 4.0])
 
     physical_x = np.empty(NDIM)
     for i in range(NV):
@@ -116,10 +79,10 @@
                          [1.8658198,  1.00973171, 1.4375],
                          [1.97881594, 1.07088163, 1.4375],
                          [2.12808879, 0.73057381, 1.4375],
-                         [2.00657905, 0.6888599,  1.2],
-                         [1.8658198,  1.00973171, 1.2],
-                         [1.97881594, 1.07088163, 1.2],
-                         [2.12808879, 0.73057381, 1.2]])
+                         [2.00657905, 0.6888599,  1.2   ],
+                         [1.8658198,  1.00973171, 1.2   ],
+                         [1.97881594, 1.07088163, 1.2   ],
+                         [2.12808879, 0.73057381, 1.2   ]])
 
     field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
                              0.54464296, 0.54464149, 0.5446415, 0.54464296])


https://bitbucket.org/yt_analysis/yt/commits/ffb75d14365d/
Changeset:   ffb75d14365d
Branch:      yt
User:        atmyers
Date:        2015-06-26 20:11:30+00:00
Summary:     fixing and testing the sample_at_real_point method
Affected #:  2 files

diff -r 832557ccaf4a47621c153b9fd30c77a6460fcc06 -r ffb75d14365d0e392f43ac7e7fecf6a271063ca7 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -23,7 +23,7 @@
 ctypedef np.float64_t DTYPE_t
 
 
-class Sampler:
+class ElementSampler:
 
     @classmethod
     def map_real_to_unit(cls, physical_coord, vertices):
@@ -36,10 +36,10 @@
     @classmethod
     def sample_at_real_point(cls, coord, vertices, vals):
         mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(coord, vals)
+        return cls.sample_at_unit_point(mapped_coord, vals)
     
 
-class P1Sampler2D(Sampler):
+class P1Sampler2D(ElementSampler):
 
     @classmethod
     def map_real_to_unit(cls, physical_coord, vertices):
@@ -71,7 +71,7 @@
         return vals[0]*(1 - coord[0] - coord[1]) + \
             vals[1]*coord[0] + vals[2]*coord[1]
 
-class P1Sampler3D:
+class P1Sampler3D(ElementSampler):
 
     @classmethod
     def map_real_to_unit(cls, physical_coord, vertices):
@@ -112,7 +112,7 @@
             vals[2]*coord[2] + vals[3]*coord[3]
 
 
-class Q1Sampler2D:
+class Q1Sampler2D(ElementSampler):
 
     @classmethod
     def map_real_to_unit(cls, physical_coord, vertices):
@@ -163,7 +163,7 @@
         return 0.25*x
 
 
-class Q1Sampler3D:
+class Q1Sampler3D(ElementSampler):
 
     @classmethod
     def map_real_to_unit(cls, physical_coord, vertices):

diff -r 832557ccaf4a47621c153b9fd30c77a6460fcc06 -r ffb75d14365d0e392f43ac7e7fecf6a271063ca7 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -47,8 +47,10 @@
         physical_x = vertices[i]
         sampler = P1Sampler3D()
         x = sampler.map_real_to_unit(physical_x, vertices)
-        val = P1Sampler3D.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val)
+        val1 = P1Sampler3D.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val1)
+        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
+        assert_almost_equal(val1, val2)
 
 
 def test_Q1Sampler2D():


https://bitbucket.org/yt_analysis/yt/commits/a6382e5688a6/
Changeset:   a6382e5688a6
Branch:      yt
User:        atmyers
Date:        2015-06-26 20:15:21+00:00
Summary:     fixing the tests
Affected #:  1 file

diff -r ffb75d14365d0e392f43ac7e7fecf6a271063ca7 -r a6382e5688a67256083c3946e39c26cd43e7fd58 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -27,8 +27,10 @@
         physical_x = vertices[i]
         sampler = P1Sampler2D()
         x = sampler.map_real_to_unit(physical_x, vertices)
-        val = P1Sampler2D.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val)
+        val1 = sampler.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val1)
+        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
+        assert_almost_equal(val1, val2)
 
 
 def test_P1Sampler3D():
@@ -47,7 +49,7 @@
         physical_x = vertices[i]
         sampler = P1Sampler3D()
         x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = P1Sampler3D.sample_at_unit_point(x, field_values)
+        val1 = sampler.sample_at_unit_point(x, field_values)
         assert_almost_equal(field_values[i], val1)
         val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
         assert_almost_equal(val1, val2)
@@ -69,8 +71,10 @@
         physical_x = vertices[i]
         sampler = Q1Sampler2D()
         x = sampler.map_real_to_unit(physical_x, vertices)
-        val = Q1Sampler2D.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val)
+        val1 = sampler.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val1)
+        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
+        assert_almost_equal(val1, val2)
 
 
 def test_Q1Sampler3D():
@@ -94,5 +98,7 @@
         physical_x = vertices[i]
         sampler = Q1Sampler3D()
         x = sampler.map_real_to_unit(physical_x, vertices)
-        val = Q1Sampler3D.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val)
+        val1 = sampler.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val1)
+        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
+        assert_almost_equal(val1, val2)


https://bitbucket.org/yt_analysis/yt/commits/9dc9d2f5f208/
Changeset:   9dc9d2f5f208
Branch:      yt
User:        atmyers
Date:        2015-06-26 20:27:24+00:00
Summary:     simplifying the way the tests are set up
Affected #:  1 file

diff -r a6382e5688a67256083c3946e39c26cd43e7fd58 -r 9dc9d2f5f2086479d231c80aea7e94efe9564458 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -12,31 +12,31 @@
     pass
 
 
+def check_all_vertices(sampler, vertices, field_values):
+    NV = vertices.shape[0]
+    NDIM = vertices.shape[1]
+    physical_x = np.empty(NDIM)
+    for i in range(NV):
+        physical_x = vertices[i]
+        x = sampler.map_real_to_unit(physical_x, vertices)
+        val1 = sampler.sample_at_unit_point(x, field_values)
+        assert_almost_equal(field_values[i], val1)
+        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
+        assert_almost_equal(val1, val2)
+
+
 def test_P1Sampler2D():
-    NV = 3
-    NDIM = 2
-
     vertices = np.array([[0.1,  0.2],
                          [0.6,  0.3],
                          [0.2,  0.7]])
 
     field_values = np.array([1.0, 2.0, 3.0])
 
-    physical_x = np.empty(NDIM)
-    for i in range(NV):
-        physical_x = vertices[i]
-        sampler = P1Sampler2D()
-        x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = sampler.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val1)
-        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
-        assert_almost_equal(val1, val2)
+    sampler = P1Sampler2D()
+    check_all_vertices(sampler, vertices, field_values)
 
 
 def test_P1Sampler3D():
-    NV = 4
-    NDIM = 3
-
     vertices = np.array([[0.1,  0.1,  0.1],
                          [0.6,  0.3,  0.2],
                          [0.2,  0.7,  0.2],
@@ -44,21 +44,11 @@
 
     field_values = np.array([1.0, 2.0, 3.0, 4.0])
 
-    physical_x = np.empty(NDIM)
-    for i in range(NV):
-        physical_x = vertices[i]
-        sampler = P1Sampler3D()
-        x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = sampler.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val1)
-        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
-        assert_almost_equal(val1, val2)
+    sampler = P1Sampler3D()
+    check_all_vertices(sampler, vertices, field_values)
 
 
 def test_Q1Sampler2D():
-    NV = 4
-    NDIM = 2
-
     vertices = np.array([[0.1,  0.2],
                          [0.6,  0.3],
                          [0.2,  0.7],
@@ -66,21 +56,11 @@
 
     field_values = np.array([1.0, 2.0, 3.0, 4.0])
 
-    physical_x = np.empty(NDIM)
-    for i in range(NV):
-        physical_x = vertices[i]
-        sampler = Q1Sampler2D()
-        x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = sampler.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val1)
-        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
-        assert_almost_equal(val1, val2)
+    sampler = Q1Sampler2D()
+    check_all_vertices(sampler, vertices, field_values)
 
 
 def test_Q1Sampler3D():
-    NV = 8
-    NDIM = 3
-
     vertices = np.array([[2.00657905, 0.6888599,  1.4375],
                          [1.8658198,  1.00973171, 1.4375],
                          [1.97881594, 1.07088163, 1.4375],
@@ -93,12 +73,5 @@
     field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
                              0.54464296, 0.54464149, 0.5446415, 0.54464296])
 
-    physical_x = np.empty(NDIM)
-    for i in range(NV):
-        physical_x = vertices[i]
-        sampler = Q1Sampler3D()
-        x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = sampler.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val1)
-        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
-        assert_almost_equal(val1, val2)
+    sampler = Q1Sampler3D()
+    check_all_vertices(sampler, vertices, field_values)


https://bitbucket.org/yt_analysis/yt/commits/cd260d423cc3/
Changeset:   cd260d423cc3
Branch:      yt
User:        atmyers
Date:        2015-06-26 22:27:00+00:00
Summary:     starting to cythonize the element samplers
Affected #:  2 files

diff -r 9dc9d2f5f2086479d231c80aea7e94efe9564458 -r cd260d423cc35cd4dfd7b9bc27ac9b5421f04771 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -14,35 +14,38 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+cimport numpy as np
+from numpy cimport ndarray
+cimport cython
 import numpy as np
 from scipy.optimize import fsolve
-cimport numpy as np
-cimport cython
 
-DTYPE = np.float64
-ctypedef np.float64_t DTYPE_t
 
+cdef class ElementSampler:
 
-class ElementSampler:
-
-    @classmethod
-    def map_real_to_unit(cls, physical_coord, vertices):
+    def map_real_to_unit(self,
+                         np.ndarray physical_coord, 
+                         np.ndarray vertices):
         raise NotImplementedError
 
-    @staticmethod
-    def sample_at_unit_point(coord, vals):
+    def sample_at_unit_point(self,
+                             np.ndarray coord, 
+                             np.ndarray vals):
         raise NotImplementedError
 
-    @classmethod
-    def sample_at_real_point(cls, coord, vertices, vals):
-        mapped_coord = cls.map_real_to_unit(coord, vertices)
-        return cls.sample_at_unit_point(mapped_coord, vals)
+    def sample_at_real_point(self,
+                             np.ndarray coord, 
+                             np.ndarray vertices, 
+                             np.ndarray vals):
+        mapped_coord = self.map_real_to_unit(coord, vertices)
+        return self.sample_at_unit_point(mapped_coord, vals)
     
 
-class P1Sampler2D(ElementSampler):
+cdef class P1Sampler2D(ElementSampler):
 
-    @classmethod
-    def map_real_to_unit(cls, physical_coord, vertices):
+    def map_real_to_unit(self, 
+                         np.ndarray physical_coord, 
+                         np.ndarray vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -66,15 +69,17 @@
     
         return np.array([u, v])
 
-    @staticmethod
-    def sample_at_unit_point(coord, vals):
+    def sample_at_unit_point(self,
+                             np.ndarray coord, 
+                             np.ndarray vals):
         return vals[0]*(1 - coord[0] - coord[1]) + \
             vals[1]*coord[0] + vals[2]*coord[1]
 
-class P1Sampler3D(ElementSampler):
+cdef class P1Sampler3D(ElementSampler):
 
-    @classmethod
-    def map_real_to_unit(cls, physical_coord, vertices):
+    def map_real_to_unit(self, 
+                         np.ndarray physical_coord, 
+                         np.ndarray vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -106,56 +111,60 @@
     
         return c
 
-    @staticmethod
-    def sample_at_unit_point(coord, vals):
+    def sample_at_unit_point(self,
+                             np.ndarray coord, 
+                             np.ndarray vals):
         return vals[0]*coord[0] + vals[1]*coord[1] + \
-            vals[2]*coord[2] + vals[3]*coord[3]
+               vals[2]*coord[2] + vals[3]*coord[3]
 
 
-class Q1Sampler2D(ElementSampler):
+cdef np.ndarray[np.float64_t, ndim=1] Q1Function2D(np.ndarray[np.float64_t, ndim=1] x,
+                                                   np.ndarray[np.float64_t, ndim=2] v,
+                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
+    f1 = v[0][0]*(1-x[0])*(1-x[1]) + \
+         v[1][0]*(1+x[0])*(1-x[1]) + \
+         v[2][0]*(1-x[0])*(1+x[1]) + \
+         v[3][0]*(1+x[0])*(1+x[1]) - 4.0*phys_x[0]
+    f2 = v[0][1]*(1-x[0])*(1-x[1]) + \
+         v[1][1]*(1+x[0])*(1-x[1]) + \
+         v[2][1]*(1-x[0])*(1+x[1]) + \
+         v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
+    return np.array([f1, f2])
 
-    @classmethod
-    def map_real_to_unit(cls, physical_coord, vertices):
+
+cdef np.ndarray[np.float64_t, ndim=2] Q1Jacobian2D(np.ndarray[np.float64_t, ndim=1] x,
+                                                   np.ndarray[np.float64_t, ndim=2] v,
+                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
+    f11 = -(1-x[1])*v[0][0] + \
+          (1-x[1])*v[1][0] - \
+          (1+x[1])*v[2][0] + \
+          (1+x[1])*v[3][0]
+    f12 = -(1-x[0])*v[0][0] - \
+          (1+x[0])*v[1][0] + \
+          (1-x[0])*v[2][0] + \
+          (1+x[0])*v[3][0]
+    f21 = -(1-x[1])*v[0][1] + \
+          (1-x[1])*v[1][1] - \
+          (1+x[1])*v[2][1] + \
+          (1+x[1])*v[3][1]
+    f22 = -(1-x[0])*v[0][1] - \
+          (1+x[0])*v[1][1] + \
+          (1-x[0])*v[2][1] + \
+          (1+x[0])*v[3][1]
+    return np.array([[f11, f12], [f21, f22]])
+
+
+cdef class Q1Sampler2D(ElementSampler):
+
+    def map_real_to_unit(self, np.ndarray physical_coord, np.ndarray vertices):
     
         # initial guess for the Newton solve
         x0 = np.array([0.0, 0.0])
-        x = fsolve(cls._f, x0, args=(vertices, physical_coord), fprime=cls._J)
+        x = fsolve(Q1Function2D, x0, args=(vertices, physical_coord),
+                   fprime=Q1Jacobian2D)
         return x
 
-    @staticmethod
-    def _f(x, v, phys_x):
-        f1 = v[0][0]*(1-x[0])*(1-x[1]) + \
-             v[1][0]*(1+x[0])*(1-x[1]) + \
-             v[2][0]*(1-x[0])*(1+x[1]) + \
-             v[3][0]*(1+x[0])*(1+x[1]) - 4.0*phys_x[0]
-        f2 = v[0][1]*(1-x[0])*(1-x[1]) + \
-             v[1][1]*(1+x[0])*(1-x[1]) + \
-             v[2][1]*(1-x[0])*(1+x[1]) + \
-             v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
-        return np.array([f1, f2])
-
-    @staticmethod
-    def _J(x, v, phys_x):
-        f11 = -(1-x[1])*v[0][0] + \
-               (1-x[1])*v[1][0] - \
-               (1+x[1])*v[2][0] + \
-               (1+x[1])*v[3][0]
-        f12 = -(1-x[0])*v[0][0] - \
-               (1+x[0])*v[1][0] + \
-               (1-x[0])*v[2][0] + \
-               (1+x[0])*v[3][0]
-        f21 = -(1-x[1])*v[0][1] + \
-               (1-x[1])*v[1][1] - \
-               (1+x[1])*v[2][1] + \
-               (1+x[1])*v[3][1]
-        f22 = -(1-x[0])*v[0][1] - \
-               (1+x[0])*v[1][1] + \
-               (1-x[0])*v[2][1] + \
-               (1+x[0])*v[3][1]
-        return np.array([[f11, f12], [f21, f22]])
-
-    @staticmethod
-    def sample_at_unit_point(coord, vals):
+    def sample_at_unit_point(self, np.ndarray coord, np.ndarray vals):
         x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
             vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
             vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
@@ -163,90 +172,90 @@
         return 0.25*x
 
 
-class Q1Sampler3D(ElementSampler):
+cdef np.ndarray[np.float64_t, ndim=1] Q1Function3D(np.ndarray[np.float64_t, ndim=1] x,
+                                                   np.ndarray[np.float64_t, ndim=2] v,
+                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
+    f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+         v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+         v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+         v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+         v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+         v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+         v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+         v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
+    f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+         v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+         v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+         v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+         v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+         v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+         v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+         v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
+    f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+         v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+         v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+         v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+         v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+         v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+         v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+         v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
+    return np.array([f0, f1, f2])
 
-    @classmethod
-    def map_real_to_unit(cls, physical_coord, vertices):
+
+cdef np.ndarray[np.float64_t, ndim=1] Q1Jacobian3D(np.ndarray[np.float64_t, ndim=1] x,
+                                                   np.ndarray[np.float64_t, ndim=2] v,
+                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
+    f00 = -(1-x[1])*(1-x[2])*v[0][0] + (1-x[1])*(1-x[2])*v[1][0] - \
+          (1+x[1])*(1-x[2])*v[2][0] + (1+x[1])*(1-x[2])*v[3][0] - \
+          (1-x[1])*(1+x[2])*v[4][0] + (1-x[1])*(1+x[2])*v[5][0] - \
+          (1+x[1])*(1+x[2])*v[6][0] + (1+x[1])*(1+x[2])*v[7][0]
+    f01 = -(1-x[0])*(1-x[2])*v[0][0] - (1+x[0])*(1-x[2])*v[1][0] + \
+          (1-x[0])*(1-x[2])*v[2][0] + (1+x[0])*(1-x[2])*v[3][0] - \
+          (1-x[0])*(1+x[2])*v[4][0] - (1+x[0])*(1+x[2])*v[5][0] + \
+          (1-x[0])*(1+x[2])*v[6][0] + (1+x[0])*(1+x[2])*v[7][0]
+    f02 = -(1-x[0])*(1-x[1])*v[0][0] - (1+x[0])*(1-x[1])*v[1][0] - \
+          (1-x[0])*(1+x[1])*v[2][0] - (1+x[0])*(1+x[1])*v[3][0] + \
+          (1-x[0])*(1-x[1])*v[4][0] + (1+x[0])*(1-x[1])*v[5][0] + \
+          (1-x[0])*(1+x[1])*v[6][0] + (1+x[0])*(1+x[1])*v[7][0]
+
+    f10 = -(1-x[1])*(1-x[2])*v[0][1] + (1-x[1])*(1-x[2])*v[1][1] - \
+          (1+x[1])*(1-x[2])*v[2][1] + (1+x[1])*(1-x[2])*v[3][1] - \
+          (1-x[1])*(1+x[2])*v[4][1] + (1-x[1])*(1+x[2])*v[5][1] - \
+          (1+x[1])*(1+x[2])*v[6][1] + (1+x[1])*(1+x[2])*v[7][1]
+    f11 = -(1-x[0])*(1-x[2])*v[0][1] - (1+x[0])*(1-x[2])*v[1][1] + \
+          (1-x[0])*(1-x[2])*v[2][1] + (1+x[0])*(1-x[2])*v[3][1] - \
+          (1-x[0])*(1+x[2])*v[4][1] - (1+x[0])*(1+x[2])*v[5][1] + \
+          (1-x[0])*(1+x[2])*v[6][1] + (1+x[0])*(1+x[2])*v[7][1]
+    f12 = -(1-x[0])*(1-x[1])*v[0][1] - (1+x[0])*(1-x[1])*v[1][1] - \
+          (1-x[0])*(1+x[1])*v[2][1] - (1+x[0])*(1+x[1])*v[3][1] + \
+          (1-x[0])*(1-x[1])*v[4][1] + (1+x[0])*(1-x[1])*v[5][1] + \
+          (1-x[0])*(1+x[1])*v[6][1] + (1+x[0])*(1+x[1])*v[7][1]
     
+    f20 = -(1-x[1])*(1-x[2])*v[0][2] + (1-x[1])*(1-x[2])*v[1][2] - \
+          (1+x[1])*(1-x[2])*v[2][2] + (1+x[1])*(1-x[2])*v[3][2] - \
+          (1-x[1])*(1+x[2])*v[4][2] + (1-x[1])*(1+x[2])*v[5][2] - \
+          (1+x[1])*(1+x[2])*v[6][2] + (1+x[1])*(1+x[2])*v[7][2]
+    f21 = -(1-x[0])*(1-x[2])*v[0][2] - (1+x[0])*(1-x[2])*v[1][2] + \
+          (1-x[0])*(1-x[2])*v[2][2] + (1+x[0])*(1-x[2])*v[3][2] - \
+          (1-x[0])*(1+x[2])*v[4][2] - (1+x[0])*(1+x[2])*v[5][2] + \
+          (1-x[0])*(1+x[2])*v[6][2] + (1+x[0])*(1+x[2])*v[7][2]
+    f22 = -(1-x[0])*(1-x[1])*v[0][2] - (1+x[0])*(1-x[1])*v[1][2] - \
+          (1-x[0])*(1+x[1])*v[2][2] - (1+x[0])*(1+x[1])*v[3][2] + \
+          (1-x[0])*(1-x[1])*v[4][2] + (1+x[0])*(1-x[1])*v[5][2] + \
+          (1-x[0])*(1+x[1])*v[6][2] + (1+x[0])*(1+x[1])*v[7][2]
+
+    return np.array([[f00, f01, f02], [f10, f11, f12], [f20, f21, f22]])
+
+
+cdef class Q1Sampler3D(ElementSampler):
+
+    def map_real_to_unit(self, np.ndarray physical_coord, np.ndarray vertices):
         x0 = np.array([0.0, 0.0, 0.0])  # initial guess
-        x = fsolve(cls._f, x0, args=(vertices, physical_coord), fprime=cls._J)
+        x = fsolve(Q1Function3D, x0, args=(vertices, physical_coord),
+                   fprime=Q1Jacobian3D)
         return x
 
-    @staticmethod
-    def _f(x, v, phys_x):
-        f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-             v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-             v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-             v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-             v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-             v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-             v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-             v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
-        f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-             v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-             v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-             v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-             v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-             v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-             v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-             v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
-        f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-             v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-             v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-             v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-             v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-             v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-             v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-             v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
-        return np.array([f0, f1, f2])
-
-    @staticmethod
-    def _J(x, v, phys_x):
-    
-        f00 = -(1-x[1])*(1-x[2])*v[0][0] + (1-x[1])*(1-x[2])*v[1][0] - \
-               (1+x[1])*(1-x[2])*v[2][0] + (1+x[1])*(1-x[2])*v[3][0] - \
-               (1-x[1])*(1+x[2])*v[4][0] + (1-x[1])*(1+x[2])*v[5][0] - \
-               (1+x[1])*(1+x[2])*v[6][0] + (1+x[1])*(1+x[2])*v[7][0]
-        f01 = -(1-x[0])*(1-x[2])*v[0][0] - (1+x[0])*(1-x[2])*v[1][0] + \
-               (1-x[0])*(1-x[2])*v[2][0] + (1+x[0])*(1-x[2])*v[3][0] - \
-               (1-x[0])*(1+x[2])*v[4][0] - (1+x[0])*(1+x[2])*v[5][0] + \
-               (1-x[0])*(1+x[2])*v[6][0] + (1+x[0])*(1+x[2])*v[7][0]
-        f02 = -(1-x[0])*(1-x[1])*v[0][0] - (1+x[0])*(1-x[1])*v[1][0] - \
-               (1-x[0])*(1+x[1])*v[2][0] - (1+x[0])*(1+x[1])*v[3][0] + \
-               (1-x[0])*(1-x[1])*v[4][0] + (1+x[0])*(1-x[1])*v[5][0] + \
-               (1-x[0])*(1+x[1])*v[6][0] + (1+x[0])*(1+x[1])*v[7][0]
-        
-
-        f10 = -(1-x[1])*(1-x[2])*v[0][1] + (1-x[1])*(1-x[2])*v[1][1] - \
-               (1+x[1])*(1-x[2])*v[2][1] + (1+x[1])*(1-x[2])*v[3][1] - \
-               (1-x[1])*(1+x[2])*v[4][1] + (1-x[1])*(1+x[2])*v[5][1] - \
-               (1+x[1])*(1+x[2])*v[6][1] + (1+x[1])*(1+x[2])*v[7][1]
-        f11 = -(1-x[0])*(1-x[2])*v[0][1] - (1+x[0])*(1-x[2])*v[1][1] + \
-               (1-x[0])*(1-x[2])*v[2][1] + (1+x[0])*(1-x[2])*v[3][1] - \
-               (1-x[0])*(1+x[2])*v[4][1] - (1+x[0])*(1+x[2])*v[5][1] + \
-               (1-x[0])*(1+x[2])*v[6][1] + (1+x[0])*(1+x[2])*v[7][1]
-        f12 = -(1-x[0])*(1-x[1])*v[0][1] - (1+x[0])*(1-x[1])*v[1][1] - \
-               (1-x[0])*(1+x[1])*v[2][1] - (1+x[0])*(1+x[1])*v[3][1] + \
-               (1-x[0])*(1-x[1])*v[4][1] + (1+x[0])*(1-x[1])*v[5][1] + \
-               (1-x[0])*(1+x[1])*v[6][1] + (1+x[0])*(1+x[1])*v[7][1]
-        
-        f20 = -(1-x[1])*(1-x[2])*v[0][2] + (1-x[1])*(1-x[2])*v[1][2] - \
-               (1+x[1])*(1-x[2])*v[2][2] + (1+x[1])*(1-x[2])*v[3][2] - \
-               (1-x[1])*(1+x[2])*v[4][2] + (1-x[1])*(1+x[2])*v[5][2] - \
-               (1+x[1])*(1+x[2])*v[6][2] + (1+x[1])*(1+x[2])*v[7][2]
-        f21 = -(1-x[0])*(1-x[2])*v[0][2] - (1+x[0])*(1-x[2])*v[1][2] + \
-               (1-x[0])*(1-x[2])*v[2][2] + (1+x[0])*(1-x[2])*v[3][2] - \
-               (1-x[0])*(1+x[2])*v[4][2] - (1+x[0])*(1+x[2])*v[5][2] + \
-               (1-x[0])*(1+x[2])*v[6][2] + (1+x[0])*(1+x[2])*v[7][2]
-        f22 = -(1-x[0])*(1-x[1])*v[0][2] - (1+x[0])*(1-x[1])*v[1][2] - \
-               (1-x[0])*(1+x[1])*v[2][2] - (1+x[0])*(1+x[1])*v[3][2] + \
-               (1-x[0])*(1-x[1])*v[4][2] + (1+x[0])*(1-x[1])*v[5][2] + \
-               (1-x[0])*(1+x[1])*v[6][2] + (1+x[0])*(1+x[1])*v[7][2]
-
-        return np.array([[f00, f01, f02], [f10, f11, f12], [f20, f21, f22]])
-
-    @staticmethod
-    def sample_at_unit_point(coord, vals):
+    def sample_at_unit_point(self, np.ndarray coord, np.ndarray vals):
         x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
             vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
             vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \

diff -r 9dc9d2f5f2086479d231c80aea7e94efe9564458 -r cd260d423cc35cd4dfd7b9bc27ac9b5421f04771 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -159,7 +159,8 @@
     config.add_extension("write_array",
                          ["yt/utilities/lib/write_array.pyx"])
     config.add_extension("element_mappings",
-                         ["yt/utilities/lib/element_mappings.pyx"])
+                         ["yt/utilities/lib/element_mappings.pyx"],
+                         libraries=["m"])
     config.add_extension("ragged_arrays",
                          ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("amr_kdtools", 


https://bitbucket.org/yt_analysis/yt/commits/06a79221a0b1/
Changeset:   06a79221a0b1
Branch:      yt
User:        atmyers
Date:        2015-06-26 23:22:59+00:00
Summary:     more cythonizin'
Affected #:  1 file

diff -r cd260d423cc35cd4dfd7b9bc27ac9b5421f04771 -r 06a79221a0b12dde7d35322ade62bd25f78de8d4 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -43,9 +43,12 @@
 
 cdef class P1Sampler2D(ElementSampler):
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def map_real_to_unit(self, 
-                         np.ndarray physical_coord, 
-                         np.ndarray vertices):
+                         np.ndarray[np.float64_t, ndim=1] physical_coord, 
+                         np.ndarray[np.float64_t, ndim=2] vertices):
     
         x = physical_coord[0]
         y = physical_coord[1]
@@ -69,6 +72,9 @@
     
         return np.array([u, v])
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def sample_at_unit_point(self,
                              np.ndarray coord, 
                              np.ndarray vals):
@@ -77,47 +83,45 @@
 
 cdef class P1Sampler3D(ElementSampler):
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def map_real_to_unit(self, 
-                         np.ndarray physical_coord, 
-                         np.ndarray vertices):
+                         np.ndarray[np.float64_t, ndim=1] physical_coord, 
+                         np.ndarray[np.float64_t, ndim=2] vertices):
     
-        x = physical_coord[0]
-        y = physical_coord[1]
-        z = physical_coord[2]
+        b = np.array([physical_coord[0],
+                      physical_coord[1], 
+                      physical_coord[2], 
+                      1.0], dtype=np.float64)
 
-        x1 = vertices[0, 0]
-        y1 = vertices[0, 1]
-        z1 = vertices[0, 2]
+        A = np.empty((4, 4), dtype=np.float64)
+        cdef int i, j
+        for i in range(3):
+            for j in range(4):
+                A[i][j] = vertices[j][i]
+        for j in range(4):
+            A[3][j] = 1.0
 
-        x2 = vertices[1, 0]
-        y2 = vertices[1, 1]
-        z2 = vertices[1, 2]
-    
-        x3 = vertices[2, 0]
-        y3 = vertices[2, 1]
-        z3 = vertices[2, 2]
-    
-        x4 = vertices[3, 0]
-        y4 = vertices[3, 1]
-        z4 = vertices[3, 2]
-    
-        b = np.array([x, y, z, 1])
-        A = np.array([[x1, x2, x3, x4],
-                      [y1, y2, y3, y4],
-                      [z1, z2, z3, z4],
-                      [1,  1,  1,  1] ])
-    
         c = np.linalg.solve(A, b)
     
         return c
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def sample_at_unit_point(self,
-                             np.ndarray coord, 
-                             np.ndarray vals):
-        return vals[0]*coord[0] + vals[1]*coord[1] + \
-               vals[2]*coord[2] + vals[3]*coord[3]
+                             np.ndarray[np.float64_t, ndim=1] coord, 
+                             np.ndarray[np.float64_t, ndim=1] vals):
+        cdef np.float64_t value = 0.0
+        cdef np.int64_t i
+        for i in range(4):
+            value += vals[i]*coord[i]
+        return value
 
-
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef np.ndarray[np.float64_t, ndim=1] Q1Function2D(np.ndarray[np.float64_t, ndim=1] x,
                                                    np.ndarray[np.float64_t, ndim=2] v,
                                                    np.ndarray[np.float64_t, ndim=1] phys_x):
@@ -131,7 +135,9 @@
          v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
     return np.array([f1, f2])
 
-
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef np.ndarray[np.float64_t, ndim=2] Q1Jacobian2D(np.ndarray[np.float64_t, ndim=1] x,
                                                    np.ndarray[np.float64_t, ndim=2] v,
                                                    np.ndarray[np.float64_t, ndim=1] phys_x):
@@ -156,52 +162,63 @@
 
 cdef class Q1Sampler2D(ElementSampler):
 
-    def map_real_to_unit(self, np.ndarray physical_coord, np.ndarray vertices):
+    def map_real_to_unit(self, np.ndarray[np.float64_t, ndim=1] physical_coord, 
+                         np.ndarray[np.float64_t, ndim=2] vertices):
     
         # initial guess for the Newton solve
-        x0 = np.array([0.0, 0.0])
+        x0 = np.array([0.0, 0.0], dtype=np.float64)
         x = fsolve(Q1Function2D, x0, args=(vertices, physical_coord),
                    fprime=Q1Jacobian2D)
         return x
 
-    def sample_at_unit_point(self, np.ndarray coord, np.ndarray vals):
-        x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
-            vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
-            vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
-            vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
+    def sample_at_unit_point(self, np.ndarray[np.float64_t, ndim=1] coord, 
+                             np.ndarray[np.float64_t, ndim=1] vals):
+        cdef np.float64_t x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
+                              vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
+                              vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
+                              vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
         return 0.25*x
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.ndarray[np.float64_t, ndim=1] Q1Function3D(np.ndarray[np.float64_t, ndim=1] x,
+                                                          np.ndarray[np.float64_t, ndim=2] v,
+                                                          np.ndarray[np.float64_t, ndim=1] phys_x):
+    cdef np.float64_t f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+                           v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+                           v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+                           v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+                           v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+                           v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+                           v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+                           v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
+    cdef np.float64_t f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+                           v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+                           v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+                           v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+                           v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+                           v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+                           v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+                           v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
+    cdef np.float64_t f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
+                           v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
+                           v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
+                           v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
+                           v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
+                           v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
+                           v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
+                           v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
 
-cdef np.ndarray[np.float64_t, ndim=1] Q1Function3D(np.ndarray[np.float64_t, ndim=1] x,
-                                                   np.ndarray[np.float64_t, ndim=2] v,
-                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
-    f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-         v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-         v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-         v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-         v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-         v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-         v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-         v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
-    f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-         v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-         v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-         v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-         v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-         v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-         v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-         v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
-    f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-         v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-         v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-         v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-         v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-         v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-         v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-         v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
-    return np.array([f0, f1, f2])
+    A = np.empty(3, dtype=np.float64)
+    A[0] = f0
+    A[1] = f1
+    A[2] = f2
+    return A
 
-
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef np.ndarray[np.float64_t, ndim=1] Q1Jacobian3D(np.ndarray[np.float64_t, ndim=1] x,
                                                    np.ndarray[np.float64_t, ndim=2] v,
                                                    np.ndarray[np.float64_t, ndim=1] phys_x):


https://bitbucket.org/yt_analysis/yt/commits/fc60fc23cc30/
Changeset:   fc60fc23cc30
Branch:      yt
User:        atmyers
Date:        2015-06-27 04:39:40+00:00
Summary:     replacing scipy fsolve with a cythonized NR solver
Affected #:  1 file

diff -r 06a79221a0b12dde7d35322ade62bd25f78de8d4 -r fc60fc23cc305d5c9357dc8639299899dc82e40e yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -18,7 +18,7 @@
 from numpy cimport ndarray
 cimport cython
 import numpy as np
-from scipy.optimize import fsolve
+from libc.math cimport abs
 
 
 cdef class ElementSampler:
@@ -122,163 +122,153 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.ndarray[np.float64_t, ndim=1] Q1Function2D(np.ndarray[np.float64_t, ndim=1] x,
-                                                   np.ndarray[np.float64_t, ndim=2] v,
-                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
-    f1 = v[0][0]*(1-x[0])*(1-x[1]) + \
-         v[1][0]*(1+x[0])*(1-x[1]) + \
-         v[2][0]*(1-x[0])*(1+x[1]) + \
-         v[3][0]*(1+x[0])*(1+x[1]) - 4.0*phys_x[0]
-    f2 = v[0][1]*(1-x[0])*(1-x[1]) + \
-         v[1][1]*(1+x[0])*(1-x[1]) + \
-         v[2][1]*(1-x[0])*(1+x[1]) + \
-         v[3][1]*(1+x[0])*(1+x[1]) - 4.0*phys_x[1]
-    return np.array([f1, f2])
+@cython.initializedcheck(False)
+cpdef inline void Q1Function2D(double[:] fx,
+                               double[:] x, 
+                               double[:, :] vertices, 
+                               double[:] phys_x) nogil:
+    
+    cdef int i
+    for i in range(2):
+        fx[i] = vertices[0][i]*(1-x[0])*(1-x[1]) \
+              + vertices[1][i]*(1+x[0])*(1-x[1]) \
+              + vertices[2][i]*(1-x[0])*(1+x[1]) \
+              + vertices[3][i]*(1+x[0])*(1+x[1]) - 4.0*phys_x[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.ndarray[np.float64_t, ndim=2] Q1Jacobian2D(np.ndarray[np.float64_t, ndim=1] x,
-                                                   np.ndarray[np.float64_t, ndim=2] v,
-                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
-    f11 = -(1-x[1])*v[0][0] + \
-          (1-x[1])*v[1][0] - \
-          (1+x[1])*v[2][0] + \
-          (1+x[1])*v[3][0]
-    f12 = -(1-x[0])*v[0][0] - \
-          (1+x[0])*v[1][0] + \
-          (1-x[0])*v[2][0] + \
-          (1+x[0])*v[3][0]
-    f21 = -(1-x[1])*v[0][1] + \
-          (1-x[1])*v[1][1] - \
-          (1+x[1])*v[2][1] + \
-          (1+x[1])*v[3][1]
-    f22 = -(1-x[0])*v[0][1] - \
-          (1+x[0])*v[1][1] + \
-          (1-x[0])*v[2][1] + \
-          (1+x[0])*v[3][1]
-    return np.array([[f11, f12], [f21, f22]])
+@cython.initializedcheck(False)
+cpdef inline void Q1Jacobian2D(double[:, :] A,
+                               double[:] x, 
+                               double[:, :] v, 
+                               double[:] phys_x) nogil:
+    
+    cdef int i
+    for i in range(2):
+        A[i][0] = -(1-x[1])*v[0][i] + (1-x[1])*v[1][i] - \
+                   (1+x[1])*v[2][i] + (1+x[1])*v[3][i]
+        A[i][1] = -(1-x[0])*v[0][i] - (1+x[0])*v[1][i] + \
+                   (1-x[0])*v[2][i] + (1+x[0])*v[3][i]
 
 
 cdef class Q1Sampler2D(ElementSampler):
 
-    def map_real_to_unit(self, np.ndarray[np.float64_t, ndim=1] physical_coord, 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def map_real_to_unit(self, 
+                         np.ndarray[np.float64_t, ndim=1] physical_x,
                          np.ndarray[np.float64_t, ndim=2] vertices):
-    
-        # initial guess for the Newton solve
-        x0 = np.array([0.0, 0.0], dtype=np.float64)
-        x = fsolve(Q1Function2D, x0, args=(vertices, physical_coord),
-                   fprime=Q1Jacobian2D)
+        x = np.zeros(2, dtype=np.float64)
+        cdef int iterations = 0
+        cdef np.float64_t tolerance = 1.0e-9
+        fx = np.empty(2, dtype=np.float64)
+        A = np.empty((2, 2), dtype=np.float64)
+        Ainv = np.empty((2, 2), dtype=np.float64)
+        Q1Function2D(fx, x, vertices, physical_x)
+        cdef np.float64_t err = np.max(abs(fx))
+        while (err > tolerance and iterations < 100):
+            Q1Jacobian2D(A, x, vertices, physical_x)
+            Ainv = np.linalg.inv(A)
+            x = x - np.dot(Ainv, fx)
+            Q1Function2D(fx, x, vertices, physical_x)
+            err = np.max(abs(fx))
+            iterations += 1
         return x
 
-    def sample_at_unit_point(self, np.ndarray[np.float64_t, ndim=1] coord, 
-                             np.ndarray[np.float64_t, ndim=1] vals):
-        cdef np.float64_t x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
-                              vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
-                              vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
-                              vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def sample_at_unit_point(self, double[:] coord, 
+                             double[:] vals):
+        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
+                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
+                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
+                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
         return 0.25*x
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline np.ndarray[np.float64_t, ndim=1] Q1Function3D(np.ndarray[np.float64_t, ndim=1] x,
-                                                          np.ndarray[np.float64_t, ndim=2] v,
-                                                          np.ndarray[np.float64_t, ndim=1] phys_x):
-    cdef np.float64_t f0 = v[0][0]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-                           v[1][0]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-                           v[2][0]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-                           v[3][0]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-                           v[4][0]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-                           v[5][0]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-                           v[6][0]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-                           v[7][0]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[0]
-    cdef np.float64_t f1 = v[0][1]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-                           v[1][1]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-                           v[2][1]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-                           v[3][1]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-                           v[4][1]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-                           v[5][1]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-                           v[6][1]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-                           v[7][1]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[1]
-    cdef np.float64_t f2 = v[0][2]*(1-x[0])*(1-x[1])*(1-x[2]) + \
-                           v[1][2]*(1+x[0])*(1-x[1])*(1-x[2]) + \
-                           v[2][2]*(1-x[0])*(1+x[1])*(1-x[2]) + \
-                           v[3][2]*(1+x[0])*(1+x[1])*(1-x[2]) + \
-                           v[4][2]*(1-x[0])*(1-x[1])*(1+x[2]) + \
-                           v[5][2]*(1+x[0])*(1-x[1])*(1+x[2]) + \
-                           v[6][2]*(1-x[0])*(1+x[1])*(1+x[2]) + \
-                           v[7][2]*(1+x[0])*(1+x[1])*(1+x[2]) - 8.0*phys_x[2]
-
-    A = np.empty(3, dtype=np.float64)
-    A[0] = f0
-    A[1] = f1
-    A[2] = f2
-    return A
+@cython.initializedcheck(False)
+cpdef inline void Q1Function3D(double[:] fx,
+                               double[:] x, 
+                               double[:, :] vertices, 
+                               double[:] phys_x) nogil:
+    
+    cdef int i
+    for i in range(3):
+        fx[i] = vertices[0][i]*(1-x[0])*(1-x[1])*(1-x[2]) \
+              + vertices[1][i]*(1+x[0])*(1-x[1])*(1-x[2]) \
+              + vertices[2][i]*(1-x[0])*(1+x[1])*(1-x[2]) \
+              + vertices[3][i]*(1+x[0])*(1+x[1])*(1-x[2]) \
+              + vertices[4][i]*(1-x[0])*(1-x[1])*(1+x[2]) \
+              + vertices[5][i]*(1+x[0])*(1-x[1])*(1+x[2]) \
+              + vertices[6][i]*(1-x[0])*(1+x[1])*(1+x[2]) \
+              + vertices[7][i]*(1+x[0])*(1+x[1])*(1+x[2]) \
+              - 8.0*phys_x[i]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef np.ndarray[np.float64_t, ndim=1] Q1Jacobian3D(np.ndarray[np.float64_t, ndim=1] x,
-                                                   np.ndarray[np.float64_t, ndim=2] v,
-                                                   np.ndarray[np.float64_t, ndim=1] phys_x):
-    f00 = -(1-x[1])*(1-x[2])*v[0][0] + (1-x[1])*(1-x[2])*v[1][0] - \
-          (1+x[1])*(1-x[2])*v[2][0] + (1+x[1])*(1-x[2])*v[3][0] - \
-          (1-x[1])*(1+x[2])*v[4][0] + (1-x[1])*(1+x[2])*v[5][0] - \
-          (1+x[1])*(1+x[2])*v[6][0] + (1+x[1])*(1+x[2])*v[7][0]
-    f01 = -(1-x[0])*(1-x[2])*v[0][0] - (1+x[0])*(1-x[2])*v[1][0] + \
-          (1-x[0])*(1-x[2])*v[2][0] + (1+x[0])*(1-x[2])*v[3][0] - \
-          (1-x[0])*(1+x[2])*v[4][0] - (1+x[0])*(1+x[2])*v[5][0] + \
-          (1-x[0])*(1+x[2])*v[6][0] + (1+x[0])*(1+x[2])*v[7][0]
-    f02 = -(1-x[0])*(1-x[1])*v[0][0] - (1+x[0])*(1-x[1])*v[1][0] - \
-          (1-x[0])*(1+x[1])*v[2][0] - (1+x[0])*(1+x[1])*v[3][0] + \
-          (1-x[0])*(1-x[1])*v[4][0] + (1+x[0])*(1-x[1])*v[5][0] + \
-          (1-x[0])*(1+x[1])*v[6][0] + (1+x[0])*(1+x[1])*v[7][0]
-
-    f10 = -(1-x[1])*(1-x[2])*v[0][1] + (1-x[1])*(1-x[2])*v[1][1] - \
-          (1+x[1])*(1-x[2])*v[2][1] + (1+x[1])*(1-x[2])*v[3][1] - \
-          (1-x[1])*(1+x[2])*v[4][1] + (1-x[1])*(1+x[2])*v[5][1] - \
-          (1+x[1])*(1+x[2])*v[6][1] + (1+x[1])*(1+x[2])*v[7][1]
-    f11 = -(1-x[0])*(1-x[2])*v[0][1] - (1+x[0])*(1-x[2])*v[1][1] + \
-          (1-x[0])*(1-x[2])*v[2][1] + (1+x[0])*(1-x[2])*v[3][1] - \
-          (1-x[0])*(1+x[2])*v[4][1] - (1+x[0])*(1+x[2])*v[5][1] + \
-          (1-x[0])*(1+x[2])*v[6][1] + (1+x[0])*(1+x[2])*v[7][1]
-    f12 = -(1-x[0])*(1-x[1])*v[0][1] - (1+x[0])*(1-x[1])*v[1][1] - \
-          (1-x[0])*(1+x[1])*v[2][1] - (1+x[0])*(1+x[1])*v[3][1] + \
-          (1-x[0])*(1-x[1])*v[4][1] + (1+x[0])*(1-x[1])*v[5][1] + \
-          (1-x[0])*(1+x[1])*v[6][1] + (1+x[0])*(1+x[1])*v[7][1]
+@cython.initializedcheck(False)
+cpdef inline void Q1Jacobian3D(double[:, :] A,
+                               double[:] x, 
+                               double[:, :] v, 
+                               double[:] phys_x) nogil:
     
-    f20 = -(1-x[1])*(1-x[2])*v[0][2] + (1-x[1])*(1-x[2])*v[1][2] - \
-          (1+x[1])*(1-x[2])*v[2][2] + (1+x[1])*(1-x[2])*v[3][2] - \
-          (1-x[1])*(1+x[2])*v[4][2] + (1-x[1])*(1+x[2])*v[5][2] - \
-          (1+x[1])*(1+x[2])*v[6][2] + (1+x[1])*(1+x[2])*v[7][2]
-    f21 = -(1-x[0])*(1-x[2])*v[0][2] - (1+x[0])*(1-x[2])*v[1][2] + \
-          (1-x[0])*(1-x[2])*v[2][2] + (1+x[0])*(1-x[2])*v[3][2] - \
-          (1-x[0])*(1+x[2])*v[4][2] - (1+x[0])*(1+x[2])*v[5][2] + \
-          (1-x[0])*(1+x[2])*v[6][2] + (1+x[0])*(1+x[2])*v[7][2]
-    f22 = -(1-x[0])*(1-x[1])*v[0][2] - (1+x[0])*(1-x[1])*v[1][2] - \
-          (1-x[0])*(1+x[1])*v[2][2] - (1+x[0])*(1+x[1])*v[3][2] + \
-          (1-x[0])*(1-x[1])*v[4][2] + (1+x[0])*(1-x[1])*v[5][2] + \
-          (1-x[0])*(1+x[1])*v[6][2] + (1+x[0])*(1+x[1])*v[7][2]
-
-    return np.array([[f00, f01, f02], [f10, f11, f12], [f20, f21, f22]])
+    cdef int i
+    for i in range(3):
+        A[i][0] = -(1-x[1])*(1-x[2])*v[0][i] + (1-x[1])*(1-x[2])*v[1][i] - \
+                   (1+x[1])*(1-x[2])*v[2][i] + (1+x[1])*(1-x[2])*v[3][i] - \
+                   (1-x[1])*(1+x[2])*v[4][i] + (1-x[1])*(1+x[2])*v[5][i] - \
+                   (1+x[1])*(1+x[2])*v[6][i] + (1+x[1])*(1+x[2])*v[7][i]
+        A[i][1] = -(1-x[0])*(1-x[2])*v[0][i] - (1+x[0])*(1-x[2])*v[1][i] + \
+                   (1-x[0])*(1-x[2])*v[2][i] + (1+x[0])*(1-x[2])*v[3][i] - \
+                   (1-x[0])*(1+x[2])*v[4][i] - (1+x[0])*(1+x[2])*v[5][i] + \
+                   (1-x[0])*(1+x[2])*v[6][i] + (1+x[0])*(1+x[2])*v[7][i]
+        A[i][2] = -(1-x[0])*(1-x[1])*v[0][i] - (1+x[0])*(1-x[1])*v[1][i] - \
+                   (1-x[0])*(1+x[1])*v[2][i] - (1+x[0])*(1+x[1])*v[3][i] + \
+                   (1-x[0])*(1-x[1])*v[4][i] + (1+x[0])*(1-x[1])*v[5][i] + \
+                   (1-x[0])*(1+x[1])*v[6][i] + (1+x[0])*(1+x[1])*v[7][i]
 
 
 cdef class Q1Sampler3D(ElementSampler):
 
-    def map_real_to_unit(self, np.ndarray physical_coord, np.ndarray vertices):
-        x0 = np.array([0.0, 0.0, 0.0])  # initial guess
-        x = fsolve(Q1Function3D, x0, args=(vertices, physical_coord),
-                   fprime=Q1Jacobian3D)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def map_real_to_unit(self, 
+                         np.ndarray[np.float64_t, ndim=1] physical_x,
+                         np.ndarray[np.float64_t, ndim=2] vertices):
+        x = np.zeros(3, dtype=np.float64)
+        cdef int iterations = 0
+        cdef np.float64_t tolerance = 1.0e-9
+        fx = np.empty(3, dtype=np.float64)
+        A = np.empty((3, 3), dtype=np.float64)
+        Ainv = np.empty((3, 3), dtype=np.float64)
+        Q1Function3D(fx, x, vertices, physical_x)
+        cdef np.float64_t err = np.max(abs(fx))
+        while (err > tolerance and iterations < 100):
+            Q1Jacobian3D(A, x, vertices, physical_x)
+            Ainv = np.linalg.inv(A)
+            x = x - np.dot(Ainv, fx)
+            Q1Function3D(fx, x, vertices, physical_x)
+            err = np.max(abs(fx))
+            iterations += 1
         return x
 
-    def sample_at_unit_point(self, np.ndarray coord, np.ndarray vals):
-        x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-            vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-            vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-            vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-            vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-            vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-            vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
-            vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def sample_at_unit_point(self, double[:] coord, double[:] vals):
+        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+                        vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+                        vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+                        vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
+                        vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
         return 0.125*x


https://bitbucket.org/yt_analysis/yt/commits/a21af21d0c6c/
Changeset:   a21af21d0c6c
Branch:      yt
User:        atmyers
Date:        2015-06-28 19:07:35+00:00
Summary:     speeding up the 2D tri sampler
Affected #:  2 files

diff -r fc60fc23cc305d5c9357dc8639299899dc82e40e -r a21af21d0c6cf4510c07f8a089f59a3310b84908 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -29,7 +29,7 @@
         raise NotImplementedError
 
     def sample_at_unit_point(self,
-                             np.ndarray coord, 
+                             np.ndarray coord,
                              np.ndarray vals):
         raise NotImplementedError
 
@@ -46,40 +46,39 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
+    @cython.initializedcheck(False)
     def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_coord, 
+                         np.ndarray[np.float64_t, ndim=1] physical_x, 
                          np.ndarray[np.float64_t, ndim=2] vertices):
     
-        x = physical_coord[0]
-        y = physical_coord[1]
-
-        x1 = vertices[0, 0]
-        y1 = vertices[0, 1]
-
-        x2 = vertices[1, 0]
-        y2 = vertices[1, 1]
-
-        x3 = vertices[2, 0]
-        y3 = vertices[2, 1]
+        b = np.empty(3, dtype=np.float64)
+        A = np.empty((3, 3), dtype=np.float64)
     
-        A = np.array([[1, x, y], [1, x1, y1], [1, x3, y3]])
-        B = np.array([[1, x2, y2], [1, x1, y1], [1, x3, y3]])
-        u = np.linalg.det(A) / np.linalg.det(B)
-
-        C = np.array([[1, x, y], [1, x1, y1], [1, x2, y2]])
-        D = np.array([[1, x3, y3], [1, x1, y1], [1, x2, y2]])
-        v = np.linalg.det(C) / np.linalg.det(D)
+        b[0] = physical_x[0]
+        b[1] = physical_x[1]
+        b[2] = 1.0
     
-        return np.array([u, v])
+        A[0][0] = vertices[0, 0]
+        A[0][1] = vertices[1, 0]
+        A[0][2] = vertices[2, 0]
+    
+        A[1][0] = vertices[0, 1]
+        A[1][1] = vertices[1, 1]
+        A[1][2] = vertices[2, 1]
+    
+        A[2][0] = 1.0
+        A[2][1] = 1.0
+        A[2][2] = 1.0
+            
+        c = np.linalg.solve(A, b)
+        return c
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def sample_at_unit_point(self,
-                             np.ndarray coord, 
-                             np.ndarray vals):
-        return vals[0]*(1 - coord[0] - coord[1]) + \
-            vals[1]*coord[0] + vals[2]*coord[1]
+    def sample_at_unit_point(self, double[:] coord, 
+                             double[:] vals):
+        return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2]
 
 cdef class P1Sampler3D(ElementSampler):
 
@@ -111,10 +110,10 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     def sample_at_unit_point(self,
-                             np.ndarray[np.float64_t, ndim=1] coord, 
-                             np.ndarray[np.float64_t, ndim=1] vals):
-        cdef np.float64_t value = 0.0
-        cdef np.int64_t i
+                             double[:] coord, 
+                             double[:] vals):
+        cdef double value = 0.0
+        cdef int i
         for i in range(4):
             value += vals[i]*coord[i]
         return value

diff -r fc60fc23cc305d5c9357dc8639299899dc82e40e -r a21af21d0c6cf4510c07f8a089f59a3310b84908 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -15,14 +15,11 @@
 def check_all_vertices(sampler, vertices, field_values):
     NV = vertices.shape[0]
     NDIM = vertices.shape[1]
-    physical_x = np.empty(NDIM)
+    x = np.empty(NDIM)
     for i in range(NV):
-        physical_x = vertices[i]
-        x = sampler.map_real_to_unit(physical_x, vertices)
-        val1 = sampler.sample_at_unit_point(x, field_values)
-        assert_almost_equal(field_values[i], val1)
-        val2 = sampler.sample_at_real_point(physical_x, vertices, field_values)
-        assert_almost_equal(val1, val2)
+        x = vertices[i]
+        val = sampler.sample_at_real_point(x, vertices, field_values)
+        assert_almost_equal(val, field_values[i])
 
 
 def test_P1Sampler2D():


https://bitbucket.org/yt_analysis/yt/commits/2edb2dd26bb8/
Changeset:   2edb2dd26bb8
Branch:      yt
User:        atmyers
Date:        2015-06-28 19:19:06+00:00
Summary:     (slightly) speeding up the 3D tetra sampler
Affected #:  1 file

diff -r a21af21d0c6cf4510c07f8a089f59a3310b84908 -r 2edb2dd26bb86ada324bee746f94e7bb4d862b36 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -85,23 +85,37 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_coord, 
-                         np.ndarray[np.float64_t, ndim=2] vertices):
+    @cython.initializedcheck(False)
+    def map_real_to_unit(self, double[:] physical_x, double[:,:] vertices):
     
-        b = np.array([physical_coord[0],
-                      physical_coord[1], 
-                      physical_coord[2], 
-                      1.0], dtype=np.float64)
+        b = np.empty(4, dtype=np.float64)
+        A = np.empty((4, 4), dtype=np.float64)
+    
+        b[0] = physical_x[0]
+        b[1] = physical_x[1]
+        b[2] = physical_x[2]
+        b[3] = 1.0
+    
+        A[0][0] = vertices[0, 0]
+        A[0][1] = vertices[1, 0]
+        A[0][2] = vertices[2, 0]
+        A[0][3] = vertices[3, 0]
+        
+        A[1][0] = vertices[0, 1]
+        A[1][1] = vertices[1, 1]
+        A[1][2] = vertices[2, 1]
+        A[1][3] = vertices[3, 1]
+        
+        A[2][0] = vertices[0, 2]
+        A[2][1] = vertices[1, 2]
+        A[2][2] = vertices[2, 2]
+        A[2][3] = vertices[3, 2]
 
-        A = np.empty((4, 4), dtype=np.float64)
-        cdef int i, j
-        for i in range(3):
-            for j in range(4):
-                A[i][j] = vertices[j][i]
-        for j in range(4):
-            A[3][j] = 1.0
-
+        A[3][0] = 1.0
+        A[3][1] = 1.0
+        A[3][2] = 1.0
+        A[3][3] = 1.0
+        
         c = np.linalg.solve(A, b)
     
         return c


https://bitbucket.org/yt_analysis/yt/commits/d6964771d73e/
Changeset:   d6964771d73e
Branch:      yt
User:        atmyers
Date:        2015-06-28 19:24:29+00:00
Summary:     (slightly) speeding up the 3D tetra sampler
Affected #:  1 file

diff -r 2edb2dd26bb86ada324bee746f94e7bb4d862b36 -r d6964771d73ef758319e0eb01d914f932b74ef4e yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -255,19 +255,23 @@
     def map_real_to_unit(self, 
                          np.ndarray[np.float64_t, ndim=1] physical_x,
                          np.ndarray[np.float64_t, ndim=2] vertices):
-        x = np.zeros(3, dtype=np.float64)
+        cdef int dim = 3
+        cdef np.float64_t tolerance = 1.0e-9
+        func = Q1Function3D
+        jac = Q1Jacobian3D
+
+        x = np.zeros(dim, dtype=np.float64)
         cdef int iterations = 0
-        cdef np.float64_t tolerance = 1.0e-9
-        fx = np.empty(3, dtype=np.float64)
-        A = np.empty((3, 3), dtype=np.float64)
-        Ainv = np.empty((3, 3), dtype=np.float64)
-        Q1Function3D(fx, x, vertices, physical_x)
+        fx = np.empty(dim, dtype=np.float64)
+        A = np.empty((dim, dim), dtype=np.float64)
+        Ainv = np.empty((dim, dim), dtype=np.float64)
+        func(fx, x, vertices, physical_x)
         cdef np.float64_t err = np.max(abs(fx))
         while (err > tolerance and iterations < 100):
-            Q1Jacobian3D(A, x, vertices, physical_x)
+            jac(A, x, vertices, physical_x)
             Ainv = np.linalg.inv(A)
             x = x - np.dot(Ainv, fx)
-            Q1Function3D(fx, x, vertices, physical_x)
+            func(fx, x, vertices, physical_x)
             err = np.max(abs(fx))
             iterations += 1
         return x


https://bitbucket.org/yt_analysis/yt/commits/451bdb9f000c/
Changeset:   451bdb9f000c
Branch:      yt
User:        atmyers
Date:        2015-06-28 20:52:51+00:00
Summary:     putting common elements for all samplers that use newton iteration into a base class
Affected #:  1 file

diff -r d6964771d73ef758319e0eb01d914f932b74ef4e -r 451bdb9f000c7c5e63f225f2db398ce39f293a0f yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -132,14 +132,17 @@
             value += vals[i]*coord[i]
         return value
 
+ctypedef void (*func_type)(double[:], double[:], double[:, :], double[:])
+ctypedef void (*jac_type)(double[:, :], double[:], double[:, :], double[:])
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 @cython.initializedcheck(False)
-cpdef inline void Q1Function2D(double[:] fx,
-                               double[:] x, 
-                               double[:, :] vertices, 
-                               double[:] phys_x) nogil:
+cdef inline void Q1Function2D(double[:] fx,
+                              double[:] x, 
+                              double[:, :] vertices, 
+                              double[:] phys_x) nogil:
     
     cdef int i
     for i in range(2):
@@ -152,10 +155,10 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 @cython.initializedcheck(False)
-cpdef inline void Q1Jacobian2D(double[:, :] A,
-                               double[:] x, 
-                               double[:, :] v, 
-                               double[:] phys_x) nogil:
+cdef inline void Q1Jacobian2D(double[:, :] A,
+                              double[:] x, 
+                              double[:, :] v, 
+                              double[:] phys_x) nogil:
     
     cdef int i
     for i in range(2):
@@ -165,30 +168,13 @@
                    (1-x[0])*v[2][i] + (1+x[0])*v[3][i]
 
 
-cdef class Q1Sampler2D(ElementSampler):
+cdef class Q1Sampler2D(NonlinearSolveSampler):
 
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_x,
-                         np.ndarray[np.float64_t, ndim=2] vertices):
-        x = np.zeros(2, dtype=np.float64)
-        cdef int iterations = 0
-        cdef np.float64_t tolerance = 1.0e-9
-        fx = np.empty(2, dtype=np.float64)
-        A = np.empty((2, 2), dtype=np.float64)
-        Ainv = np.empty((2, 2), dtype=np.float64)
-        Q1Function2D(fx, x, vertices, physical_x)
-        cdef np.float64_t err = np.max(abs(fx))
-        while (err > tolerance and iterations < 100):
-            Q1Jacobian2D(A, x, vertices, physical_x)
-            Ainv = np.linalg.inv(A)
-            x = x - np.dot(Ainv, fx)
-            Q1Function2D(fx, x, vertices, physical_x)
-            err = np.max(abs(fx))
-            iterations += 1
-        return x
+    def __init__(self):
+        super(Q1Sampler2D, self).__init__()
+        self.dim = 2
+        self.func = Q1Function2D
+        self.jac = Q1Jacobian2D
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -205,10 +191,10 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 @cython.initializedcheck(False)
-cpdef inline void Q1Function3D(double[:] fx,
-                               double[:] x, 
-                               double[:, :] vertices, 
-                               double[:] phys_x) nogil:
+cdef inline void Q1Function3D(double[:] fx,
+                              double[:] x, 
+                              double[:, :] vertices, 
+                              double[:] phys_x) nogil:
     
     cdef int i
     for i in range(3):
@@ -226,10 +212,10 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 @cython.initializedcheck(False)
-cpdef inline void Q1Jacobian3D(double[:, :] A,
-                               double[:] x, 
-                               double[:, :] v, 
-                               double[:] phys_x) nogil:
+cdef inline void Q1Jacobian3D(double[:, :] A,
+                              double[:] x, 
+                              double[:, :] v, 
+                              double[:] phys_x) nogil:
     
     cdef int i
     for i in range(3):
@@ -246,8 +232,15 @@
                    (1-x[0])*(1-x[1])*v[4][i] + (1+x[0])*(1-x[1])*v[5][i] + \
                    (1-x[0])*(1+x[1])*v[6][i] + (1+x[0])*(1+x[1])*v[7][i]
 
+cdef class NonlinearSolveSampler(ElementSampler):
 
-cdef class Q1Sampler3D(ElementSampler):
+    cdef int dim
+    cdef np.float64_t tolerance
+    cdef func_type func 
+    cdef jac_type jac
+
+    def __init__(self):
+        self.tolerance = 1.0e-9
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -255,27 +248,31 @@
     def map_real_to_unit(self, 
                          np.ndarray[np.float64_t, ndim=1] physical_x,
                          np.ndarray[np.float64_t, ndim=2] vertices):
-        cdef int dim = 3
-        cdef np.float64_t tolerance = 1.0e-9
-        func = Q1Function3D
-        jac = Q1Jacobian3D
-
-        x = np.zeros(dim, dtype=np.float64)
+        x = np.zeros(self.dim, dtype=np.float64)
         cdef int iterations = 0
-        fx = np.empty(dim, dtype=np.float64)
-        A = np.empty((dim, dim), dtype=np.float64)
-        Ainv = np.empty((dim, dim), dtype=np.float64)
-        func(fx, x, vertices, physical_x)
+        fx = np.empty(self.dim, dtype=np.float64)
+        A = np.empty((self.dim, self.dim), dtype=np.float64)
+        Ainv = np.empty((self.dim, self.dim), dtype=np.float64)
+        self.func(fx, x, vertices, physical_x)
         cdef np.float64_t err = np.max(abs(fx))
-        while (err > tolerance and iterations < 100):
-            jac(A, x, vertices, physical_x)
+        while (err > self.tolerance and iterations < 100):
+            self.jac(A, x, vertices, physical_x)
             Ainv = np.linalg.inv(A)
             x = x - np.dot(Ainv, fx)
-            func(fx, x, vertices, physical_x)
+            self.func(fx, x, vertices, physical_x)
             err = np.max(abs(fx))
             iterations += 1
         return x
 
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    def __init__(self):
+        super(Q1Sampler3D, self).__init__()
+        self.dim = 3
+        self.func = Q1Function3D
+        self.jac = Q1Jacobian3D
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)


https://bitbucket.org/yt_analysis/yt/commits/16e75a23b3fa/
Changeset:   16e75a23b3fa
Branch:      yt
User:        atmyers
Date:        2015-06-28 20:55:54+00:00
Summary:     some rearranging
Affected #:  1 file

diff -r 451bdb9f000c7c5e63f225f2db398ce39f293a0f -r 16e75a23b3fa51eca66e542da1a475e8b30d1cd0 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -135,6 +135,39 @@
 ctypedef void (*func_type)(double[:], double[:], double[:, :], double[:])
 ctypedef void (*jac_type)(double[:, :], double[:], double[:, :], double[:])
 
+cdef class NonlinearSolveSampler(ElementSampler):
+
+    cdef int dim
+    cdef np.float64_t tolerance
+    cdef func_type func 
+    cdef jac_type jac
+
+    def __init__(self):
+        self.tolerance = 1.0e-9
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def map_real_to_unit(self, 
+                         np.ndarray[np.float64_t, ndim=1] physical_x,
+                         np.ndarray[np.float64_t, ndim=2] vertices):
+        x = np.zeros(self.dim, dtype=np.float64)
+        cdef int iterations = 0
+        fx = np.empty(self.dim, dtype=np.float64)
+        A = np.empty((self.dim, self.dim), dtype=np.float64)
+        Ainv = np.empty((self.dim, self.dim), dtype=np.float64)
+        self.func(fx, x, vertices, physical_x)
+        cdef np.float64_t err = np.max(abs(fx))
+        while (err > self.tolerance and iterations < 100):
+            self.jac(A, x, vertices, physical_x)
+            Ainv = np.linalg.inv(A)
+            x = x - np.dot(Ainv, fx)
+            self.func(fx, x, vertices, physical_x)
+            err = np.max(abs(fx))
+            iterations += 1
+        return x
+
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -232,38 +265,6 @@
                    (1-x[0])*(1-x[1])*v[4][i] + (1+x[0])*(1-x[1])*v[5][i] + \
                    (1-x[0])*(1+x[1])*v[6][i] + (1+x[0])*(1+x[1])*v[7][i]
 
-cdef class NonlinearSolveSampler(ElementSampler):
-
-    cdef int dim
-    cdef np.float64_t tolerance
-    cdef func_type func 
-    cdef jac_type jac
-
-    def __init__(self):
-        self.tolerance = 1.0e-9
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_x,
-                         np.ndarray[np.float64_t, ndim=2] vertices):
-        x = np.zeros(self.dim, dtype=np.float64)
-        cdef int iterations = 0
-        fx = np.empty(self.dim, dtype=np.float64)
-        A = np.empty((self.dim, self.dim), dtype=np.float64)
-        Ainv = np.empty((self.dim, self.dim), dtype=np.float64)
-        self.func(fx, x, vertices, physical_x)
-        cdef np.float64_t err = np.max(abs(fx))
-        while (err > self.tolerance and iterations < 100):
-            self.jac(A, x, vertices, physical_x)
-            Ainv = np.linalg.inv(A)
-            x = x - np.dot(Ainv, fx)
-            self.func(fx, x, vertices, physical_x)
-            err = np.max(abs(fx))
-            iterations += 1
-        return x
-
 
 cdef class Q1Sampler3D(NonlinearSolveSampler):
 


https://bitbucket.org/yt_analysis/yt/commits/50e543f241be/
Changeset:   50e543f241be
Branch:      yt
User:        atmyers
Date:        2015-06-28 20:57:43+00:00
Summary:     some more rearranging
Affected #:  1 file

diff -r 16e75a23b3fa51eca66e542da1a475e8b30d1cd0 -r 50e543f241bea725778d074f32afa1e5b82af8e3 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -167,6 +167,47 @@
             iterations += 1
         return x
 
+cdef class Q1Sampler2D(NonlinearSolveSampler):
+
+    def __init__(self):
+        super(Q1Sampler2D, self).__init__()
+        self.dim = 2
+        self.func = Q1Function2D
+        self.jac = Q1Jacobian2D
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def sample_at_unit_point(self, double[:] coord, 
+                             double[:] vals):
+        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
+                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
+                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
+                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
+        return 0.25*x
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    def __init__(self):
+        super(Q1Sampler3D, self).__init__()
+        self.dim = 3
+        self.func = Q1Function3D
+        self.jac = Q1Jacobian3D
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def sample_at_unit_point(self, double[:] coord, double[:] vals):
+        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
+                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
+                        vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+                        vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
+                        vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
+                        vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
+        return 0.125*x
+
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -201,25 +242,6 @@
                    (1-x[0])*v[2][i] + (1+x[0])*v[3][i]
 
 
-cdef class Q1Sampler2D(NonlinearSolveSampler):
-
-    def __init__(self):
-        super(Q1Sampler2D, self).__init__()
-        self.dim = 2
-        self.func = Q1Function2D
-        self.jac = Q1Jacobian2D
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self, double[:] coord, 
-                             double[:] vals):
-        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
-                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
-                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
-                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
-        return 0.25*x
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -264,26 +286,3 @@
                    (1-x[0])*(1+x[1])*v[2][i] - (1+x[0])*(1+x[1])*v[3][i] + \
                    (1-x[0])*(1-x[1])*v[4][i] + (1+x[0])*(1-x[1])*v[5][i] + \
                    (1-x[0])*(1+x[1])*v[6][i] + (1+x[0])*(1+x[1])*v[7][i]
-
-
-cdef class Q1Sampler3D(NonlinearSolveSampler):
-
-    def __init__(self):
-        super(Q1Sampler3D, self).__init__()
-        self.dim = 3
-        self.func = Q1Function3D
-        self.jac = Q1Jacobian3D
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self, double[:] coord, double[:] vals):
-        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-                        vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-                        vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-                        vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
-                        vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
-        return 0.125*x


https://bitbucket.org/yt_analysis/yt/commits/8c29935df92f/
Changeset:   8c29935df92f
Branch:      yt
User:        atmyers
Date:        2015-06-28 20:58:48+00:00
Summary:     some more rearranging
Affected #:  1 file

diff -r 50e543f241bea725778d074f32afa1e5b82af8e3 -r 8c29935df92f9403cb9ca3d0004170a5c3b7c21f yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -138,12 +138,14 @@
 cdef class NonlinearSolveSampler(ElementSampler):
 
     cdef int dim
+    cdef int max_iter
     cdef np.float64_t tolerance
     cdef func_type func 
     cdef jac_type jac
 
     def __init__(self):
         self.tolerance = 1.0e-9
+        self.max_iter = 10
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -158,7 +160,7 @@
         Ainv = np.empty((self.dim, self.dim), dtype=np.float64)
         self.func(fx, x, vertices, physical_x)
         cdef np.float64_t err = np.max(abs(fx))
-        while (err > self.tolerance and iterations < 100):
+        while (err > self.tolerance and iterations < self.max_iter):
             self.jac(A, x, vertices, physical_x)
             Ainv = np.linalg.inv(A)
             x = x - np.dot(Ainv, fx)


https://bitbucket.org/yt_analysis/yt/commits/bee35e14ddf8/
Changeset:   bee35e14ddf8
Branch:      yt
User:        atmyers
Date:        2015-06-28 21:04:53+00:00
Summary:     updating copyright information
Affected #:  2 files

diff -r 8c29935df92f9403cb9ca3d0004170a5c3b7c21f -r bee35e14ddf8ae11ed5d9cb4a5a92060c1f2d932 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -7,7 +7,7 @@
 """
 
 #-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
+# Copyright (c) 2015, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #

diff -r 8c29935df92f9403cb9ca3d0004170a5c3b7c21f -r bee35e14ddf8ae11ed5d9cb4a5a92060c1f2d932 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -1,3 +1,19 @@
+"""
+This file contains tests of the intracell interpolation code contained in
+yt/utilities/lib/element_mappings.pyx.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
 import numpy as np
 
 from yt.testing import assert_almost_equal


https://bitbucket.org/yt_analysis/yt/commits/d1d037afc0b0/
Changeset:   d1d037afc0b0
Branch:      yt
User:        atmyers
Date:        2015-06-28 23:15:24+00:00
Summary:     adding some short docstrings while this is fresh in my mind
Affected #:  1 file

diff -r bee35e14ddf8ae11ed5d9cb4a5a92060c1f2d932 -r d1d037afc0b044c15d0b5e620e98bbdd726f118b yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -22,6 +22,15 @@
 
 
 cdef class ElementSampler:
+    '''
+
+    This is a base class for sampling the value of a finite element solution
+    at an arbitrary point inside a mesh element. In general, this will be done
+    by transforming the requested physical coordinate into a mapped coordinate 
+    system, sampling the solution in mapped coordinates, and returning the result.
+    This is not to be used directly; use one of the subclasses instead.
+
+    '''
 
     def map_real_to_unit(self,
                          np.ndarray physical_coord, 
@@ -42,6 +51,14 @@
     
 
 cdef class P1Sampler2D(ElementSampler):
+    '''
+
+    This implements sampling inside a linear, triangular mesh element.
+    In this case, the mapping is easily invertible and can be done 
+    with no iteration.
+    
+
+    '''
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -81,6 +98,13 @@
         return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2]
 
 cdef class P1Sampler3D(ElementSampler):
+    '''
+
+    This implements sampling inside a linear, tetrahedral mesh element. Like
+    the 2D case, this mapping is linear and can be inverted easily.
+
+    '''
+
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -137,6 +161,16 @@
 
 cdef class NonlinearSolveSampler(ElementSampler):
 
+    '''
+
+    This is a base class for handling element samplers that require
+    a nonlinear solve to invert the mapping between coordinate systems.
+    To do this, we perform Netwon-Raphson iteration using a specificed 
+    system of equations with an analytic Jacobian matrix. This is
+    not to be used directly, use one of the subclasses instead.
+
+    '''
+
     cdef int dim
     cdef int max_iter
     cdef np.float64_t tolerance
@@ -171,6 +205,12 @@
 
 cdef class Q1Sampler2D(NonlinearSolveSampler):
 
+    '''
+
+    This implements sampling inside a 2D quadrilateral mesh element.
+
+    '''
+
     def __init__(self):
         super(Q1Sampler2D, self).__init__()
         self.dim = 2
@@ -190,6 +230,12 @@
 
 cdef class Q1Sampler3D(NonlinearSolveSampler):
 
+    ''' 
+
+    This implements sampling inside a 3d hexahedral mesh element.
+
+    '''
+
     def __init__(self):
         super(Q1Sampler3D, self).__init__()
         self.dim = 3


https://bitbucket.org/yt_analysis/yt/commits/6fc5217aed6a/
Changeset:   6fc5217aed6a
Branch:      yt
User:        atmyers
Date:        2015-06-28 23:18:16+00:00
Summary:     sorry sir isaac
Affected #:  1 file

diff -r d1d037afc0b044c15d0b5e620e98bbdd726f118b -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -165,7 +165,7 @@
 
     This is a base class for handling element samplers that require
     a nonlinear solve to invert the mapping between coordinate systems.
-    To do this, we perform Netwon-Raphson iteration using a specificed 
+    To do this, we perform Newton-Raphson iteration using a specified 
     system of equations with an analytic Jacobian matrix. This is
     not to be used directly, use one of the subclasses instead.
 


https://bitbucket.org/yt_analysis/yt/commits/4e4e06c2cbe8/
Changeset:   4e4e06c2cbe8
Branch:      yt
User:        atmyers
Date:        2015-07-06 22:44:08+00:00
Summary:     merging from ajulian
Affected #:  84 files

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -13,6 +13,7 @@
 yt/frontends/ramses/_ramses_reader.cpp
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
+yt/geometry/grid_visitors.c
 yt/geometry/oct_container.c
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
@@ -25,6 +26,7 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/bitarray.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -39,6 +41,7 @@
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
@@ -59,3 +62,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
 For more information on installation, what to do if you run into problems, or 
 ways to help development, please visit our website.
 
-Enjoy!
+Enjoy!
\ No newline at end of file

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 distribute_setup.py
--- a/distribute_setup.py
+++ /dev/null
@@ -1,541 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.32"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>=" + version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s to %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unknown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Moving elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    try:
-        f = open(pkg_info, 'w')
-    except EnvironmentError:
-        log.warn("Don't have permissions to write %s, skipping", pkg_info)
-        return
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
-    _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install') + 1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index + 1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools', replacement=False)
-            )
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools')
-        )
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patching complete.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
-    _cmd2 = ['-c', 'install', '--record']
-    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the distribute package
-    """
-    install_args = []
-    if options.user_install:
-        if sys.version_info < (2, 6):
-            log.warn("--user requires Python 2.6 or later")
-            raise SystemExit(1)
-        install_args.append('--user')
-    return install_args
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the distribute package')
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main(version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    tarball = download_setuptools(download_base=options.download_base)
-    return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/helper_scripts/run_recipes.py
--- a/doc/helper_scripts/run_recipes.py
+++ b/doc/helper_scripts/run_recipes.py
@@ -13,7 +13,7 @@
 from yt.config import ytcfg
 
 FPATTERNS = ['*.png', '*.txt', '*.h5', '*.dat']
-DPATTERNS = ['LC*', 'LR', 'DD0046', 'halo_analysis']
+DPATTERNS = ['LC*', 'LR', 'DD0046']
 BADF = ['cloudy_emissivity.h5', 'apec_emissivity.h5',
         'xray_emissivity.h5', 'AMRGridData_Slice_x_density.png']
 CWD = os.getcwd()

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,18 +1,14 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
+# First things first, if you experience problems, please visit the Help 
+# section at http://yt-project.org.
+#
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them.  And
-# that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
-# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up.
-#
-# If you experience problems, please visit the Help section at 
-# http://yt-project.org.
-#
+# There are a few options, but you only need to set *one* of them, which is 
+# the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
@@ -23,16 +19,25 @@
     DEST_DIR=${YT_DEST}
 fi
 
+# What follows are some other options that you may or may not need to change.
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
+# If you've got yt some other place, set this to point to it. The script will
+# already check the current directory and the one above it in the tree.
+YT_DIR=""
+
 # If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
 #NUMPY_ARGS="--fcompiler=fake"
 
+INST_PY3=0      # Install Python 3 along with Python 2. If this is turned
+                # on, all Python packages (including yt) will be installed
+                # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
@@ -50,9 +55,6 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got yt some other place, set this to point to it.
-YT_DIR=""
-
 # If you need to pass anything to matplotlib, do so here.
 MPL_SUPP_LDFLAGS=""
 MPL_SUPP_CFLAGS=""
@@ -111,6 +113,7 @@
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
     echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
     echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
     echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
@@ -415,6 +418,10 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
+printf "%-15s = %s so I " "INST_PY3" "${INST_PY3}"
+get_willwont ${INST_PY3}
+echo "be installing Python 3"
+
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -487,6 +494,13 @@
     exit 1
 }
 
+if [ $INST_PY3 -eq 1 ]
+then
+	 PYTHON_EXEC='python3.4'
+else 
+	 PYTHON_EXEC='python2.7'
+fi
+
 function do_setup_py
 {
     [ -e $1/done ] && return
@@ -501,21 +515,27 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
+    if [[ $LIB =~ .*mercurial.* ]] 
+    then
+        PYEXE="python2.7"
+    else
+        PYEXE=${PYTHON_EXEC}
+    fi
     case $LIB in
         *h5py*)
             pushd $LIB &> /dev/null
-            ( ${DEST_DIR}/bin/python2.7 setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
             popd &> /dev/null
             ;;
         *numpy*)
-            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            if [ -e ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/numpy/__init__.py ]
             then
-                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                VER=$(${DEST_DIR}/bin/${PYTHON_EXEC} -c 'from distutils.version import StrictVersion as SV; \
                                                  import numpy; print SV(numpy.__version__) < SV("1.8.0")')
                 if [ $VER == "True" ]
                 then
                     echo "Removing previous NumPy instance (see issue #889)"
-                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                    rm -rf ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/{numpy*,*.pth}
                 fi
             fi
             ;;
@@ -523,8 +543,8 @@
             ;;
     esac
     cd $LIB
-    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 }
@@ -592,14 +612,15 @@
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
 export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages
 
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+PYTHON2='Python-2.7.9'
+PYTHON3='Python-3.4.3'
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.9'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12' 
 H5PY='h5py-2.5.0'
@@ -620,11 +641,13 @@
 TORNADO='tornado-4.0.2'
 ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
+SETUPTOOLS='setuptools-16.0'
 
 # Now we dump all our SHA512 files out.
 echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160  Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
+echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -646,6 +669,7 @@
 echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
 echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
+echo '38a89aad89dc9aa682dbfbca623e2f69511f5e20d4a3526c01aabbc7e93ae78f20aac566676b431e111540b41540a1c4f644ce4174e7ecf052318612075e02dc  setuptools-16.0.tar.gz' > setuptools-16.0.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
@@ -660,10 +684,11 @@
 [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-get_ytproject $PYTHON.tgz
+[ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
+[ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+get_ytproject $PYTHON2.tgz
 get_ytproject $NUMPY.tar.gz
 get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $MERCURIAL.tar.gz
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
@@ -671,6 +696,7 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
+get_ytproject $SETUPTOOLS.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -787,11 +813,11 @@
     fi
 fi
 
-if [ ! -e $PYTHON/done ]
+if [ ! -e $PYTHON2/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
-    cd $PYTHON
+    echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+    [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+    cd $PYTHON2
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -802,7 +828,30 @@
     cd ..
 fi
 
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages/
+if [ $INST_PY3 -eq 1 ]
+then
+    if [ ! -e $PYTHON3/done ]
+    then
+        echo "Installing Python 3. Because two Pythons are better than one."
+        [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
+        cd $PYTHON3
+        ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+        ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/python 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3-config ${DEST_DIR}/bin/python-config 2>&1 ) 1>> ${LOG_FILE}
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+fi
+
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
+
+# Install setuptools
+do_setup_py $SETUPTOOLS
 
 if [ $INST_HG -eq 1 ]
 then
@@ -847,12 +896,10 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
-
-echo "Installing distribute"
-( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
+ 
 echo "Installing pip"
-( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 if [ $INST_SCIPY -eq 0 ]
 then
@@ -986,13 +1033,14 @@
 
 echo "Installing yt"
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
-if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
+	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
-    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -271,6 +271,29 @@
 
 For a practical application of this, see :ref:`cookbook-radial-velocity`.
 
+Gradient Fields
+---------------
+
+yt provides a way to compute gradients of spatial fields using the
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields` 
+method. If you have a spatially-based field such as density or temperature, 
+and want to calculate the gradient of that field, you can do it like so:
+
+.. code-block:: python
+
+    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
+    grad_fields = ds.add_gradient_fields(("gas","temperature"))
+
+where the ``grad_fields`` list will now have a list of new field names that can be used
+in calculations, representing the 3 different components of the field and the magnitude
+of the gradient, e.g., ``"temperature_gradient_x"``, ``"temperature_gradient_y"``,
+``"temperature_gradient_z"``, and ``"temperature_gradient_magnitude"``. To see an example
+of how to create and use these fields, see :ref:`cookbook-complicated-derived-fields`.
+
+.. note::
+
+    ``add_gradient_fields`` currently only supports Cartesian geometries!
+
 General Particle Fields
 -----------------------
 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be used in a hand-constructed Matplotlib image, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` number of grids.
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,9 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Currently only implemented for Enzo.  Other simulation types coming 
-   soon.  Until then, rely on the above prescription for creating 
-   ``DatasetSeries`` objects.
+.. note:: Implemented for: Enzo, Gadget, OWLS.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 
@@ -93,8 +91,7 @@
 .. code-block:: python
 
   import yt
-  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo',
-                         find_outputs=False)
+  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
 
 Then, create a ``DatasetSeries`` object with the 
 :meth:`frontends.enzo.simulation_handling.EnzoSimulation.get_time_series` 
@@ -123,10 +120,10 @@
 to select a subset of the total data:
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when 
-  gathering datasets for time series.  Default: True.
+  gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
-  when gathering datasets for time series.  Default: True.
+  when gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``initial_time`` (*float*): The earliest time for outputs to be included.  
   If None, the initial time of the simulation is used.  This can be used in 
@@ -139,15 +136,12 @@
 * ``times`` (*list*): A list of times for which outputs will be found.
   Default: None.
 
-* ``time_units`` (*str*): The time units used for requesting outputs by time.
-  Default: '1' (code units).
-
 * ``initial_redshift`` (*float*): The earliest redshift for outputs to be 
   included.  If None, the initial redshift of the simulation is used.  This
   can be used in combination with either ``final_time`` or ``final_redshift``.
   Default: None.
 
-* ``final_time`` (*float*): The latest redshift for outputs to be included.  
+* ``final_redshift`` (*float*): The latest redshift for outputs to be included.  
   If None, the final redshift of the simulation is used.  This can be used 
   in combination with either ``initial_time`` or ``initial_redshift``.  
   Default: None.
@@ -157,11 +151,11 @@
 
 * ``initial_cycle`` (*float*): The earliest cycle for outputs to be 
   included.  If None, the initial cycle of the simulation is used.  This can
-  only be used with final_cycle.  Default: None.
+  only be used with final_cycle.  Default: None.  (Enzo only)
 
 * ``final_cycle`` (*float*): The latest cycle for outputs to be included.  
   If None, the final cycle of the simulation is used.  This can only be used 
-  in combination with initial_cycle.  Default: None.
+  in combination with initial_cycle.  Default: None.  (Enzo only)
 
 * ``tolerance`` (*float*):  Used in combination with ``times`` or ``redshifts`` 
   keywords, this is the tolerance within which outputs are accepted given 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -82,6 +82,17 @@
 
 .. yt_cookbook:: derived_field.py
 
+.. _cookbook-complicated-derived-fields:
+
+Complicated Derived Fields
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to use the 
+:meth:`~yt.frontends.flash.data_structures.FLASHDataset.add_gradient_fields` method
+to generate gradient fields and use them in a more complex derived field. 
+
+.. yt_cookbook:: hse_field.py
+
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,44 +1,32 @@
 import numpy as np
 import yt
 
-from yt.fields.field_plugin_registry import \
-    register_field_plugin
-from yt.fields.fluid_fields import \
-    setup_gradient_fields
-
-
-# Define the components of the gravitational acceleration vector field by
-# taking the gradient of the gravitational potential
- at register_field_plugin
-def setup_my_fields(registry, ftype="gas", slice_info=None):
-    setup_gradient_fields(registry, (ftype, "gravitational_potential"),
-                          "cm ** 2 / s ** 2", slice_info)
-
-# Define the "degree of hydrostatic equilibrium" field
-
-
- at yt.derived_field(name='HSE', units=None, take_log=False,
-                  display_name='Hydrostatic Equilibrium')
-def HSE(field, data):
-
-    gx = data["density"] * data["gravitational_potential_gradient_x"]
-    gy = data["density"] * data["gravitational_potential_gradient_y"]
-    gz = data["density"] * data["gravitational_potential_gradient_z"]
-
-    hx = data["pressure_gradient_x"] - gx
-    hy = data["pressure_gradient_y"] - gy
-    hz = data["pressure_gradient_z"] - gz
-
-    h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
-
-    return h
-
-
 # Open a dataset from when there's a lot of sloshing going on.
 
 ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
-# gradient operator requires periodic boundaries.  This dataset has
+# Define the components of the gravitational acceleration vector field by
+# taking the gradient of the gravitational potential
+grad_fields = ds.add_gradient_fields(("gas","gravitational_potential"))
+
+# We don't need to do the same for the pressure field because yt already
+# has pressure gradient fields. Now, define the "degree of hydrostatic 
+# equilibrium" field.
+
+def _hse(field, data):
+    # Remember that g is the negative of the potential gradient
+    gx = -data["density"] * data["gravitational_potential_gradient_x"]
+    gy = -data["density"] * data["gravitational_potential_gradient_y"]
+    gz = -data["density"] * data["gravitational_potential_gradient_z"]
+    hx = data["pressure_gradient_x"] - gx
+    hy = data["pressure_gradient_y"] - gy
+    hz = data["pressure_gradient_z"] - gz
+    h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
+    return h
+ds.add_field(('gas','HSE'), function=_hse, units="", take_log=False,
+             display_name='Hydrostatic Equilibrium')
+
+# The gradient operator requires periodic boundaries.  This dataset has
 # open boundary conditions.  We need to hack it for now (this will be fixed
 # in future version of yt)
 ds.periodicity = (True, True, True)

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +572,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                                    slab_centers, slab_width,
+                                    nan_mask=0.0)
+
+All keyword arguments to `create_spectral_slabs` are passed on to `load` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -635,13 +666,14 @@
    import yt
    ds = yt.load("snapshot_061.hdf5")
 
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
+Gadget data in raw binary format can also be loaded with the ``load`` command. 
+This is only supported for snapshots created with the ``SnapFormat`` parameter 
+set to 1 (the standard for Gadget-2).
 
 .. code-block:: python
 
    import yt
-   ds = yt.GadgetDataset("snapshot_061")
+   ds = yt.load("snapshot_061")
 
 .. _particle-bbox:
 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -213,10 +213,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/visualizing/FITSImageBuffer.ipynb
--- a/doc/source/visualizing/FITSImageBuffer.ipynb
+++ /dev/null
@@ -1,205 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageBuffer` class, which has subclasses `FITSSlice` and `FITSProjection` to write slices and projections directly to FITS. We'll test this out on an Athena dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "from yt.utilities.fits_image import FITSImageBuffer, FITSSlice, FITSProjection"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
-      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
-      "                                                               \"time_unit\":(1.0,\"Myr\")})"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
-      "prj.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset. `prj_fits` is a full-fledged FITS file in memory, specifically an [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) object. This means that we can use all of the methods inherited from `HDUList`:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits.info()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`info` shows us the contents of the virtual FITS file. We can also look at the header for the `\"temperature\"` image, like so:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits[\"temperature\"].header"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. The projection can be written to disk using the `writeto` method:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds2 = yt.load(\"sloshing.fits\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
-      "slc2.set_log(\"temperature\", True)\n",
-      "slc2.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageBuffer` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc3 = ds.slice(0, 0.0)\n",
-      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
-      "fib = FITSImageBuffer(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, a 3D FITS cube can be created from a covering grid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
-      "fib = FITSImageBuffer(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/visualizing/FITSImageData.ipynb
--- /dev/null
+++ b/doc/source/visualizing/FITSImageData.ipynb
@@ -0,0 +1,409 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:c7de5ef190feaa2289595aec7eaa05db02fd535e408e0d04aa54088b0bd3ebae"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageData` class. We'll test these capabilities out on an Athena dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.utilities.fits_image import FITSImageData, FITSProjection"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
+      "                                                               \"time_unit\":(1.0,\"Myr\")})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Creating FITS images from Slices and Projections"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "There are several ways to make a `FITSImageData` instance. The most intuitive ways are to use the `FITSSlice`, `FITSProjection`, `FITSOffAxisSlice`, and `FITSOffAxisProjection` classes to write slices and projections directly to FITS. To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Because `FITSImageData` inherits from the [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) class, we can call its methods. For example, `info` shows us the contents of the virtual FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also look at the header for a particular field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. If we want the raw image data with units, we can call `get_data`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.get_data(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `set_unit` method to change the units of a particular field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.set_unit(\"temperature\",\"R\")\n",
+      "prj_fits.get_data(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The image can be written to disk using the `writeto` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"sloshing.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+      "slc2.set_log(\"temperature\", True)\n",
+      "slc2.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Using `FITSImageData` directly"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageData` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc3 = ds.slice(0, 0.0)\n",
+      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
+      "fid_frb = FITSImageData(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "A 3D FITS cube can also be created from a covering grid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
+      "fid_cvg = FITSImageData(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Other `FITSImageData` Methods"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "A `FITSImageData` instance can be generated from one previously written to disk using the `from_file` classmethod:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fid = FITSImageData.from_file(\"sloshing.fits\")\n",
+      "fid.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Multiple `FITSImageData` can be combined to create a new one, provided that the coordinate information is the same:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits2 = FITSProjection(ds, \"z\", [\"density\"])\n",
+      "prj_fits3 = FITSImageData.from_images([prj_fits, prj_fits2])\n",
+      "prj_fits3.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Alternatively, individual fields can be popped as well:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dens_fits = prj_fits3.pop(\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dens_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits3.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "So far, the FITS images we have shown have linear spatial coordinates. One may want to take a projection of an object and make a crude mock observation out of it, with celestial coordinates. For this, we can use the `create_sky_wcs` method. Specify a center (RA, Dec) coordinate in degrees, as well as a linear scale in terms of angle per distance:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sky_center = [30.,45.] # in degrees\n",
+      "sky_scale = (2.5, \"arcsec/kpc\") # could also use a YTQuantity\n",
+      "prj_fits.create_sky_wcs(sky_center, sky_scale, ctype=[\"RA---TAN\",\"DEC--TAN\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "By default, a tangent RA/Dec projection is used, but one could also use another projection using the `ctype` keyword. We can now look at the header and see it has the appropriate WCS:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we can add header keywords to a single field or for all fields in the FITS image using `update_header`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fid_frb.update_header(\"all\", \"time\", 0.1) # Update all the fields\n",
+      "fid_frb.update_header(\"temperature\", \"scale\", \"Rankine\") # Update just one field"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print fid_frb[\"density\"].header[\"time\"]\n",
+      "print fid_frb[\"temperature\"].header[\"scale\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -66,6 +66,57 @@
 setting up multiple axes with colorbars easier than it would be using only
 matplotlib can be found in the :ref:`advanced-multi-panel` cookbook recipe.
 
+.. _frb-filters:
+
+Fixed Resolution Buffer Filters
+-------------------------------
+
+The FRB can be modified by using a set of predefined filters in order to, e.g.,
+create realistic-looking mock observation images out of simulation data.
+Applying a filter is an irreversible operation, hence the order in which you
+apply them matters.
+
+.. python-script::
+
+   import matplotlib
+   matplotlib.use('Agg')
+   from matplotlib import pyplot as plt
+
+   import yt
+
+   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+   slc = ds.slice('z', 0.5)
+   frb = slc.to_frb((20, 'kpc'), 512)
+   frb.apply_gauss_beam(nbeam=30, sigma=2.0)
+   frb.apply_white_noise(5e-23)
+   plt.imshow(frb['density'].d)
+   plt.savefig('frb_filters.png')
+
+Currently available filters:
+
+Gaussian Smoothing
+~~~~~~~~~~~~~~~~~~
+
+.. function:: apply_gauss_beam(self, nbeam=30, sigma=2.0)
+
+   (This is a proxy for
+   :class:`~yt.visualization.fixed_resolution_filters.FixedResolutionBufferGaussBeamFilter`.)
+
+    This filter convolves the FRB with 2d Gaussian that is "nbeam" pixel wide
+    and has standard deviation "sigma".
+
+White Noise
+~~~~~~~~~~~
+
+.. function:: apply_white_noise(self, bg_lvl=None)
+
+   (This is a proxy for
+   :class:`~yt.visualization.fixed_resolution_filters.FixedResolutionBufferWhiteNoiseFilter`.)
+
+    This filter adds white noise with the amplitude "bg_lvl" to the FRB.
+    If "bg_lvl" is not present, 10th percentile of the FRB's values is used
+    instead.
+
 .. _manual-line-plots:
 
 Line Plots

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -56,7 +56,7 @@
 
    import yt
    ds = yt.load("/data/workshop2012/IsolatedGalaxy/galaxy0030/galaxy0030")
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
 This object, ``surface``, can be queried for values on the surface.  For
@@ -172,7 +172,7 @@
    trans = [1.0, 0.5]
    filename = './surfaces'
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i], color_field='temperature', plot_index = i)
@@ -248,7 +248,7 @@
        return (data['density']*data['density']*np.sqrt(data['temperature']))
    add_field("emissivity", function=_Emissivity, units=r"g*K/cm**6")
 
-   sphere = ds.sphere("max", (1.0, "mpc"))
+   sphere = ds.sphere("max", (1.0, "Mpc"))
    for i,r in enumerate(rho):
        surf = ds.surface(sphere, 'density', r)
        surf.export_obj(filename, transparency = trans[i],

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 doc/source/visualizing/writing_fits_images.rst
--- a/doc/source/visualizing/writing_fits_images.rst
+++ b/doc/source/visualizing/writing_fits_images.rst
@@ -3,4 +3,4 @@
 Writing FITS Images
 ==========================
 
-.. notebook:: FITSImageBuffer.ipynb
\ No newline at end of file
+.. notebook:: FITSImageData.ipynb
\ No newline at end of file

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 optional-requirements.txt
--- /dev/null
+++ b/optional-requirements.txt
@@ -0,0 +1,1 @@
+ipython[notebook]
\ No newline at end of file

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 requirements.txt
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,6 @@
+numpy==1.9.2 
+matplotlib==1.4.3 
+Cython==0.22 
+h5py==2.5.0 
+nose==1.3.6 
+sympy==0.7.6 

diff -r 6fc5217aed6a3387d0d90d781a4d0ac7170b5496 -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,6 @@
     sys.exit(1)
 
 import setuptools
-from distutils.version import StrictVersion
-if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
-    import distribute_setup
-    distribute_setup.use_setuptools()
-
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils.command import install_data as np_install_data

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/891d78f35594/
Changeset:   891d78f35594
Branch:      yt
User:        atmyers
Date:        2015-07-06 23:25:33+00:00
Summary:     fixing an import
Affected #:  1 file

diff -r 4e4e06c2cbe8866223527a9d5c3116dcadc34f34 -r 891d78f355943b0a6cd380753a850d0462fedb9f yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -19,7 +19,7 @@
 from yt.units import dimensions
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
-from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.visualization.volume_rendering.old_camera import off_axis_projection
 import re
 
 pyfits = _astropy.pyfits


https://bitbucket.org/yt_analysis/yt/commits/7deb01c712e2/
Changeset:   7deb01c712e2
Branch:      yt
User:        atmyers
Date:        2015-07-06 23:26:02+00:00
Summary:     making the io reader work with non-hex element types
Affected #:  1 file

diff -r 891d78f355943b0a6cd380753a850d0462fedb9f -r 7deb01c712e23a56dd0d6b893c86f98a6ce64ad3 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -256,6 +256,7 @@
                 subset.fill(field_vals, rv, selector, ind)
         return rv
 
+
 class IOHandlerStreamUnstructured(BaseIOHandler):
     _dataset_type = "stream_unstructured"
     _node_types = ("diffused", "convected")
@@ -267,11 +268,13 @@
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
         chunk = chunks[0]
+        mesh_id = chunk.objs[0].mesh_id
         rv = {}
         for field in fields:
             ftype, fname = field
+            nodes_per_element = self.fields[mesh_id][field].shape[1]
             if fname in self._node_types:
-                rv[field] = np.empty((size, 8), dtype="float64")
+                rv[field] = np.empty((size, nodes_per_element), dtype="float64")
             else:
                 rv[field] = np.empty(size, dtype="float64")
         ngrids = sum(len(chunk.objs) for chunk in chunks)


https://bitbucket.org/yt_analysis/yt/commits/506acf1c9b60/
Changeset:   506acf1c9b60
Branch:      yt
User:        atmyers
Date:        2015-07-07 00:06:47+00:00
Summary:     removing an outdated comment
Affected #:  1 file

diff -r 7deb01c712e23a56dd0d6b893c86f98a6ce64ad3 -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -206,9 +206,6 @@
         cdef int i, j, ind
         cdef int nv = vertices_in.shape[0]
         cdef int ne = indices_in.shape[0]
-
-        # There are six faces for every quad.  Each of those will be divided
-        # into two triangles.
         cdef int nt = self.tpe*ne
 
         cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,


https://bitbucket.org/yt_analysis/yt/commits/919f8a13fd6b/
Changeset:   919f8a13fd6b
Branch:      yt
User:        atmyers
Date:        2015-07-07 22:20:16+00:00
Summary:     doing some work to make the filter feeback functions do more general things
Affected #:  5 files

diff -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -1,6 +1,7 @@
 cimport pyembree.rtcore as rtc
 cimport pyembree.rtcore_ray as rtcr
 from pyembree.rtcore cimport Vec3f
+from yt.utilities.lib.mesh_construction cimport UserData
 cimport cython
 
 
@@ -9,17 +10,41 @@
     cdef int ray_id
     cdef double u, v, val
     cdef double d0, d1, d2
-    cdef Vec3f* data
 
-    data = <Vec3f*> userPtr
+    data = <UserData*> userPtr
     ray_id = ray.primID
 
     u = ray.u
     v = ray.v
 
-    d0 = data[ray_id].x
-    d1 = data[ray_id].y
-    d2 = data[ray_id].z
+    d0 = data.field_data[ray_id].x
+    d1 = data.field_data[ray_id].y
+    d2 = data.field_data[ray_id].z
+
+    return d0*(1.0 - u - v) + d1*u + d2*v
+
+
+cdef double get_value_triangle(void* userPtr,
+                               rtcr.RTCRay& ray):
+    cdef int ray_id, elem_id
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef Vec3f* field_data
+    cdef long[:, :] element_indices
+
+
+    data = <UserData*> userPtr
+    field_data = data.field_data
+    element_indices = data.element_indices
+
+    ray_id = ray.primID
+    elem_id = ray_id / data.tpe
+    u = ray.u
+    v = ray.v
+
+    d0 = field_data[ray_id].x
+    d1 = field_data[ray_id].y
+    d2 = field_data[ray_id].z
 
     return d0*(1.0 - u - v) + d1*u + d2*v
 

diff -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -1,5 +1,7 @@
 #define MAX_NUM_TRI 12
+#define HEX_NV 8
 #define HEX_NT 12
+#define TETRA_NV 4
 #define TETRA_NT 4
 
 // This array is used to triangulate the hexahedral mesh elements

diff -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -17,8 +17,10 @@
 cdef extern from "mesh_construction.h":
     enum:
         MAX_NUM_TRI
-
+        
+    int HEX_NV
     int HEX_NT
+    int TETRA_NV
     int TETRA_NT
     int triangulate_hex[MAX_NUM_TRI][3]
     int triangulate_tetra[MAX_NUM_TRI][3]
@@ -65,8 +67,10 @@
     cdef unsigned int mesh
     cdef Vec3f* field_data
     cdef rtcg.RTCFilterFunc filter_func
-    cdef int tpe
+    cdef int tpe, vpe
     cdef int[MAX_NUM_TRI][3] tri_array
+    cdef long[:,:] element_indices
+    cdef UserData user_data
 
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices,
@@ -85,7 +89,7 @@
         # but also means we have exactly three times as many vertices as
         # triangles.
         cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                    rtcg.RTC_GEOMETRY_STATIC, nt, nt*3, 1) 
+                        rtcg.RTC_GEOMETRY_STATIC, nt, nt*3, 1) 
         
         cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
                         rtcg.RTC_VERTEX_BUFFER)
@@ -117,7 +121,7 @@
         cdef int nt = tri_indices.shape[0]
 
         cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+                                        rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
 
         # set up vertex and triangle arrays. In this case, we just read
         # them directly from the inputs
@@ -182,14 +186,17 @@
                  np.ndarray indices,
                  np.ndarray data,
                  sampler_type):
+
         # We need now to figure out if we've been handed quads or tetrahedra.
         # If it's quads, we can build the mesh slightly differently.
         # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
 
         if indices.shape[1] == 8:
+            self.vpe = HEX_NV
             self.tpe = HEX_NT
             self.tri_array = triangulate_hex
         elif indices.shape[1] == 4:
+            self.vpe = TETRA_NV
             self.tpe = TETRA_NT
             self.tri_array = triangulate_tetra
         else:
@@ -209,7 +216,7 @@
         cdef int nt = self.tpe*ne
 
         cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1) 
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
 
         # first just copy over the vertices
         cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
@@ -236,6 +243,7 @@
 
         self.vertices = vertices
         self.indices = triangles
+        self.element_indices = indices_in
         self.mesh = mesh
 
     cdef void _set_field_data(self, YTEmbreeScene scene,
@@ -252,9 +260,18 @@
                 field_data[ind].y = data_in[i][self.tri_array[j][1]]
                 field_data[ind].z = data_in[i][self.tri_array[j][2]]
 
-        rtcg.rtcSetUserData(scene.scene_i, self.mesh, field_data)
+        self.field_data = field_data
 
-        self.field_data = field_data
+        cdef UserData user_data
+        user_data.vertices = self.vertices
+        user_data.indices = self.indices
+        user_data.field_data = self.field_data
+        user_data.element_indices = self.element_indices
+        user_data.tpe = self.tpe
+        user_data.vpe = self.vpe
+        self.user_data = user_data
+        
+        rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.user_data)
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
@@ -269,6 +286,27 @@
                                               self.mesh,
                                               self.filter_func)
 
+    # def sample_mesh_element(self, rtcr.RTCRay& ray):
+    #     cdef int primID, elemID
+    #     primID = ray.primID
+    #     elemID = primID / self.tpe
+    #     position = np.empty(3, dtype=np.float64)
+
+    #     position[0] = ray.u*self.vertices[self.indices[primID].v0].x + \
+    #                   ray.v*self.vertices[self.indices[primID].v1].x  + \
+    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].x
+
+    #     position[1] = ray.u*self.vertices[self.indices[primID].v0].y + \
+    #                   ray.v*self.vertices[self.indices[primID].v1].y + \
+    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].y
+
+    #     position[2] = ray.u*self.vertices[self.indices[primID].v0].z + \
+    #                   ray.v*self.vertices[self.indices[primID].v1].z + \
+    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].z
+
+        
+        
+
     def __dealloc__(self):
         if self.field_data is not NULL:
             free(self.field_data)

diff -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -31,7 +31,10 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def __call__(self, YTEmbreeScene scene, int num_threads = 0):
+    def __call__(self, 
+                 YTEmbreeScene scene,
+                 mesh,
+                 int num_threads = 0):
         '''
 
         This function is supposed to cast the rays and return the
@@ -43,6 +46,7 @@
         cdef int vi, vj, i, j, ni, nj, nn
         cdef np.int64_t offset
         cdef ImageContainer *im = self.image
+        cdef np.int64_t elemID
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.int64_t nx, ny, size
@@ -79,6 +83,7 @@
                 ray.mask = -1
                 ray.time = 0
                 rtcs.rtcIntersect(scene.scene_i, ray)
+                elemID = ray.primID / 12
                 data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)

diff -r 506acf1c9b60da44eb7e41b7e81047ea547a9547 -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -303,7 +303,7 @@
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
-        self.sampler(self.scene)
+        self.sampler(self.scene, self.mesh)
         mylog.debug("Done casting rays")
 
         self.current_image = self.sampler.aimage


https://bitbucket.org/yt_analysis/yt/commits/183a261d840e/
Changeset:   183a261d840e
Branch:      yt
User:        atmyers
Date:        2015-07-07 22:20:48+00:00
Summary:     a struct for passing user data off to the filter feedback functions
Affected #:  1 file

diff -r 919f8a13fd6b0742743951641bf4d86eb4d11de2 -r 183a261d840e86fba8a326679a07e422509a8c6f yt/utilities/lib/mesh_construction.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -0,0 +1,12 @@
+from pyembree.rtcore cimport \
+    Vertex, \
+    Triangle, \
+    Vec3f
+
+ctypedef struct UserData:
+    Vertex* vertices
+    Triangle* indices
+    Vec3f* field_data
+    long[:,:] element_indices
+    int tpe
+    int vpe


https://bitbucket.org/yt_analysis/yt/commits/838f8c18618f/
Changeset:   838f8c18618f
Branch:      yt
User:        atmyers
Date:        2015-07-07 22:23:54+00:00
Summary:     removing commented out function
Affected #:  3 files

diff -r 183a261d840e86fba8a326679a07e422509a8c6f -r 838f8c18618fb955e558fc2fcd4fce543992f3af yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -3,6 +3,8 @@
 from pyembree.rtcore cimport Vec3f
 cimport cython
 
+cdef double sample_surface_triangle(void* userPtr,
+                                    rtcr.RTCRay& ray)
 
 cdef double get_value_trilinear(void* userPtr,
                                 rtcr.RTCRay& ray)

diff -r 183a261d840e86fba8a326679a07e422509a8c6f -r 838f8c18618fb955e558fc2fcd4fce543992f3af yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -24,8 +24,8 @@
     return d0*(1.0 - u - v) + d1*u + d2*v
 
 
-cdef double get_value_triangle(void* userPtr,
-                               rtcr.RTCRay& ray):
+cdef double sample_surface_triangle(void* userPtr,
+                                    rtcr.RTCRay& ray):
     cdef int ray_id, elem_id
     cdef double u, v, val
     cdef double d0, d1, d2

diff -r 183a261d840e86fba8a326679a07e422509a8c6f -r 838f8c18618fb955e558fc2fcd4fce543992f3af yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -6,7 +6,8 @@
 cimport pyembree.rtcore_geometry_user as rtcgu
 from filter_feedback_functions cimport \
     maximum_intensity, \
-    sample_surface
+    sample_surface, \
+    sample_surface_triangle
 from pyembree.rtcore cimport \
     Vertex, \
     Triangle, \
@@ -275,7 +276,7 @@
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
+            self.filter_func = <rtcg.RTCFilterFunc> sample_surface_triangle
         elif sampler_type == 'maximum':
             self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
         else:
@@ -286,26 +287,6 @@
                                               self.mesh,
                                               self.filter_func)
 
-    # def sample_mesh_element(self, rtcr.RTCRay& ray):
-    #     cdef int primID, elemID
-    #     primID = ray.primID
-    #     elemID = primID / self.tpe
-    #     position = np.empty(3, dtype=np.float64)
-
-    #     position[0] = ray.u*self.vertices[self.indices[primID].v0].x + \
-    #                   ray.v*self.vertices[self.indices[primID].v1].x  + \
-    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].x
-
-    #     position[1] = ray.u*self.vertices[self.indices[primID].v0].y + \
-    #                   ray.v*self.vertices[self.indices[primID].v1].y + \
-    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].y
-
-    #     position[2] = ray.u*self.vertices[self.indices[primID].v0].z + \
-    #                   ray.v*self.vertices[self.indices[primID].v1].z + \
-    #                   (1.0 - ray.u - ray.v)*self.vertices[self.indices[primID].v2].z
-
-        
-        
 
     def __dealloc__(self):
         if self.field_data is not NULL:


https://bitbucket.org/yt_analysis/yt/commits/69437d9b5125/
Changeset:   69437d9b5125
Branch:      yt
User:        atmyers
Date:        2015-07-08 03:51:50+00:00
Summary:     doing the full, hexahedral interpolation in the surface renderer
Affected #:  5 files

diff -r 838f8c18618fb955e558fc2fcd4fce543992f3af -r 69437d9b512591addbcf891ff31ee49f4030eec4 yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -3,8 +3,8 @@
 from pyembree.rtcore cimport Vec3f
 cimport cython
 
-cdef double sample_surface_triangle(void* userPtr,
-                                    rtcr.RTCRay& ray)
+cdef double sample_surface_hex(void* userPtr,
+                               rtcr.RTCRay& ray)
 
 cdef double get_value_trilinear(void* userPtr,
                                 rtcr.RTCRay& ray)

diff -r 838f8c18618fb955e558fc2fcd4fce543992f3af -r 69437d9b512591addbcf891ff31ee49f4030eec4 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -1,7 +1,9 @@
 cimport pyembree.rtcore as rtc
 cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f
+from pyembree.rtcore cimport Vec3f, Triangle, Vertex
 from yt.utilities.lib.mesh_construction cimport UserData
+from yt.utilities.lib.element_mappings import Q1Sampler3D
+cimport numpy as np
 cimport cython
 
 
@@ -23,31 +25,69 @@
 
     return d0*(1.0 - u - v) + d1*u + d2*v
 
-
-cdef double sample_surface_triangle(void* userPtr,
-                                    rtcr.RTCRay& ray):
-    cdef int ray_id, elem_id
+    
+cdef double sample_surface_hex(void* userPtr,
+                               rtcr.RTCRay& ray):
+    cdef int ray_id, elem_id, i
     cdef double u, v, val
     cdef double d0, d1, d2
-    cdef Vec3f* field_data
-    cdef long[:, :] element_indices
-
+    cdef double[:] field_data
+    cdef long[:] element_indices
+    cdef double[8][3] vertices
+    cdef double[:] position
+    cdef double result
+    cdef UserData* data
 
     data = <UserData*> userPtr
-    field_data = data.field_data
-    element_indices = data.element_indices
-
     ray_id = ray.primID
     elem_id = ray_id / data.tpe
-    u = ray.u
-    v = ray.v
 
-    d0 = field_data[ray_id].x
-    d1 = field_data[ray_id].y
-    d2 = field_data[ray_id].z
+    position = get_hit_position(userPtr, ray)
+    element_indices = data.element_indices[elem_id]
+    field_data = data.field_data[elem_id]
 
-    return d0*(1.0 - u - v) + d1*u + d2*v
+    for i in range(8):
+        vertices[i][0] = data.vertices[element_indices[i]].x
+        vertices[i][1] = data.vertices[element_indices[i]].y
+        vertices[i][2] = data.vertices[element_indices[i]].z    
 
+    sampler = Q1Sampler3D()
+    result = sampler.sample_at_real_point(position, vertices, field_data)
+
+    return result
+
+
+cdef double[:] get_hit_position(void* userPtr,
+                                rtcr.RTCRay& ray):
+    cdef int primID, elemID, i
+    cdef double[3] position
+    cdef double[3][3] vertex_positions
+    cdef Triangle tri
+    cdef UserData* data
+
+    primID = ray.primID
+    data = <UserData*> userPtr
+    tri = data.indices[primID]
+
+    vertex_positions[0][0] = data.vertices[tri.v0].x
+    vertex_positions[0][1] = data.vertices[tri.v0].y
+    vertex_positions[0][2] = data.vertices[tri.v0].z
+
+    vertex_positions[1][0] = data.vertices[tri.v1].x
+    vertex_positions[1][1] = data.vertices[tri.v1].y
+    vertex_positions[1][2] = data.vertices[tri.v1].z
+
+    vertex_positions[2][0] = data.vertices[tri.v2].x
+    vertex_positions[2][1] = data.vertices[tri.v2].y
+    vertex_positions[2][2] = data.vertices[tri.v2].z
+
+    for i in range(3):
+        position[i] = vertex_positions[0][i]*ray.u + \
+                      vertex_positions[1][i]*ray.v + \
+                      vertex_positions[2][i]*(1.0 - ray.u - ray.v)
+
+    return position
+    
 
 cdef void maximum_intensity(void* userPtr, 
                             rtcr.RTCRay& ray):

diff -r 838f8c18618fb955e558fc2fcd4fce543992f3af -r 69437d9b512591addbcf891ff31ee49f4030eec4 yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -6,7 +6,7 @@
 ctypedef struct UserData:
     Vertex* vertices
     Triangle* indices
-    Vec3f* field_data
+    double[:,:] field_data
     long[:,:] element_indices
     int tpe
     int vpe

diff -r 838f8c18618fb955e558fc2fcd4fce543992f3af -r 69437d9b512591addbcf891ff31ee49f4030eec4 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -4,10 +4,11 @@
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
+from yt.utilities.lib.element_mappings import Q1Sampler3D
 from filter_feedback_functions cimport \
     maximum_intensity, \
     sample_surface, \
-    sample_surface_triangle
+    sample_surface_hex
 from pyembree.rtcore cimport \
     Vertex, \
     Triangle, \
@@ -66,7 +67,8 @@
     cdef Vertex* vertices
     cdef Triangle* indices
     cdef unsigned int mesh
-    cdef Vec3f* field_data
+#    cdef Vec3f* field_data
+    cdef double[:,:] field_data
     cdef rtcg.RTCFilterFunc filter_func
     cdef int tpe, vpe
     cdef int[MAX_NUM_TRI][3] tri_array
@@ -204,7 +206,7 @@
             raise NotImplementedError
 
         self._build_from_indices(scene, vertices, indices)
-        self.field_data = NULL
+#        self.field_data = NULL
         self._set_field_data(scene, data)
         self._set_sampler_type(scene, sampler_type)
 
@@ -250,18 +252,18 @@
     cdef void _set_field_data(self, YTEmbreeScene scene,
                               np.ndarray data_in):
 
-        cdef int ne = data_in.shape[0]
-        cdef int nt = self.tpe*ne
-        cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
+        # cdef int ne = data_in.shape[0]
+        # cdef int nt = self.tpe*ne
+        # cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
 
-        for i in range(ne):
-            for j in range(self.tpe):
-                ind = self.tpe*i+j
-                field_data[ind].x = data_in[i][self.tri_array[j][0]]
-                field_data[ind].y = data_in[i][self.tri_array[j][1]]
-                field_data[ind].z = data_in[i][self.tri_array[j][2]]
+        # for i in range(ne):
+        #     for j in range(self.tpe):
+        #         ind = self.tpe*i+j
+        #         field_data[ind].x = data_in[i][self.tri_array[j][0]]
+        #         field_data[ind].y = data_in[i][self.tri_array[j][1]]
+        #         field_data[ind].z = data_in[i][self.tri_array[j][2]]
 
-        self.field_data = field_data
+        self.field_data = data_in
 
         cdef UserData user_data
         user_data.vertices = self.vertices
@@ -276,18 +278,59 @@
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> sample_surface_triangle
+            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
         elif sampler_type == 'maximum':
             self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
         else:
             print "Error - sampler type not implemented."
             raise NotImplementedError
 
-        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
-                                              self.mesh,
-                                              self.filter_func)
+#        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
+#                                              self.mesh,
+#                                              self.filter_func)
 
+    def sample_at_point(self, double u, double v, int primID):
+        
+        cdef int elemID
+        position = self._get_hit_position(u, v, primID)
+        vertices = np.empty((8, 3), dtype=np.float64)
+        
+        elemID = primID / self.tpe
+        element_indices = self.element_indices[elemID]
+        field_data = np.asarray(self.field_data[elemID], dtype=np.float64)
 
-    def __dealloc__(self):
-        if self.field_data is not NULL:
-            free(self.field_data)
+        for i in range(8):
+            vertices[i][0] = self.vertices[element_indices[i]].x
+            vertices[i][1] = self.vertices[element_indices[i]].y
+            vertices[i][2] = self.vertices[element_indices[i]].z    
+                             
+        sampler = Q1Sampler3D()
+        result = sampler.sample_at_real_point(position, vertices, field_data)
+
+        return result
+
+
+    cdef np.ndarray _get_hit_position(self, double u, double v, int primID):
+
+        cdef Triangle tri
+        position = np.empty(3, dtype=np.float64)
+        vertices = np.empty((3, 3), dtype=np.float64)
+        
+        tri = self.indices[primID]
+
+        vertices[0][0] = self.vertices[tri.v0].x
+        vertices[0][1] = self.vertices[tri.v0].y
+        vertices[0][2] = self.vertices[tri.v0].z
+        
+        vertices[1][0] = self.vertices[tri.v1].x
+        vertices[1][1] = self.vertices[tri.v1].y
+        vertices[1][2] = self.vertices[tri.v1].z
+
+        vertices[2][0] = self.vertices[tri.v2].x
+        vertices[2][1] = self.vertices[tri.v2].y
+        vertices[2][2] = self.vertices[tri.v2].z
+
+        position = vertices[0]*(1.0 - u - v) + vertices[1]*u + vertices[2]*v
+
+        return position
+

diff -r 838f8c18618fb955e558fc2fcd4fce543992f3af -r 69437d9b512591addbcf891ff31ee49f4030eec4 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -47,6 +47,7 @@
         cdef np.int64_t offset
         cdef ImageContainer *im = self.image
         cdef np.int64_t elemID
+        cdef np.float64_t value
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.int64_t nx, ny, size
@@ -83,8 +84,11 @@
                 ray.mask = -1
                 ray.time = 0
                 rtcs.rtcIntersect(scene.scene_i, ray)
-                elemID = ray.primID / 12
-                data[j] = ray.time
+                if ray.primID == -1:
+                    data[j] = 0.0
+                else:
+                    value = mesh.sample_at_point(ray.u, ray.v, ray.primID)
+                    data[j] = value
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)
         else:
@@ -107,7 +111,7 @@
                 ray.mask = -1
                 ray.time = 0
                 rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
+                data[j] = ray.primID
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)
             free(v_dir)


https://bitbucket.org/yt_analysis/yt/commits/5891133d72e5/
Changeset:   5891133d72e5
Branch:      yt
User:        atmyers
Date:        2015-07-08 17:17:11+00:00
Summary:     revert back to the triangular interpolation for now
Affected #:  2 files

diff -r 69437d9b512591addbcf891ff31ee49f4030eec4 -r 5891133d72e5edef19d9846023c2478030d5890c yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -1,4 +1,5 @@
 cimport numpy as np
+cimport cython
 cimport pyembree.rtcore as rtc 
 from mesh_traversal cimport YTEmbreeScene
 cimport pyembree.rtcore_geometry as rtcg
@@ -291,11 +292,22 @@
 
     def sample_at_point(self, double u, double v, int primID):
         
-        cdef int elemID
+        cdef int elemID, faceID
         position = self._get_hit_position(u, v, primID)
         vertices = np.empty((8, 3), dtype=np.float64)
         
         elemID = primID / self.tpe
+        # faceID = (primID % self.tpe) / 2
+        
+        # faces = np.array([[0, 1, 2, 3],
+        #                   [4, 5, 6, 7],
+        #                   [0, 1, 5, 4],
+        #                   [1, 2, 6, 5],
+        #                   [0, 3, 7, 4],
+        #                   [3, 2, 6, 7]])
+
+        # locs = faces[faceID]
+
         element_indices = self.element_indices[elemID]
         field_data = np.asarray(self.field_data[elemID], dtype=np.float64)
 
@@ -334,3 +346,22 @@
 
         return position
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    @cython.initializedcheck(False)
+    def sample_triangular(self, double u, double v, int primID):
+
+        cdef int i, j
+        cdef double d0, d1, d2
+
+        i = primID / self.tpe
+        j = primID % self.tpe
+
+        d0 = self.field_data[i][self.tri_array[j][0]]
+        d1 = self.field_data[i][self.tri_array[j][1]]
+        d2 = self.field_data[i][self.tri_array[j][2]]
+
+        return d0*(1.0 - u - v) + d1*u + d2*v
+        
+        

diff -r 69437d9b512591addbcf891ff31ee49f4030eec4 -r 5891133d72e5edef19d9846023c2478030d5890c yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -87,7 +87,7 @@
                 if ray.primID == -1:
                     data[j] = 0.0
                 else:
-                    value = mesh.sample_at_point(ray.u, ray.v, ray.primID)
+                    value = mesh.sample_triangular(ray.u, ray.v, ray.primID)
                     data[j] = value
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)


https://bitbucket.org/yt_analysis/yt/commits/2e22165ddbf6/
Changeset:   2e22165ddbf6
Branch:      yt
User:        atmyers
Date:        2015-07-09 08:24:24+00:00
Summary:     much faster hexahedral interpolation
Affected #:  5 files

diff -r 5891133d72e5edef19d9846023c2478030d5890c -r 2e22165ddbf63fcf02054c5a866345678989bc33 yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -3,8 +3,8 @@
 from pyembree.rtcore cimport Vec3f
 cimport cython
 
-cdef double sample_surface_hex(void* userPtr,
-                               rtcr.RTCRay& ray)
+cdef void sample_surface_hex(void* userPtr,
+                             rtcr.RTCRay& ray)
 
 cdef double get_value_trilinear(void* userPtr,
                                 rtcr.RTCRay& ray)

diff -r 5891133d72e5edef19d9846023c2478030d5890c -r 2e22165ddbf63fcf02054c5a866345678989bc33 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -5,6 +5,109 @@
 from yt.utilities.lib.element_mappings import Q1Sampler3D
 cimport numpy as np
 cimport cython
+from libc.math cimport fabs, fmax
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline double determinant_3x3(double* col0, 
+                                   double* col1, 
+                                   double* col2) nogil:
+    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
+           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
+           col0[2]*col1[0]*col2[1] + col0[2]*col1[1]*col2[0]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void func(double* f,
+                      double* x, 
+                      double* vertices, 
+                      double* phys_x) nogil:
+    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        f[i] = vertices[0 + i]*rm*sm*tm \
+             + vertices[3 + i]*rp*sm*tm \
+             + vertices[6 + i]*rm*sp*tm \
+             + vertices[9 + i]*rp*sp*tm \
+             + vertices[12 + i]*rm*sm*tp \
+             + vertices[15 + i]*rp*sm*tp \
+             + vertices[18 + i]*rm*sp*tp \
+             + vertices[21 + i]*rp*sp*tp \
+             - 8.0*phys_x[i]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void J(double* r,
+                   double* s,
+                   double* t,
+                   double* x, 
+                   double* v, 
+                   double* phys_x) nogil:
+    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  - \
+                sp*tm*v[6 + i]  + sp*tm*v[9 + i]  - \
+                sm*tp*v[12 + i] + sm*tp*v[15 + i] - \
+                sp*tp*v[18 + i] + sp*tp*v[21 + i]
+        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
+                rm*tm*v[6 + i]  + rp*tm*v[9 + i]  - \
+                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
+                rm*tp*v[18 + i] + rp*tp*v[21 + i]
+        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
+                rm*sp*v[6 + i]  - rp*sp*v[9 + i]  + \
+                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
+                rm*sp*v[18 + i] + rp*sp*v[21 + i]
+                
+                
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double sample_at_unit_point(double* coord, double* vals) nogil:
+    cdef double F, rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - coord[0]
+    rp = 1.0 + coord[0]
+    sm = 1.0 - coord[1]
+    sp = 1.0 + coord[1]
+    tm = 1.0 - coord[2]
+    tp = 1.0 + coord[2]
+    
+    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rm*sp*tm + vals[3]*rp*sp*tm + \
+        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rm*sp*tp + vals[7]*rp*sp*tp
+    return 0.125*F
+                
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double maxnorm(double* f) nogil:
+    cdef double err
+    cdef int i
+    err = fabs(f[0])
+    for i in range(1, 2):
+        err = fmax(err, fabs(f[i])) 
+    return err
 
 
 cdef double get_value_trilinear(void* userPtr,
@@ -25,42 +128,83 @@
 
     return d0*(1.0 - u - v) + d1*u + d2*v
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double sample_at_real_point(double* vertices,
+                                 double* field_values,
+                                 double* physical_x):
     
-cdef double sample_surface_hex(void* userPtr,
-                               rtcr.RTCRay& ray):
+    cdef int i
+    cdef double d, val
+    cdef double[3] f
+    cdef double[3] r
+    cdef double[3] s
+    cdef double[3] t
+    cdef double[3] x
+    cdef double tolerance = 1.0e-9
+    cdef int iterations = 0
+    cdef double err
+   
+    # initial guess
+    for i in range(3):
+        x[i] = 0.0
+    
+    # initial error norm
+    func(f, x, vertices, physical_x)
+    err = maxnorm(f)  
+   
+    # begin Newton iteration
+    while (err > tolerance and iterations < 10):
+        J(r, s, t, x, vertices, physical_x)
+        d = determinant_3x3(r, s, t)
+        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
+        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
+        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
+        func(f, x, vertices, physical_x)        
+        err = maxnorm(f)
+        iterations += 1
+        
+    val = sample_at_unit_point(x, field_values)
+    return val
+    
+cdef void sample_surface_hex(void* userPtr,
+                             rtcr.RTCRay& ray):
     cdef int ray_id, elem_id, i
     cdef double u, v, val
     cdef double d0, d1, d2
-    cdef double[:] field_data
-    cdef long[:] element_indices
-    cdef double[8][3] vertices
-    cdef double[:] position
+    cdef double[8] field_data
+    cdef long[8] element_indices
+    cdef double[24] vertices
+    cdef double[3] position
     cdef double result
     cdef UserData* data
 
     data = <UserData*> userPtr
     ray_id = ray.primID
+    if ray_id == -1:
+        return
+
     elem_id = ray_id / data.tpe
 
-    position = get_hit_position(userPtr, ray)
-    element_indices = data.element_indices[elem_id]
-    field_data = data.field_data[elem_id]
+    get_hit_position(position, userPtr, ray)
+    
+    for i in range(8):
+        element_indices[i] = data.element_indices[elem_id*8+i]
+        field_data[i] = data.field_data[elem_id*8+i]
 
     for i in range(8):
-        vertices[i][0] = data.vertices[element_indices[i]].x
-        vertices[i][1] = data.vertices[element_indices[i]].y
-        vertices[i][2] = data.vertices[element_indices[i]].z    
+        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    sampler = Q1Sampler3D()
-    result = sampler.sample_at_real_point(position, vertices, field_data)
+    val = sample_at_real_point(vertices, field_data, position)
+    ray.time = val
 
-    return result
-
-
-cdef double[:] get_hit_position(void* userPtr,
-                                rtcr.RTCRay& ray):
+cdef void get_hit_position(double* position,
+                           void* userPtr,
+                           rtcr.RTCRay& ray):
     cdef int primID, elemID, i
-    cdef double[3] position
     cdef double[3][3] vertex_positions
     cdef Triangle tri
     cdef UserData* data
@@ -82,11 +226,9 @@
     vertex_positions[2][2] = data.vertices[tri.v2].z
 
     for i in range(3):
-        position[i] = vertex_positions[0][i]*ray.u + \
-                      vertex_positions[1][i]*ray.v + \
-                      vertex_positions[2][i]*(1.0 - ray.u - ray.v)
-
-    return position
+        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
+                      vertex_positions[1][i]*ray.u + \
+                      vertex_positions[2][i]*ray.v
     
 
 cdef void maximum_intensity(void* userPtr, 
@@ -102,3 +244,4 @@
 
     cdef double val = get_value_trilinear(userPtr, ray)
     ray.time = val
+

diff -r 5891133d72e5edef19d9846023c2478030d5890c -r 2e22165ddbf63fcf02054c5a866345678989bc33 yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -6,7 +6,7 @@
 ctypedef struct UserData:
     Vertex* vertices
     Triangle* indices
-    double[:,:] field_data
-    long[:,:] element_indices
+    double* field_data
+    long* element_indices
     int tpe
     int vpe

diff -r 5891133d72e5edef19d9846023c2478030d5890c -r 2e22165ddbf63fcf02054c5a866345678989bc33 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -69,11 +69,11 @@
     cdef Triangle* indices
     cdef unsigned int mesh
 #    cdef Vec3f* field_data
-    cdef double[:,:] field_data
+    cdef double* field_data
     cdef rtcg.RTCFilterFunc filter_func
     cdef int tpe, vpe
     cdef int[MAX_NUM_TRI][3] tri_array
-    cdef long[:,:] element_indices
+    cdef long* element_indices
     cdef UserData user_data
 
     def __init__(self, YTEmbreeScene scene,
@@ -206,8 +206,9 @@
         else:
             raise NotImplementedError
 
+        self.field_data = NULL
+        self.element_indices = NULL
         self._build_from_indices(scene, vertices, indices)
-#        self.field_data = NULL
         self._set_field_data(scene, data)
         self._set_sampler_type(scene, sampler_type)
 
@@ -238,33 +239,34 @@
 
         for i in range(ne):
             for j in range(self.tpe):
-                ind = self.tpe*i+j
-                triangles[ind].v0 = indices_in[i][self.tri_array[j][0]]
-                triangles[ind].v1 = indices_in[i][self.tri_array[j][1]]
-                triangles[ind].v2 = indices_in[i][self.tri_array[j][2]]
+                triangles[self.tpe*i+j].v0 = indices_in[i][self.tri_array[j][0]]
+                triangles[self.tpe*i+j].v1 = indices_in[i][self.tri_array[j][1]]
+                triangles[self.tpe*i+j].v2 = indices_in[i][self.tri_array[j][2]]
 
         rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
 
+        cdef long* element_indices = <long *> malloc(ne * self.vpe * sizeof(long))
+    
+        for i in range(ne):
+            for j in range(self.vpe):
+                element_indices[i*self.vpe + j] = indices_in[i][j]
+
+        self.element_indices = element_indices
         self.vertices = vertices
         self.indices = triangles
-        self.element_indices = indices_in
         self.mesh = mesh
 
     cdef void _set_field_data(self, YTEmbreeScene scene,
                               np.ndarray data_in):
 
-        # cdef int ne = data_in.shape[0]
-        # cdef int nt = self.tpe*ne
-        # cdef Vec3f* field_data = <Vec3f *>malloc(nt * sizeof(Vec3f))
+        cdef int ne = data_in.shape[0]
+        cdef double* field_data = <double *> malloc(ne * self.vpe * sizeof(double))
 
-        # for i in range(ne):
-        #     for j in range(self.tpe):
-        #         ind = self.tpe*i+j
-        #         field_data[ind].x = data_in[i][self.tri_array[j][0]]
-        #         field_data[ind].y = data_in[i][self.tri_array[j][1]]
-        #         field_data[ind].z = data_in[i][self.tri_array[j][2]]
+        for i in range(ne):
+            for j in range(self.vpe):
+                field_data[self.vpe*i+j] = data_in[i][j]
 
-        self.field_data = data_in
+        self.field_data = field_data
 
         cdef UserData user_data
         user_data.vertices = self.vertices
@@ -279,89 +281,106 @@
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> sample_surface
+            self.filter_func = <rtcg.RTCFilterFunc> sample_surface_hex
         elif sampler_type == 'maximum':
             self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
         else:
             print "Error - sampler type not implemented."
             raise NotImplementedError
 
-#        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
-#                                              self.mesh,
-#                                              self.filter_func)
+        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
+                                              self.mesh,
+                                              self.filter_func)
 
-    def sample_at_point(self, double u, double v, int primID):
+    # @cython.boundscheck(False)
+    # @cython.wraparound(False)
+    # @cython.cdivision(True)
+    # @cython.initializedcheck(False)
+    # def sample_at_point(self, double u, double v, int primID):
         
-        cdef int elemID, faceID
-        position = self._get_hit_position(u, v, primID)
-        vertices = np.empty((8, 3), dtype=np.float64)
+    #     cdef int elemID, faceID
+    #     position = self._get_hit_position(u, v, primID)
+    #     vertices = np.empty((8, 3), dtype=np.float64)
         
-        elemID = primID / self.tpe
-        # faceID = (primID % self.tpe) / 2
+    #     elemID = primID / self.tpe
+    #     # faceID = (primID % self.tpe) / 2
         
-        # faces = np.array([[0, 1, 2, 3],
-        #                   [4, 5, 6, 7],
-        #                   [0, 1, 5, 4],
-        #                   [1, 2, 6, 5],
-        #                   [0, 3, 7, 4],
-        #                   [3, 2, 6, 7]])
+    #     # faces = np.array([[0, 1, 2, 3],
+    #     #                   [4, 5, 6, 7],
+    #     #                   [0, 1, 5, 4],
+    #     #                   [1, 2, 6, 5],
+    #     #                   [0, 3, 7, 4],
+    #     #                   [3, 2, 6, 7]])
 
-        # locs = faces[faceID]
+    #     # locs = faces[faceID]
 
-        element_indices = self.element_indices[elemID]
-        field_data = np.asarray(self.field_data[elemID], dtype=np.float64)
+    #     element_indices = self.element_indices[elemID]
+    #     field_data = np.asarray(self.field_data[elemID], dtype=np.float64)
 
-        for i in range(8):
-            vertices[i][0] = self.vertices[element_indices[i]].x
-            vertices[i][1] = self.vertices[element_indices[i]].y
-            vertices[i][2] = self.vertices[element_indices[i]].z    
+    #     for i in range(8):
+    #         vertices[i][0] = self.vertices[element_indices[i]].x
+    #         vertices[i][1] = self.vertices[element_indices[i]].y
+    #         vertices[i][2] = self.vertices[element_indices[i]].z    
                              
-        sampler = Q1Sampler3D()
-        result = sampler.sample_at_real_point(position, vertices, field_data)
+    #     sampler = Q1Sampler3D()
+    #     result = sampler.sample_at_real_point(position, vertices, field_data)
 
-        return result
+    #     return result
 
+    # @cython.boundscheck(False)
+    # @cython.wraparound(False)
+    # @cython.cdivision(True)
+    # @cython.initializedcheck(False)
+    # cdef np.ndarray _get_hit_position(self, double u, double v, int primID):
 
-    cdef np.ndarray _get_hit_position(self, double u, double v, int primID):
+    #     cdef Triangle tri
+    #     cdef Vertex v0, v1, v2
+    #     cdef int i
+    #     position = np.empty(3, dtype=np.float64)
+    #     vertices = np.empty((3, 3), dtype=np.float64)
+        
+    #     tri = self.indices[primID]
+    #     v0 = self.vertices[tri.v0]
+    #     v1 = self.vertices[tri.v1]
+    #     v2 = self.vertices[tri.v2]
 
-        cdef Triangle tri
-        position = np.empty(3, dtype=np.float64)
-        vertices = np.empty((3, 3), dtype=np.float64)
+    #     vertices[0][0] = v0.x
+    #     vertices[0][1] = v0.y
+    #     vertices[0][2] = v0.z
         
-        tri = self.indices[primID]
+    #     vertices[1][0] = v1.x
+    #     vertices[1][1] = v1.y
+    #     vertices[1][2] = v1.z
 
-        vertices[0][0] = self.vertices[tri.v0].x
-        vertices[0][1] = self.vertices[tri.v0].y
-        vertices[0][2] = self.vertices[tri.v0].z
+    #     vertices[2][0] = v2.x
+    #     vertices[2][1] = v2.y
+    #     vertices[2][2] = v2.z
+
+    #     for i in range(3):
+    #         position[i] = vertices[0][i]*(1.0 - u - v) + vertices[1][i]*u + vertices[2][i]*v
+
+    #     return position
+
+    # @cython.boundscheck(False)
+    # @cython.wraparound(False)
+    # @cython.cdivision(True)
+    # @cython.initializedcheck(False)
+    # def sample_triangular(self, double u, double v, int primID):
+
+    #     cdef int i, j
+    #     cdef double d0, d1, d2
+
+    #     i = primID / self.tpe
+    #     j = primID % self.tpe
+
+    #     d0 = self.field_data[i][self.tri_array[j][0]]
+    #     d1 = self.field_data[i][self.tri_array[j][1]]
+    #     d2 = self.field_data[i][self.tri_array[j][2]]
+
+    #     return d0*(1.0 - u - v) + d1*u + d2*v
         
-        vertices[1][0] = self.vertices[tri.v1].x
-        vertices[1][1] = self.vertices[tri.v1].y
-        vertices[1][2] = self.vertices[tri.v1].z
-
-        vertices[2][0] = self.vertices[tri.v2].x
-        vertices[2][1] = self.vertices[tri.v2].y
-        vertices[2][2] = self.vertices[tri.v2].z
-
-        position = vertices[0]*(1.0 - u - v) + vertices[1]*u + vertices[2]*v
-
-        return position
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    @cython.initializedcheck(False)
-    def sample_triangular(self, double u, double v, int primID):
-
-        cdef int i, j
-        cdef double d0, d1, d2
-
-        i = primID / self.tpe
-        j = primID % self.tpe
-
-        d0 = self.field_data[i][self.tri_array[j][0]]
-        d1 = self.field_data[i][self.tri_array[j][1]]
-        d2 = self.field_data[i][self.tri_array[j][2]]
-
-        return d0*(1.0 - u - v) + d1*u + d2*v
-        
-        
+    def __dealloc__(self):
+        if self.field_data is not NULL:
+            free(self.field_data)
+        if self.element_indices is not NULL:
+            free(self.element_indices)

diff -r 5891133d72e5edef19d9846023c2478030d5890c -r 2e22165ddbf63fcf02054c5a866345678989bc33 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -84,11 +84,7 @@
                 ray.mask = -1
                 ray.time = 0
                 rtcs.rtcIntersect(scene.scene_i, ray)
-                if ray.primID == -1:
-                    data[j] = 0.0
-                else:
-                    value = mesh.sample_triangular(ray.u, ray.v, ray.primID)
-                    data[j] = value
+                data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)
         else:
@@ -111,7 +107,7 @@
                 ray.mask = -1
                 ray.time = 0
                 rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.primID
+                data[j] = ray.time
             self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
             free(v_pos)
             free(v_dir)


https://bitbucket.org/yt_analysis/yt/commits/5d3014f9dc16/
Changeset:   5d3014f9dc16
Branch:      yt
User:        atmyers
Date:        2015-07-09 08:25:54+00:00
Summary:     much faster hexahedral interpolation
Affected #:  1 file

diff -r 2e22165ddbf63fcf02054c5a866345678989bc33 -r 5d3014f9dc16418d09ab8ff325edb90fdb829523 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -133,7 +133,7 @@
 @cython.cdivision(True)
 cdef double sample_at_real_point(double* vertices,
                                  double* field_values,
-                                 double* physical_x):
+                                 double* physical_x) nogil:
     
     cdef int i
     cdef double d, val
@@ -168,8 +168,11 @@
     val = sample_at_unit_point(x, field_values)
     return val
     
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef void sample_surface_hex(void* userPtr,
-                             rtcr.RTCRay& ray):
+                             rtcr.RTCRay& ray) nogil:
     cdef int ray_id, elem_id, i
     cdef double u, v, val
     cdef double d0, d1, d2
@@ -201,9 +204,12 @@
     val = sample_at_real_point(vertices, field_data, position)
     ray.time = val
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef void get_hit_position(double* position,
                            void* userPtr,
-                           rtcr.RTCRay& ray):
+                           rtcr.RTCRay& ray) nogil:
     cdef int primID, elemID, i
     cdef double[3][3] vertex_positions
     cdef Triangle tri


https://bitbucket.org/yt_analysis/yt/commits/a468329b9f8f/
Changeset:   a468329b9f8f
Branch:      yt
User:        atmyers
Date:        2015-07-09 19:43:15+00:00
Summary:     removing some old functions that have been supplanted
Affected #:  3 files

diff -r 5d3014f9dc16418d09ab8ff325edb90fdb829523 -r a468329b9f8fb5d2b0d28c0ed4ae1b638538f26c yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -3,13 +3,5 @@
 from pyembree.rtcore cimport Vec3f
 cimport cython
 
-cdef void sample_surface_hex(void* userPtr,
-                             rtcr.RTCRay& ray)
-
-cdef double get_value_trilinear(void* userPtr,
-                                rtcr.RTCRay& ray)
-
-cdef void maximum_intensity(void* userPtr, 
-                            rtcr.RTCRay& ray)
-
-cdef void sample_surface(void* userPtr, rtcr.RTCRay& ray)
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil

diff -r 5d3014f9dc16418d09ab8ff325edb90fdb829523 -r a468329b9f8fb5d2b0d28c0ed4ae1b638538f26c yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -109,25 +109,6 @@
         err = fmax(err, fabs(f[i])) 
     return err
 
-
-cdef double get_value_trilinear(void* userPtr,
-                                rtcr.RTCRay& ray):
-    cdef int ray_id
-    cdef double u, v, val
-    cdef double d0, d1, d2
-
-    data = <UserData*> userPtr
-    ray_id = ray.primID
-
-    u = ray.u
-    v = ray.v
-
-    d0 = data.field_data[ray_id].x
-    d1 = data.field_data[ray_id].y
-    d2 = data.field_data[ray_id].z
-
-    return d0*(1.0 - u - v) + d1*u + d2*v
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -171,8 +152,8 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef void sample_surface_hex(void* userPtr,
-                             rtcr.RTCRay& ray) nogil:
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil:
     cdef int ray_id, elem_id, i
     cdef double u, v, val
     cdef double d0, d1, d2
@@ -237,17 +218,38 @@
                       vertex_positions[2][i]*ray.v
     
 
-cdef void maximum_intensity(void* userPtr, 
-                            rtcr.RTCRay& ray):
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil:
+    cdef int ray_id, elem_id, i
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef double[8] field_data
+    cdef long[8] element_indices
+    cdef double[24] vertices
+    cdef double[3] position
+    cdef double result
+    cdef UserData* data
 
-    cdef double val = get_value_trilinear(userPtr, ray)
-    ray.time = max(ray.time, val)
-    ray.geomID = -1  # reject hit
+    data = <UserData*> userPtr
+    ray_id = ray.primID
+    if ray_id == -1:
+        return
 
+    elem_id = ray_id / data.tpe
 
-cdef void sample_surface(void* userPtr, 
-                         rtcr.RTCRay& ray):
+    get_hit_position(position, userPtr, ray)
+    
+    for i in range(8):
+        element_indices[i] = data.element_indices[elem_id*8+i]
+        field_data[i] = data.field_data[elem_id*8+i]
 
-    cdef double val = get_value_trilinear(userPtr, ray)
+    for i in range(8):
+        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
+
+    val = sample_at_real_point(vertices, field_data, position)
     ray.time = val
-

diff -r 5d3014f9dc16418d09ab8ff325edb90fdb829523 -r a468329b9f8fb5d2b0d28c0ed4ae1b638538f26c yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -7,9 +7,7 @@
 cimport pyembree.rtcore_geometry_user as rtcgu
 from yt.utilities.lib.element_mappings import Q1Sampler3D
 from filter_feedback_functions cimport \
-    maximum_intensity, \
-    sample_surface, \
-    sample_surface_hex
+    sample_hex
 from pyembree.rtcore cimport \
     Vertex, \
     Triangle, \
@@ -68,7 +66,6 @@
     cdef Vertex* vertices
     cdef Triangle* indices
     cdef unsigned int mesh
-#    cdef Vec3f* field_data
     cdef double* field_data
     cdef rtcg.RTCFilterFunc filter_func
     cdef int tpe, vpe
@@ -281,9 +278,7 @@
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':
-            self.filter_func = <rtcg.RTCFilterFunc> sample_surface_hex
-        elif sampler_type == 'maximum':
-            self.filter_func = <rtcg.RTCFilterFunc> maximum_intensity
+            self.filter_func = <rtcg.RTCFilterFunc> sample_hex
         else:
             print "Error - sampler type not implemented."
             raise NotImplementedError


https://bitbucket.org/yt_analysis/yt/commits/8cabf57d6c80/
Changeset:   8cabf57d6c80
Branch:      yt
User:        atmyers
Date:        2015-07-09 19:43:37+00:00
Summary:     removing some commented-out code
Affected #:  1 file

diff -r a468329b9f8fb5d2b0d28c0ed4ae1b638538f26c -r 8cabf57d6c80f31d326dee0018fffa0dbf98551c yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -286,93 +286,6 @@
         rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
                                               self.mesh,
                                               self.filter_func)
-
-    # @cython.boundscheck(False)
-    # @cython.wraparound(False)
-    # @cython.cdivision(True)
-    # @cython.initializedcheck(False)
-    # def sample_at_point(self, double u, double v, int primID):
-        
-    #     cdef int elemID, faceID
-    #     position = self._get_hit_position(u, v, primID)
-    #     vertices = np.empty((8, 3), dtype=np.float64)
-        
-    #     elemID = primID / self.tpe
-    #     # faceID = (primID % self.tpe) / 2
-        
-    #     # faces = np.array([[0, 1, 2, 3],
-    #     #                   [4, 5, 6, 7],
-    #     #                   [0, 1, 5, 4],
-    #     #                   [1, 2, 6, 5],
-    #     #                   [0, 3, 7, 4],
-    #     #                   [3, 2, 6, 7]])
-
-    #     # locs = faces[faceID]
-
-    #     element_indices = self.element_indices[elemID]
-    #     field_data = np.asarray(self.field_data[elemID], dtype=np.float64)
-
-    #     for i in range(8):
-    #         vertices[i][0] = self.vertices[element_indices[i]].x
-    #         vertices[i][1] = self.vertices[element_indices[i]].y
-    #         vertices[i][2] = self.vertices[element_indices[i]].z    
-                             
-    #     sampler = Q1Sampler3D()
-    #     result = sampler.sample_at_real_point(position, vertices, field_data)
-
-    #     return result
-
-    # @cython.boundscheck(False)
-    # @cython.wraparound(False)
-    # @cython.cdivision(True)
-    # @cython.initializedcheck(False)
-    # cdef np.ndarray _get_hit_position(self, double u, double v, int primID):
-
-    #     cdef Triangle tri
-    #     cdef Vertex v0, v1, v2
-    #     cdef int i
-    #     position = np.empty(3, dtype=np.float64)
-    #     vertices = np.empty((3, 3), dtype=np.float64)
-        
-    #     tri = self.indices[primID]
-    #     v0 = self.vertices[tri.v0]
-    #     v1 = self.vertices[tri.v1]
-    #     v2 = self.vertices[tri.v2]
-
-    #     vertices[0][0] = v0.x
-    #     vertices[0][1] = v0.y
-    #     vertices[0][2] = v0.z
-        
-    #     vertices[1][0] = v1.x
-    #     vertices[1][1] = v1.y
-    #     vertices[1][2] = v1.z
-
-    #     vertices[2][0] = v2.x
-    #     vertices[2][1] = v2.y
-    #     vertices[2][2] = v2.z
-
-    #     for i in range(3):
-    #         position[i] = vertices[0][i]*(1.0 - u - v) + vertices[1][i]*u + vertices[2][i]*v
-
-    #     return position
-
-    # @cython.boundscheck(False)
-    # @cython.wraparound(False)
-    # @cython.cdivision(True)
-    # @cython.initializedcheck(False)
-    # def sample_triangular(self, double u, double v, int primID):
-
-    #     cdef int i, j
-    #     cdef double d0, d1, d2
-
-    #     i = primID / self.tpe
-    #     j = primID % self.tpe
-
-    #     d0 = self.field_data[i][self.tri_array[j][0]]
-    #     d1 = self.field_data[i][self.tri_array[j][1]]
-    #     d2 = self.field_data[i][self.tri_array[j][2]]
-
-    #     return d0*(1.0 - u - v) + d1*u + d2*v
         
     def __dealloc__(self):
         if self.field_data is not NULL:


https://bitbucket.org/yt_analysis/yt/commits/97bcf62b0c9b/
Changeset:   97bcf62b0c9b
Branch:      yt
User:        atmyers
Date:        2015-07-09 19:50:01+00:00
Summary:     some re-naming and re-arranging
Affected #:  1 file

diff -r 8cabf57d6c80f31d326dee0018fffa0dbf98551c -r 97bcf62b0c9b81ed6a587ab0a165758f5eb8fbe7 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -20,10 +20,53 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void func(double* f,
-                      double* x, 
-                      double* vertices, 
-                      double* phys_x) nogil:
+cdef double maxnorm(double* f) nogil:
+    cdef double err
+    cdef int i
+    err = fabs(f[0])
+    for i in range(1, 2):
+        err = fmax(err, fabs(f[i])) 
+    return err
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void get_hit_position(double* position,
+                           void* userPtr,
+                           rtcr.RTCRay& ray) nogil:
+    cdef int primID, elemID, i
+    cdef double[3][3] vertex_positions
+    cdef Triangle tri
+    cdef UserData* data
+
+    primID = ray.primID
+    data = <UserData*> userPtr
+    tri = data.indices[primID]
+
+    vertex_positions[0][0] = data.vertices[tri.v0].x
+    vertex_positions[0][1] = data.vertices[tri.v0].y
+    vertex_positions[0][2] = data.vertices[tri.v0].z
+
+    vertex_positions[1][0] = data.vertices[tri.v1].x
+    vertex_positions[1][1] = data.vertices[tri.v1].y
+    vertex_positions[1][2] = data.vertices[tri.v1].z
+
+    vertex_positions[2][0] = data.vertices[tri.v2].x
+    vertex_positions[2][1] = data.vertices[tri.v2].y
+    vertex_positions[2][2] = data.vertices[tri.v2].z
+
+    for i in range(3):
+        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
+                      vertex_positions[1][i]*ray.u + \
+                      vertex_positions[2][i]*ray.v
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void linear_hex_f(double* f,
+                              double* x, 
+                              double* vertices, 
+                              double* phys_x) nogil:
     
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp
@@ -49,12 +92,12 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void J(double* r,
-                   double* s,
-                   double* t,
-                   double* x, 
-                   double* v, 
-                   double* phys_x) nogil:
+cdef inline void linear_hex_J(double* r,
+                              double* s,
+                              double* t,
+                              double* x, 
+                              double* v, 
+                              double* phys_x) nogil:
     
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp
@@ -84,7 +127,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef double sample_at_unit_point(double* coord, double* vals) nogil:
+cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil:
     cdef double F, rm, rp, sm, sp, tm, tp
     
     rm = 1.0 - coord[0]
@@ -98,23 +141,13 @@
         vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rm*sp*tp + vals[7]*rp*sp*tp
     return 0.125*F
                 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double maxnorm(double* f) nogil:
-    cdef double err
-    cdef int i
-    err = fabs(f[0])
-    for i in range(1, 2):
-        err = fmax(err, fabs(f[i])) 
-    return err
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef double sample_at_real_point(double* vertices,
-                                 double* field_values,
-                                 double* physical_x) nogil:
+cdef double sample_hex_at_real_point(double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil:
     
     cdef int i
     cdef double d, val
@@ -132,22 +165,23 @@
         x[i] = 0.0
     
     # initial error norm
-    func(f, x, vertices, physical_x)
+    linear_hex_f(f, x, vertices, physical_x)
     err = maxnorm(f)  
    
     # begin Newton iteration
     while (err > tolerance and iterations < 10):
-        J(r, s, t, x, vertices, physical_x)
+        linear_hex_J(r, s, t, x, vertices, physical_x)
         d = determinant_3x3(r, s, t)
         x[0] = x[0] - (determinant_3x3(f, s, t)/d)
         x[1] = x[1] - (determinant_3x3(r, f, t)/d)
         x[2] = x[2] - (determinant_3x3(r, s, f)/d)
-        func(f, x, vertices, physical_x)        
+        linear_hex_f(f, x, vertices, physical_x)        
         err = maxnorm(f)
         iterations += 1
         
-    val = sample_at_unit_point(x, field_values)
+    val = sample_hex_at_unit_point(x, field_values)
     return val
+
     
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -182,41 +216,9 @@
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    val = sample_at_real_point(vertices, field_data, position)
+    val = sample_hex_at_real_point(vertices, field_data, position)
     ray.time = val
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void get_hit_position(double* position,
-                           void* userPtr,
-                           rtcr.RTCRay& ray) nogil:
-    cdef int primID, elemID, i
-    cdef double[3][3] vertex_positions
-    cdef Triangle tri
-    cdef UserData* data
-
-    primID = ray.primID
-    data = <UserData*> userPtr
-    tri = data.indices[primID]
-
-    vertex_positions[0][0] = data.vertices[tri.v0].x
-    vertex_positions[0][1] = data.vertices[tri.v0].y
-    vertex_positions[0][2] = data.vertices[tri.v0].z
-
-    vertex_positions[1][0] = data.vertices[tri.v1].x
-    vertex_positions[1][1] = data.vertices[tri.v1].y
-    vertex_positions[1][2] = data.vertices[tri.v1].z
-
-    vertex_positions[2][0] = data.vertices[tri.v2].x
-    vertex_positions[2][1] = data.vertices[tri.v2].y
-    vertex_positions[2][2] = data.vertices[tri.v2].z
-
-    for i in range(3):
-        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
-                      vertex_positions[1][i]*ray.u + \
-                      vertex_positions[2][i]*ray.v
-    
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -251,5 +253,5 @@
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    val = sample_at_real_point(vertices, field_data, position)
+    val = sample_hex_at_real_point(vertices, field_data, position)
     ray.time = val


https://bitbucket.org/yt_analysis/yt/commits/376b903132bb/
Changeset:   376b903132bb
Branch:      yt
User:        atmyers
Date:        2015-07-09 20:01:54+00:00
Summary:     adding tetra filter feedback functions
Affected #:  2 files

diff -r 97bcf62b0c9b81ed6a587ab0a165758f5eb8fbe7 -r 376b903132bb21f8d0e69ac4edde6ca852f42b0b yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ b/yt/utilities/lib/filter_feedback_functions.pxd
@@ -5,3 +5,8 @@
 
 cdef void sample_hex(void* userPtr,
                      rtcr.RTCRay& ray) nogil
+
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil
+
+

diff -r 97bcf62b0c9b81ed6a587ab0a165758f5eb8fbe7 -r 376b903132bb21f8d0e69ac4edde6ca852f42b0b yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -223,16 +223,47 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
+    return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double tetra_real_to_mapped(double* mapped_coord,
+                                 double* vertices,
+                                 double* physical_coord) nogil:
+    cdef int i
+    cdef double d
+    cdef double[3] bvec
+    cdef double[3] col0
+    cdef double[3] col1
+    cdef double[3] col2
+    
+    for i in range(3):
+        bvec[i] = physical_coord[i]   - vertices[9 + i]
+        col0[i] = vertices[0 + i]     - vertices[9 + i]
+        col1[i] = vertices[3 + i]     - vertices[9 + i]
+        col2[i] = vertices[6 + i]     - vertices[9 + i]
+        
+    d = determinant_3x3(col0, col1, col2)
+    mapped_coord[0] = determinant_3x3(bvec, col1, col2)/d
+    mapped_coord[1] = determinant_3x3(col0, bvec, col2)/d
+    mapped_coord[2] = determinant_3x3(col0, col1, bvec)/d
+    mapped_coord[3] = 1.0 - mapped_coord[0] - mapped_coord[1] - mapped_coord[2]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 cdef void sample_tetra(void* userPtr,
                        rtcr.RTCRay& ray) nogil:
+
     cdef int ray_id, elem_id, i
-    cdef double u, v, val
-    cdef double d0, d1, d2
-    cdef double[8] field_data
-    cdef long[8] element_indices
-    cdef double[24] vertices
+    cdef double val
+    cdef double[4] field_data
+    cdef long[4] element_indices
+    cdef double[12] vertices
     cdef double[3] position
-    cdef double result
+    cdef double[4] mapped_coord
     cdef UserData* data
 
     data = <UserData*> userPtr
@@ -240,18 +271,19 @@
     if ray_id == -1:
         return
 
-    elem_id = ray_id / data.tpe
-
     get_hit_position(position, userPtr, ray)
     
-    for i in range(8):
-        element_indices[i] = data.element_indices[elem_id*8+i]
-        field_data[i] = data.field_data[elem_id*8+i]
-
-    for i in range(8):
+    elem_id = ray_id / data.tpe
+    for i in range(4):
+        element_indices[i] = data.element_indices[elem_id*4+i]
+        field_data[i] = data.field_data[elem_id*4+i]
         vertices[i*3] = data.vertices[element_indices[i]].x
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    val = sample_hex_at_real_point(vertices, field_data, position)
+    tetra_real_to_mapped(mapped_coord, 
+                         vertices,
+                         position)    
+        
+    val = sample_tetra_at_unit_point(mapped_coord, field_data)
     ray.time = val


https://bitbucket.org/yt_analysis/yt/commits/3852bf0366ad/
Changeset:   3852bf0366ad
Branch:      yt
User:        atmyers
Date:        2015-07-09 20:03:42+00:00
Summary:     adding tetra filter feedback functions
Affected #:  2 files

diff -r 376b903132bb21f8d0e69ac4edde6ca852f42b0b -r 3852bf0366ada5806be914592efe9450798220f5 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ b/yt/utilities/lib/filter_feedback_functions.pyx
@@ -7,6 +7,7 @@
 cimport cython
 from libc.math cimport fabs, fmax
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -17,6 +18,7 @@
            col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
            col0[2]*col1[0]*col2[1] + col0[2]*col1[1]*col2[0]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -28,6 +30,7 @@
         err = fmax(err, fabs(f[i])) 
     return err
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -60,6 +63,7 @@
                       vertex_positions[1][i]*ray.u + \
                       vertex_positions[2][i]*ray.v
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -89,6 +93,7 @@
              + vertices[21 + i]*rp*sp*tp \
              - 8.0*phys_x[i]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -226,6 +231,7 @@
 cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
     return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -251,6 +257,7 @@
     mapped_coord[2] = determinant_3x3(col0, col1, bvec)/d
     mapped_coord[3] = 1.0 - mapped_coord[0] - mapped_coord[1] - mapped_coord[2]
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)

diff -r 376b903132bb21f8d0e69ac4edde6ca852f42b0b -r 3852bf0366ada5806be914592efe9450798220f5 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -7,7 +7,8 @@
 cimport pyembree.rtcore_geometry_user as rtcgu
 from yt.utilities.lib.element_mappings import Q1Sampler3D
 from filter_feedback_functions cimport \
-    sample_hex
+    sample_hex, \
+    sample_tetra
 from pyembree.rtcore cimport \
     Vertex, \
     Triangle, \


https://bitbucket.org/yt_analysis/yt/commits/2e41e2142b57/
Changeset:   2e41e2142b57
Branch:      yt
User:        atmyers
Date:        2015-07-09 20:12:57+00:00
Summary:     more renaming
Affected #:  2 files

diff -r 3852bf0366ada5806be914592efe9450798220f5 -r 2e41e2142b57df9387e296ae084446cea5652044 yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -3,7 +3,7 @@
     Triangle, \
     Vec3f
 
-ctypedef struct UserData:
+ctypedef struct MeshDataContainer:
     Vertex* vertices
     Triangle* indices
     double* field_data

diff -r 3852bf0366ada5806be914592efe9450798220f5 -r 2e41e2142b57df9387e296ae084446cea5652044 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -5,7 +5,6 @@
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
-from yt.utilities.lib.element_mappings import Q1Sampler3D
 from filter_feedback_functions cimport \
     sample_hex, \
     sample_tetra
@@ -72,7 +71,7 @@
     cdef int tpe, vpe
     cdef int[MAX_NUM_TRI][3] tri_array
     cdef long* element_indices
-    cdef UserData user_data
+    cdef MeshDataContainer datac
 
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices,
@@ -266,16 +265,16 @@
 
         self.field_data = field_data
 
-        cdef UserData user_data
-        user_data.vertices = self.vertices
-        user_data.indices = self.indices
-        user_data.field_data = self.field_data
-        user_data.element_indices = self.element_indices
-        user_data.tpe = self.tpe
-        user_data.vpe = self.vpe
-        self.user_data = user_data
+        cdef MeshDataContainer datac
+        datac.vertices = self.vertices
+        datac.indices = self.indices
+        datac.field_data = self.field_data
+        datac.element_indices = self.element_indices
+        datac.tpe = self.tpe
+        datac.vpe = self.vpe
+        self.datac = datac
         
-        rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.user_data)
+        rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
         if sampler_type == 'surface':


https://bitbucket.org/yt_analysis/yt/commits/c10e8806c857/
Changeset:   c10e8806c857
Branch:      yt
User:        atmyers
Date:        2015-07-09 20:23:03+00:00
Summary:     a more intuitive name for this file
Affected #:  6 files

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/filter_feedback_functions.pxd
--- a/yt/utilities/lib/filter_feedback_functions.pxd
+++ /dev/null
@@ -1,12 +0,0 @@
-cimport pyembree.rtcore as rtc
-cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f
-cimport cython
-
-cdef void sample_hex(void* userPtr,
-                     rtcr.RTCRay& ray) nogil
-
-cdef void sample_tetra(void* userPtr,
-                       rtcr.RTCRay& ray) nogil
-
-

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/filter_feedback_functions.pyx
--- a/yt/utilities/lib/filter_feedback_functions.pyx
+++ /dev/null
@@ -1,296 +0,0 @@
-cimport pyembree.rtcore as rtc
-cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f, Triangle, Vertex
-from yt.utilities.lib.mesh_construction cimport UserData
-from yt.utilities.lib.element_mappings import Q1Sampler3D
-cimport numpy as np
-cimport cython
-from libc.math cimport fabs, fmax
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef inline double determinant_3x3(double* col0, 
-                                   double* col1, 
-                                   double* col2) nogil:
-    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
-           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
-           col0[2]*col1[0]*col2[1] + col0[2]*col1[1]*col2[0]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef double maxnorm(double* f) nogil:
-    cdef double err
-    cdef int i
-    err = fabs(f[0])
-    for i in range(1, 2):
-        err = fmax(err, fabs(f[i])) 
-    return err
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef void get_hit_position(double* position,
-                           void* userPtr,
-                           rtcr.RTCRay& ray) nogil:
-    cdef int primID, elemID, i
-    cdef double[3][3] vertex_positions
-    cdef Triangle tri
-    cdef UserData* data
-
-    primID = ray.primID
-    data = <UserData*> userPtr
-    tri = data.indices[primID]
-
-    vertex_positions[0][0] = data.vertices[tri.v0].x
-    vertex_positions[0][1] = data.vertices[tri.v0].y
-    vertex_positions[0][2] = data.vertices[tri.v0].z
-
-    vertex_positions[1][0] = data.vertices[tri.v1].x
-    vertex_positions[1][1] = data.vertices[tri.v1].y
-    vertex_positions[1][2] = data.vertices[tri.v1].z
-
-    vertex_positions[2][0] = data.vertices[tri.v2].x
-    vertex_positions[2][1] = data.vertices[tri.v2].y
-    vertex_positions[2][2] = data.vertices[tri.v2].z
-
-    for i in range(3):
-        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
-                      vertex_positions[1][i]*ray.u + \
-                      vertex_positions[2][i]*ray.v
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef inline void linear_hex_f(double* f,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:
-    
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        f[i] = vertices[0 + i]*rm*sm*tm \
-             + vertices[3 + i]*rp*sm*tm \
-             + vertices[6 + i]*rm*sp*tm \
-             + vertices[9 + i]*rp*sp*tm \
-             + vertices[12 + i]*rm*sm*tp \
-             + vertices[15 + i]*rp*sm*tp \
-             + vertices[18 + i]*rm*sp*tp \
-             + vertices[21 + i]*rp*sp*tp \
-             - 8.0*phys_x[i]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef inline void linear_hex_J(double* r,
-                              double* s,
-                              double* t,
-                              double* x, 
-                              double* v, 
-                              double* phys_x) nogil:
-    
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  - \
-                sp*tm*v[6 + i]  + sp*tm*v[9 + i]  - \
-                sm*tp*v[12 + i] + sm*tp*v[15 + i] - \
-                sp*tp*v[18 + i] + sp*tp*v[21 + i]
-        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
-                rm*tm*v[6 + i]  + rp*tm*v[9 + i]  - \
-                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
-                rm*tp*v[18 + i] + rp*tp*v[21 + i]
-        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
-                rm*sp*v[6 + i]  - rp*sp*v[9 + i]  + \
-                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
-                rm*sp*v[18 + i] + rp*sp*v[21 + i]
-                
-                
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil:
-    cdef double F, rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - coord[0]
-    rp = 1.0 + coord[0]
-    sm = 1.0 - coord[1]
-    sp = 1.0 + coord[1]
-    tm = 1.0 - coord[2]
-    tp = 1.0 + coord[2]
-    
-    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rm*sp*tm + vals[3]*rp*sp*tm + \
-        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rm*sp*tp + vals[7]*rp*sp*tp
-    return 0.125*F
-                
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef double sample_hex_at_real_point(double* vertices,
-                                     double* field_values,
-                                     double* physical_x) nogil:
-    
-    cdef int i
-    cdef double d, val
-    cdef double[3] f
-    cdef double[3] r
-    cdef double[3] s
-    cdef double[3] t
-    cdef double[3] x
-    cdef double tolerance = 1.0e-9
-    cdef int iterations = 0
-    cdef double err
-   
-    # initial guess
-    for i in range(3):
-        x[i] = 0.0
-    
-    # initial error norm
-    linear_hex_f(f, x, vertices, physical_x)
-    err = maxnorm(f)  
-   
-    # begin Newton iteration
-    while (err > tolerance and iterations < 10):
-        linear_hex_J(r, s, t, x, vertices, physical_x)
-        d = determinant_3x3(r, s, t)
-        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
-        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
-        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
-        linear_hex_f(f, x, vertices, physical_x)        
-        err = maxnorm(f)
-        iterations += 1
-        
-    val = sample_hex_at_unit_point(x, field_values)
-    return val
-
-    
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef void sample_hex(void* userPtr,
-                     rtcr.RTCRay& ray) nogil:
-    cdef int ray_id, elem_id, i
-    cdef double u, v, val
-    cdef double d0, d1, d2
-    cdef double[8] field_data
-    cdef long[8] element_indices
-    cdef double[24] vertices
-    cdef double[3] position
-    cdef double result
-    cdef UserData* data
-
-    data = <UserData*> userPtr
-    ray_id = ray.primID
-    if ray_id == -1:
-        return
-
-    elem_id = ray_id / data.tpe
-
-    get_hit_position(position, userPtr, ray)
-    
-    for i in range(8):
-        element_indices[i] = data.element_indices[elem_id*8+i]
-        field_data[i] = data.field_data[elem_id*8+i]
-
-    for i in range(8):
-        vertices[i*3] = data.vertices[element_indices[i]].x
-        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
-        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
-
-    val = sample_hex_at_real_point(vertices, field_data, position)
-    ray.time = val
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
-    return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef double tetra_real_to_mapped(double* mapped_coord,
-                                 double* vertices,
-                                 double* physical_coord) nogil:
-    cdef int i
-    cdef double d
-    cdef double[3] bvec
-    cdef double[3] col0
-    cdef double[3] col1
-    cdef double[3] col2
-    
-    for i in range(3):
-        bvec[i] = physical_coord[i]   - vertices[9 + i]
-        col0[i] = vertices[0 + i]     - vertices[9 + i]
-        col1[i] = vertices[3 + i]     - vertices[9 + i]
-        col2[i] = vertices[6 + i]     - vertices[9 + i]
-        
-    d = determinant_3x3(col0, col1, col2)
-    mapped_coord[0] = determinant_3x3(bvec, col1, col2)/d
-    mapped_coord[1] = determinant_3x3(col0, bvec, col2)/d
-    mapped_coord[2] = determinant_3x3(col0, col1, bvec)/d
-    mapped_coord[3] = 1.0 - mapped_coord[0] - mapped_coord[1] - mapped_coord[2]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
-cdef void sample_tetra(void* userPtr,
-                       rtcr.RTCRay& ray) nogil:
-
-    cdef int ray_id, elem_id, i
-    cdef double val
-    cdef double[4] field_data
-    cdef long[4] element_indices
-    cdef double[12] vertices
-    cdef double[3] position
-    cdef double[4] mapped_coord
-    cdef UserData* data
-
-    data = <UserData*> userPtr
-    ray_id = ray.primID
-    if ray_id == -1:
-        return
-
-    get_hit_position(position, userPtr, ray)
-    
-    elem_id = ray_id / data.tpe
-    for i in range(4):
-        element_indices[i] = data.element_indices[elem_id*4+i]
-        field_data[i] = data.field_data[elem_id*4+i]
-        vertices[i*3] = data.vertices[element_indices[i]].x
-        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
-        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
-
-    tetra_real_to_mapped(mapped_coord, 
-                         vertices,
-                         position)    
-        
-    val = sample_tetra_at_unit_point(mapped_coord, field_data)
-    ray.time = val

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -5,7 +5,7 @@
 cimport pyembree.rtcore_geometry as rtcg
 cimport pyembree.rtcore_ray as rtcr
 cimport pyembree.rtcore_geometry_user as rtcgu
-from filter_feedback_functions cimport \
+from mesh_samplers cimport \
     sample_hex, \
     sample_tetra
 from pyembree.rtcore cimport \

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/mesh_samplers.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -0,0 +1,12 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f
+cimport cython
+
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil
+
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil
+
+

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/mesh_samplers.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -0,0 +1,295 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f, Triangle, Vertex
+from yt.utilities.lib.mesh_construction cimport MeshDataContainer
+cimport numpy as np
+cimport cython
+from libc.math cimport fabs, fmax
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline double determinant_3x3(double* col0, 
+                                   double* col1, 
+                                   double* col2) nogil:
+    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
+           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
+           col0[2]*col1[0]*col2[1] + col0[2]*col1[1]*col2[0]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double maxnorm(double* f) nogil:
+    cdef double err
+    cdef int i
+    err = fabs(f[0])
+    for i in range(1, 2):
+        err = fmax(err, fabs(f[i])) 
+    return err
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void get_hit_position(double* position,
+                           void* userPtr,
+                           rtcr.RTCRay& ray) nogil:
+    cdef int primID, elemID, i
+    cdef double[3][3] vertex_positions
+    cdef Triangle tri
+    cdef MeshDataContainer* data
+
+    primID = ray.primID
+    data = <MeshDataContainer*> userPtr
+    tri = data.indices[primID]
+
+    vertex_positions[0][0] = data.vertices[tri.v0].x
+    vertex_positions[0][1] = data.vertices[tri.v0].y
+    vertex_positions[0][2] = data.vertices[tri.v0].z
+
+    vertex_positions[1][0] = data.vertices[tri.v1].x
+    vertex_positions[1][1] = data.vertices[tri.v1].y
+    vertex_positions[1][2] = data.vertices[tri.v1].z
+
+    vertex_positions[2][0] = data.vertices[tri.v2].x
+    vertex_positions[2][1] = data.vertices[tri.v2].y
+    vertex_positions[2][2] = data.vertices[tri.v2].z
+
+    for i in range(3):
+        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
+                      vertex_positions[1][i]*ray.u + \
+                      vertex_positions[2][i]*ray.v
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void linear_hex_f(double* f,
+                              double* x, 
+                              double* vertices, 
+                              double* phys_x) nogil:
+    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        f[i] = vertices[0 + i]*rm*sm*tm \
+             + vertices[3 + i]*rp*sm*tm \
+             + vertices[6 + i]*rm*sp*tm \
+             + vertices[9 + i]*rp*sp*tm \
+             + vertices[12 + i]*rm*sm*tp \
+             + vertices[15 + i]*rp*sm*tp \
+             + vertices[18 + i]*rm*sp*tp \
+             + vertices[21 + i]*rp*sp*tp \
+             - 8.0*phys_x[i]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void linear_hex_J(double* r,
+                              double* s,
+                              double* t,
+                              double* x, 
+                              double* v, 
+                              double* phys_x) nogil:
+    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  - \
+                sp*tm*v[6 + i]  + sp*tm*v[9 + i]  - \
+                sm*tp*v[12 + i] + sm*tp*v[15 + i] - \
+                sp*tp*v[18 + i] + sp*tp*v[21 + i]
+        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
+                rm*tm*v[6 + i]  + rp*tm*v[9 + i]  - \
+                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
+                rm*tp*v[18 + i] + rp*tp*v[21 + i]
+        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
+                rm*sp*v[6 + i]  - rp*sp*v[9 + i]  + \
+                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
+                rm*sp*v[18 + i] + rp*sp*v[21 + i]
+                
+                
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil:
+    cdef double F, rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - coord[0]
+    rp = 1.0 + coord[0]
+    sm = 1.0 - coord[1]
+    sp = 1.0 + coord[1]
+    tm = 1.0 - coord[2]
+    tp = 1.0 + coord[2]
+    
+    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rm*sp*tm + vals[3]*rp*sp*tm + \
+        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rm*sp*tp + vals[7]*rp*sp*tp
+    return 0.125*F
+                
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double sample_hex_at_real_point(double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil:
+    
+    cdef int i
+    cdef double d, val
+    cdef double[3] f
+    cdef double[3] r
+    cdef double[3] s
+    cdef double[3] t
+    cdef double[3] x
+    cdef double tolerance = 1.0e-9
+    cdef int iterations = 0
+    cdef double err
+   
+    # initial guess
+    for i in range(3):
+        x[i] = 0.0
+    
+    # initial error norm
+    linear_hex_f(f, x, vertices, physical_x)
+    err = maxnorm(f)  
+   
+    # begin Newton iteration
+    while (err > tolerance and iterations < 10):
+        linear_hex_J(r, s, t, x, vertices, physical_x)
+        d = determinant_3x3(r, s, t)
+        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
+        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
+        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
+        linear_hex_f(f, x, vertices, physical_x)        
+        err = maxnorm(f)
+        iterations += 1
+        
+    val = sample_hex_at_unit_point(x, field_values)
+    return val
+
+    
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil:
+    cdef int ray_id, elem_id, i
+    cdef double u, v, val
+    cdef double d0, d1, d2
+    cdef double[8] field_data
+    cdef long[8] element_indices
+    cdef double[24] vertices
+    cdef double[3] position
+    cdef double result
+    cdef MeshDataContainer* data
+
+    data = <MeshDataContainer*> userPtr
+    ray_id = ray.primID
+    if ray_id == -1:
+        return
+
+    elem_id = ray_id / data.tpe
+
+    get_hit_position(position, userPtr, ray)
+    
+    for i in range(8):
+        element_indices[i] = data.element_indices[elem_id*8+i]
+        field_data[i] = data.field_data[elem_id*8+i]
+
+    for i in range(8):
+        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
+
+    val = sample_hex_at_real_point(vertices, field_data, position)
+    ray.time = val
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
+    return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double tetra_real_to_mapped(double* mapped_coord,
+                                 double* vertices,
+                                 double* physical_coord) nogil:
+    cdef int i
+    cdef double d
+    cdef double[3] bvec
+    cdef double[3] col0
+    cdef double[3] col1
+    cdef double[3] col2
+    
+    for i in range(3):
+        bvec[i] = physical_coord[i]   - vertices[9 + i]
+        col0[i] = vertices[0 + i]     - vertices[9 + i]
+        col1[i] = vertices[3 + i]     - vertices[9 + i]
+        col2[i] = vertices[6 + i]     - vertices[9 + i]
+        
+    d = determinant_3x3(col0, col1, col2)
+    mapped_coord[0] = determinant_3x3(bvec, col1, col2)/d
+    mapped_coord[1] = determinant_3x3(col0, bvec, col2)/d
+    mapped_coord[2] = determinant_3x3(col0, col1, bvec)/d
+    mapped_coord[3] = 1.0 - mapped_coord[0] - mapped_coord[1] - mapped_coord[2]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil:
+
+    cdef int ray_id, elem_id, i
+    cdef double val
+    cdef double[4] field_data
+    cdef long[4] element_indices
+    cdef double[12] vertices
+    cdef double[3] position
+    cdef double[4] mapped_coord
+    cdef MeshDataContainer* data
+
+    data = <MeshDataContainer*> userPtr
+    ray_id = ray.primID
+    if ray_id == -1:
+        return
+
+    get_hit_position(position, userPtr, ray)
+    
+    elem_id = ray_id / data.tpe
+    for i in range(4):
+        element_indices[i] = data.element_indices[elem_id*4+i]
+        field_data[i] = data.field_data[elem_id*4+i]
+        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
+
+    tetra_real_to_mapped(mapped_coord, 
+                         vertices,
+                         position)    
+        
+    val = sample_tetra_at_unit_point(mapped_coord, field_data)
+    ray.time = val

diff -r 2e41e2142b57df9387e296ae084446cea5652044 -r c10e8806c857141116adae002f2c85558dfba090 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -178,11 +178,11 @@
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
                              depends=["yt/utilities/lib/mesh_traversal.pxd"])
-        config.add_extension("filter_feedback_functions",
-                             ["yt/utilities/lib/filter_feedback_functions.pyx"],
+        config.add_extension("mesh_samplers",
+                             ["yt/utilities/lib/mesh_samplers.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/filter_feedback_functions.pxd"])
+                             depends=["yt/utilities/lib/mesh_samplers.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":


https://bitbucket.org/yt_analysis/yt/commits/c8a6fc0cdf56/
Changeset:   c8a6fc0cdf56
Branch:      yt
User:        atmyers
Date:        2015-07-09 23:41:45+00:00
Summary:     fixing a bug in the determinant code
Affected #:  1 file

diff -r c10e8806c857141116adae002f2c85558dfba090 -r c8a6fc0cdf5680c456d884672bedeb3b47830a60 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -15,7 +15,7 @@
                                    double* col2) nogil:
     return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
            col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
-           col0[2]*col1[0]*col2[1] + col0[2]*col1[1]*col2[0]
+           col0[2]*col1[0]*col2[1] - col0[2]*col1[1]*col2[0]
 
 
 @cython.boundscheck(False)
@@ -193,13 +193,11 @@
 cdef void sample_hex(void* userPtr,
                      rtcr.RTCRay& ray) nogil:
     cdef int ray_id, elem_id, i
-    cdef double u, v, val
-    cdef double d0, d1, d2
+    cdef double val
     cdef double[8] field_data
     cdef long[8] element_indices
     cdef double[24] vertices
     cdef double[3] position
-    cdef double result
     cdef MeshDataContainer* data
 
     data = <MeshDataContainer*> userPtr
@@ -227,6 +225,20 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def test_hex_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                     np.ndarray[np.float64_t, ndim=1] field_values,
+                     np.ndarray[np.float64_t, ndim=1] physical_x):
+    
+    cdef double val
+   
+    val = sample_hex_at_real_point(<double*> vertices.data, 
+                                   <double*> field_values.data,
+                                   <double*> physical_x.data)
+    return val
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
     return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
 
@@ -293,3 +305,20 @@
         
     val = sample_tetra_at_unit_point(mapped_coord, field_data)
     ray.time = val
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def test_tetra_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                       np.ndarray[np.float64_t, ndim=1] field_values,
+                       np.ndarray[np.float64_t, ndim=1] physical_x):
+
+    cdef double val
+    cdef double[4] mapped_coord
+    tetra_real_to_mapped(mapped_coord, 
+                         <double*> vertices.data,
+                         <double*> physical_x.data)
+
+    val = sample_tetra_at_unit_point(mapped_coord, 
+                                     <double*> field_values.data)
+    return val


https://bitbucket.org/yt_analysis/yt/commits/b45fad4f3e51/
Changeset:   b45fad4f3e51
Branch:      yt
User:        atmyers
Date:        2015-07-09 23:42:01+00:00
Summary:     tests of the mesh samplers
Affected #:  1 file

diff -r c8a6fc0cdf5680c456d884672bedeb3b47830a60 -r b45fad4f3e51246a89c553d88f127afc918488fd yt/utilities/lib/tests/test_mesh_samplers.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_mesh_samplers.py
@@ -0,0 +1,59 @@
+"""
+This file contains tests of the intracell interpolation code contained in
+yt/utilities/lib/mesh_samplers.pyx.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import numpy as np
+
+from yt.testing import assert_almost_equal
+from yt.utilities.lib.mesh_samplers import \
+    test_hex_sampler, \
+    test_tetra_sampler
+
+
+def check_all_vertices(sampler, vertices, field_values):
+    NV = vertices.shape[0]
+    NDIM = vertices.shape[1]
+    x = np.empty(NDIM)
+    for i in range(NV):
+        x = vertices[i]
+        val = sampler(vertices, field_values, x)
+        assert_almost_equal(val, field_values[i])
+
+
+def test_P1Sampler3D():
+    vertices = np.array([[0.1,  0.1,  0.1],
+                         [0.6,  0.3,  0.2],
+                         [0.2,  0.7,  0.2],
+                         [0.4,  0.4,  0.7]], dtype=np.float64)
+
+    field_values = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float64)
+
+    check_all_vertices(test_tetra_sampler, vertices, field_values)
+
+
+def test_Q1Sampler3D():
+    vertices = np.array([[2.00657905, 0.6888599,  1.4375],
+                         [1.8658198,  1.00973171, 1.4375],
+                         [1.97881594, 1.07088163, 1.4375],
+                         [2.12808879, 0.73057381, 1.4375],
+                         [2.00657905, 0.6888599,  1.2   ],
+                         [1.8658198,  1.00973171, 1.2   ],
+                         [1.97881594, 1.07088163, 1.2   ],
+                         [2.12808879, 0.73057381, 1.2   ]])
+
+    field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
+                             0.54464296, 0.54464149, 0.5446415, 0.54464296])
+
+    check_all_vertices(test_hex_sampler, vertices, field_values)


https://bitbucket.org/yt_analysis/yt/commits/2d022f41f36d/
Changeset:   2d022f41f36d
Branch:      yt
User:        atmyers
Date:        2015-07-10 00:07:25+00:00
Summary:     restoring some things to working order
Affected #:  4 files

diff -r b45fad4f3e51246a89c553d88f127afc918488fd -r 2d022f41f36d0aa028de510ce83348a958bedd80 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -185,8 +185,7 @@
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices, 
                  np.ndarray indices,
-                 np.ndarray data,
-                 sampler_type):
+                 np.ndarray data):
 
         # We need now to figure out if we've been handed quads or tetrahedra.
         # If it's quads, we can build the mesh slightly differently.
@@ -207,7 +206,7 @@
         self.element_indices = NULL
         self._build_from_indices(scene, vertices, indices)
         self._set_field_data(scene, data)
-        self._set_sampler_type(scene, sampler_type)
+        self._set_sampler_type(scene)
 
     cdef void _build_from_indices(self, YTEmbreeScene scene,
                                   np.ndarray vertices_in,
@@ -276,9 +275,11 @@
         
         rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
 
-    cdef void _set_sampler_type(self, YTEmbreeScene scene, sampler_type):
-        if sampler_type == 'surface':
+    cdef void _set_sampler_type(self, YTEmbreeScene scene):
+        if self.vpe == 8:
             self.filter_func = <rtcg.RTCFilterFunc> sample_hex
+        elif self.vpe == 4:
+            self.filter_func = <rtcg.RTCFilterFunc> sample_tetra
         else:
             print "Error - sampler type not implemented."
             raise NotImplementedError

diff -r b45fad4f3e51246a89c553d88f127afc918488fd -r 2d022f41f36d0aa028de510ce83348a958bedd80 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -1,3 +1,19 @@
+"""
+This file contains coordinate mappings between physical coordinates and those
+defined on unit elements, as well as functions that do the corresponding intracell
+interpolation.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 cimport pyembree.rtcore as rtc
 cimport pyembree.rtcore_ray as rtcr
 from pyembree.rtcore cimport Vec3f, Triangle, Vertex

diff -r b45fad4f3e51246a89c553d88f127afc918488fd -r 2d022f41f36d0aa028de510ce83348a958bedd80 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -33,7 +33,6 @@
     @cython.cdivision(True)
     def __call__(self, 
                  YTEmbreeScene scene,
-                 mesh,
                  int num_threads = 0):
         '''
 

diff -r b45fad4f3e51246a89c553d88f127afc918488fd -r 2d022f41f36d0aa028de510ce83348a958bedd80 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -232,7 +232,7 @@
     _image = None
     data_source = None
 
-    def __init__(self, data_source, field, sampler_type='surface'):
+    def __init__(self, data_source, field):
         r"""Initialize a new unstructured source for rendering.
 
         A :class:`MeshSource` provides the framework to volume render
@@ -245,13 +245,6 @@
             data object or dataset.
         fields : string
             The name of the field to be rendered.
-        sampler_type : string, either 'surface' or 'maximum'
-            The type of volume rendering to use for this MeshSource.
-            If 'surface', each ray will return the value of the field
-            at the point at which it intersects the surface mesh.
-            If 'maximum', each ray will return the largest value of
-            any vertex on any element that the ray intersects.
-            Default is 'surface'.
 
         Examples
         --------
@@ -264,7 +257,6 @@
         self.field = field
         self.mesh = None
         self.current_image = None
-        self.sampler_type = sampler_type
 
         # Error checking
         assert(self.field is not None)
@@ -290,20 +282,17 @@
         # convert the indices to zero-based indexing
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
-        mylog.debug("Using field %s and sampler_type %s" % (self.field,
-                                                            self.sampler_type))
         self.mesh = ElementMesh(self.scene,
                                 vertices,
                                 indices,
-                                field_data.d,
-                                self.sampler_type)
+                                field_data.d)
 
     def render(self, camera):
 
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
-        self.sampler(self.scene, self.mesh)
+        self.sampler(self.scene)
         mylog.debug("Done casting rays")
 
         self.current_image = self.sampler.aimage


https://bitbucket.org/yt_analysis/yt/commits/3c3bbfee7b6a/
Changeset:   3c3bbfee7b6a
Branch:      yt
User:        atmyers
Date:        2015-07-10 06:00:39+00:00
Summary:     fixing an issue with a dangling pointer, also sharing buffers with embree to reduce memory overhead
Affected #:  2 files

diff -r 2d022f41f36d0aa028de510ce83348a958bedd80 -r 3c3bbfee7b6a13540bfdaee3c8f89eaf463ad9ef yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -220,18 +220,25 @@
                     rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
 
         # first just copy over the vertices
-        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
-                        rtcg.RTC_VERTEX_BUFFER)
+#        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
+#                        rtcg.RTC_VERTEX_BUFFER)
+
+        cdef Vertex* vertices = <Vertex*> malloc(nv * sizeof(Vertex))
 
         for i in range(nv):
             vertices[i].x = vertices_in[i, 0]
             vertices[i].y = vertices_in[i, 1]
             vertices[i].z = vertices_in[i, 2]
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+ #       rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
+       
+        rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER,
+                          vertices, 0, sizeof(Vertex))
 
         # now build up the triangles
-        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
-                        mesh, rtcg.RTC_INDEX_BUFFER)
+#        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
+#                        mesh, rtcg.RTC_INDEX_BUFFER)
+
+        cdef Triangle* triangles = <Triangle*> malloc(nt * sizeof(Triangle))
 
         for i in range(ne):
             for j in range(self.tpe):
@@ -239,7 +246,9 @@
                 triangles[self.tpe*i+j].v1 = indices_in[i][self.tri_array[j][1]]
                 triangles[self.tpe*i+j].v2 = indices_in[i][self.tri_array[j][2]]
 
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+#        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
+        rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER,
+                          triangles, 0, sizeof(Triangle))
 
         cdef long* element_indices = <long *> malloc(ne * self.vpe * sizeof(long))
     
@@ -293,3 +302,5 @@
             free(self.field_data)
         if self.element_indices is not NULL:
             free(self.element_indices)
+        free(self.vertices)
+        free(self.indices)

diff -r 2d022f41f36d0aa028de510ce83348a958bedd80 -r 3c3bbfee7b6a13540bfdaee3c8f89eaf463ad9ef yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -227,10 +227,10 @@
     
     for i in range(8):
         element_indices[i] = data.element_indices[elem_id*8+i]
-        field_data[i] = data.field_data[elem_id*8+i]
+        field_data[i]      = data.field_data[elem_id*8+i]
 
     for i in range(8):
-        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3]     = data.vertices[element_indices[i]].x
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 


https://bitbucket.org/yt_analysis/yt/commits/b5002ee65277/
Changeset:   b5002ee65277
Branch:      yt
User:        atmyers
Date:        2015-07-10 06:55:33+00:00
Summary:     cleaning up a little bit
Affected #:  2 files

diff -r 3c3bbfee7b6a13540bfdaee3c8f89eaf463ad9ef -r b5002ee6527794d6cc41e36fb21eaca9667ad0d8 yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -4,9 +4,9 @@
     Vec3f
 
 ctypedef struct MeshDataContainer:
-    Vertex* vertices
-    Triangle* indices
-    double* field_data
-    long* element_indices
-    int tpe
-    int vpe
+    Vertex* vertices       # array of triangle vertices
+    Triangle* indices      # which vertices belong to which triangles
+    double* field_data     # the field values at the vertices
+    long* element_indices  # which vertices belong to which *element*
+    int tpe                # the number of triangles per element
+    int vpe                # the number of vertices per element

diff -r 3c3bbfee7b6a13540bfdaee3c8f89eaf463ad9ef -r b5002ee6527794d6cc41e36fb21eaca9667ad0d8 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -188,9 +188,6 @@
                  np.ndarray data):
 
         # We need now to figure out if we've been handed quads or tetrahedra.
-        # If it's quads, we can build the mesh slightly differently.
-        # http://stackoverflow.com/questions/23723993/converting-quadriladerals-in-an-obj-file-into-triangles
-
         if indices.shape[1] == 8:
             self.vpe = HEX_NV
             self.tpe = HEX_NT
@@ -202,8 +199,6 @@
         else:
             raise NotImplementedError
 
-        self.field_data = NULL
-        self.element_indices = NULL
         self._build_from_indices(scene, vertices, indices)
         self._set_field_data(scene, data)
         self._set_sampler_type(scene)
@@ -220,38 +215,25 @@
                     rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
 
         # first just copy over the vertices
-#        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
-#                        rtcg.RTC_VERTEX_BUFFER)
-
         cdef Vertex* vertices = <Vertex*> malloc(nv * sizeof(Vertex))
-
         for i in range(nv):
             vertices[i].x = vertices_in[i, 0]
             vertices[i].y = vertices_in[i, 1]
-            vertices[i].z = vertices_in[i, 2]
- #       rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
-       
+            vertices[i].z = vertices_in[i, 2]       
         rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER,
                           vertices, 0, sizeof(Vertex))
 
         # now build up the triangles
-#        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
-#                        mesh, rtcg.RTC_INDEX_BUFFER)
-
         cdef Triangle* triangles = <Triangle*> malloc(nt * sizeof(Triangle))
-
         for i in range(ne):
             for j in range(self.tpe):
                 triangles[self.tpe*i+j].v0 = indices_in[i][self.tri_array[j][0]]
                 triangles[self.tpe*i+j].v1 = indices_in[i][self.tri_array[j][1]]
                 triangles[self.tpe*i+j].v2 = indices_in[i][self.tri_array[j][2]]
-
-#        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
         rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER,
                           triangles, 0, sizeof(Triangle))
 
-        cdef long* element_indices = <long *> malloc(ne * self.vpe * sizeof(long))
-    
+        cdef long* element_indices = <long *> malloc(ne * self.vpe * sizeof(long))    
         for i in range(ne):
             for j in range(self.vpe):
                 element_indices[i*self.vpe + j] = indices_in[i][j]
@@ -298,9 +280,7 @@
                                               self.filter_func)
         
     def __dealloc__(self):
-        if self.field_data is not NULL:
-            free(self.field_data)
-        if self.element_indices is not NULL:
-            free(self.element_indices)
+        free(self.field_data)
+        free(self.element_indices)
         free(self.vertices)
         free(self.indices)


https://bitbucket.org/yt_analysis/yt/commits/d811eea6bdaf/
Changeset:   d811eea6bdaf
Branch:      yt
User:        atmyers
Date:        2015-07-10 07:06:00+00:00
Summary:     we don't really need this base class
Affected #:  1 file

diff -r b5002ee6527794d6cc41e36fb21eaca9667ad0d8 -r d811eea6bdafb1bba0f762dce337cbfd22d97e32 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -27,131 +27,7 @@
     int triangulate_tetra[MAX_NUM_TRI][3]
 
 
-cdef class TriangleMesh:
-    r'''
-
-    This class constructs a polygon mesh with triangular elements and 
-    adds it to the scene. 
-
-    Parameters
-    ----------
-
-    scene : YTEmbreeScene
-        This is the scene to which the constructed polygons will be
-        added.
-    vertices : a np.ndarray of floats. 
-        This specifies the x, y, and z coordinates of the vertices in 
-        the polygon mesh. This should either have the shape 
-        (num_triangles, 3, 3), or the shape (num_vertices, 3), depending
-        on the value of the `indices` parameter.
-    indices : either None, or a np.ndarray of ints
-        If None, then vertices must have the shape (num_triangles, 3, 3).
-        In this case, `vertices` specifices the coordinates of each
-        vertex of each triangle in the mesh, with vertices being 
-        duplicated if they are shared between triangles. For example,
-        if indices is None, then vertices[2][1][0] should give you 
-        the x-coordinate of the 2nd vertex of the 3rd triangle.
-        If indices is a np.ndarray, then it must have the shape
-        (num_triangles, 3), and `vertices` must have the shape
-        (num_vertices, 3). In this case, indices[2][1] tells you 
-        the index of the 2nd vertex of the 3rd triangle in `indices`,
-        while vertices[5][2] tells you the z-coordinate of the 6th
-        vertex in the mesh. Note that the indexing is assumed to be
-        zero-based. In this setup, vertices can be shared between
-        triangles, and the number of vertices can be less than 3 times
-        the number of triangles.
-            
-    '''
-
-    cdef Vertex* vertices
-    cdef Triangle* indices
-    cdef unsigned int mesh
-    cdef double* field_data
-    cdef rtcg.RTCFilterFunc filter_func
-    cdef int tpe, vpe
-    cdef int[MAX_NUM_TRI][3] tri_array
-    cdef long* element_indices
-    cdef MeshDataContainer datac
-
-    def __init__(self, YTEmbreeScene scene,
-                 np.ndarray vertices,
-                 np.ndarray indices = None):
-
-        if indices is None:
-            self._build_from_flat(scene, vertices)
-        else:
-            self._build_from_indices(scene, vertices, indices)
-
-    cdef void _build_from_flat(self, YTEmbreeScene scene, 
-                               np.ndarray tri_vertices):
-        cdef int i, j
-        cdef int nt = tri_vertices.shape[0]
-        # In this scheme, we don't share any vertices.  This leads to cracks,
-        # but also means we have exactly three times as many vertices as
-        # triangles.
-        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                        rtcg.RTC_GEOMETRY_STATIC, nt, nt*3, 1) 
-        
-        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
-                        rtcg.RTC_VERTEX_BUFFER)
-
-        for i in range(nt):
-            for j in range(3):
-                vertices[i*3 + j].x = tri_vertices[i,j,0]
-                vertices[i*3 + j].y = tri_vertices[i,j,1]
-                vertices[i*3 + j].z = tri_vertices[i,j,2]
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
-
-        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
-                        mesh, rtcg.RTC_INDEX_BUFFER)
-        for i in range(nt):
-            triangles[i].v0 = i*3 + 0
-            triangles[i].v1 = i*3 + 1
-            triangles[i].v2 = i*3 + 2
-
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
-        self.vertices = vertices
-        self.indices = triangles
-        self.mesh = mesh
-
-    cdef void _build_from_indices(self, YTEmbreeScene scene,
-                                  np.ndarray tri_vertices,
-                                  np.ndarray tri_indices):
-        cdef int i
-        cdef int nv = tri_vertices.shape[0]
-        cdef int nt = tri_indices.shape[0]
-
-        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
-                                        rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
-
-        # set up vertex and triangle arrays. In this case, we just read
-        # them directly from the inputs
-        cdef Vertex* vertices = <Vertex*> rtcg.rtcMapBuffer(scene.scene_i, mesh,
-                                                    rtcg.RTC_VERTEX_BUFFER)
-
-        for i in range(nv):
-                vertices[i].x = tri_vertices[i, 0]
-                vertices[i].y = tri_vertices[i, 1]
-                vertices[i].z = tri_vertices[i, 2]
-
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER)
-
-        cdef Triangle* triangles = <Triangle*> rtcg.rtcMapBuffer(scene.scene_i,
-                        mesh, rtcg.RTC_INDEX_BUFFER)
-
-        for i in range(nt):
-            triangles[i].v0 = tri_indices[i][0]
-            triangles[i].v1 = tri_indices[i][1]
-            triangles[i].v2 = tri_indices[i][2]
-
-        rtcg.rtcUnmapBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER)
-
-        self.vertices = vertices
-        self.indices = triangles
-        self.mesh = mesh
-
-
-cdef class ElementMesh(TriangleMesh):
+cdef class ElementMesh:
     r'''
 
     Currently, we handle non-triangular mesh types by converting them 
@@ -182,6 +58,16 @@
             
     '''
 
+    cdef Vertex* vertices
+    cdef Triangle* indices
+    cdef unsigned int mesh
+    cdef double* field_data
+    cdef rtcg.RTCFilterFunc filter_func
+    cdef int tpe, vpe
+    cdef int[MAX_NUM_TRI][3] tri_array
+    cdef long* element_indices
+    cdef MeshDataContainer datac
+
     def __init__(self, YTEmbreeScene scene,
                  np.ndarray vertices, 
                  np.ndarray indices,


https://bitbucket.org/yt_analysis/yt/commits/02a07dea8269/
Changeset:   02a07dea8269
Branch:      yt
User:        atmyers
Date:        2015-07-10 07:09:10+00:00
Summary:     this functionality is now implemented elsewhere
Affected #:  2 files

diff -r d811eea6bdafb1bba0f762dce337cbfd22d97e32 -r 02a07dea8269ac9ea1ff36f7a809858f25794ac1 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ /dev/null
@@ -1,336 +0,0 @@
-"""
-This file contains coordinate mappings between physical coordinates and those
-defined on unit elements, as well as doing the corresponding intracell 
-interpolation on finite element data.
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-cimport numpy as np
-from numpy cimport ndarray
-cimport cython
-import numpy as np
-from libc.math cimport abs
-
-
-cdef class ElementSampler:
-    '''
-
-    This is a base class for sampling the value of a finite element solution
-    at an arbitrary point inside a mesh element. In general, this will be done
-    by transforming the requested physical coordinate into a mapped coordinate 
-    system, sampling the solution in mapped coordinates, and returning the result.
-    This is not to be used directly; use one of the subclasses instead.
-
-    '''
-
-    def map_real_to_unit(self,
-                         np.ndarray physical_coord, 
-                         np.ndarray vertices):
-        raise NotImplementedError
-
-    def sample_at_unit_point(self,
-                             np.ndarray coord,
-                             np.ndarray vals):
-        raise NotImplementedError
-
-    def sample_at_real_point(self,
-                             np.ndarray coord, 
-                             np.ndarray vertices, 
-                             np.ndarray vals):
-        mapped_coord = self.map_real_to_unit(coord, vertices)
-        return self.sample_at_unit_point(mapped_coord, vals)
-    
-
-cdef class P1Sampler2D(ElementSampler):
-    '''
-
-    This implements sampling inside a linear, triangular mesh element.
-    In this case, the mapping is easily invertible and can be done 
-    with no iteration.
-    
-
-    '''
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    @cython.initializedcheck(False)
-    def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_x, 
-                         np.ndarray[np.float64_t, ndim=2] vertices):
-    
-        b = np.empty(3, dtype=np.float64)
-        A = np.empty((3, 3), dtype=np.float64)
-    
-        b[0] = physical_x[0]
-        b[1] = physical_x[1]
-        b[2] = 1.0
-    
-        A[0][0] = vertices[0, 0]
-        A[0][1] = vertices[1, 0]
-        A[0][2] = vertices[2, 0]
-    
-        A[1][0] = vertices[0, 1]
-        A[1][1] = vertices[1, 1]
-        A[1][2] = vertices[2, 1]
-    
-        A[2][0] = 1.0
-        A[2][1] = 1.0
-        A[2][2] = 1.0
-            
-        c = np.linalg.solve(A, b)
-        return c
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self, double[:] coord, 
-                             double[:] vals):
-        return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2]
-
-cdef class P1Sampler3D(ElementSampler):
-    '''
-
-    This implements sampling inside a linear, tetrahedral mesh element. Like
-    the 2D case, this mapping is linear and can be inverted easily.
-
-    '''
-
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    @cython.initializedcheck(False)
-    def map_real_to_unit(self, double[:] physical_x, double[:,:] vertices):
-    
-        b = np.empty(4, dtype=np.float64)
-        A = np.empty((4, 4), dtype=np.float64)
-    
-        b[0] = physical_x[0]
-        b[1] = physical_x[1]
-        b[2] = physical_x[2]
-        b[3] = 1.0
-    
-        A[0][0] = vertices[0, 0]
-        A[0][1] = vertices[1, 0]
-        A[0][2] = vertices[2, 0]
-        A[0][3] = vertices[3, 0]
-        
-        A[1][0] = vertices[0, 1]
-        A[1][1] = vertices[1, 1]
-        A[1][2] = vertices[2, 1]
-        A[1][3] = vertices[3, 1]
-        
-        A[2][0] = vertices[0, 2]
-        A[2][1] = vertices[1, 2]
-        A[2][2] = vertices[2, 2]
-        A[2][3] = vertices[3, 2]
-
-        A[3][0] = 1.0
-        A[3][1] = 1.0
-        A[3][2] = 1.0
-        A[3][3] = 1.0
-        
-        c = np.linalg.solve(A, b)
-    
-        return c
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self,
-                             double[:] coord, 
-                             double[:] vals):
-        cdef double value = 0.0
-        cdef int i
-        for i in range(4):
-            value += vals[i]*coord[i]
-        return value
-
-ctypedef void (*func_type)(double[:], double[:], double[:, :], double[:])
-ctypedef void (*jac_type)(double[:, :], double[:], double[:, :], double[:])
-
-cdef class NonlinearSolveSampler(ElementSampler):
-
-    '''
-
-    This is a base class for handling element samplers that require
-    a nonlinear solve to invert the mapping between coordinate systems.
-    To do this, we perform Newton-Raphson iteration using a specificed 
-    system of equations with an analytic Jacobian matrix. This is
-    not to be used directly, use one of the subclasses instead.
-
-    '''
-
-    cdef int dim
-    cdef int max_iter
-    cdef np.float64_t tolerance
-    cdef func_type func 
-    cdef jac_type jac
-
-    def __init__(self):
-        self.tolerance = 1.0e-9
-        self.max_iter = 10
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def map_real_to_unit(self, 
-                         np.ndarray[np.float64_t, ndim=1] physical_x,
-                         np.ndarray[np.float64_t, ndim=2] vertices):
-        x = np.zeros(self.dim, dtype=np.float64)
-        cdef int iterations = 0
-        fx = np.empty(self.dim, dtype=np.float64)
-        A = np.empty((self.dim, self.dim), dtype=np.float64)
-        Ainv = np.empty((self.dim, self.dim), dtype=np.float64)
-        self.func(fx, x, vertices, physical_x)
-        cdef np.float64_t err = np.max(abs(fx))
-        while (err > self.tolerance and iterations < self.max_iter):
-            self.jac(A, x, vertices, physical_x)
-            Ainv = np.linalg.inv(A)
-            x = x - np.dot(Ainv, fx)
-            self.func(fx, x, vertices, physical_x)
-            err = np.max(abs(fx))
-            iterations += 1
-        return x
-
-cdef class Q1Sampler2D(NonlinearSolveSampler):
-
-    '''
-
-    This implements sampling inside a 2D quadrilateral mesh element.
-
-    '''
-
-    def __init__(self):
-        super(Q1Sampler2D, self).__init__()
-        self.dim = 2
-        self.func = Q1Function2D
-        self.jac = Q1Jacobian2D
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self, double[:] coord, 
-                             double[:] vals):
-        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1]) + \
-                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1]) + \
-                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1]) + \
-                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])
-        return 0.25*x
-
-cdef class Q1Sampler3D(NonlinearSolveSampler):
-
-    ''' 
-
-    This implements sampling inside a 3d hexahedral mesh element.
-
-    '''
-
-    def __init__(self):
-        super(Q1Sampler3D, self).__init__()
-        self.dim = 3
-        self.func = Q1Function3D
-        self.jac = Q1Jacobian3D
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def sample_at_unit_point(self, double[:] coord, double[:] vals):
-        cdef double x = vals[0]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-                        vals[1]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 - coord[2]) + \
-                        vals[2]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-                        vals[3]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 - coord[2]) + \
-                        vals[4]*(1.0 - coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-                        vals[5]*(1.0 + coord[0])*(1.0 - coord[1])*(1.0 + coord[2]) + \
-                        vals[6]*(1.0 - coord[0])*(1.0 + coord[1])*(1.0 + coord[2]) + \
-                        vals[7]*(1.0 + coord[0])*(1.0 + coord[1])*(1.0 + coord[2])
-        return 0.125*x
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
- at cython.initializedcheck(False)
-cdef inline void Q1Function2D(double[:] fx,
-                              double[:] x, 
-                              double[:, :] vertices, 
-                              double[:] phys_x) nogil:
-    
-    cdef int i
-    for i in range(2):
-        fx[i] = vertices[0][i]*(1-x[0])*(1-x[1]) \
-              + vertices[1][i]*(1+x[0])*(1-x[1]) \
-              + vertices[2][i]*(1-x[0])*(1+x[1]) \
-              + vertices[3][i]*(1+x[0])*(1+x[1]) - 4.0*phys_x[i]
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
- at cython.initializedcheck(False)
-cdef inline void Q1Jacobian2D(double[:, :] A,
-                              double[:] x, 
-                              double[:, :] v, 
-                              double[:] phys_x) nogil:
-    
-    cdef int i
-    for i in range(2):
-        A[i][0] = -(1-x[1])*v[0][i] + (1-x[1])*v[1][i] - \
-                   (1+x[1])*v[2][i] + (1+x[1])*v[3][i]
-        A[i][1] = -(1-x[0])*v[0][i] - (1+x[0])*v[1][i] + \
-                   (1-x[0])*v[2][i] + (1+x[0])*v[3][i]
-
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
- at cython.initializedcheck(False)
-cdef inline void Q1Function3D(double[:] fx,
-                              double[:] x, 
-                              double[:, :] vertices, 
-                              double[:] phys_x) nogil:
-    
-    cdef int i
-    for i in range(3):
-        fx[i] = vertices[0][i]*(1-x[0])*(1-x[1])*(1-x[2]) \
-              + vertices[1][i]*(1+x[0])*(1-x[1])*(1-x[2]) \
-              + vertices[2][i]*(1-x[0])*(1+x[1])*(1-x[2]) \
-              + vertices[3][i]*(1+x[0])*(1+x[1])*(1-x[2]) \
-              + vertices[4][i]*(1-x[0])*(1-x[1])*(1+x[2]) \
-              + vertices[5][i]*(1+x[0])*(1-x[1])*(1+x[2]) \
-              + vertices[6][i]*(1-x[0])*(1+x[1])*(1+x[2]) \
-              + vertices[7][i]*(1+x[0])*(1+x[1])*(1+x[2]) \
-              - 8.0*phys_x[i]
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
- at cython.cdivision(True)
- at cython.initializedcheck(False)
-cdef inline void Q1Jacobian3D(double[:, :] A,
-                              double[:] x, 
-                              double[:, :] v, 
-                              double[:] phys_x) nogil:
-    
-    cdef int i
-    for i in range(3):
-        A[i][0] = -(1-x[1])*(1-x[2])*v[0][i] + (1-x[1])*(1-x[2])*v[1][i] - \
-                   (1+x[1])*(1-x[2])*v[2][i] + (1+x[1])*(1-x[2])*v[3][i] - \
-                   (1-x[1])*(1+x[2])*v[4][i] + (1-x[1])*(1+x[2])*v[5][i] - \
-                   (1+x[1])*(1+x[2])*v[6][i] + (1+x[1])*(1+x[2])*v[7][i]
-        A[i][1] = -(1-x[0])*(1-x[2])*v[0][i] - (1+x[0])*(1-x[2])*v[1][i] + \
-                   (1-x[0])*(1-x[2])*v[2][i] + (1+x[0])*(1-x[2])*v[3][i] - \
-                   (1-x[0])*(1+x[2])*v[4][i] - (1+x[0])*(1+x[2])*v[5][i] + \
-                   (1-x[0])*(1+x[2])*v[6][i] + (1+x[0])*(1+x[2])*v[7][i]
-        A[i][2] = -(1-x[0])*(1-x[1])*v[0][i] - (1+x[0])*(1-x[1])*v[1][i] - \
-                   (1-x[0])*(1+x[1])*v[2][i] - (1+x[0])*(1+x[1])*v[3][i] + \
-                   (1-x[0])*(1-x[1])*v[4][i] + (1+x[0])*(1-x[1])*v[5][i] + \
-                   (1-x[0])*(1+x[1])*v[6][i] + (1+x[0])*(1+x[1])*v[7][i]

diff -r d811eea6bdafb1bba0f762dce337cbfd22d97e32 -r 02a07dea8269ac9ea1ff36f7a809858f25794ac1 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-This file contains tests of the intracell interpolation code contained is
-yt/utilities/lib/element_mappings.pyx.
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-import numpy as np
-
-from yt.testing import assert_almost_equal
-from yt.utilities.lib.element_mappings import \
-    P1Sampler2D, \
-    P1Sampler3D, \
-    Q1Sampler2D, \
-    Q1Sampler3D
-
-
-def setup():
-    pass
-
-
-def check_all_vertices(sampler, vertices, field_values):
-    NV = vertices.shape[0]
-    NDIM = vertices.shape[1]
-    x = np.empty(NDIM)
-    for i in range(NV):
-        x = vertices[i]
-        val = sampler.sample_at_real_point(x, vertices, field_values)
-        assert_almost_equal(val, field_values[i])
-
-
-def test_P1Sampler2D():
-    vertices = np.array([[0.1,  0.2],
-                         [0.6,  0.3],
-                         [0.2,  0.7]])
-
-    field_values = np.array([1.0, 2.0, 3.0])
-
-    sampler = P1Sampler2D()
-    check_all_vertices(sampler, vertices, field_values)
-
-
-def test_P1Sampler3D():
-    vertices = np.array([[0.1,  0.1,  0.1],
-                         [0.6,  0.3,  0.2],
-                         [0.2,  0.7,  0.2],
-                         [0.4,  0.4,  0.7]])
-
-    field_values = np.array([1.0, 2.0, 3.0, 4.0])
-
-    sampler = P1Sampler3D()
-    check_all_vertices(sampler, vertices, field_values)
-
-
-def test_Q1Sampler2D():
-    vertices = np.array([[0.1,  0.2],
-                         [0.6,  0.3],
-                         [0.2,  0.7],
-                         [0.7,  0.9]])
-
-    field_values = np.array([1.0, 2.0, 3.0, 4.0])
-
-    sampler = Q1Sampler2D()
-    check_all_vertices(sampler, vertices, field_values)
-
-
-def test_Q1Sampler3D():
-    vertices = np.array([[2.00657905, 0.6888599,  1.4375],
-                         [1.8658198,  1.00973171, 1.4375],
-                         [1.97881594, 1.07088163, 1.4375],
-                         [2.12808879, 0.73057381, 1.4375],
-                         [2.00657905, 0.6888599,  1.2   ],
-                         [1.8658198,  1.00973171, 1.2   ],
-                         [1.97881594, 1.07088163, 1.2   ],
-                         [2.12808879, 0.73057381, 1.2   ]])
-
-    field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
-                             0.54464296, 0.54464149, 0.5446415, 0.54464296])
-
-    sampler = Q1Sampler3D()
-    check_all_vertices(sampler, vertices, field_values)


https://bitbucket.org/yt_analysis/yt/commits/f7b80355f1d0/
Changeset:   f7b80355f1d0
Branch:      yt
User:        atmyers
Date:        2015-07-10 07:15:58+00:00
Summary:     adding headers
Affected #:  2 files

diff -r 02a07dea8269ac9ea1ff36f7a809858f25794ac1 -r f7b80355f1d0f70f0e1f74fbf9db73957bccdb6b yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -1,3 +1,20 @@
+"""
+This file contains the ElementMesh, which represents the target that the 
+rays will be cast at when rendering finite element data. This class handles
+the interface between the internal representation of the mesh and the pyembree
+representation.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 cimport numpy as np
 cimport cython
 cimport pyembree.rtcore as rtc 

diff -r 02a07dea8269ac9ea1ff36f7a809858f25794ac1 -r f7b80355f1d0f70f0e1f74fbf9db73957bccdb6b yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -1,3 +1,18 @@
+"""
+This file contains the MeshSampler class, which handles casting rays at a
+MeshSource using pyembree.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 cimport cython
 cimport numpy as np
 import numpy as np


https://bitbucket.org/yt_analysis/yt/commits/088695eb316d/
Changeset:   088695eb316d
Branch:      yt
User:        atmyers
Date:        2015-07-11 17:18:03+00:00
Summary:     removing an unused import
Affected #:  1 file

diff -r f7b80355f1d0f70f0e1f74fbf9db73957bccdb6b -r 088695eb316d99feec3433f2aa491877826b57a7 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -1,6 +1,5 @@
 cimport pyembree.rtcore as rtc
 cimport pyembree.rtcore_ray as rtcr
-from pyembree.rtcore cimport Vec3f
 cimport cython
 
 cdef void sample_hex(void* userPtr,


https://bitbucket.org/yt_analysis/yt/commits/0bb3ca370cea/
Changeset:   0bb3ca370cea
Branch:      yt
User:        atmyers
Date:        2015-07-16 18:27:42+00:00
Summary:     merging
Affected #:  70 files

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/README
--- a/doc/README
+++ b/doc/README
@@ -7,4 +7,4 @@
 Because the documentation requires a number of dependencies, we provide
 pre-built versions online, accessible here:
 
-http://yt-project.org/docs/dev-3.0/
+http://yt-project.org/docs/dev/

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
+  "signature": "sha256:487383ec23a092310522ec25bd02ad2eb16a3402c5ed3d2b103d33fe17697b3c"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -70,6 +70,13 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "<font color='red'>**NOTE**</font>: Currently, use of the SZpack library to create S-Z projections in yt is limited to Python 2.x."
+     ]
+    },
+    {
      "cell_type": "heading",
      "level": 2,
      "metadata": {},

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -10,6 +10,10 @@
 simulated X-ray photon lists of events from datasets that yt is able
 to read. The simulated events then can be exported to X-ray telescope
 simulators to produce realistic observations or can be analyzed in-line.
+
+For detailed information about the design of the algorithm in yt, check 
+out `the SciPy 2014 Proceedings. <http://conference.scipy.org/proceedings/scipy2014/zuhone.html>`_.
+
 The algorithm is based off of that implemented in
 `PHOX <http://www.mpa-garching.mpg.de/~kdolag/Phox/>`_ for SPH datasets
 by Veronica Biffi and Klaus Dolag. There are two relevant papers:
@@ -139,6 +143,12 @@
 the optional keyword ``thermal_broad`` is set to ``True``, the spectral
 lines will be thermally broadened.
 
+.. note:: 
+
+   ``SpectralModel`` objects based on XSPEC models (both the thermal 
+   emission and Galactic absorption models mentioned below) only work 
+   in Python 2.7, since currently PyXspec only works with Python 2.x. 
+   
 Now that we have our ``SpectralModel`` that gives us a spectrum, we need
 to connect this model to a ``PhotonModel`` that will connect the field
 data in the ``data_source`` to the spectral model to actually generate
@@ -148,7 +158,8 @@
 .. code:: python
 
     thermal_model = ThermalPhotonModel(apec_model, X_H=0.75, Zmet=0.3,
-                                       photons_per_chunk=100000000)
+                                       photons_per_chunk=100000000,
+                                       method="invert_cdf")
 
 Where we pass in the ``SpectralModel``, and can optionally set values for
 the hydrogen mass fraction ``X_H`` and metallicity ``Z_met``. If
@@ -165,6 +176,18 @@
 this parameter needs to be set higher, or if you are looking to decrease memory
 usage, you might set this parameter lower.
 
+The ``method`` keyword argument is also optional, and determines how the individual
+photon energies are generated from the spectrum. It may be set to one of two values:
+
+* ``method="invert_cdf"``: Construct the cumulative distribution function of the spectrum and invert
+  it, using uniformly drawn random numbers to determine the photon energies (fast, but relies
+  on construction of the CDF and interpolation between the points, so for some spectra it
+  may not be accurate enough). 
+* ``method="accept_reject"``: Generate the photon energies from the spectrum using an acceptance-rejection
+  technique (accurate, but likely to be slow). 
+
+``method="invert_cdf"`` (the default) should be sufficient for most cases. 
+
 Next, we need to specify "fiducial" values for the telescope collecting
 area, exposure time, and cosmological redshift. Remember, the initial
 photon generation will act as a source for Monte-Carlo sampling for more
@@ -191,12 +214,29 @@
 By default, the angular diameter distance to the object is determined
 from the ``cosmology`` and the cosmological ``redshift``. If a
 ``Cosmology`` instance is not provided, one will be made from the
-default cosmological parameters. If your source is local to the galaxy,
-you can set its distance directly, using a tuple, e.g.
-``dist=(30, "kpc")``. In this case, the ``redshift`` and ``cosmology``
-will be ignored. Finally, if the photon generating function accepts any
-parameters, they can be passed to ``from_scratch`` via a ``parameters``
-dictionary.
+default cosmological parameters. The ``center`` keyword argument specifies
+the center of the photon distribution, and the photon positions will be 
+rescaled with this value as the origin. This argument accepts the following
+values:
+
+* A NumPy array or list corresponding to the coordinates of the center in
+  units of code length. 
+* A ``YTArray`` corresponding to the coordinates of the center in some
+  length units. 
+* ``"center"`` or ``"c"`` corresponds to the domain center. 
+* ``"max"`` or ``"m"`` corresponds to the location of the maximum gas density. 
+* A two-element tuple specifying the max or min of a specific field, e.g.,
+  ``("min","gravitational_potential")``, ``("max","dark_matter_density")``
+
+If ``center`` is not specified, ``from_scratch`` will attempt to use the 
+``"center"`` field parameter of the ``data_source``. 
+
+``from_scratch`` takes a few other optional keyword arguments. If your 
+source is local to the galaxy, you can set its distance directly, using 
+a tuple, e.g. ``dist=(30, "kpc")``. In this case, the ``redshift`` and 
+``cosmology`` will be ignored. Finally, if the photon generating 
+function accepts any parameters, they can be passed to ``from_scratch`` 
+via a ``parameters`` dictionary.
 
 At this point, the ``photons`` are distributed in the three-dimensional
 space of the ``data_source``, with energies in the rest frame of the
@@ -265,7 +305,7 @@
     abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
 Now we're ready to project the photons. First, we choose a line-of-sight
-vector ``L``. Second, we'll adjust the exposure time and the redshift.
+vector ``normal``. Second, we'll adjust the exposure time and the redshift.
 Third, we'll pass in the absorption ``SpectrumModel``. Fourth, we'll
 specify a ``sky_center`` in RA,DEC on the sky in degrees.
 
@@ -274,26 +314,40 @@
 course far short of a full simulation of a telescope ray-trace, but it's
 a quick-and-dirty way to get something close to the real thing. We'll
 discuss how to get your simulated events into a format suitable for
-reading by telescope simulation codes later.
+reading by telescope simulation codes later. If you just want to convolve 
+the photons with an ARF, you may specify that as the only response, but some
+ARFs are unnormalized and still require the RMF for normalization. Check with
+the documentation associated with these files for details. If we are using the
+RMF to convolve energies, we must set ``convolve_energies=True``. 
 
 .. code:: python
 
     ARF = "chandra_ACIS-S3_onaxis_arf.fits"
     RMF = "chandra_ACIS-S3_onaxis_rmf.fits"
-    L = [0.0,0.0,1.0]
-    events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
-                                     sky_center=(187.5,12.333), responses=[ARF,RMF])
+    normal = [0.0,0.0,1.0]
+    events = photons.project_photons(normal, exp_time_new=2.0e5, redshift_new=0.07, dist_new=None, 
+                                     absorb_model=abs_model, sky_center=(187.5,12.333), responses=[ARF,RMF], 
+                                     convolve_energies=True, no_shifting=False, north_vector=None,
+                                     psf_sigma=None)
 
-Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
-deviation to scatter the photon sky positions around with, providing a
-crude representation of a PSF.
+In this case, we chose a three-vector ``normal`` to specify an arbitrary 
+line-of-sight, but ``"x"``, ``"y"``, or ``"z"`` could also be chosen to 
+project along one of those axes. 
 
-.. warning::
+``project_photons`` takes several other optional keyword arguments. 
 
-   The binned images that result, even if you convolve with responses,
-   are still of the same resolution as the finest cell size of the
-   simulation dataset. If you want a more accurate simulation of a
-   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+* ``no_shifting`` (default ``False``) controls whether or not Doppler 
+  shifting of photon energies is turned on. 
+* ``dist_new`` is a (value, unit) tuple that is used to set a new
+  angular diameter distance by hand instead of having it determined
+  by the cosmology and the value of the redshift. Should only be used
+  for simulations of nearby objects. 
+* For off-axis ``normal`` vectors,  the ``north_vector`` argument can 
+  be used to control what vector corresponds to the "up" direction in 
+  the resulting event list. 
+* ``psf_sigma`` may be specified to provide a crude representation of 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
+  Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:
 
@@ -343,19 +397,27 @@
 
 Which is starting to look like a real observation!
 
+.. warning::
+
+   The binned images that result, even if you convolve with responses,
+   are still of the same resolution as the finest cell size of the
+   simulation dataset. If you want a more accurate simulation of a
+   particular X-ray telescope, you should check out `Storing events for future use and for reading-in by telescope simulators`_.
+
 We can also bin up the spectrum into energy bins, and write it to a FITS
 table file. This is an example where we've binned up the spectrum
 according to the unconvolved photon energy:
 
 .. code:: python
 
-    events.write_spectrum("virgo_spec.fits", energy_bins=True, emin=0.1, emax=10.0, nchan=2000, clobber=True)
+    events.write_spectrum("virgo_spec.fits", bin_type="energy", emin=0.1, emax=10.0, nchan=2000, clobber=True)
 
-If we don't set ``energy_bins=True``, and we have convolved our events
+We can also set ``bin_type="channel"``. If we have convolved our events
 with response files, then any other keywords will be ignored and it will
 try to make a spectrum from the channel information that is contained
-within the RMF, suitable for analyzing in XSPEC. For now, we'll stick
-with the energy spectrum, and plot it up:
+within the RMF. Otherwise, the channels will be determined from the ``emin``, 
+``emax``, and ``nchan`` keywords, and will be numbered from 1 to ``nchan``. 
+For now, we'll stick with the energy spectrum, and plot it up:
 
 .. code:: python
 

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -174,7 +174,7 @@
 
 Field plugins can be loaded dynamically, although at present this is not
 particularly useful.  Plans for extending field plugins to dynamically load, to
-enable simple definition of common types (gradient, divergence, etc), and to
+enable simple definition of common types (divergence, curl, etc), and to
 more verbosely describe available fields, have been put in place for future
 versions.
 

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -13,16 +13,16 @@
 
 frame = 0
 # Move to the maximum density location over 5 frames
-for _ in cam.move_to(max_c, 5):
+for _ in cam.iter_move(max_c, 5):
     sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
-for _ in cam.zoomin(10.0, 5):
+for _ in cam.iter_zoom(10.0, 5):
     sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
     frame += 1
 
 # Do a rotation over 5 frames
-for _ in cam.rotation(np.pi, 5):
+for _ in cam.iter_rotate(np.pi, 5):
     sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
     frame += 1

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -250,8 +250,10 @@
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show
-the transfer function on a volume rendering.
-See :ref:`volume_rendering` for more information.
+the transfer function on a volume rendering.  Please note that this 
+recipe relies on the old volume rendering interface.  While one can
+continue to use this interface, it may be incompatible with some of the
+new developments and the infrastructure described in :ref:`volume_rendering`.
 
 .. yt_cookbook:: vol-annotated.py
 

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -10,10 +10,10 @@
 def _OVI_number_density(field, data):
     return data['H_number_density']*2.0
 
-# Define a function that will accept a ds and add the new field 
+# Define a function that will accept a ds and add the new field
 # defined above.  This will be given to the LightRay below.
 def setup_ds(ds):
-    ds.add_field("O_p5_number_density", 
+    ds.add_field(("gas","O_p5_number_density"),
                  function=_OVI_number_density,
                  units="cm**-3")
 
@@ -62,7 +62,7 @@
 
 # Get all fields that need to be added to the light ray
 fields = ['temperature']
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     fields.append(params['field'])
 
 # Make a light ray, and set njobs to -1 to use one core
@@ -79,7 +79,7 @@
 sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
 
 # Iterate over species
-for s, params in species_dicts.iteritems():
+for s, params in species_dicts.items():
     # Iterate over transitions for a single species
     for i in range(params['numLines']):
         # Add the lines to the spectrum

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ /dev/null
@@ -1,105 +0,0 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
-import numpy as np
-import yt
-# Need to grab the proton mass from the constants database
-from yt.utilities.physical_constants import mp
-
-exit()
-# Define the emission field
-
-keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg
-KtokeV = 8.617e-08  # Convert degrees Kelvin to degrees keV
-sqrt3 = np.sqrt(3.)
-expgamma = 1.78107241799  # Exponential of Euler's constant
-
-
-def _FreeFree_Emission(field, data):
-
-    if data.has_field_parameter("Z"):
-        Z = data.get_field_parameter("Z")
-    else:
-        Z = 1.077  # Primordial H/He plasma
-
-    if data.has_field_parameter("mue"):
-        mue = data.get_field_parameter("mue")
-    else:
-        mue = 1./0.875  # Primordial H/He plasma
-
-    if data.has_field_parameter("mui"):
-        mui = data.get_field_parameter("mui")
-    else:
-        mui = 1./0.8125  # Primordial H/He plasma
-
-    if data.has_field_parameter("Ephoton"):
-        Ephoton = data.get_field_parameter("Ephoton")
-    else:
-        Ephoton = 1.0  # in keV
-
-    if data.has_field_parameter("photon_emission"):
-        photon_emission = data.get_field_parameter("photon_emission")
-    else:
-        photon_emission = False  # Flag for energy or photon emission
-
-    n_e = data["density"]/(mue*mp)
-    n_i = data["density"]/(mui*mp)
-    kT = data["temperature"]*KtokeV
-
-    # Compute the Gaunt factor
-
-    g_ff = np.zeros(kT.shape)
-    g_ff[Ephoton/kT > 1.] = np.sqrt((3./np.pi)*kT[Ephoton/kT > 1.]/Ephoton)
-    g_ff[Ephoton/kT < 1.] = (sqrt3/np.pi)*np.log((4./expgamma) *
-                                                 kT[Ephoton/kT < 1.]/Ephoton)
-
-    eps_E = 1.64e-20*Z*Z*n_e*n_i/np.sqrt(data["temperature"]) * \
-        np.exp(-Ephoton/kT)*g_ff
-
-    if photon_emission:
-        eps_E /= (Ephoton*keVtoerg)
-
-    return eps_E
-
-yt.add_field("FreeFree_Emission", function=_FreeFree_Emission)
-
-# Define the luminosity derived quantity
-def _FreeFreeLuminosity(data):
-    return (data["FreeFree_Emission"]*data["cell_volume"]).sum()
-
-
-def _combFreeFreeLuminosity(data, luminosity):
-    return luminosity.sum()
-
-yt.add_quantity("FreeFree_Luminosity", function=_FreeFreeLuminosity,
-                combine_function=_combFreeFreeLuminosity, n_ret=1)
-
-ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
-
-sphere = ds.sphere(ds.domain_center, (100., "kpc"))
-
-# Print out the total luminosity at 1 keV for the sphere
-
-print("L_E (1 keV, primordial) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# The defaults for the field assume a H/He primordial plasma.
-# Let's set the appropriate parameters for a pure hydrogen plasma.
-
-sphere.set_field_parameter("mue", 1.0)
-sphere.set_field_parameter("mui", 1.0)
-sphere.set_field_parameter("Z", 1.0)
-
-print("L_E (1 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Now let's print the luminosity at an energy of E = 10 keV
-
-sphere.set_field_parameter("Ephoton", 10.0)
-
-print("L_E (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())
-
-# Finally, let's set the flag for photon emission, to get the total number
-# of photons emitted at this energy:
-
-sphere.set_field_parameter("photon_emission", True)
-
-print("L_ph (10 keV, pure hydrogen) = ", sphere.quantities["FreeFree_Luminosity"]())

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/simulation_analysis.py
--- a/doc/source/cookbook/simulation_analysis.py
+++ b/doc/source/cookbook/simulation_analysis.py
@@ -2,11 +2,11 @@
 yt.enable_parallelism()
 import collections
 
-# Enable parallelism in the script (assuming it was called with 
+# Enable parallelism in the script (assuming it was called with
 # `mpirun -np <n_procs>` )
 yt.enable_parallelism()
 
-# By using wildcards such as ? and * with the load command, we can load up a 
+# By using wildcards such as ? and * with the load command, we can load up a
 # Time Series containing all of these datasets simultaneously.
 ts = yt.load('enzo_tiny_cosmology/DD????/DD????')
 
@@ -16,7 +16,7 @@
 # Create an empty dictionary
 data = {}
 
-# Iterate through each dataset in the Time Series (using piter allows it 
+# Iterate through each dataset in the Time Series (using piter allows it
 # to happen in parallel automatically across available processors)
 for ds in ts.piter():
     ad = ds.all_data()
@@ -31,6 +31,6 @@
 # Print out all the values we calculated.
 print("Dataset      Redshift        Density Min      Density Max")
 print("---------------------------------------------------------")
-for key, val in od.iteritems(): 
+for key, val in od.items(): 
     print("%s       %05.3f          %5.3g g/cm^3   %5.3g g/cm^3" % \
            (key, val[1], val[0][0], val[0][1]))

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/cookbook/time_series.py
--- a/doc/source/cookbook/time_series.py
+++ b/doc/source/cookbook/time_series.py
@@ -12,7 +12,7 @@
 
 storage = {}
 
-# By using the piter() function, we can iterate on every dataset in 
+# By using the piter() function, we can iterate on every dataset in
 # the TimeSeries object.  By using the storage keyword, we can populate
 # a dictionary where the dataset is the key, and sto.result is the value
 # for later use when the loop is complete.
@@ -25,13 +25,13 @@
     sphere = ds.sphere("c", (100., "kpc"))
     # Calculate the entropy within that sphere
     entr = sphere["entropy"].sum()
-    # Store the current time and sphere entropy for this dataset in our 
+    # Store the current time and sphere entropy for this dataset in our
     # storage dictionary as a tuple
     store.result = (ds.current_time.in_units('Gyr'), entr)
 
 # Convert the storage dictionary values to a Nx2 array, so the can be easily
 # plotted
-arr = np.array(storage.values())
+arr = np.array(list(storage.values()))
 
 # Plot up the results: time versus entropy
 plt.semilogy(arr[:,0], arr[:,1], 'r-')

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -104,7 +104,11 @@
 -----------
 
 Athena 4.x VTK data is *mostly* supported and cared for by John
-ZuHone. Both uniform grid and SMR datasets are supported.
+ZuHone. Both uniform grid and SMR datasets are supported. 
+
+.. note:: 
+   yt also recognizes Fargo3D data written to VTK files as 
+   Athena data, but support for Fargo3D data is preliminary. 
 
 Loading Athena datasets is slightly different depending on whether
 your dataset came from a serial or a parallel run. If the data came

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,28 @@
   have the the necessary compilers installed (e.g. the ``build-essentials``
   package on debian and ubuntu).
 
+.. _branches-of-yt:
+
+Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
+++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Before you install yt, you must decide which branch (i.e. version) of the code 
+you prefer to use:
+
+* ``yt`` -- The most up-to-date *development* version with the most current features but sometimes unstable (yt-3.x)
+* ``stable`` -- The latest stable release of yt-3.x
+* ``yt-2.x`` -- The latest stable release of yt-2.x
+
+If this is your first time using the code, we recommend using ``stable``, 
+unless you specifically need some piece of brand-new functionality only 
+available in ``yt`` or need to run an old script developed for ``yt-2.x``.
+There were major API and functionality changes made in yt after version 2.7
+in moving to version 3.0.  For a detailed description of the changes
+between versions 2.x (e.g. branch ``yt-2.x``) and 3.x (e.g. branches ``yt`` and 
+``stable``) see :ref:`yt3differences`.  Lastly, don't feel like you're locked 
+into one branch when you install yt, because you can easily change the active
+branch by following the instructions in :ref:`switching-between-yt-versions`.
+
 .. _install-script:
 
 All-in-One Installation Script
@@ -60,16 +82,22 @@
 its dependencies will be removed from your system (no scattered files remaining
 throughout your system).
 
+.. _installing-yt:
+
 Running the Install Script
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-To get the installation script, download it from:
+To get the installation script for the ``stable`` branch of the code, 
+download it from:
 
 .. code-block:: bash
 
   wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
-.. _installing-yt:
+If you wish to install a different version of yt (see 
+:ref:`above <branches-of-yt>`), replace ``stable`` with the appropriate 
+branch name (e.g. ``yt``, ``yt-2.x``) in the path above to get the correct 
+install script.
 
 By default, the bash install script will install an array of items, but there
 are additional packages that can be downloaded and installed (e.g. SciPy, enzo,
@@ -329,8 +357,8 @@
 
 .. _switching-between-yt-versions:
 
-Switching between yt-2.x and yt-3.x
------------------------------------
+Switching versions of yt: yt-2.x, yt-3.x, stable, and dev
+---------------------------------------------------------
 
 With the release of version 3.0 of yt, development of the legacy yt 2.x series
 has been relegated to bugfixes.  That said, we will continue supporting the 2.x
@@ -356,12 +384,8 @@
   hg update <desired-version>
   python setup.py develop
 
-Valid versions to jump to are:
+Valid versions to jump to are described in :ref:`branches-of-yt`.
 
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
-    
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.
 
@@ -387,11 +411,7 @@
   hg update <desired-version>
   python setup.py install --user --prefix=
 
-Valid versions to jump to are:
-
-* ``yt`` -- The latest *dev* changes in yt-3.x (can be unstable)
-* ``stable`` -- The latest stable release of yt-3.x
-* ``yt-2.x`` -- The latest stable release of yt-2.x
+Valid versions to jump to are described in :ref:`branches-of-yt`.
     
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -227,8 +227,6 @@
    ~yt.frontends.chombo.data_structures.Orion2Hierarchy
    ~yt.frontends.chombo.data_structures.Orion2Dataset
    ~yt.frontends.chombo.io.IOHandlerChomboHDF5
-   ~yt.frontends.chombo.io.IOHandlerChombo2DHDF5
-   ~yt.frontends.chombo.io.IOHandlerChombo1DHDF5
    ~yt.frontends.chombo.io.IOHandlerOrion2HDF5
 
 Enzo
@@ -623,6 +621,18 @@
    ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
    ~yt.visualization.volume_rendering.camera.StereoPairCamera
 
+Additional sources can be added to a scene:
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.volume_rendering.api.VolumeSource
+   ~yt.visualization.volume_rendering.api.PointSource
+   ~yt.visualization.volume_rendering.api.LineSource
+   ~yt.visualization.volume_rendering.api.BoxSource
+   ~yt.visualization.volume_rendering.api.GridSource
+   ~yt.visualization.volume_rendering.api.CoordinateVectorSource
+
 Streamlining
 ^^^^^^^^^^^^
 

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5a1547973517987ff047f1b2405277a0e98392e8fd5ffe04521cb2dc372d32d3"
+  "signature": "sha256:ed09405c56bab51abd351d107a4354726709d289b965f274106f4451b387f5ba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -25,6 +25,8 @@
       "import numpy as np\n",
       "from IPython.core.display import Image\n",
       "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
+      "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
+      "from yt.visualization.volume_rendering.camera import Camera\n",
       "\n",
       "def showme(im):\n",
       "    # screen out NaNs\n",
@@ -66,7 +68,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tfh = yt.TransferFunctionHelper(ds)"
+      "tfh = TransferFunctionHelper(ds)"
      ],
      "language": "python",
      "metadata": {},
@@ -84,7 +86,7 @@
      "collapsed": false,
      "input": [
       "# Build a transfer function that is a multivariate gaussian in temperature\n",
-      "tfh = yt.TransferFunctionHelper(ds)\n",
+      "tfh = TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_log(True)\n",
       "tfh.set_bounds()\n",
@@ -124,7 +126,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "tfh = yt.TransferFunctionHelper(ds)\n",
+      "tfh = TransferFunctionHelper(ds)\n",
       "tfh.set_field('temperature')\n",
       "tfh.set_bounds()\n",
       "tfh.set_log(True)\n",
@@ -143,27 +145,20 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Finally, let's take a look at the volume rendering."
+      "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "L = [-0.1, -1.0, -0.1]\n",
-      "c = ds.domain_center\n",
-      "W = 1.5*ds.domain_width\n",
-      "Npixels = 512 \n",
-      "cam = ds.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
-      "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False)\n",
+      "im, sc = yt.volume_render(ds, ['temperature'])\n",
       "\n",
-      "# Here we substitute the TransferFunction we constructed earlier.\n",
-      "cam.transfer_function = tfh.tf\n",
+      "source = sc.get_source(0)\n",
+      "source.set_transfer_function(tfh.tf)\n",
+      "im2 = sc.render()\n",
       "\n",
-      "\n",
-      "im = cam.snapshot()\n",
-      "showme(im[:,:,:3])"
+      "showme(im2[:,:,:3])"
      ],
      "language": "python",
      "metadata": {},

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/visualizing/_images/scene_diagram.svg
--- /dev/null
+++ b/doc/source/visualizing/_images/scene_diagram.svg
@@ -0,0 +1,512 @@
+<?xml version="1.0" standalone="yes"?>
+
+<svg version="1.1" viewBox="0.0 0.0 710.0 462.0" fill="none" stroke="none"
+stroke-linecap="square" stroke-miterlimit="10"
+xmlns="http://www.w3.org/2000/svg"
+xmlns:xlink="http://www.w3.org/1999/xlink"><clipPath id="p.0"><path d="m0
+0l710.0 0l0 462.0l-710.0 0l0 -462.0z" clip-rule="nonzero"></path></clipPath><g
+clip-path="url(#p.0)"><path fill="#000000" fill-opacity="0.0" d="m0 0l710.4252
+0l0 462.4462l-710.4252 0z" fill-rule="nonzero"></path><path fill="#000000"
+fill-opacity="0.0" d="m50.425198 121.13386l489.73227 0l0 289.6063l-489.73227
+0z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m540.1575 121.13386l96.53546 -96.53543l0 289.6063l-96.53546 96.53543z"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m50.425198 121.13386l96.53543 -96.53543l489.7323 0l-96.53546 96.53543z"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m50.425198 121.13386l96.53543 -96.53543l489.7323 0l0 289.6063l-96.53546
+96.53543l-489.73227 0zm0 0l489.73227 0l96.53546 -96.53543m-96.53546 96.53543l0
+289.6063" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.2"
+d="m540.1575 121.13386l96.53546 -96.53543l0 289.6063l-96.53546 96.53543z"
+fill-rule="nonzero"></path><path fill="#ffffff" fill-opacity="0.2"
+d="m50.425198 121.13386l96.53543 -96.53543l489.7323 0l-96.53546 96.53543z"
+fill-rule="nonzero"></path><path stroke="#000000" stroke-width="2.0"
+stroke-linejoin="round" stroke-linecap="butt" d="m50.425198 121.13386l96.53543
+-96.53543l489.7323 0l0 289.6063l-96.53546 96.53543l-489.73227 0zm0 0l489.73227
+0l96.53546 -96.53543m-96.53546 96.53543l0 289.6063"
+fill-rule="nonzero"></path><path fill="#cccccc" d="m399.52493 268.74014l0 0c0
+-6.521576 14.789307 -11.80838 33.032806 -11.80838c18.24353 0 33.032806 5.286804
+33.032806 11.80838l0 47.233612c0 6.5216064 -14.789276 11.808411 -33.032806
+11.808411c-18.2435 0 -33.032806 -5.286804 -33.032806 -11.808411z"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m465.59055 268.74014l0 0c0 6.5216064 -14.789276 11.808411 -33.032806
+11.808411c-18.2435 0 -33.032806 -5.286804 -33.032806 -11.808411"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m399.52493 268.74014l0 0c0 -6.521576 14.789307 -11.80838 33.032806
+-11.80838c18.24353 0 33.032806 5.286804 33.032806 11.80838l0 47.233612c0
+6.5216064 -14.789276 11.808411 -33.032806 11.808411c-18.2435 0 -33.032806
+-5.286804 -33.032806 -11.808411z" fill-rule="nonzero"></path><path
+stroke="#000000" stroke-width="2.0" stroke-linejoin="round"
+stroke-linecap="butt" d="m465.59055 268.74014l0 0c0 6.5216064 -14.789276
+11.808411 -33.032806 11.808411c-18.2435 0 -33.032806 -5.286804 -33.032806
+-11.808411" fill-rule="nonzero"></path><path stroke="#000000"
+stroke-width="2.0" stroke-linejoin="round" stroke-linecap="butt" d="m399.52493
+268.74014l0 0c0 -6.521576 14.789307 -11.80838 33.032806 -11.80838c18.24353 0
+33.032806 5.286804 33.032806 11.80838l0 47.233612c0 6.5216064 -14.789276
+11.808411 -33.032806 11.808411c-18.2435 0 -33.032806 -5.286804 -33.032806
+-11.808411z" fill-rule="nonzero"></path><path fill="#cccccc" d="m333.45938
+255.11298l20.896912 1.2207031E-4l6.457367 -18.286331l6.457367
+18.286331l20.896881 -1.2207031E-4l-16.906006 11.301453l6.457611
+18.286224l-16.905853 -11.301636l-16.905884 11.301636l6.4576416 -18.286224z"
+fill-rule="nonzero"></path><path stroke="#000000" stroke-width="2.0"
+stroke-linejoin="round" stroke-linecap="butt" d="m333.45938 255.11298l20.896912
+1.2207031E-4l6.457367 -18.286331l6.457367 18.286331l20.896881
+-1.2207031E-4l-16.906006 11.301453l6.457611 18.286224l-16.905853
+-11.301636l-16.905884 11.301636l6.4576416 -18.286224z"
+fill-rule="nonzero"></path><path fill="#cccccc" d="m408.14435
+214.56168l38.53543 0l0 22.251968l-38.53543 0z" fill-rule="nonzero"></path><path
+fill="#a3a3a3" d="m446.67978 214.56168l7.417328 -7.4173126l0
+22.251968l-7.417328 7.4173126z" fill-rule="nonzero"></path><path fill="#d6d6d6"
+d="m408.14435 214.56168l7.417328 -7.4173126l38.53543 0l-7.417328 7.4173126z"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m408.14435 214.56168l7.417328 -7.4173126l38.53543 0l0 22.251968l-7.417328
+7.4173126l-38.53543 0zm0 0l38.53543 0l7.417328 -7.4173126m-7.417328 7.4173126l0
+22.251968" fill-rule="nonzero"></path><path stroke="#000000" stroke-width="2.0"
+stroke-linejoin="round" stroke-linecap="butt" d="m408.14435 214.56168l7.417328
+-7.4173126l38.53543 0l0 22.251968l-7.417328 7.4173126l-38.53543 0zm0 0l38.53543
+0l7.417328 -7.4173126m-7.417328 7.4173126l0 22.251968"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m329.62204 327.78217l135.9685 0l0 54.582672l-135.9685 0z"
+fill-rule="nonzero"></path><path fill="#000000" d="m354.4149 362.55652q-0.15625
+0.0625 -0.375 0.0625q-0.234375 0 -0.5 -0.140625q-2.796875 -3.015625 -3.734375
+-7.765625q-0.328125 -1.640625 -0.328125 -3.40625q0 -3.375 1.203125
+-6.4375q1.046875 -2.703125 2.859375 -4.671875q0.125 -0.109375 0.515625
+-0.109375q0.375 0 0.640625 0.25q0.265625 0.25 0.265625 0.5625q-2.4375 4.171875
+-2.859375 7.734375q-0.140625 1.25 -0.140625 2.625q-0.015625 0.0625 -0.015625
+0.109375q0 1.34375 0.15625 2.5625q0.140625 1.25 0.5 2.515625q0.6875 2.453125
+2.359375 5.328125q0 0.515625 -0.546875 0.78125zm5.5 -4.3125l1.34375 0q1.625 0
+2.1875 -0.984375q0.171875 -0.3125 0.171875 -0.671875q0 -0.359375 -0.171875
+-0.625q-0.171875 -0.265625 -0.53125 -0.484375q-0.5625 -0.34375 -1.84375
+-0.734375q-1.28125 -0.390625 -1.953125 -0.671875q-0.65625 -0.296875 -1.15625
+-0.703125q-1.0 -0.84375 -1.0 -2.3125q0 -2.171875 2.421875 -2.921875q0.90625
+-0.28125 1.796875 -0.28125q0.890625 0 1.640625 0.125q0.765625 0.109375 1.375
+0.390625q1.390625 0.640625 1.390625 1.84375l0 0.0625q0 0.65625 -0.453125
+1.109375q-0.4375 0.4375 -1.140625 0.453125q-1.109375 0 -1.5 -0.953125q-0.125
+-0.296875 -0.125 -0.484375q0 -0.1875 0.140625 -0.890625l-1.15625 0q-0.953125 0
+-1.40625 0.328125q-0.828125 0.578125 -0.828125 1.265625q0 0.90625 1.640625
+1.46875l0.890625 0.3125q2.25 0.75 2.890625 1.28125q0.65625 0.515625 0.9375
+1.140625q0.296875 0.625 0.296875 1.421875q0 0.8125 -0.3125 1.390625q-0.296875
+0.59375 -0.859375 1.015625q-1.21875 0.921875 -3.140625 0.921875q-1.71875 0
+-3.078125 -0.515625q-1.0 -0.375 -1.421875 -1.078125q-0.21875 -0.359375 -0.21875
+-0.765625q0 -0.390625 0.125 -0.6875q0.140625 -0.3125 0.34375 -0.515625q0.453125
+-0.453125 1.15625 -0.453125q0.671875 0 1.140625 0.46875q0.46875 0.4375 0.46875
+1.0l0 0.015625q-0.015625 0.578125 -0.0625 0.71875zm12.8515625 1.8125q-1.25 0
+-2.203125 -0.46875q-0.953125 -0.46875 -1.609375 -1.3125q-1.3125 -1.703125
+-1.3125 -4.453125q0 -2.890625 1.78125 -4.5q1.625 -1.46875 4.078125
+-1.46875l0.03125 0q2.296875 0 3.703125 1.75q1.328125 1.65625 1.328125 4.3125q0
+2.953125 -1.640625 4.59375q-1.546875 1.546875 -4.15625 1.546875zm-1.703125
+-9.578125q-0.4375 0.46875 -0.734375 1.28125q-0.28125 0.8125 -0.28125 2.0q0
+1.203125 0.21875 2.03125q0.234375 0.8125 0.640625 1.359375q0.796875 1.09375
+2.234375 1.09375q1.46875 0 2.28125 -1.28125q0.734375 -1.171875 0.734375
+-3.078125q0 -3.171875 -1.859375 -3.984375q-0.5625 -0.234375 -1.140625
+-0.234375q-0.578125 0 -1.109375 0.171875q-0.53125 0.171875 -0.984375
+0.640625zm18.230469 8.21875q-1.875 1.359375 -3.375 1.359375q-2.953125 0
+-3.796875 -2.375q-0.28125 -0.78125 -0.28125 -1.828125l0 -5.953125l-1.640625
+0q-0.4375 0 -0.4375 -0.890625q0 -0.59375 0.234375 -0.65625q1.96875 -0.5 3.59375
+-0.5q0.515625 0 0.546875 0.9375l0 6.21875q0 2.28125 1.171875 2.71875q0.34375
+0.140625 0.875 0.140625q0.515625 0 0.984375 -0.109375q0.484375 -0.125 0.859375
+-0.28125q0.34375 -0.171875 0.625 -0.375l0.5 -0.375l0 -6.828125l-1.59375
+0q-0.484375 0 -0.484375 -0.875q0 -0.546875 0.3125 -0.671875q0.3125 -0.125 0.75
+-0.21875q0.453125 -0.078125 0.921875 -0.15625q0.921875 -0.109375 1.46875
+-0.125l0.03125 0q0.515625 0 0.625 0.125q0.140625 0.125 0.1875 0.328125q0.078125
+0.296875 0.078125 1.03125l0 8.828125l1.96875 0q0.3125 0 0.3125 0.765625q0
+0.8125 -0.484375 0.890625q-1.1875 0.1875 -2.125 0.1875q-0.9375 0 -1.109375
+-0.09375q-0.15625 -0.09375 -0.28125 -0.265625q-0.1875 -0.25 -0.4375
+-0.953125zm9.6328125 -9.96875l0 0.46875q0 0.140625 -0.015625 0.28125q0 0.140625
+0 0.25q1.375 -1.25 2.75 -1.71875q0.453125 -0.140625 0.921875 -0.15625l0.03125
+0q0.453125 0 0.8125 0.171875q0.375 0.1875 0.609375 0.453125q0.390625 0.46875
+0.390625 0.96875q0 0.515625 -0.125 0.8125q-0.125 0.296875 -0.359375 0.5q-0.5
+0.453125 -0.984375 0.453125q-0.484375 0 -0.75 -0.125q-0.25 -0.140625 -0.453125
+-0.34375q-0.421875 -0.453125 -0.421875 -1.0q-0.671875 0.3125 -1.03125
+0.71875q-0.34375 0.421875 -0.578125 0.765625l-0.515625 0.796875l0
+6.140625l2.84375 0q0.265625 0 0.265625 0.71875q0 0.578125 -0.171875
+0.765625q-0.171875 0.171875 -0.3125 0.171875l-6.8125 0q-0.328125 0 -0.328125
+-0.734375q0.015625 -0.546875 0.3125 -0.84375q0.09375 -0.078125 0.40625
+-0.078125q0.328125 0 0.65625 -0.03125q0.328125 -0.03125 0.515625
+-0.078125q0.328125 -0.109375 0.328125 -0.390625l0 -7.71875l-2.0625 0q-0.453125
+0 -0.453125 -0.921875q0 -0.578125 0.296875 -0.703125q0.296875 -0.140625 0.78125
+-0.21875q0.5 -0.09375 1.015625 -0.140625q0.984375 -0.09375 1.546875
+-0.109375l0.09375 0q0.484375 0 0.578125 0.0625q0.125 0.046875 0.171875
+0.15625q0.046875 0.125 0.046875 0.515625l0 0.140625zm13.277344
+0.78125l-1.078125 0q-1.21875 0 -2.109375 0.921875q-1.125 1.1875 -1.125
+3.40625q0 3.265625 1.828125 4.125q0.5625 0.28125 1.171875 0.28125q0.59375 0
+1.0625 -0.125q0.484375 -0.125 0.953125 -0.34375q0.46875 -0.1875 0.890625
+-0.453125l0.828125 -0.5q0.046875 -0.03125 0.15625 -0.03125q0.109375 0 0.265625
+0.140625q0.171875 0.125 0.3125 0.3125q0.3125 0.4375 0.3125 0.734375q0 0.296875
+-0.140625 0.40625q-2.234375 1.671875 -4.875 1.671875q-2.4375 0 -3.828125
+-1.703125q-1.328125 -1.640625 -1.328125 -4.515625q0 -2.8125 1.6875
+-4.453125q1.59375 -1.515625 4.078125 -1.53125q2.640625 0.015625 3.65625
+1.15625q0.390625 0.453125 0.390625 0.953125q0 0.5 -0.109375 0.796875q-0.140625
+0.296875 -0.359375 0.5q-0.46875 0.46875 -1.140625 0.46875q-0.65625 0 -1.125
+-0.46875q-0.46875 -0.4375 -0.46875 -0.890625q0 -0.4375 0.09375
+-0.859375zm5.046875 4.28125q0 -1.453125 0.484375 -2.578125q0.46875 -1.09375
+1.265625 -1.84375q1.609375 -1.5 3.984375 -1.515625l0.046875 0q2.171875 0
+3.359375 1.375q1.1875 1.359375 1.1875 3.921875q0 0.546875 -0.515625
+0.953125q-0.484375 0.390625 -1.234375 0.390625l-6.171875 0q0.203125 2.734375
+1.90625 3.515625q0.53125 0.234375 1.140625 0.234375q0.59375 0 1.0625
+-0.125q0.46875 -0.140625 0.890625 -0.34375q0.4375 -0.203125 0.859375
+-0.46875q0.84375 -0.53125 1.0 -0.53125q0.140625 0.015625 0.3125
+0.140625q0.140625 0.140625 0.28125 0.328125q0.296875 0.4375 0.3125 0.75l0
+0.015625q0 0.28125 -0.125 0.375q-2.15625 1.671875 -4.875 1.671875q-2.453125 0
+-3.828125 -1.71875q-1.34375 -1.65625 -1.34375 -4.546875zm7.921875 -1.0625l0
+-0.046875q0 -2.3125 -1.34375 -2.859375q-0.421875 -0.15625 -0.9375
+-0.15625q-0.515625 0 -1.078125 0.1875q-0.5625 0.1875 -1.03125 0.5625q-1.046875
+0.859375 -1.125 2.3125l5.515625 0zm7.3867188 5.515625l1.34375 0q1.625 0 2.1875
+-0.984375q0.171875 -0.3125 0.171875 -0.671875q0 -0.359375 -0.171875
+-0.625q-0.171875 -0.265625 -0.53125 -0.484375q-0.5625 -0.34375 -1.84375
+-0.734375q-1.28125 -0.390625 -1.953125 -0.671875q-0.65625 -0.296875 -1.15625
+-0.703125q-1.0 -0.84375 -1.0 -2.3125q0 -2.171875 2.421875 -2.921875q0.90625
+-0.28125 1.796875 -0.28125q0.890625 0 1.640625 0.125q0.765625 0.109375 1.375
+0.390625q1.390625 0.640625 1.390625 1.84375l0 0.0625q0 0.65625 -0.453125
+1.109375q-0.4375 0.4375 -1.140625 0.453125q-1.109375 0 -1.5 -0.953125q-0.125
+-0.296875 -0.125 -0.484375q0 -0.1875 0.140625 -0.890625l-1.15625 0q-0.953125 0
+-1.40625 0.328125q-0.828125 0.578125 -0.828125 1.265625q0 0.90625 1.640625
+1.46875l0.890625 0.3125q2.25 0.75 2.890625 1.28125q0.65625 0.515625 0.9375
+1.140625q0.296875 0.625 0.296875 1.421875q0 0.8125 -0.3125 1.390625q-0.296875
+0.59375 -0.859375 1.015625q-1.21875 0.921875 -3.140625 0.921875q-1.71875 0
+-3.078125 -0.515625q-1.0 -0.375 -1.421875 -1.078125q-0.21875 -0.359375 -0.21875
+-0.765625q0 -0.390625 0.125 -0.6875q0.140625 -0.3125 0.34375 -0.515625q0.453125
+-0.453125 1.15625 -0.453125q0.671875 0 1.140625 0.46875q0.46875 0.4375 0.46875
+1.0l0 0.015625q-0.015625 0.578125 -0.0625 0.71875zm9.1015625 4.234375q-0.265625
+0.140625 -0.484375 0.140625q-0.234375 0 -0.390625 -0.0625q-0.140625 -0.0625
+-0.265625 -0.171875q-0.28125 -0.25 -0.28125 -0.609375q2.4375 -4.140625 2.859375
+-7.84375q0.140625 -1.265625 0.15625 -2.671875l0 -0.09375q0 -1.328125 -0.140625
+-2.53125q-0.15625 -1.25 -0.5 -2.484375q-0.6875 -2.390625 -2.375 -5.25q0 -0.3125
+0.265625 -0.5625q0.28125 -0.25 0.5625 -0.25q0.46875 0 0.59375 0.109375q2.828125
+3.046875 3.734375 7.6875q0.328125 1.625 0.328125 3.421875q0 5.0625 -2.5
+9.109375q-0.734375 1.1875 -1.5625 2.0625z" fill-rule="nonzero"></path><path
+fill="#000000" fill-opacity="0.0" d="m147.41995 24.598425l135.9685 0l0
+54.582672l-135.9685 0z" fill-rule="nonzero"></path><path fill="#000000"
+d="m187.11708 54.654053q0.75 0.40625 1.875 0.40625q1.125 0 1.8125
+-0.203125q0.671875 -0.1875 1.15625 -0.5625q1.03125 -0.78125 1.046875
+-2.078125l0 -0.015625q0 -1.46875 -2.40625 -2.859375q-0.671875 -0.390625
+-1.46875 -0.78125l-1.6875 -0.859375q-3.1875 -1.6875 -3.1875 -4.5q0 -2.171875
+1.75 -3.375q1.578125 -1.0625 4.0 -1.0625l0.109375 0q1.796875 0 3.34375
+0.8125q1.859375 0.953125 1.859375 2.515625q0 0.75 -0.515625 1.296875q-0.515625
+0.515625 -1.28125 0.515625q-0.765625 0 -1.28125 -0.515625q-0.53125 -0.53125
+-0.53125 -1.296875q0 -0.625 0.34375 -1.09375q-0.6875 -0.421875 -1.65625
+-0.421875q-0.96875 0 -1.59375 0.171875q-0.640625 0.15625 -1.109375
+0.484375q-0.953125 0.671875 -0.96875 1.921875l0 0.015625q0 0.921875 1.015625
+1.65625q0.5 0.375 1.21875 0.75l1.578125 0.8125q2.59375 1.296875 3.640625
+2.390625q1.296875 1.34375 1.296875 3.234375q0 2.15625 -1.6875 3.515625q-1.6875
+1.34375 -4.28125 1.34375q-1.984375 0 -3.6875 -0.828125q-1.984375 -0.984375
+-1.984375 -2.453125q0 -0.78125 0.53125 -1.296875q0.53125 -0.53125 1.28125
+-0.53125q0.765625 0 1.28125 0.53125q0.515625 0.53125 0.515625 1.15625q0 0.625
+-0.328125 1.203125zm17.214844 -8.328125l-1.078125 0q-1.21875 0 -2.109375
+0.921875q-1.125 1.1875 -1.125 3.40625q0 3.265625 1.828125 4.125q0.5625 0.28125
+1.171875 0.28125q0.59375 0 1.0625 -0.125q0.484375 -0.125 0.953125
+-0.34375q0.46875 -0.1875 0.890625 -0.453125l0.828125 -0.5q0.046875 -0.03125
+0.15625 -0.03125q0.109375 0 0.265625 0.140625q0.171875 0.125 0.3125
+0.3125q0.3125 0.4375 0.3125 0.734375q0 0.296875 -0.140625 0.40625q-2.234375
+1.671875 -4.875 1.671875q-2.4375 0 -3.828125 -1.703125q-1.328125 -1.640625
+-1.328125 -4.515625q0 -2.8125 1.6875 -4.453125q1.59375 -1.515625 4.078125
+-1.53125q2.640625 0.015625 3.65625 1.15625q0.390625 0.453125 0.390625
+0.953125q0 0.5 -0.109375 0.796875q-0.140625 0.296875 -0.359375 0.5q-0.46875
+0.46875 -1.140625 0.46875q-0.65625 0 -1.125 -0.46875q-0.46875 -0.4375 -0.46875
+-0.890625q0 -0.4375 0.09375 -0.859375zm5.046875 4.28125q0 -1.453125 0.484375
+-2.578125q0.46875 -1.09375 1.265625 -1.84375q1.6093903 -1.5 3.9843903
+-1.515625l0.046875 0q2.171875 0 3.359375 1.375q1.1875 1.359375 1.1875
+3.921875q0 0.546875 -0.515625 0.953125q-0.484375 0.390625 -1.234375
+0.390625l-6.171875 0q0.203125 2.734375 1.90625 3.515625q0.53125 0.234375
+1.140625 0.234375q0.59375 0 1.0625 -0.125q0.46875 -0.140625 0.890625
+-0.34375q0.4375 -0.203125 0.859375 -0.46875q0.84375 -0.53125 1.0
+-0.53125q0.140625 0.015625 0.3125 0.140625q0.140625 0.140625 0.28125
+0.328125q0.296875 0.4375 0.3125 0.75l0 0.015625q0 0.28125 -0.125 0.375q-2.15625
+1.671875 -4.875 1.671875q-2.453125 0 -3.8281403 -1.71875q-1.34375 -1.65625
+-1.34375 -4.546875zm7.9218903 -1.0625l0 -0.046875q0 -2.3125 -1.34375
+-2.859375q-0.421875 -0.15625 -0.9375 -0.15625q-0.515625 0 -1.078125
+0.1875q-0.5625 0.1875 -1.03125 0.5625q-1.046875 0.859375 -1.125 2.3125l5.515625
+0zm12.386719 7.09375q-0.3125 0 -0.3125 -0.71875q0 -0.5625 0.171875 -0.75q0.1875
+-0.1875 0.3125 -0.1875q1.21875 0 1.21875 -0.5l0 -3.484375q0 -2.265625 -0.265625
+-2.96875q-0.25 -0.71875 -0.65625 -1.015625q-0.40625 -0.296875 -1.09375
+-0.296875q-1.296875 0 -3.203125 1.5l0 6.765625l1.96875 0q0.296875 0 0.296875
+0.71875q0 0.5625 -0.171875 0.75q-0.171875 0.1875 -0.3125 0.1875l-5.96875
+0q-0.3125 0 -0.3125 -0.71875q0 -0.5625 0.171875 -0.75q0.1875 -0.1875 0.3125
+-0.1875l0.125 0q1.34375 0 1.53125 -0.265625q0.0625 -0.09375 0.0625 -0.234375l0
+-7.765625l-2.0625 0q-0.359375 -0.015625 -0.453125 -0.578125q-0.015625 -0.171875
+-0.015625 -0.34375q0 -0.53125 0.296875 -0.65625q0.3125 -0.125 0.796875
+-0.21875q0.484375 -0.09375 1.015625 -0.140625q1.046875 -0.125 1.578125
+-0.125l0.046875 0q0.515625 0.015625 0.625 0.125q0.109375 0.109375 0.15625
+0.296875q0.0625 0.203125 0.0625 0.84375l0 0.21875q1.765625 -0.984375 3.21875
+-1.34375q0.46875 -0.125 1.203125 -0.125q0.703125 0 1.421875 0.3125q0.734375
+0.3125 1.15625 0.890625q0.75 1.03125 0.75 3.28125l0 5.828125l1.828125 0q0.3125
+0 0.3125 0.71875q0 0.5625 -0.1875 0.75q-0.171875 0.1875 -0.296875
+0.1875l-5.328125 0zm7.0078125 -6.03125q0 -1.453125 0.484375 -2.578125q0.46875
+-1.09375 1.265625 -1.84375q1.609375 -1.5 3.984375 -1.515625l0.046875 0q2.171875
+0 3.359375 1.375q1.1875 1.359375 1.1875 3.921875q0 0.546875 -0.515625
+0.953125q-0.484375 0.390625 -1.234375 0.390625l-6.171875 0q0.203125 2.734375
+1.90625 3.515625q0.53125 0.234375 1.140625 0.234375q0.59375 0 1.0625
+-0.125q0.46875 -0.140625 0.890625 -0.34375q0.4375 -0.203125 0.859375
+-0.46875q0.84375 -0.53125 1.0 -0.53125q0.140625 0.015625 0.3125
+0.140625q0.140625 0.140625 0.28125 0.328125q0.296875 0.4375 0.3125 0.75l0
+0.015625q0 0.28125 -0.125 0.375q-2.15625 1.671875 -4.875 1.671875q-2.453125 0
+-3.828125 -1.71875q-1.34375 -1.65625 -1.34375 -4.546875zm7.921875 -1.0625l0
+-0.046875q0 -2.3125 -1.34375 -2.859375q-0.421875 -0.15625 -0.9375
+-0.15625q-0.515625 0 -1.078125 0.1875q-0.5625 0.1875 -1.03125 0.5625q-1.046875
+0.859375 -1.125 2.3125l5.515625 0z" fill-rule="nonzero"></path><path
+fill="#000000" fill-opacity="0.0" d="m133.71216 260.5739l55.118057
+-122.19269l60.047882 27.08609l-12.547058 67.867874l-42.571 54.324814z"
+fill-rule="nonzero"></path><path stroke="#000000" stroke-width="2.0"
+stroke-linejoin="round" stroke-linecap="butt" d="m133.71216 260.5739l55.118057
+-122.19269l60.047882 27.08609l-12.547058 67.867874l-42.571 54.324814z"
+fill-rule="nonzero"></path><path fill="#000000" d="m171.84547
+238.87013q-0.6672516 0.4532318 -1.1298218 1.4787292q-0.4625702 1.0254974
+-0.43501282 2.066391q0.033996582 1.0266571 0.52685547 1.9860382q1.046875
+2.0491943 3.6611786 3.2455902q5.4123535 2.4413757 7.995346 0.40112305q0.8348236
+-0.63475037 1.2331543 -1.5178223q0.00642395 -0.01423645 -0.0078125
+-0.0206604q0.3983307 -0.8830719 0.5014343 -1.4536438q0.09527588 -0.59124756
+0.0821991 -1.0942383q-0.013092041 -0.5029907 -0.07897949 -0.9269562q-0.15101624
+-0.80519104 -0.08035278 -0.961853q0.07067871 -0.15667725 0.24887085
+-0.24771118q0.18461609 -0.105285645 0.41334534 -0.15637207q0.5130615
+-0.11138916 0.78367615 0.010681152q0.28486633 0.12849426 0.38008118
+0.41140747q0.080963135 0.27650452 0.16526794 0.811615q0.09072876 0.520874
+0.07725525 1.234726q-0.0713501 1.7162018 -0.7523651 3.2259674q-0.68743896
+1.5240021 -1.6971283 2.5084076q-1.023941 0.97798157 -2.4385529
+1.4540405q-3.0437317 1.0096588 -7.003296 -0.7763977q-4.1162415 -1.8567352
+-5.4350586 -4.9370728q-1.2121582 -2.8608093 0.22697449 -6.0512543l0.0128479
+-0.02848816q0.7581177 -1.6806793 2.199585 -2.7102814q1.5976105 -1.1477356
+2.9222107 -0.5502472q0.6836548 0.30838013 0.9701538 1.0032654q0.25801086
+0.68203735 -0.05680847 1.3799438q-0.31480408 0.6979065 -0.99684143
+0.95591736q-0.70269775 0.26582336 -1.3578796 -0.029708862q-0.64094543
+-0.28910828 -0.93052673 -0.71113586zm8.361542 -5.578766q0.32844543
+-0.0061187744 0.6845093 0.1545105q0.3560791 0.16061401 0.5753021
+0.3966217q0.19854736 0.24383545 0.31443787 0.51893616q0.21247864 0.5929413
+-0.05595398 1.226059q-0.44973755 0.9970093 -1.4791718 0.96118164q-0.32202148
+-0.008117676 -0.7208252 -0.1880188q-0.384552 -0.17346191 -0.6235962
+-0.555542q-0.23904419 -0.38208008 -0.32055664 -0.8473816q-0.0750885 -0.47953796
+-0.005508423 -1.013794q0.055343628 -0.5406952 0.18190002 -1.0492706q0.22880554
+-0.9252472 0.6785431 -1.9222565q0.4497223 -0.99702454 1.1049652
+-1.5756531q0.6474304 -0.59928894 1.3814087 -0.7824402q1.2819519 -0.2959442
+3.0195923 0.48786926l5.526306 2.4927673l0.8095093 -1.7946167q0.13491821
+-0.29910278 0.76161194 -0.016418457q0.28485107 0.12849426 0.5040741
+0.36450195q0.22564697 0.22177124 0.17982483 0.47535706q-0.09892273 0.6753082
+-0.6000366 1.7862701q-0.5075531 1.1251984 -0.6081085 1.2341156q-0.08630371
+0.11532593 -0.23098755 0.1700592q-0.28152466 0.13012695 -0.9269409
+0.07896423q0.3696289 1.7265778 -0.45915222 3.563919q-0.7452698 1.6521912
+-2.052353 2.1939087q-1.2101898 0.51686096 -2.5205536 -0.07420349l-0.02848816
+-0.0128479q-1.6379395 -0.7388458 -2.0034027 -2.3606873q-0.33642578 -1.4201813
+0.37670898 -3.0011597l1.1885681 -2.634964q-1.7946167 -0.8095093 -2.4422913
+-0.7416992q-0.64123535 0.05357361 -1.0269623 0.37667847q-0.38571167 0.32310486
+-0.6940918 1.0067749l-0.48828125 1.0824585zm8.212418 3.0359192q0.6039276
+-1.3388367 0.11392212 -3.1025696l-2.079483 -0.93800354l-1.047226
+2.3216248q-0.3854828 0.8545685 -0.28694153 1.3961182q0.17866516 0.9719238
+0.8623352 1.280304q1.2533875 0.5653839 2.0563965 -0.34083557q0.22680664
+-0.27479553 0.3809967 -0.6166382zm11.960602 -26.70578q0.13491821 -0.29910278
+0.889801 0.041412354q0.7548828 0.34049988 0.57499695 0.73931885l-2.2100983
+4.899597q-0.12849426 0.28486633 -0.78367615 -0.010681152q-0.5269928 -0.23770142
+-0.66770935 -0.60972595q-0.039093018 -0.10333252 -5.493164E-4
+-0.188797l0.0128479 -0.02848816q0.50112915 -1.1109467 0.04534912
+-1.3165436l-3.1761932 -1.4327087q-2.0652466 -0.9315796 -2.8153992
+-0.97854614q-0.75798035 -0.06765747 -1.1956329 0.18060303q-0.43766785
+0.24824524 -0.7203522 0.87493896q-0.5332489 1.1821747 0.04385376
+3.5508423l6.1672363 2.7818756l0.75167847 -1.6664276q0.12849426 -0.28486633
+0.8833771 0.055648804q0.7548828 0.34049988 0.5557251 0.7820282l-2.1908264
+4.856888q-0.12849426 0.28486633 -0.755188 0.002166748l-0.04272461
+-0.01927185q-0.49850464 -0.22485352 -0.64704895 -0.61753845q-0.039093018
+-0.10334778 -5.493164E-4 -0.188797l0.00642395 -0.014251709q0.50112915
+-1.1109619 0.04534912 -1.3165436l-3.1761932 -1.4327087q-2.0652466 -0.9315796
+-2.8153992 -0.97854614q-0.75798035 -0.06765747 -1.1956329
+0.18060303q-0.43766785 0.24824524 -0.7203522 0.87493896q-0.5332489 1.1821747
+0.05027771 3.5365906l6.1672363 2.7818909l0.8095093 -1.794632q0.12207031
+-0.27061462 0.74876404 0.012069702q0.5269928 0.23771667 0.63505554
+0.49215698q0.100234985 0.23376465 0.048843384 0.34770203l-2.4477997
+5.426605q-0.13491821 0.29910278 -0.7473755 0.022842407q-0.55548096 -0.25056458
+-0.65571594 -0.48432922q-0.093826294 -0.24801636 -0.042434692
+-0.36195374l0.05140686 -0.11395264q0.55252075 -1.2248993 0.3874817
+-1.5050354q-0.059753418 -0.09552002 -0.1879425 -0.15333557l-7.0787964
+-3.1930695l-0.848053 1.8800812q-0.14915466 0.29267883 -0.7417908
+0.16249084q-0.14886475 -0.05001831 -0.27703857 -0.10783386q-0.4985199
+-0.22486877 -0.49038696 -0.546875q0.014541626 -0.33625793 0.12825012
+-0.8163452q0.113708496 -0.48008728 0.2894287 -0.98361206q0.3164978 -1.0056915
+0.5349426 -1.4899445l0.01927185 -0.04273987q0.22625732 -0.46359253 0.3709259
+-0.5183258q0.14468384 -0.054718018 0.33486938 -0.020355225q0.2108612
+0.026550293 0.7948303 0.28996277l0.19940186 0.08995056q-0.17132568 -2.0142212
+0.09857178 -3.4866028q0.07879639 -0.47868347 0.20729065 -0.76353455q0.8930359
+-1.9797821 2.7947083 -2.1676025q-0.2232666 -2.0890656 0.07040405
+-3.8421173q0.08381653 -0.5278473 0.38578796 -1.1972656q0.30195618 -0.66941833
+0.88235474 -1.196106q0.58680725 -0.54093933 1.2872772 -0.6877899q1.2484283
+-0.2596283 3.2994232 0.66552734l5.3126526 2.396408l0.7452545
+-1.6522064zm-3.3632507 -3.1840363q-1.3246002 -0.59750366 -2.1509247
+-1.5016022q-0.8042755 -0.8770294 -1.1602936 -1.9118042q-0.70558167 -2.0838013
+0.25672913 -4.2551727l0.01927185 -0.04272461q0.8930359 -1.9797821 2.6346893
+-2.4968872q1.727417 -0.5235138 4.063278 0.5301361q0.49850464 0.22485352
+0.6568146 0.8619232q0.15690613 0.60214233 -0.151474 1.2858124l-2.5377502
+5.626007q2.5760498 0.93914795 3.9884949 -0.292099q0.43208313 -0.38789368
+0.6826477 -0.94337463q0.24414062 -0.5412445 0.322937 -1.019928q0.06454468
+-0.48510742 0.052856445 -0.9532013q-0.005264282 -0.48231506 -0.07392883
+-0.97610474q-0.1373291 -0.9875641 -0.0730896 -1.1299896q0.07206726 -0.12176514
+0.25668335 -0.22703552q0.18600464 -0.070373535 0.41474915 -0.12145996q0.520874
+-0.09072876 0.8121643 0.023529053l0.01423645 0.00642395q0.25637817 0.11564636
+0.2904358 0.26812744q0.6374054 2.6529846 -0.48049927 5.1312714q-1.008667
+2.2361603 -3.1407776 2.782837q-2.0622864 0.5438843 -4.6972504
+-0.64468384zm2.2887878 -7.6580963l-0.04272461 -0.01927185q-2.1079712 -0.9508667
+-3.1589966 0.049179077q-0.3159027 0.3203125 -0.5279083 0.790329q-0.21202087
+0.47003174 -0.2723999 1.0598755q-0.06036377 0.58984375 0.08872986
+1.1713257q0.35290527 1.3076477 1.6453857 1.9763489l2.2679138
+-5.0277863zm-0.24273682 -9.189972l0.42729187 0.19273376q0.12817383 0.05783081
+0.24993896 0.12989807q0.12818909 0.05781555 0.22789001 0.10279846q-0.57406616
+-1.7673645 -0.43598938 -3.213501q0.058135986 -0.47087097 0.2366333
+-0.9045868l0.0128479 -0.02848816q0.18630981 -0.41304016 0.49075317
+-0.6699524q0.32510376 -0.26474 0.66360474 -0.36917114q0.5879059 -0.16333008
+1.0436859 0.042251587q0.47003174 0.21202087 0.6892395 0.44802856q0.21922302
+0.23602295 0.30801392 0.53318787q0.2074585 0.64208984 0.008300781
+1.0836182q-0.19917297 0.44154358 -0.42233276 0.63227844q-0.23098755 0.1700592
+-0.4996643 0.271698q-0.58651733 0.19824219 -1.085022 -0.026611328q0.008605957
+0.7409363 0.2311554 1.2355652q0.24320984 0.4868164 0.46018982
+0.84181213l0.51438904 0.79766846l5.5975037 2.5249023l1.1692963
+-2.5922241q0.10922241 -0.24214172 0.7644043 0.053390503q0.5269928 0.23771667
+0.6272278 0.47148132q0.085998535 0.22735596 0.028182983 0.35554504l-2.8011627
+6.209961q-0.13491821 0.29910278 -0.80433655 -0.0028533936q-0.4920807
+-0.23910522 -0.640625 -0.6318054q-0.032669067 -0.11756897 0.095825195
+-0.4024353q0.13491821 -0.29910278 0.24134827 -0.61105347q0.10643005 -0.31195068
+0.14079285 -0.5021515q0.035217285 -0.34407043 -0.22116089 -0.4597168l-7.0360565
+-3.1737976l-0.848053 1.8800812q-0.18632507 0.41305542 -1.0266571
+0.033996582q-0.5269928 -0.23771667 -0.5188751 -0.5597229q-0.0061035156
+-0.32844543 0.12184143 -0.8020935q0.12013245 -0.494339 0.28941345
+-0.9836273q0.31929016 -0.93585205 0.53634644 -1.4550323l0.0385437
+-0.08546448q0.19917297 -0.44152832 0.294693 -0.50128174q0.09411621 -0.094680786
+0.21308899 -0.092437744q0.13322449 0.008666992 0.4893036 0.16929626l0.12818909
+0.05781555zm4.841614 -8.833481q0.32843018 -0.0061187744 0.6845093
+0.15449524q0.3560791 0.16062927 0.5753021 0.39663696q0.19854736 0.24383545
+0.3144226 0.51893616q0.2124939 0.5929413 -0.05593872 1.226059q-0.44973755
+0.9970093 -1.4791718 0.96118164q-0.32202148 -0.008117676 -0.7208252
+-0.1880188q-0.38456726 -0.17346191 -0.6235962 -0.555542q-0.23904419 -0.38208008
+-0.32055664 -0.8473816q-0.0750885 -0.47953796 -0.005508423
+-1.0138092q0.055343628 -0.54067993 0.18190002 -1.0492554q0.22880554 -0.9252472
+0.67852783 -1.9222717q0.44973755 -0.9970093 1.1049805 -1.5756378q0.6474304
+-0.59928894 1.3814087 -0.7824402q1.2819366 -0.2959442 3.0195923
+0.48786926l5.526291 2.4927673l0.8095093 -1.7946167q0.13491821 -0.29910278
+0.76161194 -0.016418457q0.28486633 0.12849426 0.50408936 0.36450195q0.22564697
+0.22177124 0.17982483 0.47535706q-0.09892273 0.6753082 -0.6000519
+1.7862701q-0.5075531 1.1251984 -0.60809326 1.2341156q-0.08631897 0.11532593
+-0.23098755 0.1700592q-0.28152466 0.13012695 -0.9269562 0.07896423q0.36964417
+1.7265778 -0.45913696 3.563919q-0.7452698 1.6521912 -2.0523682
+2.1939087q-1.2101746 0.51686096 -2.5205383 -0.07421875l-0.02848816
+-0.0128479q-1.6379547 -0.73883057 -2.0034027 -2.360672q-0.33642578 -1.4201965
+0.37670898 -3.0011597l1.1885681 -2.634964q-1.794632 -0.8095093 -2.4422913
+-0.7416992q-0.6412506 0.05357361 -1.0269623 0.37667847q-0.38571167 0.32310486
+-0.69410706 1.0067596l-0.488266 1.0824738zm8.212418 3.0359192q0.60391235
+-1.3388519 0.11390686 -3.1025696l-2.079483 -0.93800354l-1.047226
+2.3216095q-0.38546753 0.85458374 -0.28692627 1.3961334q0.17866516 0.9719238
+0.8623352 1.280304q1.2533875 0.56536865 2.0563965 -0.34083557q0.22680664
+-0.2748108 0.3809967 -0.6166382z" fill-rule="nonzero"></path><path
+fill="#f3f3f3" d="m276.30206 161.02286l24.433777 11.021454l0 0c13.494385
+6.0869904 9.273712 44.63011 -9.427124 86.0885c-18.700836 41.458374 -44.800217
+70.13254 -58.294617 64.04556l-24.433746 -11.021454z"
+fill-rule="nonzero"></path><path stroke="#000000" stroke-width="2.0"
+stroke-linejoin="round" stroke-linecap="butt" d="m276.30206 161.02286l24.433777
+11.021454l0 0c13.494385 6.0869904 9.273712 44.63011 -9.427124
+86.0885c-18.700836 41.458374 -44.800217 70.13254 -58.294617 64.04556l-24.433746
+-11.021454z" fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m213.59521 297.66257l55.910248 -123.94888l49.75757 22.444397l-55.910248
+123.94888z" fill-rule="nonzero"></path><path fill="#000000" d="m247.30704
+257.94562q0.12207031 -0.27062988 0.8769531 0.069885254q0.7548828 0.34051514
+0.5878296 0.7108154l-0.8673248 1.922821l12.918442 5.827179l2.2614746
+-5.01355l-1.2147522 -1.5249939q-0.16226196 -0.21029663 -0.02734375
+-0.5093994q0.12850952 -0.2848816 0.24188232 -0.42227173q0.10559082 -0.15808105
+0.22039795 -0.26055908q0.26309204 -0.24127197 0.4482727 -0.15774536q0.0569458
+0.0256958 0.06478882 0.0463562l2.484314 2.3204956l-4.6514587
+10.311951q-0.1156311 0.25637817 -0.7423401 -0.026306152l-0.04272461
+-0.019256592q-0.47003174 -0.21203613 -0.6342163 -0.6460571q-0.046905518
+-0.12399292 0.0687561 -0.3803711l0.0128479 -0.028503418q0.0335083 -0.0362854
+0.0592041 -0.09326172q0.10922241 -0.24212646 0.19638062 -0.51135254q0.112854004
+-0.3262024 0.1472168 -0.51638794q0.035217285 -0.3440857 -0.22116089
+-0.4597168l-12.434158 -5.6087646q-0.1709137 -0.0770874 -0.23516846
+0.065338135l-0.7838135 1.7376709q-0.13491821 0.29910278 -0.77027893
+0.14962769q-0.14884949 -0.05001831 -0.27703857 -0.1078186q-0.03491211
+0.001373291 -0.06340027 -0.011474609q-0.09970093 -0.044952393 -0.1787262
+-0.0977478q-0.09327698 -0.05923462 -0.16589355 -0.12625122q-0.1750946
+-0.18182373 -0.1315155 -0.31643677l2.8525543 -6.3239136zm13.346802
+-1.3545227q-1.3246155 -0.59750366 -2.15094 -1.5016174q-0.80426025 -0.87701416
+-1.1602783 -1.9118042q-0.7055969 -2.0838013 0.25671387 -4.2551575l0.01928711
+-0.04273987q0.8930054 -1.9797821 2.634674 -2.496872q1.727417 -0.52352905
+4.0632935 0.53012085q0.49850464 0.22486877 0.6567993 0.8619232q0.15692139
+0.6021576 -0.15145874 1.2858124l-2.5377502 5.626007q2.5760498 0.9391632
+3.9884949 -0.292099q0.43206787 -0.38789368 0.6826477 -0.94337463q0.24414062
+-0.54122925 0.322937 -1.019928q0.06454468 -0.48510742 0.052856445
+-0.95318604q-0.005279541 -0.48233032 -0.07394409 -0.97610474q-0.1373291
+-0.9875641 -0.0730896 -1.1300049q0.07208252 -0.12176514 0.25668335
+-0.22703552q0.18600464 -0.07035828 0.4147644 -0.12145996q0.520874 -0.09072876
+0.8121338 0.023529053l0.014251709 0.00642395q0.25637817 0.11564636 0.2904358
+0.2681427q0.63739014 2.6529694 -0.48049927 5.131256q-1.008667 2.2361603
+-3.1407776 2.7828217q-2.0622864 0.5439148 -4.697235 -0.6446533zm2.2887878
+-7.6581116l-0.04272461 -0.01927185q-2.1079712 -0.95085144 -3.1589966
+0.049179077q-0.31591797 0.32032776 -0.5279236 0.79034424q-0.21200562 0.47001648
+-0.2723999 1.0598602q-0.06036377 0.58984375 0.08874512 1.1713257q0.35290527
+1.3076477 1.6453857 1.9763489l2.2679138 -5.0277863zm11.559509
+-8.374359q-0.12850952 0.28485107 -0.7836914 -0.010681152q-0.51275635
+-0.23129272 -0.6129761 -0.46505737q-0.09384155 -0.24801636 -0.04244995
+-0.36195374q0.50112915 -1.1109619 0.04534912 -1.3165436l-3.176178
+-1.4327087q-2.0652466 -0.9315796 -2.8153992 -0.9785614q-0.7579651 -0.06764221
+-1.1956482 0.18060303q-0.4376526 0.2482605 -0.7203369 0.8749542q-0.53323364
+1.1821747 0.05026245 3.5365906l6.1672363 2.7818756l0.8095093
+-1.7946167q0.12207031 -0.27061462 0.7772522 0.024917603q0.51275635 0.23129272
+0.6130066 0.46505737q0.100250244 0.23376465 0.042419434 0.36195374l-2.4542236
+5.4408417q-0.12850952 0.28486633 -0.7836914 -0.010665894q-0.51272583
+-0.23129272 -0.6129761 -0.46505737q-0.09384155 -0.24801636 -0.042419434
+-0.361969l0.0513916 -0.11393738q0.55252075 -1.2248993 0.3874817
+-1.5050354q-0.059753418 -0.09552002 -0.18795776 -0.15335083l-7.078766
+-3.1930542l-0.8480835 1.8800812q-0.1619873 0.321167 -0.71328735
+0.17532349q-0.16311646 -0.056427002 -0.31976318 -0.12709045q-0.48428345
+-0.21844482 -0.47616577 -0.54045105q0.014556885 -0.33625793 0.12826538
+-0.8163452q0.113708496 -0.48008728 0.2894287 -0.9836273q0.3164978 -1.0056763
+0.5349426 -1.4899445l0.019256592 -0.04272461q0.22625732 -0.46359253 0.37094116
+-0.5183258q0.14468384 -0.054718018 0.33486938 -0.020355225q0.21084595
+0.026550293 0.7948303 0.28996277l0.19940186 0.0899353q-0.17132568 -2.014206
+0.09857178 -3.4865875q0.07879639 -0.47868347 0.38076782 -1.1481018q0.28909302
+-0.64094543 0.86950684 -1.167633q0.5868225 -0.5409241 1.287262
+-0.68777466q1.2484436 -0.2596283 3.2994385 0.6655121l5.3126526
+2.396408l0.75167847 -1.6664276q0.12850952 -0.28486633 0.7836609
+0.010665894q0.51275635 0.23129272 0.6065979 0.47930908q0.10021973 0.23376465
+0.048828125 0.3477173l-2.190796 4.8568726zm2.721405 -9.871262l0.55252075
+-1.2248993q0.6681824 -1.4812775 0.002166748 -2.3987885q-0.21420288 -0.28515625
+-0.54177856 -0.43293762q-0.3276062 -0.14776611 -0.6404114
+-0.10031128q-0.31280518 0.047454834 -0.65997314 0.28511047q-0.5446167
+0.37139893 -1.4275208 1.3787079q-0.88290405 1.007309 -1.4155273
+1.5041199q-0.5404663 0.47613525 -1.1163635 0.7648773q-1.1803284 0.564621
+-2.519165 -0.03929138q-1.9797668 -0.8930359 -1.667633 -3.4090881q0.11627197
+-0.94174194 0.4824829 -1.7536011q0.36621094 -0.8118439 0.7885437
+-1.4441223q0.41448975 -0.65293884 0.92144775 -1.0927734q1.1557617 -1.0042114
+2.252472 -0.5095062l0.05697632 0.0256958q0.59820557 0.26983643 0.82492065
+0.86920166q0.2189331 0.5786896 -0.05593872 1.226059q-0.45617676 1.0112457
+-1.4855957 0.9754181q-0.32202148 -0.008117676 -0.49295044
+-0.08522034q-0.17089844 -0.0770874 -0.7540283 -0.49438477l-0.47543335
+1.0539703q-0.39187622 0.86883545 -0.27911377 1.4167938q0.18649292 0.9925995
+0.8132019 1.2752838q0.8260803 0.3726349 2.0134277 -0.89160156l0.651062
+-0.6833496q1.6088257 -1.7426147 2.3565063 -2.108139q0.73983765 -0.38619995
+1.4252014 -0.3855896q0.691803 -0.013626099 1.4181824 0.31402588q0.7406616
+0.3340912 1.1391602 0.85665894q0.41915894 0.51475525 0.5724182
+1.2009735q0.3392334 1.4900208 -0.4510193 3.2419128q-0.70669556 1.5667267
+-1.7356567 2.5938568q-0.75302124 0.75737 -1.5674438 0.8528137q-0.417511
+0.051635742 -0.7878418 -0.11540222q-0.3560791 -0.16061401 -0.57528687
+-0.39663696q-0.22705078 -0.25668335 -0.32867432 -0.5253601q-0.2267456
+-0.59936523 0.06237793 -1.2402954q0.27624512 -0.6124573 0.89627075
+-0.8470001q0.59155273 -0.247406 1.1043091 -0.016113281l0.014251709
+0.00642395q0.52056885 0.25195312 0.62945557 0.35250854z"
+fill-rule="nonzero"></path><path fill="#000000" fill-opacity="0.0"
+d="m125.19179 263.8607l33.637787 -74.55118" fill-rule="nonzero"></path><path
+stroke="#434343" stroke-width="1.0" stroke-linejoin="round"
+stroke-linecap="butt" d="m125.19179 263.8607l32.228317 -71.42735"
+fill-rule="evenodd"></path><path fill="#434343" stroke="#434343"
+stroke-width="1.0" stroke-linecap="butt" d="m157.4201 192.43333l0.5625458
+1.4875793l0.24568176 -3.2788696l-2.2958221 2.353836z"
+fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0"
+d="m125.190956 262.63293l74.56457 33.634216" fill-rule="nonzero"></path><path
+stroke="#434343" stroke-width="1.0" stroke-linejoin="round"
+stroke-linecap="butt" d="m125.190956 262.63293l71.440605 32.225098"
+fill-rule="evenodd"></path><path fill="#434343" stroke="#434343"
+stroke-width="1.0" stroke-linecap="butt" d="m196.63156 294.858l-1.4875183
+0.5627136l3.278885 0.24533081l-2.3540802 -2.2955627z"
+fill-rule="evenodd"></path><path fill="#000000" fill-opacity="0.0"
+d="m129.13316 262.30618l-61.102364 6.1102295" fill-rule="nonzero"></path><path
+stroke="#434343" stroke-width="1.0" stroke-linejoin="round"
+stroke-linecap="butt" d="m125.36197 262.6833l-53.92109 5.3921204"
+fill-rule="evenodd"></path><path fill="#434343" stroke="#434343"
+stroke-width="1.0" stroke-linecap="butt" d="m128.63564 262.35593c0.090408325
+0.9040222 -0.56915283 1.710144 -1.4731445 1.8005371c-0.9039993 0.09039307
+-1.7101212 -0.56915283 -1.8005219 -1.473175c-0.090400696 -0.9039917 0.56915283
+-1.7101135 1.4731522 -1.8005066c0.9039993 -0.09039307 1.7101212 0.56915283
+1.8005142 1.4731445z" fill-rule="nonzero"></path><path fill="#434343"
+stroke="#434343" stroke-width="1.0" stroke-linecap="butt" d="m71.44088
+268.0754l1.007103 -1.230896l-2.9625397 1.4264526l3.1863403 0.81155396z"
+fill-rule="evenodd"></path></g></svg>
+

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -483,7 +483,7 @@
    import yt
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    s = yt.SlicePlot(ds, 'z', 'density', center='max', width=(10, 'kpc'))
-   s.annotate_text((2, 2), coord_system='plot', 'Galaxy!')
+   s.annotate_text((2, 2), 'Galaxy!', coord_system='plot')
    s.save()
 
 .. _annotate-title:

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -311,7 +311,8 @@
 .. _off-axis-projection-function:
 
 To avoid manually creating a camera and setting the transfer
-function, yt provides the :func:`~yt.visualization.volume_rendering.camera.off_axis_projection`
+function, yt provides the
+:func:`~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection`
 function, which wraps the camera interface to create an off axis
 projection image buffer.  These images can be saved to disk or
 used in custom plots.  This snippet creates an off axis

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -1,26 +1,50 @@
 .. _volume_rendering:
 
-Volume Rendering: Making 3D Photorealistic Isocontoured Images
-==============================================================
+3D Visualization and Volume Rendering
+=====================================
 
-Volume rendering, as implemented in yt, is a mechanism by which rays are cast
-through a domain, converting field values to emission and absorption, and producing a final image.
-This provides the ability to create off-axis projections, isocontour images,
-volume emission, and absorption from intervening material.  The primary goal 
-of the volume rendering in yt is to provide the ability to make
-*scientifically-informed* visualizations of simulations.  
+yt has the ability to create 3D visualizations, using a process known as volume
+rendering.  Currently all of the rendering capabilities are implemented in
+software, requiring no specialized hardware. Optimized versions implemented
+with OpenGL and utilizing graphics processors are being actively developed.
 
-The volume renderer is implemented in a hybrid of Python and Cython, which is
-Python-like code compiled down to C.  It has been optimized, but it is still a
-*software* volume renderer: it does not currently operate on graphics
-processing units (GPUs).  However, while the rendering engine itself may not
-directly translate to GPU code (OpenCL, CUDA or OpenGL), the Python structures:
-partitioning, transfer functions, display, etc., may be useful in the future
-for transitioning the rendering to the GPU.  In addition, this allows users to create
-volume renderings on traditional supercomputing platforms that may not have access to GPUs.
+Constructing a 3D visualization is a process of describing the "scene" that
+will be rendered.  This includes the location of the viewing point (i.e., where
+the "camera" is placed), the method by which a system would be viewed (i.e.,
+the "lens," which may be orthographic, perspective, fisheye, spherical, and so
+on) and the components that will be rendered (render "sources," such as volume
+elements, lines, annotations, and opaque surfaces).  The 3D plotting
+infrastructure then develops a resultant image from this scene, which can be
+saved to a file or viewed inline.
 
-The volume renderer is also threaded using OpenMP.  Many of the commands
-(including `snapshot`) will accept a `num_threads` option.
+By constructing the scene in this programmatic way, full control can be had
+over each component in the scene as well as the method by which the scene is
+rendered; this can be used to prototype visualizations, inject annotations such
+as grid or continent lines, and then to render a production-quality
+visualization.  By changing the "lens" used, a single camera path can output
+images suitable for planetarium domes, immersive and head tracking systems
+(such as the Oculus Rift or recent "spherical" movie viewers such as the
+mobile YouTube app), as well as standard screens.
+
+.. image:: _images/scene_diagram.svg
+   :width: 50%
+   :align: center
+   :alt: Diagram of a 3D Scene
+
+In versions of yt prior to 3.2, the only volume rendering interface accessible
+was through the "camera" object.  This presented a number of problems,
+principle of which was the inability to describe new scene elements or to
+develop complex visualizations that were independent of the specific elements
+being rendered.  The new "scene" based interface present in yt 3.2 and beyond
+enables both more complex visualizations to be constructed as well as a new,
+more intuitive interface for very simple 3D visualizations.
+
+.. warning:: 3D visualizations can be fun but frustrating!  Tuning the
+             parameters to both look nice and convey useful scientific
+             information can be hard.  We've provided information about best
+             practices and tried to make the interface easy to develop nice
+             visualizations, but getting them *just right* is often
+             time-consuming.
 
 Scene Interface
 ===============
@@ -71,169 +95,84 @@
 Tutorial
 --------
 
-Volume renderings are created by combining three objects: a volume
-homogenization; a transfer function, and a camera object.
+The scene interface provides a more modular interface for creating renderings
+of arbitrary data sources. As such, manual composition of a scene can require a
+bit more work, but we will also provide several helper functions that attempt
+to create satisfactory default volume renderings.
 
-#. Find the appropriate bounds for your data.
-#. Create a ColorTransferFunction object.
-#. Create a Camera object, which homogenizes the volume and orients the viewing
-   direction
-#. Take a snapshot and save the image.
+.. note:: It's usually best to start out simple with the built-in helper
+          interface, and expand on that if you need to.
 
-Here is a working example for the IsolatedGalaxy dataset.
+Here is a working example for rendering the IsolatedGalaxy dataset.
 
 .. python-script::
+  import yt
+  # load the data
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  # volume render the 'density' field, and save the resulting image
+  im, sc = yt.volume_render(ds, 'density', fname='test_rendering.png')
 
-   import yt
-   import numpy as np
+  # im is the image that was generated.
+  # sc is an instance of a Scene object, which allows you to further refine
+  # your renderings.
 
-   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-   # Choose a field
-   field = 'density'
-   # Do you want the log of the field?
-   use_log = True
+When the volume_render function is called, first an empty
+:class:`~yt.visualization.volume_rendering.scene.Scene` object is
+created. Next, a 
+:class:`~yt.visualization.volume_rendering.api.VolumeSource`
+object is created, which decomposes the volume elements
+into a tree structure to provide back-to-front rendering of fixed-resolution
+blocks of data.  (If the volume elements are grids, this uses a
+:class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` object.) When the
+:class:`~yt.visualization.volume_rendering.api.VolumeSource`
+object is created, by default it will create a transfer function
+based on the extrema of the field that you are rendering. The transfer function
+describes how rays that pass through the domain are "transferred" and thus how
+brightness and color correlates to the field values.  Modifying and adjusting
+the transfer function is the primary way to modify the appearance of an image
+based on volumes.
 
-   # Find the bounds in log space of for your field
-   dd = ds.all_data()
-   mi, ma = dd.quantities.extrema(field)
+Once the basic set of objects to be rendered is constructed, a
+:class:`~yt.visualization.volume_rendering.camera.Camera` object is created and
+added to the scene.  By default the creation of a camera also creates a
+default, plane-parallel :class:`~yt.visualization.volume_rendering.lens.Lens`
+object. The analog to a real camera is intentional -- a camera can take a
+picture of a scene from a particular point in time and space, but different
+lenses can be swapped in and out.  For example, this might include a fisheye
+lens, a spherical lens, or some other method of describing the direction and
+origin of rays for rendering. Once the camera is added to the scene object, we
+call the main method of the
+:class:`~yt.visualization.volume_rendering.scene.Scene` class,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render`.  When called,
+the scene will loop through all of the
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
+that have been added and integrate the radiative transfer equation through the
+volume. Finally, the image and scene object is returned to the user.
 
-   if use_log:
-       mi,ma = np.log10(mi), np.log10(ma)
+In this example, we don't add on any non-volume rendering sources; however, if
+such sources are added, they will be integrated as well.
 
-   # Instantiate the ColorTransferfunction.
-   tf = yt.ColorTransferFunction((mi, ma))
+Modifying the Scene
+-------------------
 
-   # Set up the camera parameters: center, looking direction, width, resolution
-   c = (ds.domain_right_edge + ds.domain_left_edge)/2.0
-   L = np.array([1.0, 1.0, 1.0])
-   W = ds.quan(0.3, 'unitary')
-   N = 256 
-
-   # Create a camera object
-   cam = ds.camera(c, L, W, N, tf, fields = [field], log_fields = [use_log])
-
-   # Now let's add some isocontours, and take a snapshot, saving the image
-   # to a file.
-   tf.add_layers(10, 0.01, colormap = 'RdBu_r')
-   im = cam.snapshot('test_rendering.png')
-
-   # To add the domain box to the image:
-   nim = cam.draw_domain(im)
-   nim.write_png('test_rendering_with_domain.png')
-
-   # To add the grid outlines to the image:
-   nim = cam.draw_grids(im)
-   nim.write_png('test_rendering_with_grids.png')
-
-Method
-------
-
-Direct ray casting through a volume enables the generation of new types of
-visualizations and images describing a simulation.  yt has the facility
-to generate volume renderings by a direct ray casting method.  However, the
-ability to create volume renderings informed by analysis by other mechanisms --
-for instance, halo location, angular momentum, spectral energy distributions --
-is useful.
-
-The volume rendering in yt follows a relatively straightforward approach.
-
-#. Create a set of transfer functions governing the emission and absorption as
-   a function of one or more variables. (:math:`f(v) \rightarrow (r,g,b,a)`)
-   These can be functions of any field variable, weighted by independent
-   fields, and even weighted by other evaluated transfer functions.  (See
-   `transfer_functions`.)
-#. Partition all chunks into non-overlapping, fully domain-tiling "bricks."
-   Each of these "bricks" contains the finest available data at any location.
-#. Generate vertex-centered data for all grids in the volume rendered domain.
-#. Order the bricks from back-to-front.
-#. Construct plane of rays parallel to the image plane, with initial values set
-   to zero and located at the back of the region to be rendered.
-#. For every brick, identify which rays intersect.  These are then each 'cast'
-   through the brick.
-
-   #. Every cell a ray intersects is sampled 5 times (adjustable by parameter),
-      and data values at each sampling point are trilinearly interpolated from
-      the vertex-centered data.
-   #. Each transfer function is evaluated at each sample point.  This gives us,
-      for each channel, both emission (:math:`j`) and absorption
-      (:math:`\alpha`) values.
-   #. The value for the pixel corresponding to the current ray is updated with
-      new values calculated by rectangular integration over the path length:
-
-      :math:`v^{n+1}_{i} =  j_{i}\Delta s + (1 - \alpha_{i}\Delta s )v^{n}_{i}`
-
-      where :math:`n` and :math:`n+1` represent the pixel before and after
-      passing through a sample, :math:`i` is the color (red, green, blue) and 
-      :math:`\Delta s` is the path length between samples.
-#. The image is returned to the user:
-
-.. image:: _images/vr_sample.jpg
-   :width: 512
-
-.. _the-camera-interface:
-
-The Camera Interface
---------------------
-
-A camera object has also been created, to allow for more programmatic
-descriptions of the viewpoint and image plane, and to allow for moving the
-camera object through the volume and creating multiple images.  There are
-several camera objects available, but the most commonly used is the standard,
-orthographic projection camera.
-
-The primary interface here is through the creation of an instance of
-:class:`~yt.visualization.volume_rendering.camera.Camera`, which represents a
-viewpoint into a volume.  The camera optionally accepts a volume, which can be
-either an instance of
-:class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` that
-has already been initialized.  If one is not supplied, the camera will generate
-one itself.  This can also be specified if you wish to save bricks between
-repeated calls, thus saving considerable amounts of time.
-
-The camera interface allows the user to move the camera about the domain, as
-well as providing interfaces for zooming in and out.  Furthermore, yt now
-includes a stereoscopic camera
-(:class:`~yt.visualization.volume_rendering.camera.StereoPairCamera`).
-
-Much like most data objects, the
-:class:`~yt.visualization.volume_rendering.camera.Camera` object hangs off of
-the index file, and can be instantiated in that manner.
-
-.. warning::  The keyword *no_ghost* has been set to True by default
-              for speed considerations.  However, because this turns off ghost
-              zones, there may be artifacts at grid boundaries.  If a higher quality
-              rendering is required, use *no_ghost = False*.
-
-Here's a fully functional script that demonstrates how to use the camera
-interface.
-
-For an example, see the cookbook :ref:`cookbook-simple_volume_rendering`.
-
-The :class:`~yt.visualization.volume_rendering.camera.StereoPairCamera` object
-has a single primary method,
-:meth:`~yt.visualization.volume_rendering.camera.StereoPairCamera.split`, that
-will return two cameras, a left and a right.
-
-.. _camera_movement:
-
-Camera Movement
----------------
-
-There are multiple ways to manipulate the camera viewpoint to create a series of
-renderings.  For an example, see this cookbook:
-:ref:`cookbook-camera_movement`.  For a current list of
-motion helper functions, see the docstrings associated with
-:class:`~yt.visualization.volume_rendering.camera.Camera`.
+Once a basic scene has been created with default render sources and
+camera operations, deeper modifications are possible. These
+modifications can tune the appearance of the render sources (such as which
+colors correspond to which values in the data) as well as the shape of the
+rendered image, the position of the camera in the scene, and other elements
+present in the scene.  Below, we describe a few of the aspects of tuning a
+scene to create a visualization that is communicative and pleasing.
 
 .. _transfer_functions:
 
 Transfer Functions
-------------------
+++++++++++++++++++
 
-Transfer functions are the most essential component.  Several different
-fundamental types have been provided, but there are many different ways the
-construct complicated expressions to produce visualizations and images using
-the underlying machinery.
+Transfer functions are the most essential component of a rendering that
+includes volume sources.  Several different fundamental types have been
+provided, but there are many different ways to construct complicated
+expressions that produce visualizations and images using the underlying
+machinery.
 
 .. note::
    All of the information about how transfer functions are used and values
@@ -264,7 +203,7 @@
 :meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_gaussian`,
 which will allow you to specify the colors directly.
 
-An alternate method for modifying the colormap is done using
+An alternate method for modifying the colormap is
 :meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.map_to_colormap`,
 where you can map a segment of the transfer function space to an entire
 colormap at a single alpha value.  This is sometimes useful for very opaque
@@ -303,10 +242,199 @@
 
 .. notebook:: TransferFunctionHelper_Tutorial.ipynb
 
-.. _healpix_volume_rendering:
+Adding New Sources
+++++++++++++++++++
+
+The resulting image of a rendering process is a combination of the different
+sources present in a scene.  While at present there are only a few sources
+available, in principle new sources can be defined and added to yt over time.
+
+By default, the scene will construct a volume object that includes the fluid
+components of a data source. 
+
+Volume Objects
+++++++++++++++
+
+When a volume object is added to a scene, rays that cross it will be
+integrated.  The volume object is affiliated with a transfer function, a set of
+voxels (drawn from a data source) and is integrated in a front-to-back manner.
+Depending on whether or not other opaque objects are in the scene, the volume
+may or may not be traversed in its entirety.
+
+.. note:: Behavior is undefined for overlapping volume sources that are added
+          to a scene.
+
+Hard and Opaque Objects
++++++++++++++++++++++++
+
+In addition to semi-transparent objects, hard surfaces can be added to a scene.
+Currently these surfaces are limited to lines and annotations, but in future
+versions of yt surfaces and texture mapped objects will be included.
+
+The primary objects now available for hard and opaque objects are 
+:class:`~yt.visualization.volume_rendering.api.PointSource` and
+:class:`~yt.visualization.volume_rendering.api.LineSource`.  These are useful
+if you want to annotate points, for instance by splatting a set of particles
+onto an image, or if you want to draw lines connecting different regions or
+vertices.  For instance, lines can be used to draw outlines of regions or
+continents.
+
+Annotations
++++++++++++
+
+By annotating a visualization, additional information can be drawn out.  yt
+provides three annotations:
+:class:`~yt.visualization.volume_rendering.api.BoxSource`,
+:class:`~yt.visualization.volume_rendering.api.GridSource`, and
+:class:`~yt.visualization.volume_rendering.api.CoordinateVectorSource`.  These
+annotations will operate in data space and can draw boxes, grid information,
+and also provide a vector orientation within the image.
+
+Care and Usage of the Camera
+----------------------------
+
+When constructing a movie or utilizing volume rendering to visualize particular
+objects or phenomena, control over the exact position of the camera is
+necessary for both aesthetic and scientific reasons.
+
+yt provides methods for moving the camera by altering its position and
+orientation in space.  There are helper methods that can provide easier ways if
+you are guiding visualization based on quantities in the data.
+
+Cameras also possess "lens" objects, which control the manner in which rays are
+shot out of the camera.  Some of these make some camera properties
+(specifically the width property) irrelevant.
+
+.. _camera_movement:
+
+Moving and Orienting the Camera
++++++++++++++++++++++++++++++++
+
+There are multiple ways to manipulate the camera viewpoint to create a series of
+renderings.  For an example, see this cookbook:
+:ref:`cookbook-camera_movement`.  For a current list of
+motion helper functions, see the docstrings associated with
+:class:`~yt.visualization.volume_rendering.camera.Camera`.  In short, the
+camera possesses a number of properties and methods that make changing its
+position easy.  These properties can be set, and will automatically trigger an
+update of the other properties of the camera:
+
+ * `position` - the position of the camera in scene-space
+ * `width` - the width of the plane the camera can see
+ * `focus` - the point in space the camera is looking at
+ * `resolution` - the image resolution
+
+In addition to this, methods such as
+:meth:`~yt.visualization.volume_rendering.camera.Camera.rotate`,
+:meth:`~yt.visualization.volume_rendering.camera.Camera.pitch`,
+:meth:`~yt.visualization.volume_rendering.camera.Camera.yaw`, and
+:meth:`~yt.visualization.volume_rendering.camera.Camera.roll` can rotate the
+camera in space.
+
+When examining a particular point in space, 
+:meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` can be of
+assistance, as it will move the camera toward the focal point by a factor
+related to the current distance between them.
+
+In addition to manual control, the camera also has iteration methods that help
+with moving and rotating.  The 
+:meth:`~yt.visualization.volume_rendering.camera.Camera.rotation`,
+:meth:`~yt.visualization.volume_rendering.camera.Camera.zoomin`, and
+:meth:`~yt.visualization.volume_rendering.camera.Camera.move_to` methods
+provide iteration over a sequence of positions and orientations.  These can be
+used within a loop:
+
+.. python-script::
+
+   for i in sc.camera.zoomin(100, 5):
+       sc.render("frame_%03i.png" % i)
+
+The variable ``i`` is the frame number in the particular loop being called.  In
+this case, this will zoom in by a factor of 100 over the course of 5 frames.
+
+Changing Lenses
++++++++++++++++
+
+Setting a lens on a camera changes the resulting image.  These lenses can be
+changed at run time or at the time when a camera is initialized by specifying
+the `lens_type` argument with a string.
+
+At the present time, there are a few cameras that can be used:
+`plane-parallel`, `perspective`, `fisheye`, and `spherical`.
+
+ * Plane parallel: This lens type is the standard type used for orthographic
+   projections.  All rays emerge parallel to each other, arranged along a
+   plane.
+ * Perspective: This lens type adjusts for an opening view angle, so that the
+   scene will have an element of perspective to it.
+ * Fisheye: This lens type accepts a field-of-view property, `fov`, that
+   describes how wide an angle the fisheye can see.  Fisheye images are
+   typically used for dome-based presentations; the Hayden planetarium for
+   instance has a field of view of 194.6.  The images returned by this camera
+   will be flat pixel images that can and should be reshaped to the resolution.
+ * Spherical: This is a cylindrical-spherical projection.  Movies rendered in
+   this way can be displayed in head-tracking devices or in YouTube 360 view
+   (for more information see `the YouTube help
+   <https://support.google.com/youtube/answer/6178631?hl=en>`_, but it's a
+   simple matter of running a script on an encoded movie file.)
+
+Volume Rendering Method
+-----------------------
+
+Direct ray casting through a volume enables the generation of new types of
+visualizations and images describing a simulation.  yt has the facility
+to generate volume renderings by a direct ray casting method.  However, the
+ability to create volume renderings informed by analysis by other mechanisms --
+for instance, halo location, angular momentum, spectral energy distributions --
+is useful.
+
+The volume rendering in yt follows a relatively straightforward approach.
+
+#. Create a set of transfer functions governing the emission and absorption as
+   a function of one or more variables. (:math:`f(v) \rightarrow (r,g,b,a)`)
+   These can be functions of any field variable, weighted by independent
+   fields, and even weighted by other evaluated transfer functions.  (See
+   `transfer_functions`.)
+#. Partition all chunks into non-overlapping, fully domain-tiling "bricks."
+   Each of these "bricks" contains the finest available data at any location.
+#. Generate vertex-centered data for all grids in the volume rendered domain.
+#. Order the bricks from front-to-back.
+#. Construct plane of rays parallel to the image plane, with initial values set
+   to zero and located at the back of the region to be rendered.
+#. For every brick, identify which rays intersect.  These are then each 'cast'
+   through the brick.
+
+   #. Every cell a ray intersects is sampled 5 times (adjustable by parameter),
+      and data values at each sampling point are trilinearly interpolated from
+      the vertex-centered data.
+   #. Each transfer function is evaluated at each sample point.  This gives us,
+      for each channel, both emission (:math:`j`) and absorption
+      (:math:`\alpha`) values.
+   #. The value for the pixel corresponding to the current ray is updated with
+      new values calculated by rectangular integration over the path length:
+
+      :math:`v^{n+1}_{i} =  j_{i}\Delta s + (1 - \alpha_{i}\Delta s )v^{n}_{i}`
+
+      where :math:`n` and :math:`n+1` represent the pixel before and after
+      passing through a sample, :math:`i` is the color (red, green, blue) and 
+      :math:`\Delta s` is the path length between samples.
+   #. Determine if any additional integration will change the sample value; if not,
+      terminate integration.  (This reduces integration time when rendering
+      front-to-back.)
+#. The image is returned to the user:
+
+.. image:: _images/vr_sample.jpg
+   :width: 512
+
+Parallelism
+-----------
+
+yt can utilize both MPI and OpenMP parallelism for volume rendering.  Both, and
+their combination, are described below.
 
 MPI Parallelization
--------------------
++++++++++++++++++++
+
 Currently the volume renderer is parallelized using MPI to decompose the volume
 by attempting to split up the
 :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` in a balanced way.  This
@@ -339,7 +467,7 @@
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
 OpenMP Parallelization
-----------------------
+++++++++++++++++++++++
 
 The volume rendering is also parallelized using the OpenMP interface in Cython.
 While the MPI parallelization is done using domain decomposition, the OpenMP
@@ -355,7 +483,7 @@
 by default by modifying the environment variable OMP_NUM_THREADS. 
 
 Running in Hybrid MPI + OpenMP
-------------------------------
+++++++++++++++++++++++++++++++
 
 The two methods for volume rendering parallelization can be used together to
 leverage large supercomputing resources.  When choosing how to balance the
@@ -394,30 +522,3 @@
 
 For an in-depth example, please see the cookbook example on opaque renders here: 
 :ref:`cookbook-opaque_rendering`.
-
-Lighting
---------
-
-Lighting can be optionally used in volume renders by specifying use_light=True
-in the Camera object creation.  If used, one can then change the default
-lighting color and direction by modifying Camera.light_dir and
-Camera.light_rgb.  Lighting works in this context by evaluating not only the
-field value but also its gradient in order to compute the emissivity.  This is
-not the same as casting shadows, but provides a way of highlighting sides of a
-contour.  
-
-Generating a Homogenized Volume
--------------------------------
-
-In order to perform a volume rendering, the data must first be decomposed into
-a HomogenizedVolume object.  This structure splits the domain up into
-single-resolution tiles which cover the domain at the highest resolution
-possible for a given point in space.  This means that every point in space is
-mapped to exactly one data point, which receives its values from the highest
-resolution grid that covers that volume.
-
-The creation of these homogenized volumes is done during the 
-:class:`~yt.visualization.volume_rendering.camera.Camera`  object
-instantiation by default.  However, in some cases it is useful to first build
-your homogenized volume to then be passed in to the camera. A sample usage is shown
-in :ref:`cookbook-amrkdtree_downsampling`.

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c setup.py
--- a/setup.py
+++ b/setup.py
@@ -49,19 +49,18 @@
     REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
+REQ_CYTHON = '0.22'
 try:
     import Cython
-    if version.LooseVersion(Cython.__version__) < version.LooseVersion('0.16'):
-        needs_cython = True
-    else:
-        needs_cython = False
+    needs_cython = \
+        version.LooseVersion(Cython.__version__) < version.LooseVersion(REQ_CYTHON)
 except ImportError as e:
     needs_cython = True
 
 if needs_cython:
     print("Cython is a build-time requirement for the source tree of yt.")
     print("Please either install yt from a provided, release tarball,")
-    print("or install Cython (version 0.16 or higher).")
+    print("or install Cython (version %s or higher)." % REQ_CYTHON)
     print("You may be able to accomplish this by typing:")
     print("     pip install -U Cython")
     sys.exit(1)

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -156,6 +156,7 @@
 from yt.visualization.volume_rendering.api import \
     volume_render, ColorTransferFunction, TransferFunction, \
     off_axis_projection
+import yt.visualization.volume_rendering.api as volume_rendering
 #    TransferFunctionHelper, MultiVariateTransferFunction
 #    off_axis_projection
 

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -18,8 +18,16 @@
     charge_proton_cgs, \
     mass_electron_cgs, \
     speed_of_light_cgs
+from yt.utilities.on_demand_imports import _scipy, NotAModule
 
-def voigt(a,u):
+special = _scipy.special
+
+def voigt_scipy(a, u):
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
+    return special.wofz(x + 1j * y).real
+
+def voigt_old(a, u):
     """
     NAME:
         VOIGT 
@@ -209,3 +217,8 @@
     tauphi = (tau0 * phi).in_units("")               # profile scaled with tau0
 
     return (lambda_bins, tauphi)
+
+if isinstance(special, NotAModule):
+    voigt = voigt_old
+else:
+    voigt = voigt_scipy

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -1011,7 +1011,7 @@
 
     """
     f = h5py.File(file_name, 'w')
-    for ion, params in lineDic.iteritems():
+    for ion, params in lineDic.items():
         f.create_dataset("{0}/N".format(ion),data=params['N'])
         f.create_dataset("{0}/b".format(ion),data=params['b'])
         f.create_dataset("{0}/z".format(ion),data=params['z'])

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -10,8 +10,11 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
-from yt.testing import *
+import numpy as np
+from yt.testing import \
+    assert_allclose, requires_file, requires_module
+from yt.analysis_modules.absorption_spectrum.absorption_line import \
+    voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.cosmological_observation.api import LightRay
 import tempfile
@@ -20,6 +23,7 @@
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 
+
 @requires_file(COSMO_PLUS)
 def test_absorption_spectrum():
     """
@@ -44,22 +48,24 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700 # Angstromss
+    wavelength = 1215.6700  # Angstromss
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
 
-    sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 912.323660 # Angstroms
+    wavelength = 912.323660  # Angstroms
     normalization = 1.6e17
     index = 3.0
 
     sp.add_continuum(my_label, field, wavelength, normalization, index)
 
-    wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.txt',
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.txt',
                                         line_list_file='lines.txt',
                                         use_peculiar_velocity=True)
 
@@ -93,25 +99,34 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700 # Angstromss
+    wavelength = 1215.6700  # Angstromss
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
 
-    sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 912.323660 # Angstroms
+    wavelength = 912.323660  # Angstroms
     normalization = 1.6e17
     index = 3.0
 
     sp.add_continuum(my_label, field, wavelength, normalization, index)
 
-    wavelength, flux = sp.make_spectrum('lightray.h5', output_file='spectrum.fits',
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.fits',
                                         line_list_file='lines.txt',
                                         use_peculiar_velocity=True)
 
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+
+ at requires_module("scipy")
+def test_voigt_profiles():
+    a = 1.7e-4
+    x = np.linspace(5.0, -3.6, 60)
+    yield assert_allclose, voigt_old(a, x), voigt_scipy(a, x), 1e-8

diff -r 088695eb316d99feec3433f2aa491877826b57a7 -r 0bb3ca370cea95934aecf73e51549b384328f46c yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -343,7 +343,7 @@
             del output["object"]
 
         # Combine results from each slice.
-        all_slices = all_storage.keys()
+        all_slices = list(all_storage.keys())
         all_slices.sort()
         for my_slice in all_slices:
             if save_slice_images:

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/3080a1c09e60/
Changeset:   3080a1c09e60
Branch:      yt
User:        atmyers
Date:        2015-07-16 18:46:09+00:00
Summary:     missed a conflict
Affected #:  1 file

diff -r 0bb3ca370cea95934aecf73e51549b384328f46c -r 3080a1c09e6056a06bc8a8f5ed6a2c289a6f5808 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -109,4 +109,3 @@
         [re[0], re[1], re[2]],
         [le[0], re[1], re[2]],
         ], dtype='float64')
->>>>>>> other


https://bitbucket.org/yt_analysis/yt/commits/7a99b49416a0/
Changeset:   7a99b49416a0
Branch:      yt
User:        atmyers
Date:        2015-07-16 18:54:46+00:00
Summary:     removing this, product of botched merge
Affected #:  1 file

diff -r 3080a1c09e6056a06bc8a8f5ed6a2c289a6f5808 -r 7a99b49416a04afe26bc79ef8189ba582e6cbb6a doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -46,52 +46,6 @@
              visualizations, but getting them *just right* is often
              time-consuming.
 
-Scene Interface
-===============
-
-Tutorial
---------
-
-The scene interface is the product of a refactor to the volume rendering
-framework, and is meant to provide a more modular interface for creating
-renderings of arbitrary data sources. As such, manual composition of a 
-scene can require a bit more work, but we will also provide several helper
-functions that attempt to create satisfactory default volume renderings.
-
-Here is a working example for rendering the IsolatedGalaxy dataset.
-
-.. python-script::
-  import yt
-  # load the data
-  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_rendering(ds, 'density', fname='test_rendering.png')
-
-  # im is the image that was generated.
-  # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings.
-
-When the volume_rendering function is called, first an empty 'Scene' object is
-created. Next, a 'VolumeSource' object is created, which deomposes the grids
-into an AMRKDTree to provide back-to-front rendering of fixed-resolution blocks
-of data.  When the VolumeSource object is created, by default it will create a
-transfer function based on the extrema of the field that you are rendering. The
-transfer function describes how to 'transfer' data values to color and
-brightness.
-
-Next, a Camera object is created, which by default also creates a default,
-plane-parallel, Lens object. The analog to a real camera is intentional.
-A camera can take a picture of a scene from a particular point in time and
-space.  However, you can swap in different lenses like, for example, a fisheye
-lens. Once the camera is added to the scene object, we call the main method of
-the Scene class, 'render'. When called, the scene will loop through all of the
-RenderSource objects that have been added, and integrate the radiative transfer
-equation through the volume. Finally, the image and scene object is returned to
-the user.
-
-Camera Interface
-================
-
 Tutorial
 --------
 


https://bitbucket.org/yt_analysis/yt/commits/5a12c6ead96a/
Changeset:   5a12c6ead96a
Branch:      yt
User:        atmyers
Date:        2015-07-16 18:56:43+00:00
Summary:     removing some old notes
Affected #:  2 files

diff -r 7a99b49416a04afe26bc79ef8189ba582e6cbb6a -r 5a12c6ead96a5c0e19f30a5e722ee1765f70e4e2 vr_refactor_todo.markdown
--- a/vr_refactor_todo.markdown
+++ /dev/null
@@ -1,30 +0,0 @@
-Todo
-----
-
-Known Issues:
-
-* ~~FRB Off-axis projections are broken I think. Currently should raise not-implemented error.~~
-* Parallelism
-  * Need to write parallel z-buffer reduce.
-  * Need to verify brick ordering
-* Alpha blending level for opaque sources such as grid lines/domains/etc may
-  not currently be ideal. Difficult to get it right when the transparent VRs
-  have wildly different levels. One approach would be to normalize the transfer
-  function such that the integral of the TF multiplied by the depth of the 
-  rendering is equal to 1. With grey opacity on, all of these things get a bit
-  easier, in my opinion
-
-Documentation:
-
-* ~~Scene~~
-* ~~Camera~~
-* Lens
-* Narrative
-  * Have started, but more work to do. Replaced at least the tutorial
-    rendering, which saves a number of lines!
-* Cookbooks
-  * All relevant cookbooks have been updated
-* Parallelism
-* OpaqueSource
-* RenderSource
-* Narrative Developer Documentation

diff -r 7a99b49416a04afe26bc79ef8189ba582e6cbb6a -r 5a12c6ead96a5c0e19f30a5e722ee1765f70e4e2 yt/visualization/volume_rendering/notes.md
--- a/yt/visualization/volume_rendering/notes.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-Overview of Volume Rendering
-============================
-
-In 3.0, we have moved away from the "god class" that was Camera, and have
-attempted to break down the VR system into a hierarchy of classes.  So far
-we are at:
-
-1. Scene 
-2. Camera
-3. Lens 
-4. Source
-
-For now, a scene only has one camera, i.e. one viewpoint. I would like this to be
-extended to multiple cameras at some point, but not in this pass.
-
-A Camera can have many lenses. When taking a snapshot, the Camera will loop 
-over the lenses that have been added by the user.  We should come up with a
-naming convention and storage system.
-
-
-A Lens defines how the vectors are oriented pointing outward from the camera
-position.  Plane-parallel, Perspective, Fisheye are the first set that need to
-be implemented. As much of the Lens as possible will be set up using defaults 
-derived from the scene, such as the width/depth/etc.
-
-A Source is a data source with intent on how to visualize it.  For example, a
-VolumeSource should be treated volumetrically, with a transfer function defined
-for a given field or set of fields.  A generic OpaqueSource should define
-a method for pixelizing a ZBuffer object, carrying information about both the
-color and depth of the surface/streamline/annotation. These will be used for
-compositing later.
-
-
-sc = Scene(data_source)
-cam = sc.add_camera(cam) // triggers cam.set_defaults_from_data_source(data_source)
-lens = PlaneParallelLens()
-cam.set_lens(lens) # This sets up lens based on camera.
-


https://bitbucket.org/yt_analysis/yt/commits/28fa4e57cd8d/
Changeset:   28fa4e57cd8d
Branch:      yt
User:        atmyers
Date:        2015-07-16 19:41:15+00:00
Summary:     reverting some whitespace changes to make this easier to read
Affected #:  3 files

diff -r 5a12c6ead96a5c0e19f30a5e722ee1765f70e4e2 -r 28fa4e57cd8dafab4c315160cbdd8b88a731b74d yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -70,7 +70,6 @@
     np.float64_t idds[3]
     int dims[3]
 
-
 cdef class PartitionedGrid:
     cdef public object my_data
     cdef public object source_mask

diff -r 5a12c6ead96a5c0e19f30a5e722ee1765f70e4e2 -r 28fa4e57cd8dafab4c315160cbdd8b88a731b74d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -163,7 +163,7 @@
     def set_lens(self, lens_type):
         r'''
 
-        Set the lens to be used with this camera.
+        Set the lens to be used with this camera. 
 
         Parameters
         ----------
@@ -393,7 +393,6 @@
         self.rotate(theta, rot_vector=self.unit_vectors[2])
 
     def iter_rotate(self, theta, n_steps, rot_vector=None):
-
         r"""Loop over rotate, creating a rotation
 
         This will rotate `n_steps` until the current view has been
@@ -415,7 +414,6 @@
 
         >>> for i in cam.iter_rotate(np.pi, 10):
         ...     im = sc.render("rotation_%04i.png" % i)
-
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -444,7 +442,6 @@
 
         >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
         ...     sc.render("move_%04i.png" % i)
-
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -476,7 +473,6 @@
         -----
 
         You will need to call snapshot() again to get a new image.
-
         """
         self.set_width(self.width / factor)
 

diff -r 5a12c6ead96a5c0e19f30a5e722ee1765f70e4e2 -r 28fa4e57cd8dafab4c315160cbdd8b88a731b74d yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -107,3 +107,4 @@
     sc.camera = cam
     sc.add_source(vol)
     sc.render('test_stereospherical_%s.png' % field[1], clip_ratio=6.0)
+


https://bitbucket.org/yt_analysis/yt/commits/49bc90a38783/
Changeset:   49bc90a38783
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:35:09+00:00
Summary:     padding the default bounding box so that vertices do not get cut off
Affected #:  1 file

diff -r 28fa4e57cd8dafab4c315160cbdd8b88a731b74d -r 49bc90a387831f0a2035fa8828208af4be3e5c75 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1683,8 +1683,8 @@
     data = ensure_list(data)
     connectivity = ensure_list(connectivity)
     if bbox is None:
-        bbox = np.array([ [coordinates[:,i].min(),
-                           coordinates[:,i].max()]
+        bbox = np.array([ [1.1*coordinates[:,i].min(),
+                           1.1*coordinates[:,i].max()]
                           for i in range(3)], "float64")
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')


https://bitbucket.org/yt_analysis/yt/commits/ce3f75731362/
Changeset:   ce3f75731362
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:35:36+00:00
Summary:     adding another field to node_types
Affected #:  1 file

diff -r 49bc90a387831f0a2035fa8828208af4be3e5c75 -r ce3f75731362259f071605886ed6746c7a4cc66c yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -259,7 +259,7 @@
 
 class IOHandlerStreamUnstructured(BaseIOHandler):
     _dataset_type = "stream_unstructured"
-    _node_types = ("diffused", "convected")
+    _node_types = ("diffused", "convected", "u")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields


https://bitbucket.org/yt_analysis/yt/commits/fdcb7215a1f2/
Changeset:   fdcb7215a1f2
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:36:42+00:00
Summary:     don't assume element fields are present
Affected #:  1 file

diff -r ce3f75731362259f071605886ed6746c7a4cc66c -r fdcb7215a1f2869cd38f8ac17d03a8f63826e6f5 yt/utilities/exodusII_reader.py
--- a/yt/utilities/exodusII_reader.py
+++ b/yt/utilities/exodusII_reader.py
@@ -15,8 +15,8 @@
     # Is this correct?
     etypes = fvars["eb_status"][:]
     nelem = etypes.shape[0]
-    varnames = [sanitize_string(v.tostring()) for v in
-                fvars["name_elem_var"][:]]
+#    varnames = [sanitize_string(v.tostring()) for v in
+#                fvars["name_elem_var"][:]]
     nodnames = [sanitize_string(v.tostring()) for v in
                 fvars["name_nod_var"][:]]
     coord = np.array([fvars["coord%s" % ax][:]
@@ -29,9 +29,9 @@
         ci = connects[-1]
         coords.append(coord)  # Same for all
         vals = {}
-        for j, v in enumerate(varnames):
-            values = fvars["vals_elem_var%seb%s" % (j+1, i+1)][:]
-            vals['gas', v] = values.astype("f8")[-1, :]
+#        for j, v in enumerate(varnames):
+#            values = fvars["vals_elem_var%seb%s" % (j+1, i+1)][:]
+#            vals['gas', v] = values.astype("f8")[-1, :]
         for j, v in enumerate(nodnames):
             # We want just for this set of nodes all the node variables
             # Use (ci - 1) to get these values


https://bitbucket.org/yt_analysis/yt/commits/84fde18787a2/
Changeset:   84fde18787a2
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:37:34+00:00
Summary:     convert pixelization routines to c++ so that embree functions can be used
Affected #:  1 file

diff -r fdcb7215a1f2869cd38f8ac17d03a8f63826e6f5 -r 84fde18787a29a82d9e50b7d023d31a6836ee3f9 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -107,11 +107,12 @@
     config.add_extension("misc_utilities", 
                 ["yt/utilities/lib/misc_utilities.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
-    config.add_extension("pixelization_routines", 
+    config.add_extension("pixelization_routines",
                 ["yt/utilities/lib/pixelization_routines.pyx",
                  "yt/utilities/lib/pixelization_constants.c"],
                include_dirs=["yt/utilities/lib/"],
-                libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+               language="c++",
+               libraries=["m", "embree"], depends=["yt/utilities/lib/fp_utils.pxd",
                                   "yt/utilities/lib/pixelization_constants.h"])
     config.add_extension("Octree", 
                 ["yt/utilities/lib/Octree.pyx"],


https://bitbucket.org/yt_analysis/yt/commits/dd4cfa5367aa/
Changeset:   dd4cfa5367aa
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:38:28+00:00
Summary:     enable interpolation in the pixelizer
Affected #:  1 file

diff -r 84fde18787a29a82d9e50b7d023d31a6836ee3f9 -r dd4cfa5367aaac8b635c8de08129c1c34d28ff46 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -19,6 +19,7 @@
 cimport libc.math as math
 from fp_utils cimport fmin, fmax, i64min, i64max, imin, imax
 from yt.utilities.exceptions import YTPixelizeError
+from yt.utilities.lib.mesh_samplers cimport sample_hex_at_real_point
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -422,10 +423,10 @@
     return 1
 
 def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords,
-                      np.ndarray[np.int64_t, ndim=2] conn,
-                      buff_size,
-                      np.ndarray[np.float64_t, ndim=1] field,
-                      extents, int index_offset = 0):
+                          np.ndarray[np.int64_t, ndim=2] conn,
+                          buff_size,
+                          np.ndarray[np.float64_t, ndim=2] field,
+                          extents, int index_offset = 0):
     cdef np.ndarray[np.float64_t, ndim=3] img
     img = np.zeros(buff_size, dtype="float64")
     # Two steps:
@@ -445,6 +446,9 @@
     cdef np.int64_t pstart[3], pend[3]
     cdef np.float64_t ppoint[3], centroid[3], idds[3], dds[3]
     cdef np.float64_t **vertices
+    cdef np.float64_t *flat_vertices
+    cdef np.float64_t *field_vals
+    cdef np.float64_t *physical_x
     cdef int nvertices = conn.shape[1]
     cdef int nf
     # Allocate our signs array
@@ -458,6 +462,9 @@
         raise RuntimeError
     signs = <np.int8_t *> alloca(sizeof(np.int8_t) * nf)
     vertices = <np.float64_t **> alloca(sizeof(np.float64_t *) * nvertices)
+    flat_vertices = <np.float64_t *> alloca(3 * sizeof(np.float64_t) * nvertices)
+    field_vals = <np.float64_t *> alloca(sizeof(np.float64_t) * nvertices)
+    physical_x = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
     for i in range(nvertices):
         vertices[i] = <np.float64_t *> alloca(sizeof(np.float64_t) * 3)
     for i in range(3):
@@ -475,8 +482,10 @@
         RE[0] = RE[1] = RE[2] = -1e60
         for n in range(nvertices): # 8
             cj = conn[ci, n] - index_offset
+            field_vals[n] = field[ci, n]
             for i in range(3):
                 vertices[n][i] = coords[cj, i]
+                flat_vertices[3*n + i] = coords[cj, i]
                 centroid[i] += coords[cj, i]
                 LE[i] = fmin(LE[i], vertices[n][i])
                 RE[i] = fmax(RE[i], vertices[n][i])
@@ -500,14 +509,21 @@
         check_face_dot(nvertices, centroid, vertices, signs, 0)
         for pi in range(pstart[0], pend[0] + 1):
             ppoint[0] = (pi + 0.5) * dds[0] + pLE[0]
+            physical_x[0] = ppoint[0]
             for pj in range(pstart[1], pend[1] + 1):
                 ppoint[1] = (pj + 0.5) * dds[1] + pLE[1]
+                physical_x[1] = ppoint[1]
                 for pk in range(pstart[2], pend[2] + 1):
                     ppoint[2] = (pk + 0.5) * dds[2] + pLE[2]
+                    physical_x[2] = ppoint[2]
                     # Now we just need to figure out if our ppoint is within
                     # our set of vertices.
                     if check_face_dot(nvertices, ppoint, vertices, signs, 1) == 0:
                         continue
                     # Else, we deposit!
-                    img[pi, pj, pk] = field[ci]
+                    img[pi, pj, pk] = field[ci, 0]
+#                    img[pi, pj, pk] = sample_hex_at_real_point(flat_vertices, \
+#                                                               field_vals, \
+#                                                               physical_x)
+
     return img


https://bitbucket.org/yt_analysis/yt/commits/8399a638bcd7/
Changeset:   8399a638bcd7
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:40:14+00:00
Summary:     no need to use long here
Affected #:  1 file

diff -r dd4cfa5367aaac8b635c8de08129c1c34d28ff46 -r 8399a638bcd73dafeb438b92a879b9d29a5159ad yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -52,7 +52,7 @@
 cdef void get_hit_position(double* position,
                            void* userPtr,
                            rtcr.RTCRay& ray) nogil:
-    cdef int primID, elemID, i
+    cdef int primID, i
     cdef double[3][3] vertex_positions
     cdef Triangle tri
     cdef MeshDataContainer* data
@@ -100,12 +100,12 @@
     for i in range(3):
         f[i] = vertices[0 + i]*rm*sm*tm \
              + vertices[3 + i]*rp*sm*tm \
-             + vertices[6 + i]*rm*sp*tm \
-             + vertices[9 + i]*rp*sp*tm \
+             + vertices[6 + i]*rp*sp*tm \
+             + vertices[9 + i]*rm*sp*tm \
              + vertices[12 + i]*rm*sm*tp \
              + vertices[15 + i]*rp*sm*tp \
-             + vertices[18 + i]*rm*sp*tp \
-             + vertices[21 + i]*rp*sp*tp \
+             + vertices[18 + i]*rp*sp*tp \
+             + vertices[21 + i]*rm*sp*tp \
              - 8.0*phys_x[i]
 
 
@@ -130,18 +130,18 @@
     tp = 1.0 + x[2]
     
     for i in range(3):
-        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  - \
-                sp*tm*v[6 + i]  + sp*tm*v[9 + i]  - \
-                sm*tp*v[12 + i] + sm*tp*v[15 + i] - \
-                sp*tp*v[18 + i] + sp*tp*v[21 + i]
+        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  + \
+                sp*tm*v[6 + i]  - sp*tm*v[9 + i]  - \
+                sm*tp*v[12 + i] + sm*tp*v[15 + i] + \
+                sp*tp*v[18 + i] - sp*tp*v[21 + i]
         s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
-                rm*tm*v[6 + i]  + rp*tm*v[9 + i]  - \
+                rp*tm*v[6 + i]  + rm*tm*v[9 + i]  - \
                 rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
-                rm*tp*v[18 + i] + rp*tp*v[21 + i]
+                rp*tp*v[18 + i] + rm*tp*v[21 + i]
         t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
-                rm*sp*v[6 + i]  - rp*sp*v[9 + i]  + \
+                rp*sp*v[6 + i]  - rm*sp*v[9 + i]  + \
                 rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
-                rm*sp*v[18 + i] + rp*sp*v[21 + i]
+                rp*sp*v[18 + i] + rm*sp*v[21 + i]
                 
                 
 @cython.boundscheck(False)
@@ -157,8 +157,8 @@
     tm = 1.0 - coord[2]
     tp = 1.0 + coord[2]
     
-    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rm*sp*tm + vals[3]*rp*sp*tm + \
-        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rm*sp*tp + vals[7]*rp*sp*tp
+    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rp*sp*tm + vals[3]*rm*sp*tm + \
+        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rp*sp*tp + vals[7]*rm*sp*tp
     return 0.125*F
                 
 
@@ -189,7 +189,7 @@
     err = maxnorm(f)  
    
     # begin Newton iteration
-    while (err > tolerance and iterations < 10):
+    while (err > tolerance and iterations < 100):
         linear_hex_J(r, s, t, x, vertices, physical_x)
         d = determinant_3x3(r, s, t)
         x[0] = x[0] - (determinant_3x3(f, s, t)/d)
@@ -198,7 +198,7 @@
         linear_hex_f(f, x, vertices, physical_x)        
         err = maxnorm(f)
         iterations += 1
-        
+
     val = sample_hex_at_unit_point(x, field_values)
     return val
 
@@ -211,7 +211,7 @@
     cdef int ray_id, elem_id, i
     cdef double val
     cdef double[8] field_data
-    cdef long[8] element_indices
+    cdef int[8] element_indices
     cdef double[24] vertices
     cdef double[3] position
     cdef MeshDataContainer* data
@@ -262,9 +262,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef double tetra_real_to_mapped(double* mapped_coord,
-                                 double* vertices,
-                                 double* physical_coord) nogil:
+cdef void tetra_real_to_mapped(double* mapped_coord,
+                               double* vertices,
+                               double* physical_coord) nogil:
     cdef int i
     cdef double d
     cdef double[3] bvec
@@ -294,7 +294,7 @@
     cdef int ray_id, elem_id, i
     cdef double val
     cdef double[4] field_data
-    cdef long[4] element_indices
+    cdef int[4] element_indices
     cdef double[12] vertices
     cdef double[3] position
     cdef double[4] mapped_coord
@@ -307,7 +307,7 @@
 
     get_hit_position(position, userPtr, ray)
     
-    elem_id = ray_id / data.tpe
+    elem_id = ray_id / 4
     for i in range(4):
         element_indices[i] = data.element_indices[elem_id*4+i]
         field_data[i] = data.field_data[elem_id*4+i]


https://bitbucket.org/yt_analysis/yt/commits/acb79b6732c8/
Changeset:   acb79b6732c8
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:41:00+00:00
Summary:     or here
Affected #:  2 files

diff -r 8399a638bcd73dafeb438b92a879b9d29a5159ad -r acb79b6732c820dd2580c857a22bef289f285c4e yt/utilities/lib/mesh_construction.pxd
--- a/yt/utilities/lib/mesh_construction.pxd
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -7,6 +7,6 @@
     Vertex* vertices       # array of triangle vertices
     Triangle* indices      # which vertices belong to which triangles
     double* field_data     # the field values at the vertices
-    long* element_indices  # which vertices belong to which *element*
+    int* element_indices   # which vertices belong to which *element*
     int tpe                # the number of triangles per element
     int vpe                # the number of vertices per element

diff -r 8399a638bcd73dafeb438b92a879b9d29a5159ad -r acb79b6732c820dd2580c857a22bef289f285c4e yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -82,7 +82,7 @@
     cdef rtcg.RTCFilterFunc filter_func
     cdef int tpe, vpe
     cdef int[MAX_NUM_TRI][3] tri_array
-    cdef long* element_indices
+    cdef int* element_indices
     cdef MeshDataContainer datac
 
     def __init__(self, YTEmbreeScene scene,
@@ -136,7 +136,7 @@
         rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER,
                           triangles, 0, sizeof(Triangle))
 
-        cdef long* element_indices = <long *> malloc(ne * self.vpe * sizeof(long))    
+        cdef int* element_indices = <int *> malloc(ne * self.vpe * sizeof(int))    
         for i in range(ne):
             for j in range(self.vpe):
                 element_indices[i*self.vpe + j] = indices_in[i][j]
@@ -154,7 +154,7 @@
 
         for i in range(ne):
             for j in range(self.vpe):
-                field_data[self.vpe*i+j] = data_in[i][j]
+                field_data[i*self.vpe+j] = data_in[i][j]
 
         self.field_data = field_data
 


https://bitbucket.org/yt_analysis/yt/commits/75546befa458/
Changeset:   75546befa458
Branch:      yt
User:        atmyers
Date:        2015-07-22 22:47:16+00:00
Summary:     enabling linear interpolation for hexes in the pixelizer
Affected #:  2 files

diff -r acb79b6732c820dd2580c857a22bef289f285c4e -r 75546befa458d02935e6821ab24ad95206e39de7 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -8,4 +8,6 @@
 cdef void sample_tetra(void* userPtr,
                        rtcr.RTCRay& ray) nogil
 
-
+cdef double sample_hex_at_real_point(double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil

diff -r acb79b6732c820dd2580c857a22bef289f285c4e -r 75546befa458d02935e6821ab24ad95206e39de7 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -521,9 +521,9 @@
                     if check_face_dot(nvertices, ppoint, vertices, signs, 1) == 0:
                         continue
                     # Else, we deposit!
-                    img[pi, pj, pk] = field[ci, 0]
-#                    img[pi, pj, pk] = sample_hex_at_real_point(flat_vertices, \
-#                                                               field_vals, \
-#                                                               physical_x)
+#                    img[pi, pj, pk] = field[ci, 0]
+                    img[pi, pj, pk] = sample_hex_at_real_point(flat_vertices, \
+                                                               field_vals, \
+                                                               physical_x)
 
     return img


https://bitbucket.org/yt_analysis/yt/commits/aed76f13b193/
Changeset:   aed76f13b193
Branch:      yt
User:        atmyers
Date:        2015-07-23 22:42:11+00:00
Summary:     exposing the tetrahedral mesh sampler so it can be used in the pixelizer
Affected #:  2 files

diff -r 75546befa458d02935e6821ab24ad95206e39de7 -r aed76f13b193771bfd97476ff250a4e75e1c6388 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -11,3 +11,7 @@
 cdef double sample_hex_at_real_point(double* vertices,
                                      double* field_values,
                                      double* physical_x) nogil
+
+cdef double sample_tetra_at_real_point(double* vertices,
+                                       double* field_values,
+                                       double* physical_x) nogil

diff -r 75546befa458d02935e6821ab24ad95206e39de7 -r aed76f13b193771bfd97476ff250a4e75e1c6388 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -288,6 +288,23 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef double sample_tetra_at_real_point(double* vertices,
+                                       double* field_values,
+                                       double* physical_x) nogil:
+    cdef double val
+    cdef double mapped_coord[4]
+
+    tetra_real_to_mapped(mapped_coord, 
+                         vertices,
+                         physical_x)    
+        
+    val = sample_tetra_at_unit_point(mapped_coord, field_values)
+    return val
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef void sample_tetra(void* userPtr,
                        rtcr.RTCRay& ray) nogil:
 
@@ -315,11 +332,7 @@
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    tetra_real_to_mapped(mapped_coord, 
-                         vertices,
-                         position)    
-        
-    val = sample_tetra_at_unit_point(mapped_coord, field_data)
+    val = sample_tetra_at_real_point(vertices, field_data, position)
     ray.time = val
 
 @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/223cfa1478d7/
Changeset:   223cfa1478d7
Branch:      yt
User:        atmyers
Date:        2015-07-24 00:15:36+00:00
Summary:     fixing floating point comparison issue in pixelizer
Affected #:  1 file

diff -r aed76f13b193771bfd97476ff250a4e75e1c6388 -r 223cfa1478d7f8982bd3a1842aacc42c616d24f5 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -19,7 +19,9 @@
 cimport libc.math as math
 from fp_utils cimport fmin, fmax, i64min, i64max, imin, imax
 from yt.utilities.exceptions import YTPixelizeError
-from yt.utilities.lib.mesh_samplers cimport sample_hex_at_real_point
+from yt.utilities.lib.mesh_samplers cimport \
+    sample_hex_at_real_point, \
+    sample_tetra_at_real_point
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -391,12 +393,13 @@
     else:
         return -1
     cdef int i, j, n, vi1a, vi1b, vi2a, vi2b
+
     for n in range(nf):
         vi1a = faces[n][0][0]
         vi1b = faces[n][0][1]
         vi2a = faces[n][1][0]
         vi2b = faces[n][1][1]
-        # Shared vertex is vi1b and vi2b
+        # Shared vertex is vi1a and vi2a
         for i in range(3):
             vec1[i] = vertices[vi1b][i] - vertices[vi1a][i]
             vec2[i] = vertices[vi2b][i] - vertices[vi2a][i]
@@ -414,7 +417,7 @@
             else:
                 signs[n] = 1
         else:
-            if dp < 0 and signs[n] < 0:
+            if dp <= 0 and signs[n] < 0:
                 continue
             elif dp >= 0 and signs[n] > 0:
                 continue
@@ -422,6 +425,8 @@
                 return 0
     return 1
 
+ctypedef double (*sample_function_ptr)(double*, double*, double*)
+
 def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords,
                           np.ndarray[np.int64_t, ndim=2] conn,
                           buff_size,
@@ -451,13 +456,17 @@
     cdef np.float64_t *physical_x
     cdef int nvertices = conn.shape[1]
     cdef int nf
+    cdef sample_function_ptr sampler
+    
     # Allocate our signs array
     if nvertices == 4:
         nf = TETRA_NF
+        sampler = sample_tetra_at_real_point
     elif nvertices == 6:
         nf = WEDGE_NF
     elif nvertices == 8:
         nf = HEX_NF
+        sampler = sample_hex_at_real_point
     else:
         raise RuntimeError
     signs = <np.int8_t *> alloca(sizeof(np.int8_t) * nf)
@@ -522,8 +531,8 @@
                         continue
                     # Else, we deposit!
 #                    img[pi, pj, pk] = field[ci, 0]
-                    img[pi, pj, pk] = sample_hex_at_real_point(flat_vertices, \
-                                                               field_vals, \
-                                                               physical_x)
+                    img[pi, pj, pk] = sampler(flat_vertices, \
+                                              field_vals, \
+                                              physical_x)
 
     return img


https://bitbucket.org/yt_analysis/yt/commits/bd8bbe4fcdc9/
Changeset:   bd8bbe4fcdc9
Branch:      yt
User:        atmyers
Date:        2015-08-04 05:59:26+00:00
Summary:     make sure winding is consistent for triangle orderings
Affected #:  1 file

diff -r 223cfa1478d7f8982bd3a1842aacc42c616d24f5 -r bd8bbe4fcdc959f6bb64e6f32a2eaf2278bcb7b2 yt/utilities/lib/mesh_construction.h
--- a/yt/utilities/lib/mesh_construction.h
+++ b/yt/utilities/lib/mesh_construction.h
@@ -10,12 +10,12 @@
 // here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
 // Note that this is the case for Exodus II data.
 int triangulate_hex[MAX_NUM_TRI][3] = {
-  {0, 1, 2}, {0, 2, 3}, // Face is 0 1 2 3 
+  {0, 2, 1}, {0, 3, 2}, // Face is 3 2 1 0 
   {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
   {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
   {1, 2, 6}, {1, 6, 5}, // Face is 1 2 6 5
-  {0, 3, 7}, {0, 7, 4}, // Face is 0 3 7 4
-  {3, 2, 6}, {3, 6, 7}  // Face is 3 2 6 7
+  {0, 7, 3}, {0, 4, 7}, // Face is 3 0 4 7
+  {3, 6, 2}, {3, 7, 6}  // Face is 2 3 7 6
 };
 
 // Similarly, this is used to triangulate the tetrahedral cells


https://bitbucket.org/yt_analysis/yt/commits/cd1691fed0ee/
Changeset:   cd1691fed0ee
Branch:      yt
User:        atmyers
Date:        2015-08-04 05:59:59+00:00
Summary:     expose a few more of these cython functions
Affected #:  1 file

diff -r bd8bbe4fcdc959f6bb64e6f32a2eaf2278bcb7b2 -r cd1691fed0ee96b13beedbf8e70cd4bb0fd906e8 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -15,3 +15,15 @@
 cdef double sample_tetra_at_real_point(double* vertices,
                                        double* field_values,
                                        double* physical_x) nogil
+
+cdef void hex_real_to_mapped(double* mapped_x,
+                             double* vertices,
+                             double* physical_x) nogil
+
+cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil
+
+cdef void tetra_real_to_mapped(double* mapped_coord,
+                               double* vertices,
+                               double* physical_coord) nogil
+
+cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil


https://bitbucket.org/yt_analysis/yt/commits/a28be993de4d/
Changeset:   a28be993de4d
Branch:      yt
User:        atmyers
Date:        2015-08-04 06:01:00+00:00
Summary:     adding a hex real-to-mapped function
Affected #:  1 file

diff -r cd1691fed0ee96b13beedbf8e70cd4bb0fd906e8 -r a28be993de4d4909789fc4f5388ae17c271b9447 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -26,9 +26,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline double determinant_3x3(double* col0, 
-                                   double* col1, 
-                                   double* col2) nogil:
+cdef double determinant_3x3(double* col0, 
+                            double* col1, 
+                            double* col2) nogil:
     return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
            col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
            col0[2]*col1[0]*col2[1] - col0[2]*col1[1]*col2[0]
@@ -241,13 +241,54 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef void hex_real_to_mapped(double* mapped_x,
+                             double* vertices,
+                             double* physical_x) nogil:
+    
+    cdef int i
+    cdef double d, val
+    cdef double[3] f
+    cdef double[3] r
+    cdef double[3] s
+    cdef double[3] t
+    cdef double[3] x
+    cdef double tolerance = 1.0e-9
+    cdef int iterations = 0
+    cdef double err
+   
+    # initial guess
+    for i in range(3):
+        x[i] = 0.0
+    
+    # initial error norm
+    linear_hex_f(f, x, vertices, physical_x)
+    err = maxnorm(f)  
+   
+    # begin Newton iteration
+    while (err > tolerance and iterations < 10):
+        linear_hex_J(r, s, t, x, vertices, physical_x)
+        d = determinant_3x3(r, s, t)
+        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
+        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
+        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
+        linear_hex_f(f, x, vertices, physical_x)        
+        err = maxnorm(f)
+        iterations += 1
+
+    for i in range(3):
+        mapped_x[i] = x[i]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 def test_hex_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
                      np.ndarray[np.float64_t, ndim=1] field_values,
                      np.ndarray[np.float64_t, ndim=1] physical_x):
-    
+
     cdef double val
-   
-    val = sample_hex_at_real_point(<double*> vertices.data, 
+
+    val = sample_hex_at_real_point(<double*> vertices.data,
                                    <double*> field_values.data,
                                    <double*> physical_x.data)
     return val


https://bitbucket.org/yt_analysis/yt/commits/efdd1933f94a/
Changeset:   efdd1933f94a
Branch:      yt
User:        atmyers
Date:        2015-08-04 06:01:39+00:00
Summary:     fixing the issues with black pixels in the element mesh pixelizer
Affected #:  1 file

diff -r a28be993de4d4909789fc4f5388ae17c271b9447 -r efdd1933f94a1c8607f41b01c8fdf1302873b580 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -20,8 +20,10 @@
 from fp_utils cimport fmin, fmax, i64min, i64max, imin, imax
 from yt.utilities.exceptions import YTPixelizeError
 from yt.utilities.lib.mesh_samplers cimport \
-    sample_hex_at_real_point, \
-    sample_tetra_at_real_point
+    sample_hex_at_unit_point, \
+    sample_tetra_at_unit_point, \
+    hex_real_to_mapped, \
+    tetra_real_to_mapped
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -42,6 +44,7 @@
     int WEDGE_NF
     np.uint8_t wedge_face_defs[MAX_NUM_FACES][2][2]
 
+
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -362,6 +365,7 @@
                 img[i, j] = field[fi]
     return img
 
+
 # This function accepts a set of vertices (for a polyhedron) that are
 # assumed to be in order for bottom, then top, in the same clockwise or
 # counterclockwise direction (i.e., like points 1-8 in Figure 4 of the ExodusII
@@ -425,7 +429,9 @@
                 return 0
     return 1
 
-ctypedef double (*sample_function_ptr)(double*, double*, double*)
+
+ctypedef double (*sample_function_ptr)(double*, double*)
+ctypedef void (*transform_function_ptr)(double*, double*, double*)
 
 def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords,
                           np.ndarray[np.int64_t, ndim=2] conn,
@@ -437,45 +443,41 @@
     # Two steps:
     #  1. Is image point within the mesh bounding box?
     #  2. Is image point within the mesh element?
-    # Second is more intensive.  It will require a bunch of dot and cross
-    # products.  We are not guaranteed that the elements will be in the correct
-    # order such that cross products are pointing in right direction, so we
-    # compare against the centroid of the (assumed convex) element.
+    # Second is more intensive.  It involves converting the element vertices to the
+    # mapped coordinate system, and checking whether the result is in-bounds or not
     # Note that we have to have a pseudo-3D pixel buffer.  One dimension will
     # always be 1.
     cdef np.float64_t pLE[3], pRE[3]
     cdef np.float64_t LE[3], RE[3]
     cdef int use
-    cdef np.int8_t *signs
     cdef np.int64_t n, i, j, k, pi, pj, pk, ci, cj, ck
     cdef np.int64_t pstart[3], pend[3]
-    cdef np.float64_t ppoint[3], centroid[3], idds[3], dds[3]
-    cdef np.float64_t **vertices
-    cdef np.float64_t *flat_vertices
+    cdef np.float64_t ppoint[3], idds[3], dds[3]
+    cdef np.float64_t *vertices
     cdef np.float64_t *field_vals
-    cdef np.float64_t *physical_x
     cdef int nvertices = conn.shape[1]
     cdef int nf
-    cdef sample_function_ptr sampler
-    
-    # Allocate our signs array
+    cdef sample_function_ptr sample_func
+    cdef transform_function_ptr transform_func
+    cdef double* mapped_coord
+
+    # Allocate storage for the mapped coordinate
     if nvertices == 4:
         nf = TETRA_NF
-        sampler = sample_tetra_at_real_point
-    elif nvertices == 6:
-        nf = WEDGE_NF
+        sample_func = sample_tetra_at_unit_point
+        transform_func = tetra_real_to_mapped
+        mapped_coord = <double*> alloca(sizeof(double) * 4)
     elif nvertices == 8:
         nf = HEX_NF
-        sampler = sample_hex_at_real_point
+        sample_func = sample_hex_at_unit_point
+        transform_func = hex_real_to_mapped
+        mapped_coord = <double*> alloca(sizeof(double) * 3)
     else:
         raise RuntimeError
-    signs = <np.int8_t *> alloca(sizeof(np.int8_t) * nf)
-    vertices = <np.float64_t **> alloca(sizeof(np.float64_t *) * nvertices)
-    flat_vertices = <np.float64_t *> alloca(3 * sizeof(np.float64_t) * nvertices)
+
+    vertices = <np.float64_t *> alloca(3 * sizeof(np.float64_t) * nvertices)
     field_vals = <np.float64_t *> alloca(sizeof(np.float64_t) * nvertices)
-    physical_x = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-    for i in range(nvertices):
-        vertices[i] = <np.float64_t *> alloca(sizeof(np.float64_t) * 3)
+
     for i in range(3):
         pLE[i] = extents[i][0]
         pRE[i] = extents[i][1]
@@ -485,22 +487,16 @@
         else:
             idds[i] = 1.0 / dds[i]
     for ci in range(conn.shape[0]):
-        # Fill the vertices and compute the centroid
-        centroid[0] = centroid[1] = centroid[2] = 0
+        # Fill the vertices
         LE[0] = LE[1] = LE[2] = 1e60
         RE[0] = RE[1] = RE[2] = -1e60
         for n in range(nvertices): # 8
             cj = conn[ci, n] - index_offset
             field_vals[n] = field[ci, n]
             for i in range(3):
-                vertices[n][i] = coords[cj, i]
-                flat_vertices[3*n + i] = coords[cj, i]
-                centroid[i] += coords[cj, i]
-                LE[i] = fmin(LE[i], vertices[n][i])
-                RE[i] = fmax(RE[i], vertices[n][i])
-        centroid[0] /= nvertices
-        centroid[1] /= nvertices
-        centroid[2] /= nvertices
+                vertices[3*n + i] = coords[cj, i]
+                LE[i] = fmin(LE[i], vertices[3*n+i])
+                RE[i] = fmax(RE[i], vertices[3*n+i])
         use = 1
         for i in range(3):
             if RE[i] < pLE[i] or LE[i] >= pRE[i]:
@@ -513,26 +509,21 @@
         # Now our bounding box intersects, so we get the extents of our pixel
         # region which overlaps with the bounding box, and we'll check each
         # pixel in there.
-        # First, we figure out the dot product of the centroid with all the
-        # faces.
-        check_face_dot(nvertices, centroid, vertices, signs, 0)
         for pi in range(pstart[0], pend[0] + 1):
             ppoint[0] = (pi + 0.5) * dds[0] + pLE[0]
-            physical_x[0] = ppoint[0]
             for pj in range(pstart[1], pend[1] + 1):
                 ppoint[1] = (pj + 0.5) * dds[1] + pLE[1]
-                physical_x[1] = ppoint[1]
                 for pk in range(pstart[2], pend[2] + 1):
                     ppoint[2] = (pk + 0.5) * dds[2] + pLE[2]
-                    physical_x[2] = ppoint[2]
                     # Now we just need to figure out if our ppoint is within
                     # our set of vertices.
-                    if check_face_dot(nvertices, ppoint, vertices, signs, 1) == 0:
+                    hex_real_to_mapped(mapped_coord, vertices, ppoint)
+                    if (math.fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
+                        math.fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
+                        math.fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
                         continue
                     # Else, we deposit!
-#                    img[pi, pj, pk] = field[ci, 0]
-                    img[pi, pj, pk] = sampler(flat_vertices, \
-                                              field_vals, \
-                                              physical_x)
+                    img[pi, pj, pk] = sample_hex_at_unit_point(mapped_coord,
+                                                               field_vals)
 
     return img


https://bitbucket.org/yt_analysis/yt/commits/3f413846097e/
Changeset:   3f413846097e
Branch:      yt
User:        atmyers
Date:        2015-08-04 07:20:48+00:00
Summary:     getting slices to automatically work with tetrahedral data
Affected #:  3 files

diff -r efdd1933f94a1c8607f41b01c8fdf1302873b580 -r 3f413846097ec9e9dccc26147a8db695fa60c001 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -22,8 +22,12 @@
 
 cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil
 
+cdef int hex_check_inside(double* mapped_coord) nogil
+
 cdef void tetra_real_to_mapped(double* mapped_coord,
                                double* vertices,
                                double* physical_coord) nogil
 
 cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil
+
+cdef int tetra_check_inside(double* mapped_coord) nogil

diff -r efdd1933f94a1c8607f41b01c8fdf1302873b580 -r 3f413846097ec9e9dccc26147a8db695fa60c001 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -165,6 +165,17 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef int hex_check_inside(double* mapped_coord) nogil:    
+    if (fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
+        fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
+        fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
+        return 0
+    return 1
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef double sample_hex_at_real_point(double* vertices,
                                      double* field_values,
                                      double* physical_x) nogil:
@@ -303,6 +314,18 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+cdef int tetra_check_inside(double* mapped_coord) nogil:    
+    cdef int i
+    for i in range(4):
+        if (mapped_coord[i] < -1.0e-8 or
+            mapped_coord[i] - 1.0 > 1.0e-8):
+            return 0
+    return 1
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
 cdef void tetra_real_to_mapped(double* mapped_coord,
                                double* vertices,
                                double* physical_coord) nogil:

diff -r efdd1933f94a1c8607f41b01c8fdf1302873b580 -r 3f413846097ec9e9dccc26147a8db695fa60c001 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -23,7 +23,9 @@
     sample_hex_at_unit_point, \
     sample_tetra_at_unit_point, \
     hex_real_to_mapped, \
-    tetra_real_to_mapped
+    tetra_real_to_mapped, \
+    hex_check_inside, \
+    tetra_check_inside
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -432,6 +434,7 @@
 
 ctypedef double (*sample_function_ptr)(double*, double*)
 ctypedef void (*transform_function_ptr)(double*, double*, double*)
+ctypedef int (*check_function_ptr)(double*)
 
 def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords,
                           np.ndarray[np.int64_t, ndim=2] conn,
@@ -459,6 +462,7 @@
     cdef int nf
     cdef sample_function_ptr sample_func
     cdef transform_function_ptr transform_func
+    cdef check_function_ptr check_inside
     cdef double* mapped_coord
 
     # Allocate storage for the mapped coordinate
@@ -466,11 +470,13 @@
         nf = TETRA_NF
         sample_func = sample_tetra_at_unit_point
         transform_func = tetra_real_to_mapped
+        check_inside = tetra_check_inside
         mapped_coord = <double*> alloca(sizeof(double) * 4)
     elif nvertices == 8:
         nf = HEX_NF
         sample_func = sample_hex_at_unit_point
         transform_func = hex_real_to_mapped
+        check_inside = hex_check_inside
         mapped_coord = <double*> alloca(sizeof(double) * 3)
     else:
         raise RuntimeError
@@ -517,13 +523,11 @@
                     ppoint[2] = (pk + 0.5) * dds[2] + pLE[2]
                     # Now we just need to figure out if our ppoint is within
                     # our set of vertices.
-                    hex_real_to_mapped(mapped_coord, vertices, ppoint)
-                    if (math.fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
-                        math.fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
-                        math.fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
+                    transform_func(mapped_coord, vertices, ppoint)
+                    if not check_inside(mapped_coord):
                         continue
                     # Else, we deposit!
-                    img[pi, pj, pk] = sample_hex_at_unit_point(mapped_coord,
-                                                               field_vals)
+                    img[pi, pj, pk] = sample_func(mapped_coord,
+                                                  field_vals)
 
     return img


https://bitbucket.org/yt_analysis/yt/commits/0247aaa9b85e/
Changeset:   0247aaa9b85e
Branch:      yt
User:        atmyers
Date:        2015-08-04 22:31:56+00:00
Summary:     some refactoring of the cython mesh samplers
Affected #:  6 files

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/element_mappings.pxd
--- /dev/null
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -0,0 +1,72 @@
+cimport numpy as np
+from numpy cimport ndarray
+cimport cython
+import numpy as np
+from libc.math cimport fabs, fmax
+
+cdef class ElementSampler:
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+    
+
+    cdef double sample_at_real_point(self,
+                                     double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil
+
+
+cdef class P1Sampler3D(ElementSampler):
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil
+
+
+ctypedef void (*func_type)(double*, double*, double*, double*) nogil
+ctypedef void (*jac_type)(double*, double*, double*, double*, double*, double*) nogil
+
+cdef class NonlinearSolveSampler(ElementSampler):
+
+    cdef int dim
+    cdef int max_iter
+    cdef np.float64_t tolerance
+    cdef func_type func 
+    cdef jac_type jac
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+    
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/element_mappings.pyx
--- /dev/null
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -0,0 +1,346 @@
+"""
+This file contains coordinate mappings between physical coordinates and those
+defined on unit elements, as well as doing the corresponding intracell 
+interpolation on finite element data.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+from numpy cimport ndarray
+cimport cython
+import numpy as np
+from libc.math cimport fabs, fmax
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double determinant_3x3(double* col0, 
+                            double* col1, 
+                            double* col2) nogil:
+    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
+           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
+           col0[2]*col1[0]*col2[1] - col0[2]*col1[1]*col2[0]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef double maxnorm(double* f) nogil:
+    cdef double err
+    cdef int i
+    err = fabs(f[0])
+    for i in range(1, 2):
+        err = fmax(err, fabs(f[i])) 
+    return err
+
+
+cdef class ElementSampler:
+    '''
+
+    This is a base class for sampling the value of a finite element solution
+    at an arbitrary point inside a mesh element. In general, this will be done
+    by transforming the requested physical coordinate into a mapped coordinate 
+    system, sampling the solution in mapped coordinates, and returning the result.
+    This is not to be used directly; use one of the subclasses instead.
+
+    '''
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil:
+        pass
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_real_point(self,
+                                     double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil:
+        cdef double val
+        cdef double mapped_coord[4]
+
+        self.map_real_to_unit(mapped_coord, vertices, physical_x)
+        val = self.sample_at_unit_point(mapped_coord, field_values)
+        return val
+    
+
+cdef class P1Sampler3D(ElementSampler):
+    '''
+
+    This implements sampling inside a linear, tetrahedral mesh element.
+    This mapping is linear and can be inverted easily.
+
+    '''
+
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self, double* mapped_x, 
+                               double* vertices, double* physical_x) nogil:
+    
+        cdef int i
+        cdef double d
+        cdef double[3] bvec
+        cdef double[3] col0
+        cdef double[3] col1
+        cdef double[3] col2
+    
+        for i in range(3):
+            bvec[i] = physical_x[i]       - vertices[9 + i]
+            col0[i] = vertices[0 + i]     - vertices[9 + i]
+            col1[i] = vertices[3 + i]     - vertices[9 + i]
+            col2[i] = vertices[6 + i]     - vertices[9 + i]
+        
+        d = determinant_3x3(col0, col1, col2)
+        mapped_x[0] = determinant_3x3(bvec, col1, col2)/d
+        mapped_x[1] = determinant_3x3(col0, bvec, col2)/d
+        mapped_x[2] = determinant_3x3(col0, col1, bvec)/d
+        mapped_x[3] = 1.0 - mapped_x[0] - mapped_x[1] - mapped_x[2]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self,
+                                     double* coord, 
+                                     double* vals) nogil:
+        return vals[0]*coord[0] + vals[1]*coord[1] + \
+            vals[2]*coord[2] + vals[3]*coord[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        cdef int i
+        for i in range(4):
+            if (mapped_coord[i] < -1.0e-8 or
+                mapped_coord[i] - 1.0 > 1.0e-8):
+                return 0
+        return 1
+
+
+cdef class NonlinearSolveSampler(ElementSampler):
+
+    '''
+
+    This is a base class for handling element samplers that require
+    a nonlinear solve to invert the mapping between coordinate systems.
+    To do this, we perform Newton-Raphson iteration using a specified 
+    system of equations with an analytic Jacobian matrix. This is
+    not to be used directly, use one of the subclasses instead.
+
+    '''
+
+    def __init__(self):
+        self.tolerance = 1.0e-9
+        self.max_iter = 10
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self,
+                               double* mapped_x,
+                               double* vertices,
+                               double* physical_x) nogil:
+        cdef int i
+        cdef double d, val
+        cdef double[3] f
+        cdef double[3] r
+        cdef double[3] s
+        cdef double[3] t
+        cdef double[3] x
+        cdef int iterations = 0
+        cdef double err
+   
+        # initial guess
+        for i in range(3):
+            x[i] = 0.0
+    
+        # initial error norm
+        self.func(f, x, vertices, physical_x)
+        err = maxnorm(f)  
+   
+        # begin Newton iteration
+        while (err > self.tolerance and iterations < self.max_iter):
+            self.jac(r, s, t, x, vertices, physical_x)
+            d = determinant_3x3(r, s, t)
+            x[0] = x[0] - (determinant_3x3(f, s, t)/d)
+            x[1] = x[1] - (determinant_3x3(r, f, t)/d)
+            x[2] = x[2] - (determinant_3x3(r, s, f)/d)
+            self.func(f, x, vertices, physical_x)        
+            err = maxnorm(f)
+            iterations += 1
+
+        for i in range(3):
+            mapped_x[i] = x[i]
+
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    ''' 
+
+    This implements sampling inside a 3D, linear, hexahedral mesh element.
+
+    '''
+
+    def __init__(self):
+        super(Q1Sampler3D, self).__init__()
+        self.dim = 3
+        self.func = Q1Function3D
+        self.jac = Q1Jacobian3D
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self, double* coord, double* vals) nogil:
+        cdef double F, rm, rp, sm, sp, tm, tp
+    
+        rm = 1.0 - coord[0]
+        rp = 1.0 + coord[0]
+        sm = 1.0 - coord[1]
+        sp = 1.0 + coord[1]
+        tm = 1.0 - coord[2]
+        tp = 1.0 + coord[2]
+    
+        F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rp*sp*tm + vals[3]*rm*sp*tm + \
+            vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rp*sp*tp + vals[7]*rm*sp*tp
+        return 0.125*F
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        if (fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
+            fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
+            fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
+            return 0
+        return 1
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void Q1Function3D(double* f,
+                              double* x, 
+                              double* vertices, 
+                              double* phys_x) nogil:    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        f[i] = vertices[0 + i]*rm*sm*tm \
+             + vertices[3 + i]*rp*sm*tm \
+             + vertices[6 + i]*rp*sp*tm \
+             + vertices[9 + i]*rm*sp*tm \
+             + vertices[12 + i]*rm*sm*tp \
+             + vertices[15 + i]*rp*sm*tp \
+             + vertices[18 + i]*rp*sp*tp \
+             + vertices[21 + i]*rm*sp*tp \
+             - 8.0*phys_x[i]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline void Q1Jacobian3D(double* r,
+                              double* s,
+                              double* t,
+                              double* x, 
+                              double* v, 
+                              double* phys_x) nogil:
+    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  + \
+                sp*tm*v[6 + i]  - sp*tm*v[9 + i]  - \
+                sm*tp*v[12 + i] + sm*tp*v[15 + i] + \
+                sp*tp*v[18 + i] - sp*tp*v[21 + i]
+        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
+                rp*tm*v[6 + i]  + rm*tm*v[9 + i]  - \
+                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
+                rp*tp*v[18 + i] + rm*tp*v[21 + i]
+        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
+                rp*sp*v[6 + i]  - rm*sp*v[9 + i]  + \
+                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
+                rp*sp*v[18 + i] + rm*sp*v[21 + i]
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def test_hex_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                     np.ndarray[np.float64_t, ndim=1] field_values,
+                     np.ndarray[np.float64_t, ndim=1] physical_x):
+
+    cdef double val
+
+    cdef Q1Sampler3D sampler = Q1Sampler3D()
+
+    val = sampler.sample_at_real_point(<double*> vertices.data,
+                                       <double*> field_values.data,
+                                       <double*> physical_x.data)
+    return val
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def test_tetra_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                       np.ndarray[np.float64_t, ndim=1] field_values,
+                       np.ndarray[np.float64_t, ndim=1] physical_x):
+
+    cdef double val
+    cdef double[4] mapped_coord
+
+    sampler = P1Sampler3D()
+
+    val = sampler.sample_at_real_point(<double*> vertices.data,
+                                       <double*> field_values.data,
+                                       <double*> physical_x.data)
+
+    return val

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -378,7 +378,6 @@
     cdef int[4] element_indices
     cdef double[12] vertices
     cdef double[3] position
-    cdef double[4] mapped_coord
     cdef MeshDataContainer* data
 
     data = <MeshDataContainer*> userPtr

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -19,13 +19,10 @@
 cimport libc.math as math
 from fp_utils cimport fmin, fmax, i64min, i64max, imin, imax
 from yt.utilities.exceptions import YTPixelizeError
-from yt.utilities.lib.mesh_samplers cimport \
-    sample_hex_at_unit_point, \
-    sample_tetra_at_unit_point, \
-    hex_real_to_mapped, \
-    tetra_real_to_mapped, \
-    hex_check_inside, \
-    tetra_check_inside
+from yt.utilities.lib.element_mappings cimport \
+    ElementSampler, \
+    P1Sampler3D, \
+    Q1Sampler3D
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
     void *alloca(int)
@@ -459,25 +456,16 @@
     cdef np.float64_t *vertices
     cdef np.float64_t *field_vals
     cdef int nvertices = conn.shape[1]
-    cdef int nf
-    cdef sample_function_ptr sample_func
-    cdef transform_function_ptr transform_func
-    cdef check_function_ptr check_inside
     cdef double* mapped_coord
+    cdef ElementSampler sampler
 
     # Allocate storage for the mapped coordinate
     if nvertices == 4:
-        nf = TETRA_NF
-        sample_func = sample_tetra_at_unit_point
-        transform_func = tetra_real_to_mapped
-        check_inside = tetra_check_inside
         mapped_coord = <double*> alloca(sizeof(double) * 4)
+        sampler = P1Sampler3D()
     elif nvertices == 8:
-        nf = HEX_NF
-        sample_func = sample_hex_at_unit_point
-        transform_func = hex_real_to_mapped
-        check_inside = hex_check_inside
         mapped_coord = <double*> alloca(sizeof(double) * 3)
+        sampler = Q1Sampler3D()
     else:
         raise RuntimeError
 
@@ -523,11 +511,11 @@
                     ppoint[2] = (pk + 0.5) * dds[2] + pLE[2]
                     # Now we just need to figure out if our ppoint is within
                     # our set of vertices.
-                    transform_func(mapped_coord, vertices, ppoint)
-                    if not check_inside(mapped_coord):
+                    sampler.map_real_to_unit(mapped_coord, vertices, ppoint)
+                    if not sampler.check_inside(mapped_coord):
                         continue
                     # Else, we deposit!
-                    img[pi, pj, pk] = sample_func(mapped_coord,
-                                                  field_vals)
+                    img[pi, pj, pk] = sampler.sample_at_unit_point(mapped_coord,
+                                                                   field_vals)
 
     return img

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -161,7 +161,7 @@
                          ["yt/utilities/lib/write_array.pyx"])
     config.add_extension("element_mappings",
                          ["yt/utilities/lib/element_mappings.pyx"],
-                         libraries=["m"])
+                         libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"])
     config.add_extension("ragged_arrays",
                          ["yt/utilities/lib/ragged_arrays.pyx"])
     config.add_extension("amr_kdtools", 

diff -r 3f413846097ec9e9dccc26147a8db695fa60c001 -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c yt/utilities/lib/tests/test_element_mappings.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -0,0 +1,61 @@
+"""
+This file contains tests of the intracell interpolation code contained in
+yt/utilities/lib/element_mappings.pyx.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import numpy as np
+
+from yt.testing import assert_almost_equal
+from yt.utilities.lib.element_mappings import \
+    test_tetra_sampler, \
+    test_hex_sampler
+
+
+def check_all_vertices(sampler, vertices, field_values):
+    NV = vertices.shape[0]
+    NDIM = vertices.shape[1]
+    x = np.empty(NDIM)
+    for i in range(NV):
+        x = vertices[i]
+        val = sampler(vertices, field_values, x)
+        assert_almost_equal(val, field_values[i])
+
+
+def test_P1Sampler3D():
+    vertices = np.array([[0.1,  0.1,  0.1],
+                         [0.6,  0.3,  0.2],
+                         [0.2,  0.7,  0.2],
+                         [0.4,  0.4,  0.7]])
+
+    field_values = np.array([1.0, 2.0, 3.0, 4.0])
+
+    sampler = test_tetra_sampler
+    check_all_vertices(sampler, vertices, field_values)
+
+
+def test_Q1Sampler3D():
+    vertices = np.array([[2.00657905, 0.6888599,  1.4375],
+                         [1.8658198,  1.00973171, 1.4375],
+                         [1.97881594, 1.07088163, 1.4375],
+                         [2.12808879, 0.73057381, 1.4375],
+                         [2.00657905, 0.6888599,  1.2   ],
+                         [1.8658198,  1.00973171, 1.2   ],
+                         [1.97881594, 1.07088163, 1.2   ],
+                         [2.12808879, 0.73057381, 1.2   ]])
+
+    field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
+                             0.54464296, 0.54464149, 0.5446415, 0.54464296])
+
+    sampler = test_hex_sampler
+    check_all_vertices(sampler, vertices, field_values)


https://bitbucket.org/yt_analysis/yt/commits/58751a5a526b/
Changeset:   58751a5a526b
Branch:      yt
User:        atmyers
Date:        2015-08-04 23:08:58+00:00
Summary:     some refactoring of the cython mesh samplers
Affected #:  4 files

diff -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c -r 58751a5a526b13b223f80008639e6d9a58924795 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -91,7 +91,7 @@
         self.map_real_to_unit(mapped_coord, vertices, physical_x)
         val = self.sample_at_unit_point(mapped_coord, field_values)
         return val
-    
+
 
 cdef class P1Sampler3D(ElementSampler):
     '''

diff -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c -r 58751a5a526b13b223f80008639e6d9a58924795 yt/utilities/lib/mesh_construction.pyx
--- a/yt/utilities/lib/mesh_construction.pyx
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -170,6 +170,7 @@
         rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
 
     cdef void _set_sampler_type(self, YTEmbreeScene scene):
+
         if self.vpe == 8:
             self.filter_func = <rtcg.RTCFilterFunc> sample_hex
         elif self.vpe == 4:

diff -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c -r 58751a5a526b13b223f80008639e6d9a58924795 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -18,33 +18,15 @@
 cimport pyembree.rtcore_ray as rtcr
 from pyembree.rtcore cimport Vec3f, Triangle, Vertex
 from yt.utilities.lib.mesh_construction cimport MeshDataContainer
+from yt.utilities.lib.element_mappings cimport \
+    P1Sampler3D, \
+    Q1Sampler3D
 cimport numpy as np
 cimport cython
 from libc.math cimport fabs, fmax
 
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double determinant_3x3(double* col0, 
-                            double* col1, 
-                            double* col2) nogil:
-    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
-           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
-           col0[2]*col1[0]*col2[1] - col0[2]*col1[1]*col2[0]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double maxnorm(double* f) nogil:
-    cdef double err
-    cdef int i
-    err = fabs(f[0])
-    for i in range(1, 2):
-        err = fmax(err, fabs(f[i])) 
-    return err
-
+cdef ElementSampler Q1Sampler = Q1Sampler3D()
+cdef ElementSampler P1Sampler = P1Sampler3D()
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -82,141 +64,6 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void linear_hex_f(double* f,
-                              double* x, 
-                              double* vertices, 
-                              double* phys_x) nogil:
-    
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        f[i] = vertices[0 + i]*rm*sm*tm \
-             + vertices[3 + i]*rp*sm*tm \
-             + vertices[6 + i]*rp*sp*tm \
-             + vertices[9 + i]*rm*sp*tm \
-             + vertices[12 + i]*rm*sm*tp \
-             + vertices[15 + i]*rp*sm*tp \
-             + vertices[18 + i]*rp*sp*tp \
-             + vertices[21 + i]*rm*sp*tp \
-             - 8.0*phys_x[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef inline void linear_hex_J(double* r,
-                              double* s,
-                              double* t,
-                              double* x, 
-                              double* v, 
-                              double* phys_x) nogil:
-    
-    cdef int i
-    cdef double rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - x[0]
-    rp = 1.0 + x[0]
-    sm = 1.0 - x[1]
-    sp = 1.0 + x[1]
-    tm = 1.0 - x[2]
-    tp = 1.0 + x[2]
-    
-    for i in range(3):
-        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  + \
-                sp*tm*v[6 + i]  - sp*tm*v[9 + i]  - \
-                sm*tp*v[12 + i] + sm*tp*v[15 + i] + \
-                sp*tp*v[18 + i] - sp*tp*v[21 + i]
-        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
-                rp*tm*v[6 + i]  + rm*tm*v[9 + i]  - \
-                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
-                rp*tp*v[18 + i] + rm*tp*v[21 + i]
-        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
-                rp*sp*v[6 + i]  - rm*sp*v[9 + i]  + \
-                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
-                rp*sp*v[18 + i] + rm*sp*v[21 + i]
-                
-                
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil:
-    cdef double F, rm, rp, sm, sp, tm, tp
-    
-    rm = 1.0 - coord[0]
-    rp = 1.0 + coord[0]
-    sm = 1.0 - coord[1]
-    sp = 1.0 + coord[1]
-    tm = 1.0 - coord[2]
-    tp = 1.0 + coord[2]
-    
-    F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rp*sp*tm + vals[3]*rm*sp*tm + \
-        vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rp*sp*tp + vals[7]*rm*sp*tp
-    return 0.125*F
-                
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef int hex_check_inside(double* mapped_coord) nogil:    
-    if (fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
-        fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
-        fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
-        return 0
-    return 1
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double sample_hex_at_real_point(double* vertices,
-                                     double* field_values,
-                                     double* physical_x) nogil:
-    
-    cdef int i
-    cdef double d, val
-    cdef double[3] f
-    cdef double[3] r
-    cdef double[3] s
-    cdef double[3] t
-    cdef double[3] x
-    cdef double tolerance = 1.0e-9
-    cdef int iterations = 0
-    cdef double err
-   
-    # initial guess
-    for i in range(3):
-        x[i] = 0.0
-    
-    # initial error norm
-    linear_hex_f(f, x, vertices, physical_x)
-    err = maxnorm(f)  
-   
-    # begin Newton iteration
-    while (err > tolerance and iterations < 100):
-        linear_hex_J(r, s, t, x, vertices, physical_x)
-        d = determinant_3x3(r, s, t)
-        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
-        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
-        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
-        linear_hex_f(f, x, vertices, physical_x)        
-        err = maxnorm(f)
-        iterations += 1
-
-    val = sample_hex_at_unit_point(x, field_values)
-    return val
-
-    
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 cdef void sample_hex(void* userPtr,
                      rtcr.RTCRay& ray) nogil:
     cdef int ray_id, elem_id, i
@@ -245,130 +92,13 @@
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    val = sample_hex_at_real_point(vertices, field_data, position)
+    val = Q1Sampler.sample_at_real_point(vertices, field_data, position)
     ray.time = val
 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef void hex_real_to_mapped(double* mapped_x,
-                             double* vertices,
-                             double* physical_x) nogil:
-    
-    cdef int i
-    cdef double d, val
-    cdef double[3] f
-    cdef double[3] r
-    cdef double[3] s
-    cdef double[3] t
-    cdef double[3] x
-    cdef double tolerance = 1.0e-9
-    cdef int iterations = 0
-    cdef double err
-   
-    # initial guess
-    for i in range(3):
-        x[i] = 0.0
-    
-    # initial error norm
-    linear_hex_f(f, x, vertices, physical_x)
-    err = maxnorm(f)  
-   
-    # begin Newton iteration
-    while (err > tolerance and iterations < 10):
-        linear_hex_J(r, s, t, x, vertices, physical_x)
-        d = determinant_3x3(r, s, t)
-        x[0] = x[0] - (determinant_3x3(f, s, t)/d)
-        x[1] = x[1] - (determinant_3x3(r, f, t)/d)
-        x[2] = x[2] - (determinant_3x3(r, s, f)/d)
-        linear_hex_f(f, x, vertices, physical_x)        
-        err = maxnorm(f)
-        iterations += 1
-
-    for i in range(3):
-        mapped_x[i] = x[i]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def test_hex_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
-                     np.ndarray[np.float64_t, ndim=1] field_values,
-                     np.ndarray[np.float64_t, ndim=1] physical_x):
-
-    cdef double val
-
-    val = sample_hex_at_real_point(<double*> vertices.data,
-                                   <double*> field_values.data,
-                                   <double*> physical_x.data)
-    return val
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil:
-    return vals[0]*coord[0] + vals[1]*coord[1] + vals[2]*coord[2] + vals[3]*coord[3]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef int tetra_check_inside(double* mapped_coord) nogil:    
-    cdef int i
-    for i in range(4):
-        if (mapped_coord[i] < -1.0e-8 or
-            mapped_coord[i] - 1.0 > 1.0e-8):
-            return 0
-    return 1
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef void tetra_real_to_mapped(double* mapped_coord,
-                               double* vertices,
-                               double* physical_coord) nogil:
-    cdef int i
-    cdef double d
-    cdef double[3] bvec
-    cdef double[3] col0
-    cdef double[3] col1
-    cdef double[3] col2
-    
-    for i in range(3):
-        bvec[i] = physical_coord[i]   - vertices[9 + i]
-        col0[i] = vertices[0 + i]     - vertices[9 + i]
-        col1[i] = vertices[3 + i]     - vertices[9 + i]
-        col2[i] = vertices[6 + i]     - vertices[9 + i]
-        
-    d = determinant_3x3(col0, col1, col2)
-    mapped_coord[0] = determinant_3x3(bvec, col1, col2)/d
-    mapped_coord[1] = determinant_3x3(col0, bvec, col2)/d
-    mapped_coord[2] = determinant_3x3(col0, col1, bvec)/d
-    mapped_coord[3] = 1.0 - mapped_coord[0] - mapped_coord[1] - mapped_coord[2]
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef double sample_tetra_at_real_point(double* vertices,
-                                       double* field_values,
-                                       double* physical_x) nogil:
-    cdef double val
-    cdef double mapped_coord[4]
-
-    tetra_real_to_mapped(mapped_coord, 
-                         vertices,
-                         physical_x)    
-        
-    val = sample_tetra_at_unit_point(mapped_coord, field_values)
-    return val
-
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
 cdef void sample_tetra(void* userPtr,
                        rtcr.RTCRay& ray) nogil:
 
@@ -387,7 +117,7 @@
 
     get_hit_position(position, userPtr, ray)
     
-    elem_id = ray_id / 4
+    elem_id = ray_id / data.tpe
     for i in range(4):
         element_indices[i] = data.element_indices[elem_id*4+i]
         field_data[i] = data.field_data[elem_id*4+i]
@@ -395,22 +125,5 @@
         vertices[i*3 + 1] = data.vertices[element_indices[i]].y
         vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
 
-    val = sample_tetra_at_real_point(vertices, field_data, position)
+    val = P1Sampler.sample_at_real_point(vertices, field_data, position)
     ray.time = val
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def test_tetra_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
-                       np.ndarray[np.float64_t, ndim=1] field_values,
-                       np.ndarray[np.float64_t, ndim=1] physical_x):
-
-    cdef double val
-    cdef double[4] mapped_coord
-    tetra_real_to_mapped(mapped_coord, 
-                         <double*> vertices.data,
-                         <double*> physical_x.data)
-
-    val = sample_tetra_at_unit_point(mapped_coord, 
-                                     <double*> field_values.data)
-    return val

diff -r 0247aaa9b85edb2da3bde30883966b6ac4fd787c -r 58751a5a526b13b223f80008639e6d9a58924795 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -429,10 +429,6 @@
     return 1
 
 
-ctypedef double (*sample_function_ptr)(double*, double*)
-ctypedef void (*transform_function_ptr)(double*, double*, double*)
-ctypedef int (*check_function_ptr)(double*)
-
 def pixelize_element_mesh(np.ndarray[np.float64_t, ndim=2] coords,
                           np.ndarray[np.int64_t, ndim=2] conn,
                           buff_size,


https://bitbucket.org/yt_analysis/yt/commits/75accced1900/
Changeset:   75accced1900
Branch:      yt
User:        atmyers
Date:        2015-08-04 23:17:32+00:00
Summary:     huh, that worked apparently
Affected #:  2 files

diff -r 58751a5a526b13b223f80008639e6d9a58924795 -r 75accced19007b96249c85a9b3b3dce76ff69344 yt/utilities/lib/mesh_samplers.pxd
--- a/yt/utilities/lib/mesh_samplers.pxd
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -7,27 +7,3 @@
 
 cdef void sample_tetra(void* userPtr,
                        rtcr.RTCRay& ray) nogil
-
-cdef double sample_hex_at_real_point(double* vertices,
-                                     double* field_values,
-                                     double* physical_x) nogil
-
-cdef double sample_tetra_at_real_point(double* vertices,
-                                       double* field_values,
-                                       double* physical_x) nogil
-
-cdef void hex_real_to_mapped(double* mapped_x,
-                             double* vertices,
-                             double* physical_x) nogil
-
-cdef double sample_hex_at_unit_point(double* coord, double* vals) nogil
-
-cdef int hex_check_inside(double* mapped_coord) nogil
-
-cdef void tetra_real_to_mapped(double* mapped_coord,
-                               double* vertices,
-                               double* physical_coord) nogil
-
-cdef double sample_tetra_at_unit_point(double* coord, double* vals) nogil
-
-cdef int tetra_check_inside(double* mapped_coord) nogil

diff -r 58751a5a526b13b223f80008639e6d9a58924795 -r 75accced19007b96249c85a9b3b3dce76ff69344 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -1,7 +1,6 @@
 """
-This file contains coordinate mappings between physical coordinates and those
-defined on unit elements, as well as functions that do the corresponding intracell
-interpolation.
+This file contains functions that sample a surface mesh at the point hit by
+a ray. These can be used with pyembree in the form of "filter feedback functions."
 
 
 """
@@ -19,6 +18,7 @@
 from pyembree.rtcore cimport Vec3f, Triangle, Vertex
 from yt.utilities.lib.mesh_construction cimport MeshDataContainer
 from yt.utilities.lib.element_mappings cimport \
+    ElementSampler, \
     P1Sampler3D, \
     Q1Sampler3D
 cimport numpy as np


https://bitbucket.org/yt_analysis/yt/commits/eb1a1f47771f/
Changeset:   eb1a1f47771f
Branch:      yt
User:        atmyers
Date:        2015-08-04 23:20:21+00:00
Summary:     updating tests
Affected #:  2 files

diff -r 75accced19007b96249c85a9b3b3dce76ff69344 -r eb1a1f47771ffb02d38fcfab88310e3e5e627864 yt/utilities/lib/tests/test_element_mappings.py
--- a/yt/utilities/lib/tests/test_element_mappings.py
+++ b/yt/utilities/lib/tests/test_element_mappings.py
@@ -40,8 +40,7 @@
 
     field_values = np.array([1.0, 2.0, 3.0, 4.0])
 
-    sampler = test_tetra_sampler
-    check_all_vertices(sampler, vertices, field_values)
+    check_all_vertices(test_tetra_sampler, vertices, field_values)
 
 
 def test_Q1Sampler3D():
@@ -57,5 +56,4 @@
     field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
                              0.54464296, 0.54464149, 0.5446415, 0.54464296])
 
-    sampler = test_hex_sampler
-    check_all_vertices(sampler, vertices, field_values)
+    check_all_vertices(test_hex_sampler, vertices, field_values)

diff -r 75accced19007b96249c85a9b3b3dce76ff69344 -r eb1a1f47771ffb02d38fcfab88310e3e5e627864 yt/utilities/lib/tests/test_mesh_samplers.py
--- a/yt/utilities/lib/tests/test_mesh_samplers.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-This file contains tests of the intracell interpolation code contained is
-yt/utilities/lib/mesh_samplers.pyx.
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2015, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-import numpy as np
-
-from yt.testing import assert_almost_equal
-from yt.utilities.lib.mesh_samplers import \
-    test_hex_sampler, \
-    test_tetra_sampler
-
-
-def check_all_vertices(sampler, vertices, field_values):
-    NV = vertices.shape[0]
-    NDIM = vertices.shape[1]
-    x = np.empty(NDIM)
-    for i in range(NV):
-        x = vertices[i]
-        val = sampler(vertices, field_values, x)
-        assert_almost_equal(val, field_values[i])
-
-
-def test_P1Sampler3D():
-    vertices = np.array([[0.1,  0.1,  0.1],
-                         [0.6,  0.3,  0.2],
-                         [0.2,  0.7,  0.2],
-                         [0.4,  0.4,  0.7]], dtype=np.float64)
-
-    field_values = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float64)
-
-    check_all_vertices(test_tetra_sampler, vertices, field_values)
-
-
-def test_Q1Sampler3D():
-    vertices = np.array([[2.00657905, 0.6888599,  1.4375],
-                         [1.8658198,  1.00973171, 1.4375],
-                         [1.97881594, 1.07088163, 1.4375],
-                         [2.12808879, 0.73057381, 1.4375],
-                         [2.00657905, 0.6888599,  1.2   ],
-                         [1.8658198,  1.00973171, 1.2   ],
-                         [1.97881594, 1.07088163, 1.2   ],
-                         [2.12808879, 0.73057381, 1.2   ]])
-
-    field_values = np.array([0.4526278, 0.45262656, 0.45262657, 0.4526278,
-                             0.54464296, 0.54464149, 0.5446415, 0.54464296])
-
-    check_all_vertices(test_hex_sampler, vertices, field_values)


https://bitbucket.org/yt_analysis/yt/commits/98d0c1d7b409/
Changeset:   98d0c1d7b409
Branch:      yt
User:        atmyers
Date:        2015-08-05 18:17:35+00:00
Summary:     don't link against embree here
Affected #:  1 file

diff -r eb1a1f47771ffb02d38fcfab88310e3e5e627864 -r 98d0c1d7b409ecad6430e05e16aa760c7b72d717 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -112,8 +112,8 @@
                  "yt/utilities/lib/pixelization_constants.c"],
                include_dirs=["yt/utilities/lib/"],
                language="c++",
-               libraries=["m", "embree"], depends=["yt/utilities/lib/fp_utils.pxd",
-                                  "yt/utilities/lib/pixelization_constants.h"])
+               libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd",
+                                   "yt/utilities/lib/pixelization_constants.h"])
     config.add_extension("Octree", 
                 ["yt/utilities/lib/Octree.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt/commits/570395c326b9/
Changeset:   570395c326b9
Branch:      yt
User:        atmyers
Date:        2015-08-05 19:46:26+00:00
Summary:     making the mesh_traversal and mesh_construction import conditional
Affected #:  1 file

diff -r 98d0c1d7b409ecad6430e05e16aa760c7b72d717 -r 570395c326b9e503b70f3c0da8ea1a7110cf2cb7 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -22,14 +22,20 @@
 from .utils import new_volume_render_sampler, data_source_or_all, \
     get_corners, new_projection_sampler, new_mesh_sampler
 from yt.visualization.image_writer import apply_colormap
-
-from yt.utilities.lib.mesh_traversal import YTEmbreeScene
-from yt.utilities.lib.mesh_construction import ElementMesh
-
 from .zbuffer_array import ZBuffer
 from yt.utilities.lib.misc_utilities import \
     zlines, zpoints
 
+from yt.utilities.on_demand_imports import NotAModule
+try:
+    from yt.utilities.lib import mesh_traversal
+except ImportError:
+    mesh_traversal = NotAModule("pyembree")
+try:
+    from yt.utilities.lib import mesh_construction
+except ImportError:
+    mesh_construction = NotAModule("pyembree")
+
 
 class RenderSource(ParallelAnalysisInterface):
 
@@ -279,7 +285,7 @@
         assert(self.field is not None)
         assert(self.data_source is not None)
 
-        self.scene = YTEmbreeScene()
+        self.scene = mesh_traversal.YTEmbreeScene()
 
         self.build_mesh()
 
@@ -299,10 +305,10 @@
         # convert the indices to zero-based indexing
         indices = self.data_source.ds.index.meshes[0].connectivity_indices - 1
 
-        self.mesh = ElementMesh(self.scene,
-                                vertices,
-                                indices,
-                                field_data.d)
+        self.mesh = mesh_construction.ElementMesh(self.scene,
+                                                  vertices,
+                                                  indices,
+                                                  field_data.d)
 
     def render(self, camera):
 


https://bitbucket.org/yt_analysis/yt/commits/7f399a39d070/
Changeset:   7f399a39d070
Branch:      yt
User:        atmyers
Date:        2015-08-05 20:04:12+00:00
Summary:     missed one here
Affected #:  1 file

diff -r 570395c326b9e503b70f3c0da8ea1a7110cf2cb7 -r 7f399a39d070753cab59d7879da914b59e0ec287 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -2,8 +2,11 @@
 from yt.data_objects.static_output import Dataset
 from yt.utilities.lib.grid_traversal import \
     VolumeRenderSampler, InterpolatedProjectionSampler, ProjectionSampler
-from yt.utilities.lib.mesh_traversal import MeshSampler
-
+from yt.utilities.on_demand_imports import NotAModule
+try:
+    from yt.utilities.lib import mesh_traversal
+except ImportError:
+    mesh_traversal = NotAModule("pyembree")
 
 def data_source_or_all(data_source):
     if isinstance(data_source, Dataset):
@@ -24,7 +27,7 @@
         params['width'],
     )
 
-    sampler = MeshSampler(*args)
+    sampler = mesh_traversal.MeshSampler(*args)
     return sampler
 
 


https://bitbucket.org/yt_analysis/yt/commits/b717486e9b33/
Changeset:   b717486e9b33
Branch:      yt
User:        atmyers
Date:        2015-08-05 20:30:28+00:00
Summary:     fixing the volume rendering tests to use the new API for LineSource
Affected #:  1 file

diff -r 7f399a39d070753cab59d7879da914b59e0ec287 -r b717486e9b33958d37cc6e2f509385fb0aad0b36 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -40,9 +40,9 @@
 
     # DRAW SOME LINES
     npoints = 100
-    vertices = np.random.random([npoints, 3])
+    vertices = np.random.random([npoints, 2, 3])
     colors = np.random.random([npoints, 4])
-    colors[:,3] = 0.10
+    colors[:, 3] = 0.10
 
     box_source = BoxSource(ds.domain_left_edge, ds.domain_right_edge, color=[1.,1.,1.,1.0])
     sc.add_source(box_source)


https://bitbucket.org/yt_analysis/yt/commits/9b7c67154f54/
Changeset:   9b7c67154f54
Branch:      yt
User:        atmyers
Date:        2015-08-05 21:50:45+00:00
Summary:     need to make the LineSource API change for grid sources as well
Affected #:  1 file

diff -r b717486e9b33958d37cc6e2f509385fb0aad0b36 -r 9b7c67154f54385b6aaa548d600de2c090730680 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -568,7 +568,7 @@
         vertices = np.empty([corners.shape[2]*2*12, 3])
         for i in range(3):
             vertices[:, i] = corners[order, i, ...].ravel(order='F')
-        vertices = vertices.reshape((12, 2, 3))
+        vertices = vertices.reshape((corners.shape[2]*12, 2, 3))
 
         super(GridSource, self).__init__(vertices, colors, color_stride=24)
 


https://bitbucket.org/yt_analysis/yt/commits/c8672859dd9c/
Changeset:   c8672859dd9c
Branch:      yt
User:        atmyers
Date:        2015-08-05 22:51:40+00:00
Summary:     fixing the last of the unit tests
Affected #:  1 file

diff -r 9b7c67154f54385b6aaa548d600de2c090730680 -r c8672859dd9c533535d05f8e2173d708d55016f5 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -18,10 +18,9 @@
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
-    _get_coord_fields, \
-    _get_vert_fields
+    _get_coord_fields,
 import yt.visualization._MPL as _MPL
-from yt.fields.derived_field import NullFunc
+
 
 class CartesianCoordinateHandler(CoordinateHandler):
 
@@ -40,10 +39,10 @@
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")
-            f3 = _get_vert_fields(axi)
-            registry.add_field(("index", "vertex_%s" % ax), function = f3,
-                               display_field = False,
-                               units = "code_length")
+            #f3 = _get_vert_fields(axi)
+            #registry.add_field(("index", "vertex_%s" % ax), function = f3,
+            #                   display_field = False,
+            #                   units = "code_length")
         def _cell_volume(field, data):
             rv  = data["index", "dx"].copy(order='K')
             rv *= data["index", "dy"]


https://bitbucket.org/yt_analysis/yt/commits/0619318ab59b/
Changeset:   0619318ab59b
Branch:      yt
User:        atmyers
Date:        2015-08-05 22:53:34+00:00
Summary:     misplaced comma
Affected #:  1 file

diff -r c8672859dd9c533535d05f8e2173d708d55016f5 -r 0619318ab59be9ca116e08ddc215a1eee242fb4d yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -18,7 +18,7 @@
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
-    _get_coord_fields,
+    _get_coord_fields
 import yt.visualization._MPL as _MPL
 
 


https://bitbucket.org/yt_analysis/yt/commits/fa46703c6a93/
Changeset:   fa46703c6a93
Branch:      yt
User:        atmyers
Date:        2015-08-05 22:56:35+00:00
Summary:     fixing a divide-by-zero issue in the vr code
Affected #:  1 file

diff -r 0619318ab59be9ca116e08ddc215a1eee242fb4d -r fa46703c6a931dd2a7012a824bec9cf3a44f914f yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -372,9 +372,10 @@
                                 talpha = image[x0, yi0, 3]
                                 image[x0, yi0, 3] = alpha[3] + talpha * (1 - alpha[3])
                                 for i in range(3):
-                                    image[x0, yi0, i] = (alpha[3]*alpha[i] + image[x0, yi0, i]*talpha*(1.0-alpha[3]))/image[x0,yi0,3]
                                     if image[x0, yi0, 3] == 0.0:
                                         image[x0, yi0, i] = 0.0
+                                    else:
+                                        image[x0, yi0, i] = (alpha[3]*alpha[i] + image[x0, yi0, i]*talpha*(1.0-alpha[3]))/image[x0,yi0,3]
                             else:
                                 for i in range(4):
                                     image[x0, yi0, i] = alpha[i]


https://bitbucket.org/yt_analysis/yt/commits/5d5c2da33078/
Changeset:   5d5c2da33078
Branch:      yt
User:        atmyers
Date:        2015-08-09 20:05:13+00:00
Summary:     adding a temp field
Affected #:  1 file

diff -r fa46703c6a931dd2a7012a824bec9cf3a44f914f -r 5d5c2da33078d47b543ad3a34f96586b77c61566 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -259,7 +259,7 @@
 
 class IOHandlerStreamUnstructured(BaseIOHandler):
     _dataset_type = "stream_unstructured"
-    _node_types = ("diffused", "convected", "u")
+    _node_types = ("diffused", "convected", "u", "temp")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields


https://bitbucket.org/yt_analysis/yt/commits/db3907222e68/
Changeset:   db3907222e68
Branch:      yt
User:        atmyers
Date:        2015-08-12 19:15:53+00:00
Summary:     typo in docstring
Affected #:  1 file

diff -r 5d5c2da33078d47b543ad3a34f96586b77c61566 -r db3907222e682bf7a1c953968be280c35100ab7e yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -154,7 +154,7 @@
 
     This is a base class for handling element samplers that require
     a nonlinear solve to invert the mapping between coordinate systems.
-    To do this, we perform Newton-Raphson iteration using a specificed 
+    To do this, we perform Newton-Raphson iteration using a specified 
     system of equations with an analytic Jacobian matrix. This is
     not to be used directly, use one of the subclasses instead.
 


https://bitbucket.org/yt_analysis/yt/commits/303548191ca8/
Changeset:   303548191ca8
Branch:      yt
User:        atmyers
Date:        2015-08-17 01:57:23+00:00
Summary:     adding a stub for some documentation for the unstructured mesh renderer
Affected #:  2 files

diff -r db3907222e682bf7a1c953968be280c35100ab7e -r 303548191ca883769428e34762a23cac95259a03 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -15,6 +15,7 @@
    callbacks
    manual_plotting
    volume_rendering
+   unstructured_mesh_rendering
    hardware_volume_rendering
    sketchfab
    mapserver

diff -r db3907222e682bf7a1c953968be280c35100ab7e -r 303548191ca883769428e34762a23cac95259a03 doc/source/visualizing/unstructured_mesh_rendering.rst
--- /dev/null
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -0,0 +1,28 @@
+.. _unstructured_mesh_rendering:
+
+Unstructured Mesh Rendering
+===========================
+
+Beginning with version 3.3, yt has the ability to volume render unstructured
+meshes from, for example, finite element calculations. In order to use this
+capability, a few additional dependencies are required beyond those you get
+when you run the install script. First, embree (a fast software ray-tracing
+library from Intel) must be installed, following the instructions here. 
+Second, the python bindings for embree (called ''pyembree'') must also 
+be installed. 
+
+Once the pre-requisites are installed, unstructured mesh data can be rendered
+much like any other dataset. In particular, a new type of RenderSource object
+has been defined, called the MeshSource, that represents the unstructured mesh
+data that will be rendered. The user creates this object, and also defines a 
+camera that specifies your viewpoint into the scene. When render() is called,
+a set of rays are cast at the source. Each time a ray strikes the source mesh,
+the data is sampled at the intersection point at the resulting value gets 
+saved into an image.
+
+See below for examples. First, here is an example of rendering a hexahedral mesh.
+
+Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+
+Finally, here is a script that creates frames of a movie. It calls the rotate()
+method 300 times, saving a new image to the disk each time.


https://bitbucket.org/yt_analysis/yt/commits/cd9357aabfbf/
Changeset:   cd9357aabfbf
Branch:      yt
User:        atmyers
Date:        2015-08-17 14:33:56+00:00
Summary:     adding some documentation about loading streaming unstructured mesh datasets
Affected #:  2 files

diff -r 303548191ca883769428e34762a23cac95259a03 -r cd9357aabfbff6ffdacb6edb73bc53583183c221 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1026,6 +1026,60 @@
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
 
+Unstructured Grid Data
+----------------------
+
+See :ref:`loading-numpy-array`,
+:func:`~yt.frontends.stream.data_structures.load_unstructured_mesh` for
+more detail.
+
+In addition to the above grid types, you can also load data stored on
+unstructured meshes. This type of mesh is used, for example, in many
+finite element calculations. Currently, hexahedral, tetrahedral, and
+wedge-shaped mesh elements are supported.
+
+To load an unstructured mesh, you need to specify the following. First,
+you need to have a coordinates array, which should be an (L, 3) array
+that stores the (x, y, z) positions of all of the vertices in the mesh.
+Second, you need to specify a connectivity array, which describes how
+those vertices are connected into mesh elements. The connectivity array
+should be (N, M), where N is the number of elements and M is the
+connectivity length, i.e. the number of vertices per element. Finally,
+you must also specify a data dictionary, where the keys should be
+the names of the fields and the values should be numpy arrays that
+contain the field data. These arrays can either supply the cell-averaged
+data for each element, in which case they would be (N, 1), or they
+can have node-centered data, in which case they would also be (N, M).
+
+Here is an example of how to load an in-memory, unstructured mesh dataset:
+
+.. code-block:: python
+
+   import yt
+   import numpy
+   from yt.utilities.exodusII_reader import get_data
+
+   coords, connectivity, data = get_data("data/out.e-s010")
+
+This uses a publically available MOOSE dataset along with the get_data
+function to parse the coords, connectivty, and data. Then, these
+can be loaded as an in-memory dataset as follows:
+
+.. code-block:: python
+
+    mesh_id = 0
+    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+Note that load_unstructured_mesh can take either a single or a list of meshes.
+Here, we have selected only the first mesh to load.
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Integration is not implemented.
+* Some functions may behave oddly or not work at all.
+* Data must already reside in memory.
+
 Generic Particle Data
 ---------------------
 

diff -r 303548191ca883769428e34762a23cac95259a03 -r cd9357aabfbff6ffdacb6edb73bc53583183c221 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1634,16 +1634,13 @@
 
     Particle fields are detected as one-dimensional fields. The number of particles
     is set by the "number_of_particles" key in data.
-    
+
     Parameters
     ----------
     data : dict or list of dicts
         This is a list of dicts of numpy arrays, where each element in the list
-        is a different mesh, and where the keys of dicts are the field names. 
-        Note that the data in the numpy arrays should define the cell-averaged
-        value for of the quantity in the mesh cells, although this will change
-        with subsequent generations of unstructured mesh support.  If a dict is
-        supplied, this will be assumed to be the only mesh.
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh.
     connectivity : list of array_like or array_like
         This is the connectivity array for the meshes; this should either be a
         list where each element in the list is a numpy array or a single numpy


https://bitbucket.org/yt_analysis/yt/commits/dd01241620ea/
Changeset:   dd01241620ea
Branch:      yt
User:        atmyers
Date:        2015-08-17 14:55:28+00:00
Summary:     adding sample rendering scripts to docs
Affected #:  1 file

diff -r cd9357aabfbff6ffdacb6edb73bc53583183c221 -r dd01241620ea0cc538e33a790b11b240581eb2d0 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -22,7 +22,113 @@
 
 See below for examples. First, here is an example of rendering a hexahedral mesh.
 
+.. python-script::
+   import yt
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load the data
+   coords, connectivity, data = get_data("data/out.e-s010")
+   mesh_id = 0
+   field_name = ('gas', 'diffused')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.resolution = (800, 800)
+   cam.set_position(camera_position, north_vector)
+
+   # make the image
+   im = ms.render(cam)
+
+   # plot and save
+   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0, vmax=2.0)
+   plt.gca().axes.get_xaxis().set_visible(False)
+   plt.gca().axes.get_yaxis().set_visible(False)
+   cb = plt.colorbar()
+   cb.set_label(field_name[1])
+   plt.savefig('hex_mesh_render.png')
+
 Next, here is an example of rendering a dataset with tetrahedral mesh elements.
 
+.. python-script::
+   import yt
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load the data
+   filename = "../moose/test/tests/mesh/high_order_elems/gold/high_order_elems_tet4_refine_out.e"
+   coords, connectivity, data = get_data(filename)
+   mesh_id = 0
+   field_name = ('gas', 'u')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
+   cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.resolution = (800, 800)
+   cam.set_position(camera_position, north_vector)
+
+   # make the image
+   im = ms.render(cam)
+
+   # plot and save
+   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0.0, vmax=1.0)
+   plt.gca().axes.get_xaxis().set_visible(False)
+   plt.gca().axes.get_yaxis().set_visible(False)
+   cb = plt.colorbar()
+   cb.set_label(field_name[1])
+   plt.savefig('tet_mesh_render.png')
+
 Finally, here is a script that creates frames of a movie. It calls the rotate()
 method 300 times, saving a new image to the disk each time.
+
+.. python-script::
+   import yt
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load dataset
+   coords, connectivity, data = get_data("data/out.e-s010")
+   mesh_id = 0
+   field_name = ('gas', 'diffused')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.set_position(camera_position, north_vector)
+   cam.steady_north = True
+
+   # make movie frames
+   num_frames = 301
+   for i in range(num_frames):
+       cam.rotate(2.0*np.pi/num_frames)
+       im = ms.render(cam)
+       plt.imshow(im, cmap='Eos A', origin='lower',vmin=0.0, vmax=2.0)
+       plt.gca().axes.get_xaxis().set_visible(False)
+       plt.gca().axes.get_yaxis().set_visible(False)
+       cb = plt.colorbar()
+       cb.set_label('diffused')
+       plt.savefig('movie_frames/surface_render_%.4d.png' % i)
+       plt.clf()


https://bitbucket.org/yt_analysis/yt/commits/8d42126f6741/
Changeset:   8d42126f6741
Branch:      yt
User:        atmyers
Date:        2015-08-17 15:01:15+00:00
Summary:     adding some example images to the docs
Affected #:  3 files

diff -r dd01241620ea0cc538e33a790b11b240581eb2d0 -r 8d42126f67415b77b4d19a109f23443de3280cab doc/source/visualizing/_images/hex_mesh_render.png
Binary file doc/source/visualizing/_images/hex_mesh_render.png has changed

diff -r dd01241620ea0cc538e33a790b11b240581eb2d0 -r 8d42126f67415b77b4d19a109f23443de3280cab doc/source/visualizing/_images/tet_mesh_render.png
Binary file doc/source/visualizing/_images/tet_mesh_render.png has changed

diff -r dd01241620ea0cc538e33a790b11b240581eb2d0 -r 8d42126f67415b77b4d19a109f23443de3280cab doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -56,6 +56,8 @@
    cb.set_label(field_name[1])
    plt.savefig('hex_mesh_render.png')
 
+.. image:: _images/hex_mesh_render.png
+
 Next, here is an example of rendering a dataset with tetrahedral mesh elements.
 
 .. python-script::
@@ -94,6 +96,8 @@
    cb.set_label(field_name[1])
    plt.savefig('tet_mesh_render.png')
 
+.. image:: _images/tet_mesh_render.png
+
 Finally, here is a script that creates frames of a movie. It calls the rotate()
 method 300 times, saving a new image to the disk each time.
 


https://bitbucket.org/yt_analysis/yt/commits/de29071e71b5/
Changeset:   de29071e71b5
Branch:      yt
User:        atmyers
Date:        2015-08-17 15:10:30+00:00
Summary:     updating reference docs and adding links to them in the narrative docs
Affected #:  2 files

diff -r 8d42126f67415b77b4d19a109f23443de3280cab -r de29071e71b533885406e55925060049242a7159 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -397,6 +397,7 @@
    ~yt.frontends.stream.data_structures.load_amr_grids
    ~yt.frontends.stream.data_structures.load_particles
    ~yt.frontends.stream.data_structures.load_hexahedral_mesh
+   ~yt.frontends.stream.data_structures.load_unstructured_mesh
 
 Derived Datatypes
 -----------------
@@ -632,6 +633,7 @@
    ~yt.visualization.volume_rendering.api.BoxSource
    ~yt.visualization.volume_rendering.api.GridSource
    ~yt.visualization.volume_rendering.api.CoordinateVectorSource
+   ~yt.visualization.volume_rendering.render_source.MeshSource
 
 Streamlining
 ^^^^^^^^^^^^

diff -r 8d42126f67415b77b4d19a109f23443de3280cab -r de29071e71b533885406e55925060049242a7159 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -12,10 +12,15 @@
 be installed. 
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
-much like any other dataset. In particular, a new type of RenderSource object
-has been defined, called the MeshSource, that represents the unstructured mesh
-data that will be rendered. The user creates this object, and also defines a 
-camera that specifies your viewpoint into the scene. When render() is called,
+much like any other dataset. In particular, a new type of 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` object
+has been defined, called the 
+:class:`~yt.visualization.volume_rendering.render_source.MeshSource`, that
+represents the unstructured mesh data that will be rendered. The user creates 
+this object, and also defines a
+:class:`~yt.visualization.volume_rendering.camera.Camera` 
+that specifies your viewpoint into the scene. When 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
 a set of rays are cast at the source. Each time a ray strikes the source mesh,
 the data is sampled at the intersection point and the resulting value gets 
 saved into an image.


https://bitbucket.org/yt_analysis/yt/commits/22452100ef0f/
Changeset:   22452100ef0f
Branch:      yt
User:        atmyers
Date:        2015-08-17 15:17:56+00:00
Summary:     adding some web links in the docs
Affected #:  2 files

diff -r de29071e71b533885406e55925060049242a7159 -r 22452100ef0fea501b5d9b14ae68d0eea7e57b2c doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1061,9 +1061,9 @@
 
    coords, connectivity, data = get_data("data/out.e-s010")
 
-This uses a publically available MOOSE dataset along with the get_data
-function to parse the coords, connectivty, and data. Then, these
-can be loaded as an in-memory dataset as follows:
-This uses a publicly available `MOOSE <http://mooseframework.org/>`_ 
+dataset along with the get_data function to parse the coords, connectivity, 
+and data. Then, these can be loaded as an in-memory dataset as follows:
 
 .. code-block:: python
 

diff -r de29071e71b533885406e55925060049242a7159 -r 22452100ef0fea501b5d9b14ae68d0eea7e57b2c doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -6,10 +6,10 @@
 Beginning with version 3.3, yt has the ability to volume render unstructured
 meshes from, for example, finite element calculations. In order to use this
 capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, embree (a fast software ray-tracing
-library from Intel) must be installed, following the instructions here. 
-Second, the python bindings for embree (called ''pyembree'') must also 
-be installed. 
+when you run the install script. First, `embree <https://embree.github.io>`
+(a fast software ray-tracing library from Intel) must be installed, following 
+the instructions there. Second, the python bindings for embree (called 
+`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. 
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
 much like any other dataset. In particular, a new type of 


https://bitbucket.org/yt_analysis/yt/commits/015ffcf85beb/
Changeset:   015ffcf85beb
Branch:      yt
User:        atmyers
Date:        2015-08-17 18:21:01+00:00
Summary:     adding a test of the surface mesh renderer that only gets run if pyembree is found
Affected #:  1 file

diff -r 22452100ef0fea501b5d9b14ae68d0eea7e57b2c -r 015ffcf85beb779d00e98e54662049a217a7c4e5 yt/visualization/volume_rendering/tests/test_mesh_render.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -0,0 +1,35 @@
+"""
+Test Surface Mesh Rendering
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import yt
+import numpy as np
+from yt.testing import requires_module
+from yt.visualization.volume_rendering.render_source import MeshSource
+from yt.visualization.volume_rendering.camera import Camera
+from yt.frontends.stream.sample_data.unstructured_mesh import \
+    _connectivity, \
+    _coordinates
+
+ at requires_module("pyembree")
+def test_surface_mesh_render():
+    data ={}
+    data[('gas', 'diffused')] = np.ones_like(_connectivity)
+    ds = yt.load_unstructured_mesh(data, _connectivity, _coordinates)
+    ms = MeshSource(ds, ('gas', 'diffused'))
+    cam = Camera(ds)
+    im = ms.render(cam)
+    return im
+
+
+if __name__ == "__main__":
+    im = test_surface_mesh_render()


https://bitbucket.org/yt_analysis/yt/commits/1f9ffd5ca403/
Changeset:   1f9ffd5ca403
Branch:      yt
User:        atmyers
Date:        2015-08-19 01:02:48+00:00
Summary:     no need to embed these images
Affected #:  3 files

diff -r 015ffcf85beb779d00e98e54662049a217a7c4e5 -r 1f9ffd5ca403fa79d4f0be5b7c721dee74d2d370 doc/source/visualizing/_images/hex_mesh_render.png
Binary file doc/source/visualizing/_images/hex_mesh_render.png has changed

diff -r 015ffcf85beb779d00e98e54662049a217a7c4e5 -r 1f9ffd5ca403fa79d4f0be5b7c721dee74d2d370 doc/source/visualizing/_images/tet_mesh_render.png
Binary file doc/source/visualizing/_images/tet_mesh_render.png has changed

diff -r 015ffcf85beb779d00e98e54662049a217a7c4e5 -r 1f9ffd5ca403fa79d4f0be5b7c721dee74d2d370 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -61,8 +61,6 @@
    cb.set_label(field_name[1])
    plt.savefig('hex_mesh_render.png')
 
-.. image:: _images/hex_mesh_render.png
-
 Next, here is an example of rendering a dataset with tetrahedral mesh elements.
 
 .. python-script::
@@ -101,8 +99,6 @@
    cb.set_label(field_name[1])
    plt.savefig('tet_mesh_render.png')
 
-.. image:: _images/tet_mesh_render.png
-
 Finally, here is a script that creates frames of a movie. It calls the rotate()
 method 300 times, saving a new image to the disk each time.
 


https://bitbucket.org/yt_analysis/yt/commits/feff57a94e4c/
Changeset:   feff57a94e4c
Branch:      yt
User:        atmyers
Date:        2015-08-19 01:06:46+00:00
Summary:     fix bounding box calculation
Affected #:  1 file

diff -r 1f9ffd5ca403fa79d4f0be5b7c721dee74d2d370 -r feff57a94e4c5c71b0b3fcec997e01cf7b8b652e yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1680,8 +1680,8 @@
     data = ensure_list(data)
     connectivity = ensure_list(connectivity)
     if bbox is None:
-        bbox = np.array([ [1.1*coordinates[:,i].min(),
-                           1.1*coordinates[:,i].max()]
+        bbox = np.array([[coordinates[:,i].min() - 0.1 * abs(coordinates[:,i].min()),
+                          coordinates[:,i].max() + 0.1 * abs(coordinates[:,i].max())]
                           for i in range(3)], "float64")
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')


https://bitbucket.org/yt_analysis/yt/commits/0be6c304340a/
Changeset:   0be6c304340a
Branch:      yt
User:        atmyers
Date:        2015-08-19 01:21:35+00:00
Summary:     adding a note about installing the python wrapper for embree
Affected #:  1 file

diff -r feff57a94e4c5c71b0b3fcec997e01cf7b8b652e -r 0be6c304340adc108c923359fdae29795a40cff2 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -7,9 +7,41 @@
 meshes from, for example, finite element calculations. In order to use this
 capability, a few additional dependencies are required beyond those you get
 when you run the install script. First, `embree <https://embree.github.io>`
-(a fast software ray-tracing library from Intel) must be installed, following 
-the instructions there. Second, the python bindings for embree (called 
-`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. 
+(a fast software ray-tracing library from Intel) must be installed, either
+by compiling from source or by using one of the pre-built binaries available
+at Embree's `downloads <https://embree.github.io/downloads.html>` page. Once
+Embree is installed, you must also create a symlink next to the library. For
+example, if the libraries were installed at /usr/local/lib/, you must do
+
+.. code-block:: bash
+
+    sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
+
+Second, the python bindings for embree (called 
+`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. To
+do so, first obtain a copy, e.g. by cloning the repo:
+
+.. code-block:: bash
+
+    git clone https://github.com/scopatz/pyembree
+
+To install, navigate to the root directory and run the setup script:
+
+.. code-block:: bash
+
+    python setup.py develop
+
+If Embree was installed to some location that is not in your path by default,
+you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
+the Mac OS package installer puts the installation at /opt/local/ instead of 
+usr/local. To account for this, you would do:
+
+.. code-block:: bash
+
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
+
+You must also use these flags when building any part of yt that links against
+pyembree.
 
 Once the pre-requisites are installed, unstructured mesh data can be rendered
 much like any other dataset. In particular, a new type of 


https://bitbucket.org/yt_analysis/yt/commits/54aa20d883ff/
Changeset:   54aa20d883ff
Branch:      yt
User:        atmyers
Date:        2015-08-19 01:23:58+00:00
Summary:     make this a code block instead of a python script
Affected #:  1 file

diff -r 0be6c304340adc108c923359fdae29795a40cff2 -r 54aa20d883ff69afda9569e9c137d87f88a4a5d2 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -134,7 +134,7 @@
 Finally, here is a script that creates frames of a movie. It calls the rotate()
 method 300 times, saving a new image to the disk each time.
 
-.. python-script::
+.. code-block:: python
    import yt
    import pylab as plt
    from yt.visualization.volume_rendering.render_source import MeshSource


https://bitbucket.org/yt_analysis/yt/commits/6487d7fa604f/
Changeset:   6487d7fa604f
Branch:      yt
User:        atmyers
Date:        2015-08-19 19:08:43+00:00
Summary:     adding a comment to about converting between primitive and mesh element ids
Affected #:  1 file

diff -r 54aa20d883ff69afda9569e9c137d87f88a4a5d2 -r 6487d7fa604fb7500394ee250a0fef2887735504 yt/utilities/lib/mesh_samplers.pyx
--- a/yt/utilities/lib/mesh_samplers.pyx
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -79,6 +79,10 @@
     if ray_id == -1:
         return
 
+    # ray_id records the id number of the hit according to
+    # embree, in which the primitives are triangles. Here,
+    # we convert this to the element id by dividing by the
+    # number of triangles per element.
     elem_id = ray_id / data.tpe
 
     get_hit_position(position, userPtr, ray)
@@ -116,8 +120,13 @@
         return
 
     get_hit_position(position, userPtr, ray)
-    
+
+    # ray_id records the id number of the hit according to
+    # embree, in which the primitives are triangles. Here,
+    # we convert this to the element id by dividing by the
+    # number of triangles per element.    
     elem_id = ray_id / data.tpe
+
     for i in range(4):
         element_indices[i] = data.element_indices[elem_id*4+i]
         field_data[i] = data.field_data[elem_id*4+i]


https://bitbucket.org/yt_analysis/yt/commits/840f014c4aed/
Changeset:   840f014c4aed
Branch:      yt
User:        atmyers
Date:        2015-08-19 21:15:09+00:00
Summary:     making tolerance member data instead of putting it in by hand
Affected #:  2 files

diff -r 6487d7fa604fb7500394ee250a0fef2887735504 -r 840f014c4aed94e041ae454a32242e2075e4d613 yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -6,6 +6,11 @@
 
 cdef class ElementSampler:
 
+    # how close a point has to be to the element
+    # to get counted as "inside". This is in the
+    # mapped coordinates of the element.
+    cdef np.float64_t inclusion_tol
+
     cdef void map_real_to_unit(self,
                                double* mapped_x, 
                                double* vertices,

diff -r 6487d7fa604fb7500394ee250a0fef2887735504 -r 840f014c4aed94e041ae454a32242e2075e4d613 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -55,6 +55,9 @@
 
     '''
 
+    def __init__(self):
+        self.inclusion_tol = 1.0e-8
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -115,6 +118,8 @@
         cdef double[3] col1
         cdef double[3] col2
     
+        # here, we express positions relative to the 4th element,
+        # which is selected by vertices[9]
         for i in range(3):
             bvec[i] = physical_x[i]       - vertices[9 + i]
             col0[i] = vertices[0 + i]     - vertices[9 + i]
@@ -142,8 +147,8 @@
     cdef int check_inside(self, double* mapped_coord) nogil:
         cdef int i
         for i in range(4):
-            if (mapped_coord[i] < -1.0e-8 or
-                mapped_coord[i] - 1.0 > 1.0e-8):
+            if (mapped_coord[i] < -self.inclusion_tol or
+                mapped_coord[i] - 1.0 > self.inclusion_tol):
                 return 0
         return 1
 
@@ -161,6 +166,7 @@
     '''
 
     def __init__(self):
+        super(NonlinearSolveSampler, self).__init__()
         self.tolerance = 1.0e-9
         self.max_iter = 10
 
@@ -239,9 +245,9 @@
     @cython.wraparound(False)
     @cython.cdivision(True)
     cdef int check_inside(self, double* mapped_coord) nogil:
-        if (fabs(mapped_coord[0]) - 1.0 > 1.0e-8 or
-            fabs(mapped_coord[1]) - 1.0 > 1.0e-8 or 
-            fabs(mapped_coord[2]) - 1.0 > 1.0e-8):
+        if (fabs(mapped_coord[0]) - 1.0 > self.inclusion_tol or
+            fabs(mapped_coord[1]) - 1.0 > self.inclusion_tol or 
+            fabs(mapped_coord[2]) - 1.0 > self.inclusion_tol):
             return 0
         return 1
 


https://bitbucket.org/yt_analysis/yt/commits/e47bef6787f4/
Changeset:   e47bef6787f4
Branch:      yt
User:        atmyers
Date:        2015-08-19 21:26:39+00:00
Summary:     adding some notes that clarify the arguments here
Affected #:  1 file

diff -r 840f014c4aed94e041ae454a32242e2075e4d613 -r e47bef6787f4ed55a275becc5665856019c93af6 yt/utilities/lib/element_mappings.pxd
--- a/yt/utilities/lib/element_mappings.pxd
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -44,9 +44,45 @@
 
     cdef int check_inside(self, double* mapped_coord) nogil
 
+# This typedef defines a function pointer that defines the system
+# of equations that will be solved by the NonlinearSolveSamplers.
+# 
+# inputs:
+#     x        - pointer to the mapped coordinate
+#     vertices - pointer to the element vertices
+#     phys_x   - pointer to the physical coordinate
+#
+# outputs:
+#
+#     fx - the result of solving the system, should be close to 0
+#          once it is converged.
+#
+ctypedef void (*func_type)(double* fx, 
+                           double* x, 
+                           double* vertices, 
+                           double* phys_x) nogil
 
-ctypedef void (*func_type)(double*, double*, double*, double*) nogil
-ctypedef void (*jac_type)(double*, double*, double*, double*, double*, double*) nogil
+# This typedef defines a function pointer that defines the Jacobian
+# matrix used by the NonlinearSolveSamplers. Subclasses needed to 
+# define a Jacobian function in this form.
+# 
+# inputs:
+#     x        - pointer to the mapped coordinate
+#     vertices - pointer to the element vertices
+#     phys_x   - pointer to the physical coordinate
+#
+# outputs:
+#
+#     rcol     - the first column of the jacobian
+#     scol     - the second column of the jacobian
+#     tcol     - the third column of the jaocobian
+#
+ctypedef void (*jac_type)(double* rcol, 
+                          double* scol, 
+                          double* tcol, 
+                          double* x, 
+                          double* vertices, 
+                          double* phys_x) nogil
 
 cdef class NonlinearSolveSampler(ElementSampler):
 


https://bitbucket.org/yt_analysis/yt/commits/086505db20e3/
Changeset:   086505db20e3
Branch:      yt
User:        atmyers
Date:        2015-08-19 21:29:29+00:00
Summary:     adding a couple of more docstrings
Affected #:  1 file

diff -r e47bef6787f4ed55a275becc5665856019c93af6 -r 086505db20e34ce0b769e70a72dd38f03c7f0381 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -258,7 +258,13 @@
 cdef inline void Q1Function3D(double* f,
                               double* x, 
                               double* vertices, 
-                              double* phys_x) nogil:    
+                              double* phys_x) nogil:
+'''
+
+This defines the function used by the Newton-Raphson solver for 
+linear, hex elements.
+
+'''
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp
     
@@ -290,6 +296,12 @@
                               double* x, 
                               double* v, 
                               double* phys_x) nogil:
+'''
+
+This defines the Jacobian matrix used by the Newton-Raphson 
+solver with linear, hexahedral elements.
+
+'''
     
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp


https://bitbucket.org/yt_analysis/yt/commits/d9972c5f3bbd/
Changeset:   d9972c5f3bbd
Branch:      yt
User:        atmyers
Date:        2015-08-19 22:59:40+00:00
Summary:     using some more descriptive variable names
Affected #:  2 files

diff -r 086505db20e34ce0b769e70a72dd38f03c7f0381 -r d9972c5f3bbdab1fb653f724a6bf11b662203f93 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -18,7 +18,8 @@
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
-    _get_coord_fields
+    _get_coord_fields, \
+    _get_vert_fields
 import yt.visualization._MPL as _MPL
 
 
@@ -39,10 +40,10 @@
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")
-            #f3 = _get_vert_fields(axi)
-            #registry.add_field(("index", "vertex_%s" % ax), function = f3,
-            #                   display_field = False,
-            #                   units = "code_length")
+            f3 = _get_vert_fields(axi)
+            registry.add_field(("index", "vertex_%s" % ax), function = f3,
+                               display_field = False,
+                               units = "code_length")
         def _cell_volume(field, data):
             rv  = data["index", "dx"].copy(order='K')
             rv *= data["index", "dy"]

diff -r 086505db20e34ce0b769e70a72dd38f03c7f0381 -r d9972c5f3bbdab1fb653f724a6bf11b662203f93 yt/utilities/lib/element_mappings.pyx
--- a/yt/utilities/lib/element_mappings.pyx
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -255,16 +255,10 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void Q1Function3D(double* f,
+cdef inline void Q1Function3D(double* fx,
                               double* x, 
                               double* vertices, 
                               double* phys_x) nogil:
-'''
-
-This defines the function used by the Newton-Raphson solver for 
-linear, hex elements.
-
-'''
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp
     
@@ -276,33 +270,26 @@
     tp = 1.0 + x[2]
     
     for i in range(3):
-        f[i] = vertices[0 + i]*rm*sm*tm \
-             + vertices[3 + i]*rp*sm*tm \
-             + vertices[6 + i]*rp*sp*tm \
-             + vertices[9 + i]*rm*sp*tm \
-             + vertices[12 + i]*rm*sm*tp \
-             + vertices[15 + i]*rp*sm*tp \
-             + vertices[18 + i]*rp*sp*tp \
-             + vertices[21 + i]*rm*sp*tp \
-             - 8.0*phys_x[i]
+        fx[i] = vertices[0 + i]*rm*sm*tm \
+              + vertices[3 + i]*rp*sm*tm \
+              + vertices[6 + i]*rp*sp*tm \
+              + vertices[9 + i]*rm*sp*tm \
+              + vertices[12 + i]*rm*sm*tp \
+              + vertices[15 + i]*rp*sm*tp \
+              + vertices[18 + i]*rp*sp*tp \
+              + vertices[21 + i]*rm*sp*tp \
+              - 8.0*phys_x[i]
 
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef inline void Q1Jacobian3D(double* r,
-                              double* s,
-                              double* t,
+cdef inline void Q1Jacobian3D(double* rcol,
+                              double* scol,
+                              double* tcol,
                               double* x, 
-                              double* v, 
-                              double* phys_x) nogil:
-'''
-
-This defines the Jacobian matrix used by the Newton-Raphson 
-solver with linear, hexahedral elements.
-
-'''
-    
+                              double* vertices, 
+                              double* phys_x) nogil:    
     cdef int i
     cdef double rm, rp, sm, sp, tm, tp
     
@@ -314,18 +301,18 @@
     tp = 1.0 + x[2]
     
     for i in range(3):
-        r[i] = -sm*tm*v[0 + i]  + sm*tm*v[3 + i]  + \
-                sp*tm*v[6 + i]  - sp*tm*v[9 + i]  - \
-                sm*tp*v[12 + i] + sm*tp*v[15 + i] + \
-                sp*tp*v[18 + i] - sp*tp*v[21 + i]
-        s[i] = -rm*tm*v[0 + i]  - rp*tm*v[3 + i]  + \
-                rp*tm*v[6 + i]  + rm*tm*v[9 + i]  - \
-                rm*tp*v[12 + i] - rp*tp*v[15 + i] + \
-                rp*tp*v[18 + i] + rm*tp*v[21 + i]
-        t[i] = -rm*sm*v[0 + i]  - rp*sm*v[3 + i]  - \
-                rp*sp*v[6 + i]  - rm*sp*v[9 + i]  + \
-                rm*sm*v[12 + i] + rp*sm*v[15 + i] + \
-                rp*sp*v[18 + i] + rm*sp*v[21 + i]
+        rcol[i] = -sm*tm*vertices[0 + i]  + sm*tm*vertices[3 + i]  + \
+                   sp*tm*vertices[6 + i]  - sp*tm*vertices[9 + i]  - \
+                   sm*tp*vertices[12 + i] + sm*tp*vertices[15 + i] + \
+                   sp*tp*vertices[18 + i] - sp*tp*vertices[21 + i]
+        scol[i] = -rm*tm*vertices[0 + i]  - rp*tm*vertices[3 + i]  + \
+                   rp*tm*vertices[6 + i]  + rm*tm*vertices[9 + i]  - \
+                   rm*tp*vertices[12 + i] - rp*tp*vertices[15 + i] + \
+                   rp*tp*vertices[18 + i] + rm*tp*vertices[21 + i]
+        tcol[i] = -rm*sm*vertices[0 + i]  - rp*sm*vertices[3 + i]  - \
+                   rp*sp*vertices[6 + i]  - rm*sp*vertices[9 + i]  + \
+                   rm*sm*vertices[12 + i] + rp*sm*vertices[15 + i] + \
+                   rp*sp*vertices[18 + i] + rm*sp*vertices[21 + i]
 
 
 @cython.boundscheck(False)


https://bitbucket.org/yt_analysis/yt/commits/33c91edbb5c3/
Changeset:   33c91edbb5c3
Branch:      yt
User:        atmyers
Date:        2015-08-20 00:32:26+00:00
Summary:     these imports were missing
Affected #:  1 file

diff -r d9972c5f3bbdab1fb653f724a6bf11b662203f93 -r 33c91edbb5c30f56683a1992ef74f251de0e41d2 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -19,7 +19,9 @@
     CoordinateHandler, \
     _unknown_coord, \
     _get_coord_fields, \
-    _get_vert_fields
+    _get_vert_fields, \
+    cartesian_to_cylindrical, \
+    cylindrical_to_cartesian
 import yt.visualization._MPL as _MPL
 
 


https://bitbucket.org/yt_analysis/yt/commits/b9f85c68e957/
Changeset:   b9f85c68e957
Branch:      yt
User:        atmyers
Date:        2015-08-20 00:48:42+00:00
Summary:     disabling testing the vertex fields for now
Affected #:  1 file

diff -r 33c91edbb5c30f56683a1992ef74f251de0e41d2 -r b9f85c68e9579d35fe949af6f79d1cb7bbc2f27d yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -178,6 +178,9 @@
     for field in sorted(base_ds.field_info):
         if field[1].find("beta_p") > -1:
             continue
+        if field[1].find("vertex") > -1:
+            # don't test the vertex fields for now
+            continue
         if field in base_ds.field_list:
             # Don't know how to test this.  We need some way of having fields
             # that are fallbacks be tested, but we don't have that now.


https://bitbucket.org/yt_analysis/yt/commits/ee896adc8da3/
Changeset:   ee896adc8da3
Branch:      yt
User:        atmyers
Date:        2015-08-25 03:06:44+00:00
Summary:     putting the sample data in a more descriptive place, plus some formatting changes
Affected #:  1 file

diff -r b9f85c68e9579d35fe949af6f79d1cb7bbc2f27d -r ee896adc8da39d752d455777953bf20ddb7cd3b8 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -38,7 +38,7 @@
 
 .. code-block:: bash
 
-    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py install
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py develop
 
 You must also use these flags when building any part of yt that links against
 pyembree.
@@ -60,6 +60,7 @@
 See below for examples. First, here is an example of rendering a hexahedral mesh.
 
 .. python-script::
+
    import yt
    import pylab as plt
    from yt.visualization.volume_rendering.render_source import MeshSource
@@ -67,7 +68,7 @@
    from yt.utilities.exodusII_reader import get_data
 
    # load the data
-   coords, connectivity, data = get_data("data/out.e-s010")
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
    mesh_id = 0
    field_name = ('gas', 'diffused')
    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
@@ -96,6 +97,7 @@
 Next, here is an example of rendering a dataset with tetrahedral mesh elements.
 
 .. python-script::
+
    import yt
    import pylab as plt
    from yt.visualization.volume_rendering.render_source import MeshSource
@@ -103,7 +105,7 @@
    from yt.utilities.exodusII_reader import get_data
 
    # load the data
-   filename = "../moose/test/tests/mesh/high_order_elems/gold/high_order_elems_tet4_refine_out.e"
+   filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
    coords, connectivity, data = get_data(filename)
    mesh_id = 0
    field_name = ('gas', 'u')
@@ -135,6 +137,7 @@
 method 300 times, saving a new image to the disk each time.
 
 .. code-block:: python
+
    import yt
    import pylab as plt
    from yt.visualization.volume_rendering.render_source import MeshSource
@@ -142,7 +145,7 @@
    from yt.utilities.exodusII_reader import get_data
 
    # load dataset
-   coords, connectivity, data = get_data("data/out.e-s010")
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
    mesh_id = 0
    field_name = ('gas', 'diffused')
    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])


https://bitbucket.org/yt_analysis/yt/commits/025b420dfb54/
Changeset:   025b420dfb54
Branch:      yt
User:        atmyers
Date:        2015-08-25 03:07:42+00:00
Summary:     also changing the data filename here
Affected #:  1 file

diff -r ee896adc8da39d752d455777953bf20ddb7cd3b8 -r 025b420dfb546e90313bc8588f071d44c2a6cfde doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1059,7 +1059,7 @@
    import numpy
    from yt.utilities.exodusII_reader import get_data
 
-   coords, connectivity, data = get_data("data/out.e-s010")
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
 
 This uses a publically available `MOOSE <http://mooseframework.org/>` 
 dataset along with the get_data function to parse the coords, connectivity, 


https://bitbucket.org/yt_analysis/yt/commits/1f3e980d070d/
Changeset:   1f3e980d070d
Branch:      yt
User:        atmyers
Date:        2015-08-25 03:10:32+00:00
Summary:     fixing a typo caught by Cameron
Affected #:  1 file

diff -r 025b420dfb546e90313bc8588f071d44c2a6cfde -r 1f3e980d070d4ed919dc75f8a74d63e1116f9fa0 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -439,7 +439,7 @@
     # Two steps:
     #  1. Is image point within the mesh bounding box?
     #  2. Is image point within the mesh element?
-    # Second is more intensive.  It will converting the element vertices to the
+    # Second is more intensive.  It will convert the element vertices to the
     # mapped coordinate system, and checking whether the result in in-bounds or not
     # Note that we have to have a pseudo-3D pixel buffer.  One dimension will
     # always be 1.


https://bitbucket.org/yt_analysis/yt/commits/199b3af00efd/
Changeset:   199b3af00efd
Branch:      yt
User:        atmyers
Date:        2015-08-25 03:12:00+00:00
Summary:     filling in a docstring stub, caught by Cameron
Affected #:  1 file

diff -r 1f3e980d070d4ed919dc75f8a74d63e1116f9fa0 -r 199b3af00efd138fdedc07b01e0794f54129b678 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -251,7 +251,15 @@
 
 class MeshSource(RenderSource):
 
-    """docstring for MeshSource"""
+    """
+
+    MeshSource is a class for volume rendering unstructured mesh
+    data. This functionality requires the embree ray-tracing
+    engine and the associated pyembree python bindings to be
+    installed in order to function.
+
+    """
+
     _image = None
     data_source = None
 


https://bitbucket.org/yt_analysis/yt/commits/b2c91d429a58/
Changeset:   b2c91d429a58
Branch:      yt
User:        atmyers
Date:        2015-08-25 03:26:15+00:00
Summary:     making the reader fall back to the test data dir
Affected #:  1 file

diff -r 199b3af00efd138fdedc07b01e0794f54129b678 -r b2c91d429a58ce344ef3f974d00aabb2a3824c90 yt/utilities/exodusII_reader.py
--- a/yt/utilities/exodusII_reader.py
+++ b/yt/utilities/exodusII_reader.py
@@ -2,6 +2,8 @@
 from itertools import takewhile
 from netCDF4 import Dataset
 import numpy as np
+from yt.config import ytcfg
+import os
 
 
 def sanitize_string(s):
@@ -10,7 +12,10 @@
 
 
 def get_data(fn):
-    f = Dataset(fn)
+    try:
+        f = Dataset(fn)
+    except RuntimeError:
+        f = Dataset(os.path.join(ytcfg.get("yt", "test_data_dir"), fn))
     fvars = f.variables
     # Is this correct?
     etypes = fvars["eb_status"][:]


https://bitbucket.org/yt_analysis/yt/commits/5cc8045a109d/
Changeset:   5cc8045a109d
Branch:      yt
User:        atmyers
Date:        2015-08-27 01:23:29+00:00
Summary:     give MeshSource render an optional zbuffer argument to make it consistent with the other Source objects
Affected #:  1 file

diff -r b2c91d429a58ce344ef3f974d00aabb2a3824c90 -r 5cc8045a109df59ff6ff1a191bc15b049379bcca yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -318,7 +318,7 @@
                                                   indices,
                                                   field_data.d)
 
-    def render(self, camera):
+    def render(self, camera, zbuffer=None):
 
         self.sampler = new_mesh_sampler(camera, self)
 


https://bitbucket.org/yt_analysis/yt/commits/fceb5ab0e3c6/
Changeset:   fceb5ab0e3c6
Branch:      yt
User:        ngoldbaum
Date:        2015-08-27 16:33:21+00:00
Summary:     Merged in atmyers/yt (pull request #1643)

Unstructured Mesh Rendering
Affected #:  40 files

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -1026,6 +1026,60 @@
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
 
+Unstructured Grid Data
+----------------------
+
+See :ref:`loading-numpy-array`,
+:func:`~yt.frontends.stream.data_structures.load_unstructured_mesh` for
+more detail.
+
+In addition to the above grid types, you can also load data stored on
+unstructured meshes. This type of mesh is used, for example, in many
+finite element calculations. Currently, hexahedral, tetrahedral, and
+wedge-shaped mesh element are supported.
+
+To load an unstructured mesh, you need to specify the following. First,
+you need to have a coordinates array, which should be an (L, 3) array
+that stores the (x, y, z) positions of all of the vertices in the mesh.
+Second, you need to specify a connectivity array, which describes how
+those vertices are connected into mesh elements. The connectivity array
+should be (N, M), where N is the number of elements and M is the
+connectivity length, i.e. the number of vertices per element. Finally,
+you must also specify a data dictionary, where the keys should be
+the names of the fields and the values should be numpy arrays that
+contain the field data. These arrays can either supply the cell-averaged
+data for each element, in which case they would be (N, 1), or they
+can have node-centered data, in which case they would also be (N, M).
+
+Here is an example of how to load an in-memory, unstructured mesh dataset:
+
+.. code-block:: python
+
+   import yt
+   import numpy
+   from yt.utilities.exodusII_reader import get_data
+
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+
+This uses a publically available `MOOSE <http://mooseframework.org/>` 
+dataset along with the get_data function to parse the coords, connectivity, 
+and data. Then, these can be loaded as an in-memory dataset as follows:
+
+.. code-block:: python
+
+    mesh_id = 0
+    ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+Note that load_unstructured_mesh can take either a single or a list of meshes.
+Here, we have selected only the first mesh to load.
+
+.. rubric:: Caveats
+
+* Units will be incorrect unless the data has already been converted to cgs.
+* Integration is not implemented.
+* Some functions may behave oddly or not work at all.
+* Data must already reside in memory.
+
 Generic Particle Data
 ---------------------
 

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -397,6 +397,7 @@
    ~yt.frontends.stream.data_structures.load_amr_grids
    ~yt.frontends.stream.data_structures.load_particles
    ~yt.frontends.stream.data_structures.load_hexahedral_mesh
+   ~yt.frontends.stream.data_structures.load_unstructured_mesh
 
 Derived Datatypes
 -----------------
@@ -632,6 +633,7 @@
    ~yt.visualization.volume_rendering.api.BoxSource
    ~yt.visualization.volume_rendering.api.GridSource
    ~yt.visualization.volume_rendering.api.CoordinateVectorSource
+   ~yt.visualization.volume_rendering.render_source.MeshSource
 
 Streamlining
 ^^^^^^^^^^^^

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -15,6 +15,7 @@
    callbacks
    manual_plotting
    volume_rendering
+   unstructured_mesh_rendering
    hardware_volume_rendering
    sketchfab
    mapserver

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 doc/source/visualizing/unstructured_mesh_rendering.rst
--- /dev/null
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -0,0 +1,174 @@
+.. _unstructured_mesh_rendering:
+
+Unstructured Mesh Rendering
+===========================
+
+Beginning with version 3.3, yt has the ability to volume render unstructured
+meshes from, for example, finite element calculations. In order to use this
+capability, a few additional dependencies are required beyond those you get
+when you run the install script. First, `Embree <https://embree.github.io>`_
+(a fast software ray-tracing library from Intel) must be installed, either
+by compiling from source or by using one of the pre-built binaries available
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
+Embree is installed, you must also create a symlink next to the library. For
+example, if the libraries were installed at /usr/local/lib/, you must do
+
+.. code-block:: bash
+
+    sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
+
+Second, the Python bindings for Embree (called
+`pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
+do so, first obtain a copy by, e.g., cloning the repo:
+
+.. code-block:: bash
+
+    git clone https://github.com/scopatz/pyembree
+
+To install, navigate to the root directory and run the setup script:
+
+.. code-block:: bash
+
+    python setup.py develop
+
+If Embree was installed to some location that is not in your path by default,
+you will need to pass in CFLAGS and LDFLAGS to the setup.py script. For example,
+the Mac OS package installer puts the installation at /opt/local/ instead of
+/usr/local. To account for this, you would do:
+
+.. code-block:: bash
+
+    CFLAGS='-I/opt/local/include' LDFLAGS='-L/opt/local/lib' python setup.py develop
+
+You must also use these flags when building any part of yt that links against
+pyembree.
+
+Once the pre-requisites are installed, unstructured mesh data can be rendered
+much like any other dataset. In particular, a new type of 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` object
+has been defined, called the 
+:class:`~yt.visualization.volume_rendering.render_source.MeshSource`, that
+represents the unstructured mesh data that will be rendered. The user creates 
+this object, and also defines a
+:class:`~yt.visualization.volume_rendering.camera.Camera` 
+that specifies your viewpoint into the scene. When 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` is called,
+a set of rays is cast at the source. Each time a ray strikes the source mesh,
+the data is sampled at the intersection point, and the resulting value gets
+saved into an image.
+
+See below for examples. First, here is an example of rendering a hexahedral mesh.
+
+.. python-script::
+
+   import yt
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load the data
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+   mesh_id = 0
+   field_name = ('gas', 'diffused')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.resolution = (800, 800)
+   cam.set_position(camera_position, north_vector)
+
+   # make the image
+   im = ms.render(cam)
+
+   # plot and save
+   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0, vmax=2.0)
+   plt.gca().axes.get_xaxis().set_visible(False)
+   plt.gca().axes.get_yaxis().set_visible(False)
+   cb = plt.colorbar()
+   cb.set_label(field_name[1])
+   plt.savefig('hex_mesh_render.png')
+
+Next, here is an example of rendering a dataset with tetrahedral mesh elements.
+
+.. python-script::
+
+   import yt
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load the data
+   filename = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
+   coords, connectivity, data = get_data(filename)
+   mesh_id = 0
+   field_name = ('gas', 'u')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([3.0, 3.0, 3.0], 'code_length')
+   cam.set_width(ds.arr([2.0, 2.0, 2.0], 'code_length'))
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.resolution = (800, 800)
+   cam.set_position(camera_position, north_vector)
+
+   # make the image
+   im = ms.render(cam)
+
+   # plot and save
+   plt.imshow(im, cmap='Eos A', origin='lower', vmin=0.0, vmax=1.0)
+   plt.gca().axes.get_xaxis().set_visible(False)
+   plt.gca().axes.get_yaxis().set_visible(False)
+   cb = plt.colorbar()
+   cb.set_label(field_name[1])
+   plt.savefig('tet_mesh_render.png')
+
+Finally, here is a script that creates frames of a movie. It calls the rotate()
+method 300 times, saving a new image to the disk each time.
+
+.. code-block:: python
+
+   import yt
+   import numpy as np
+   import pylab as plt
+   from yt.visualization.volume_rendering.render_source import MeshSource
+   from yt.visualization.volume_rendering.camera import Camera
+   from yt.utilities.exodusII_reader import get_data
+
+   # load dataset
+   coords, connectivity, data = get_data("MOOSE_sample_data/out.e-s010")
+   mesh_id = 0
+   field_name = ('gas', 'diffused')
+   ds = yt.load_unstructured_mesh(data[mesh_id], connectivity[mesh_id], coords[mesh_id])
+
+   # create the RenderSource
+   ms = MeshSource(ds, field_name)
+
+   # set up camera
+   cam = Camera(ds)
+   camera_position = ds.arr([-3.0, 3.0, -3.0], 'code_length')
+   north_vector = ds.arr([0.0, 1.0, 0.0], 'dimensionless')
+   cam.set_position(camera_position, north_vector)
+   cam.steady_north = True
+
+   # make movie frames
+   num_frames = 301
+   for i in range(num_frames):
+       cam.rotate(2.0*np.pi/num_frames)
+       im = ms.render(cam)
+       plt.imshow(im, cmap='Eos A', origin='lower',vmin=0.0, vmax=2.0)
+       plt.gca().axes.get_xaxis().set_visible(False)
+       plt.gca().axes.get_yaxis().set_visible(False)
+       cb = plt.colorbar()
+       cb.set_label('diffused')
+       plt.savefig('movie_frames/surface_render_%.4d.png' % i)
+       plt.clf()

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -134,7 +134,7 @@
 from yt.frontends.stream.api import \
     load_uniform_grid, load_amr_grids, \
     load_particles, load_hexahedral_mesh, load_octree, \
-    hexahedral_connectivity
+    hexahedral_connectivity, load_unstructured_mesh
 
 # For backwards compatibility
 GadgetDataset = frontends.gadget.GadgetDataset

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -826,6 +826,13 @@
             self.index._identify_base_chunk(self)
         return self._current_chunk.fwidth
 
+    @property
+    def fcoords_vertex(self):
+        if self._current_chunk is None:
+            self.index._identify_base_chunk(self)
+        return self._current_chunk.fcoords_vertex
+
+
 class YTSelectionContainer0D(YTSelectionContainer):
     _spatial = False
     _dimensionality = 0

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -46,7 +46,10 @@
         # This is where we set up the connectivity information
         self.connectivity_indices = connectivity_indices
         if connectivity_indices.shape[1] != self._connectivity_length:
-            raise RuntimeError
+            if self._connectivity_length == -1:
+                self._connectivity_length = connectivity_indices.shape[1]
+            else:
+                raise RuntimeError
         self.connectivity_coords = connectivity_coords
         self.ds = index.dataset
         self._index = index
@@ -90,8 +93,14 @@
     def _generate_container_field(self, field):
         raise NotImplementedError
 
-    def select_fcoords(self, dobj):
-        raise NotImplementedError
+    def select_fcoords(self, dobj = None):
+        # This computes centroids!
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,3), dtype='float64')
+        centers = fill_fcoords(self.connectivity_coords,
+                               self.connectivity_indices,
+                               self._index_offset)
+        return centers[mask, :]
 
     def select_fwidth(self, dobj):
         raise NotImplementedError
@@ -126,7 +135,7 @@
         mask = self._get_selector_mask(selector)
         count = self.count(selector)
         if count == 0: return 0
-        dest[offset:offset+count] = source.flat[mask]
+        dest[offset:offset+count] = source[mask,...]
         return count
 
     def count(self, selector):
@@ -143,6 +152,25 @@
         mask = selector.select_points(x,y,z, 0.0)
         return mask
 
+    def _get_selector_mask(self, selector):
+        if hash(selector) == self._last_selector_id:
+            mask = self._last_mask
+        else:
+            self._last_mask = mask = selector.fill_mesh_cell_mask(self)
+            self._last_selector_id = hash(selector)
+            if mask is None:
+                self._last_count = 0
+            else:
+                self._last_count = mask.sum()
+        return mask
+
+    def select_fcoords_vertex(self, dobj = None):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,self._connectivity_length,3), dtype='float64')
+        vertices = self.connectivity_coords[
+                self.connectivity_indices - 1]
+        return vertices[mask, :, :]
+
 class SemiStructuredMesh(UnstructuredMesh):
     _connectivity_length = 8
     _type_name = 'semi_structured_mesh'
@@ -161,14 +189,6 @@
         elif field == "dz":
             return self._current_chunk.fwidth[:,2]
 
-    def select_fcoords(self, dobj = None):
-        mask = self._get_selector_mask(dobj.selector)
-        if mask is None: return np.empty((0,3), dtype='float64')
-        centers = fill_fcoords(self.connectivity_coords,
-                               self.connectivity_indices,
-                               self._index_offset)
-        return centers[mask, :]
-
     def select_fwidth(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
@@ -189,15 +209,3 @@
         dt, t = dobj.selector.get_dt_mesh(self, mask.sum(), self._index_offset)
         return dt, t
 
-    def _get_selector_mask(self, selector):
-        if hash(selector) == self._last_selector_id:
-            mask = self._last_mask
-        else:
-            self._last_mask = mask = selector.fill_mesh_cell_mask(self)
-            self._last_selector_id = hash(selector)
-            if mask is None:
-                self._last_count = 0
-            else:
-                self._last_count = mask.sum()
-        return mask
-

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -240,6 +240,13 @@
         return self.ds.arr(fc, input_units = "code_length")
 
     @property
+    def fcoords_vertex(self):
+        fc = np.random.random((self.nd, self.nd, self.nd, 8, 3))
+        if self.flat:
+            fc.shape = (self.nd*self.nd*self.nd, 8, 3)
+        return self.ds.arr(fc, input_units = "code_length")
+
+    @property
     def icoords(self):
         ic = np.mgrid[0:self.nd-1:self.nd*1j,
                       0:self.nd-1:self.nd*1j,

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -178,6 +178,9 @@
     for field in sorted(base_ds.field_info):
         if field[1].find("beta_p") > -1:
             continue
+        if field[1].find("vertex") > -1:
+            # don't test the vertex fields for now
+            continue
         if field in base_ds.field_list:
             # Don't know how to test this.  We need some way of having fields
             # that are fallbacks be tested, but we don't have that now.

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -24,7 +24,8 @@
       load_hexahedral_mesh, \
       hexahedral_connectivity, \
       load_octree, \
-      refine_amr
+      refine_amr, \
+      load_unstructured_mesh
 
 from .fields import \
       StreamFieldInfo

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -50,7 +50,7 @@
 from yt.geometry.oct_container import \
     OctreeContainer
 from yt.geometry.unstructured_mesh_handler import \
-           UnstructuredIndex
+    UnstructuredIndex
 from yt.data_objects.static_output import \
     Dataset
 from yt.utilities.logger import ytLogger as mylog
@@ -69,8 +69,8 @@
 from yt.utilities.flagging_methods import \
     FlaggingGrid
 from yt.data_objects.unstructured_mesh import \
-           SemiStructuredMesh, \
-           UnstructuredMesh
+    SemiStructuredMesh, \
+    UnstructuredMesh
 from yt.extern.six import string_types, iteritems
 from .fields import \
     StreamFieldInfo
@@ -1602,7 +1602,7 @@
         connec = ensure_list(self.stream_handler.fields.pop("connectivity"))
         self.meshes = [StreamUnstructuredMesh(
           i, self.index_filename, c1, c2, self)
-          for i, (c1, c2) in enumerate(zip(coords, connec))]
+          for i, (c1, c2) in enumerate(zip(connec, coords))]
 
     def _setup_data_io(self):
         if self.stream_handler.io is not None:
@@ -1614,7 +1614,139 @@
         self.field_list = list(set(self.stream_handler.get_fields()))
 
 class StreamUnstructuredMeshDataset(StreamDataset):
-    _index_class = StreamUnstructuredMesh
+    _index_class = StreamUnstructuredIndex
     _field_info_class = StreamFieldInfo
     _dataset_type = "stream_unstructured"
 
+def load_unstructured_mesh(data, connectivity, coordinates,
+                         length_unit = None, bbox=None, sim_time=0.0,
+                         mass_unit = None, time_unit = None,
+                         velocity_unit = None, magnetic_unit = None,
+                         periodicity=(False, False, False),
+                         geometry = "cartesian"):
+    r"""Load an unstructured mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow unstructured mesh data to be loaded directly into
+    yt and analyzed as would any others.  Not all functionality for
+    visualization will be present, and some analysis functions may not yet have
+    been implemented.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+
+    Parameters
+    ----------
+    data : dict or list of dicts
+        This is a list of dicts of numpy arrays, where each element in the list
+        is a different mesh, and where the keys of dicts are the field names.
+        If a dict is supplied, this will be assumed to be the only mesh.
+    connectivity : list of array_like or array_like
+        This is the connectivity array for the meshes; this should either be a
+        list where each element in the list is a numpy array or a single numpy
+        array.  Each array in the list can have different connectivity length
+        and should be of shape (N,M) where N is the number of elements and M is
+        the connectivity length.
+    coordinates : array_like
+        This should be of size (L,3) where L is the number of vertices
+        indicated in the connectivity matrix.
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of the length unit.
+    sim_time : float, optional
+        The simulation time in seconds
+    mass_unit : string
+        Unit to use for masses.  Defaults to unitless.
+    time_unit : string
+        Unit to use for times.  Defaults to unitless.
+    velocity_unit : string
+        Unit to use for velocities.  Defaults to unitless.
+    magnetic_unit : string
+        Unit to use for magnetic fields. Defaults to unitless.
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+    geometry : string or tuple
+        "cartesian", "cylindrical", "polar", "spherical", "geographic" or
+        "spectral_cube".  Optionally, a tuple can be provided to specify the
+        axis ordering -- for instance, to specify that the axis ordering should
+        be z, x, y, this would be: ("cartesian", ("z", "x", "y")).  The same
+        can be done for other coordinates, for instance: 
+        ("spherical", ("theta", "phi", "r")).
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    data = ensure_list(data)
+    connectivity = ensure_list(connectivity)
+    if bbox is None:
+        bbox = np.array([[coordinates[:,i].min() - 0.1 * abs(coordinates[:,i].min()),
+                          coordinates[:,i].max() + 0.1 * abs(coordinates[:,i].max())]
+                          for i in range(3)], "float64")
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    field_units = {}
+    particle_types = {}
+    sfh = StreamDictFieldHandler()
+
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates})
+    for i, d in enumerate(data):
+        _f_unit, _data = unitify_data(d)
+        field_units.update(_f_unit)
+        sfh[i] = _data
+        particle_types.update(set_particle_types(d))
+    # Simple check for axis length correctness
+    if 0 and len(data) > 0:
+        fn = list(sorted(data))[0]
+        array_values = data[fn]
+        if array_values.size != connectivity.shape[0]:
+            mylog.error("Dimensions of array must be one fewer than the" +
+                        " coordinate set.")
+            raise RuntimeError
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    if length_unit is None:
+        length_unit = 'code_length'
+    if mass_unit is None:
+        mass_unit = 'code_mass'
+    if time_unit is None:
+        time_unit = 'code_time'
+    if velocity_unit is None:
+        velocity_unit = 'code_velocity'
+    if magnetic_unit is None:
+        magnetic_unit = 'code_magnetic'
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        field_units,
+        (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit),
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "UnstructuredMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    sds = StreamUnstructuredMeshDataset(handler, geometry = geometry)
+
+    return sds
+

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -255,3 +255,39 @@
                         subset.domain_id - subset._domain_offset][field]
                 subset.fill(field_vals, rv, selector, ind)
         return rv
+
+
+class IOHandlerStreamUnstructured(BaseIOHandler):
+    _dataset_type = "stream_unstructured"
+    _node_types = ("diffused", "convected", "u", "temp")
+
+    def __init__(self, ds):
+        self.fields = ds.stream_handler.fields
+        super(IOHandlerStreamUnstructured, self).__init__(ds)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        chunk = chunks[0]
+        mesh_id = chunk.objs[0].mesh_id
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            nodes_per_element = self.fields[mesh_id][field].shape[1]
+            if fname in self._node_types:
+                rv[field] = np.empty((size, nodes_per_element), dtype="float64")
+            else:
+                rv[field] = np.empty(size, dtype="float64")
+        ngrids = sum(len(chunk.objs) for chunk in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [fname for ftype, fname in fields], ngrids)
+        for field in fields:
+            ind = 0
+            ftype, fname = field
+            for chunk in chunks:
+                for g in chunk.objs:
+                    ds = self.fields[g.mesh_id].get(field, None)
+                    if ds is None:
+                        ds = self.fields[g.mesh_id][fname]
+                    ind += g.select(selector, ds, rv[field], ind) # caches
+        return rv
+

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/geometry/coordinates/cartesian_coordinates.py
--- a/yt/geometry/coordinates/cartesian_coordinates.py
+++ b/yt/geometry/coordinates/cartesian_coordinates.py
@@ -18,9 +18,13 @@
 from .coordinate_handler import \
     CoordinateHandler, \
     _unknown_coord, \
-    _get_coord_fields
+    _get_coord_fields, \
+    _get_vert_fields, \
+    cartesian_to_cylindrical, \
+    cylindrical_to_cartesian
 import yt.visualization._MPL as _MPL
 
+
 class CartesianCoordinateHandler(CoordinateHandler):
 
     def __init__(self, ds, ordering = ('x','y','z')):
@@ -38,6 +42,10 @@
             registry.add_field(("index", "%s" % ax), function = f2,
                                display_field = False,
                                units = "code_length")
+            f3 = _get_vert_fields(axi)
+            registry.add_field(("index", "vertex_%s" % ax), function = f3,
+                               display_field = False,
+                               units = "code_length")
         def _cell_volume(field, data):
             rv  = data["index", "dx"].copy(order='K')
             rv *= data["index", "dy"]

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/geometry/coordinates/coordinate_handler.py
--- a/yt/geometry/coordinates/coordinate_handler.py
+++ b/yt/geometry/coordinates/coordinate_handler.py
@@ -45,6 +45,12 @@
         return data._reshape_vals(rv)
     return _dds, _coords
 
+def _get_vert_fields(axi, units = "code_length"):
+    def _vert(field, data):
+        rv = data.ds.arr(data.fcoords_vertex[...,axi].copy(), units)
+        return rv
+    return _vert
+
 def validate_iterable_width(width, ds, unit=None):
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
         validate_width_tuple(width[0])

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -402,6 +402,20 @@
             ind += gt.size
         return cdt
 
+    @cached_property
+    def fcoords_vertex(self):
+        ci = np.empty((self.data_size, 8, 3), dtype='float64')
+        ci = YTArray(ci, input_units = "code_length",
+                     registry = self.dobj.ds.unit_registry)
+        if self.data_size == 0: return ci
+        ind = 0
+        for obj in self.objs:
+            c = obj.select_fcoords_vertex(self.dobj)
+            if c.shape[0] == 0: continue
+            ci[ind:ind+c.shape[0], :, :] = c
+            ind += c.shape[0]
+        return ci
+
 class ChunkDataCache(object):
     def __init__(self, base_iter, preload_fields, geometry_handler,
                  max_length = 256):

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -387,8 +387,6 @@
         cdef int npoints, nv = mesh._connectivity_length
         cdef int total = 0
         cdef int offset = mesh._index_offset
-        if nv != 8:
-            raise RuntimeError
         coords = _ensure_code(mesh.connectivity_coords)
         indices = mesh.connectivity_indices
         npoints = indices.shape[0]

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/exodusII_reader.py
--- /dev/null
+++ b/yt/utilities/exodusII_reader.py
@@ -0,0 +1,46 @@
+import string
+from itertools import takewhile
+from netCDF4 import Dataset
+import numpy as np
+from yt.config import ytcfg
+import os
+
+
+def sanitize_string(s):
+    s = "".join(_ for _ in takewhile(lambda a: a in string.printable, s))
+    return s
+
+
+def get_data(fn):
+    try:
+        f = Dataset(fn)
+    except RuntimeError:
+        f = Dataset(os.path.join(ytcfg.get("yt", "test_data_dir"), fn))
+    fvars = f.variables
+    # Is this correct?
+    etypes = fvars["eb_status"][:]
+    nelem = etypes.shape[0]
+#    varnames = [sanitize_string(v.tostring()) for v in
+#                fvars["name_elem_var"][:]]
+    nodnames = [sanitize_string(v.tostring()) for v in
+                fvars["name_nod_var"][:]]
+    coord = np.array([fvars["coord%s" % ax][:]
+                     for ax in 'xyz']).transpose().copy()
+    coords = []
+    connects = []
+    data = []
+    for i in range(nelem):
+        connects.append(fvars["connect%s" % (i+1)][:].astype("i8"))
+        ci = connects[-1]
+        coords.append(coord)  # Same for all
+        vals = {}
+#        for j, v in enumerate(varnames):
+#            values = fvars["vals_elem_var%seb%s" % (j+1, i+1)][:]
+#            vals['gas', v] = values.astype("f8")[-1, :]
+        for j, v in enumerate(nodnames):
+            # We want just for this set of nodes all the node variables
+            # Use (ci - 1) to get these values
+            values = fvars["vals_nod_var%s" % (j+1)][:]
+            vals['gas', v] = values.astype("f8")[-1, ci - 1, ...]
+        data.append(vals)
+    return coords, connects, data

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/element_mappings.pxd
--- /dev/null
+++ b/yt/utilities/lib/element_mappings.pxd
@@ -0,0 +1,113 @@
+cimport numpy as np
+from numpy cimport ndarray
+cimport cython
+import numpy as np
+from libc.math cimport fabs, fmax
+
+cdef class ElementSampler:
+
+    # how close a point has to be to the element
+    # to get counted as "inside". This is in the
+    # mapped coordinates of the element.
+    cdef np.float64_t inclusion_tol
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+    
+
+    cdef double sample_at_real_point(self,
+                                     double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil
+
+
+cdef class P1Sampler3D(ElementSampler):
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil
+
+# This typedef defines a function pointer that defines the system
+# of equations that will be solved by the NonlinearSolveSamplers.
+# 
+# inputs:
+#     x        - pointer to the mapped coordinate
+#     vertices - pointer to the element vertices
+#     phys_x   - pointer to the physical coordinate
+#
+# outputs:
+#
+#     fx - the result of solving the system, should be close to 0
+#          once it is converged.
+#
+ctypedef void (*func_type)(double* fx, 
+                           double* x, 
+                           double* vertices, 
+                           double* phys_x) nogil
+
+# This typedef defines a function pointer that defines the Jacobian
+# matrix used by the NonlinearSolveSamplers. Subclasses needed to 
+# define a Jacobian function in this form.
+# 
+# inputs:
+#     x        - pointer to the mapped coordinate
+#     vertices - pointer to the element vertices
+#     phys_x   - pointer to the physical coordinate
+#
+# outputs:
+#
+#     rcol     - the first column of the jacobian
+#     scol     - the second column of the jacobian
+#     tcol     - the third column of the jaocobian
+#
+ctypedef void (*jac_type)(double* rcol, 
+                          double* scol, 
+                          double* tcol, 
+                          double* x, 
+                          double* vertices, 
+                          double* phys_x) nogil
+
+cdef class NonlinearSolveSampler(ElementSampler):
+
+    cdef int dim
+    cdef int max_iter
+    cdef np.float64_t tolerance
+    cdef func_type func 
+    cdef jac_type jac
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+    
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil
+
+
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil
+
+    cdef int check_inside(self, double* mapped_coord) nogil

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/element_mappings.pyx
--- /dev/null
+++ b/yt/utilities/lib/element_mappings.pyx
@@ -0,0 +1,351 @@
+"""
+This file contains coordinate mappings between physical coordinates and those
+defined on unit elements, as well as doing the corresponding intracell 
+interpolation on finite element data.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+from numpy cimport ndarray
+cimport cython
+import numpy as np
+from libc.math cimport fabs, fmax
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double determinant_3x3(double* col0, 
+                            double* col1, 
+                            double* col2) nogil:
+    return col0[0]*col1[1]*col2[2] - col0[0]*col1[2]*col2[1] - \
+           col0[1]*col1[0]*col2[2] + col0[1]*col1[2]*col2[0] + \
+           col0[2]*col1[0]*col2[1] - col0[2]*col1[1]*col2[0]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef double maxnorm(double* f) nogil:
+    cdef double err
+    cdef int i
+    err = fabs(f[0])
+    for i in range(1, 2):
+        err = fmax(err, fabs(f[i])) 
+    return err
+
+
+cdef class ElementSampler:
+    '''
+
+    This is a base class for sampling the value of a finite element solution
+    at an arbitrary point inside a mesh element. In general, this will be done
+    by transforming the requested physical coordinate into a mapped coordinate 
+    system, sampling the solution in mapped coordinates, and returning the result.
+    This is not to be used directly; use one of the subclasses instead.
+
+    '''
+
+    def __init__(self):
+        self.inclusion_tol = 1.0e-8
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self,
+                               double* mapped_x, 
+                               double* vertices,
+                               double* physical_x) nogil:
+        pass
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self,
+                                     double* coord,
+                                     double* vals) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_real_point(self,
+                                     double* vertices,
+                                     double* field_values,
+                                     double* physical_x) nogil:
+        cdef double val
+        cdef double mapped_coord[4]
+
+        self.map_real_to_unit(mapped_coord, vertices, physical_x)
+        val = self.sample_at_unit_point(mapped_coord, field_values)
+        return val
+
+
+cdef class P1Sampler3D(ElementSampler):
+    '''
+
+    This implements sampling inside a linear, tetrahedral mesh element.
+    This mapping is linear and can be inverted easily.
+
+    '''
+
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self, double* mapped_x, 
+                               double* vertices, double* physical_x) nogil:
+    
+        cdef int i
+        cdef double d
+        cdef double[3] bvec
+        cdef double[3] col0
+        cdef double[3] col1
+        cdef double[3] col2
+    
+        # here, we express positions relative to the 4th element,
+        # which is selected by vertices[9]
+        for i in range(3):
+            bvec[i] = physical_x[i]       - vertices[9 + i]
+            col0[i] = vertices[0 + i]     - vertices[9 + i]
+            col1[i] = vertices[3 + i]     - vertices[9 + i]
+            col2[i] = vertices[6 + i]     - vertices[9 + i]
+        
+        d = determinant_3x3(col0, col1, col2)
+        mapped_x[0] = determinant_3x3(bvec, col1, col2)/d
+        mapped_x[1] = determinant_3x3(col0, bvec, col2)/d
+        mapped_x[2] = determinant_3x3(col0, col1, bvec)/d
+        mapped_x[3] = 1.0 - mapped_x[0] - mapped_x[1] - mapped_x[2]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self,
+                                     double* coord, 
+                                     double* vals) nogil:
+        return vals[0]*coord[0] + vals[1]*coord[1] + \
+            vals[2]*coord[2] + vals[3]*coord[3]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        cdef int i
+        for i in range(4):
+            if (mapped_coord[i] < -self.inclusion_tol or
+                mapped_coord[i] - 1.0 > self.inclusion_tol):
+                return 0
+        return 1
+
+
+cdef class NonlinearSolveSampler(ElementSampler):
+
+    '''
+
+    This is a base class for handling element samplers that require
+    a nonlinear solve to invert the mapping between coordinate systems.
+    To do this, we perform Newton-Raphson iteration using a specified 
+    system of equations with an analytic Jacobian matrix. This is
+    not to be used directly, use one of the subclasses instead.
+
+    '''
+
+    def __init__(self):
+        super(NonlinearSolveSampler, self).__init__()
+        self.tolerance = 1.0e-9
+        self.max_iter = 10
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void map_real_to_unit(self,
+                               double* mapped_x,
+                               double* vertices,
+                               double* physical_x) nogil:
+        cdef int i
+        cdef double d, val
+        cdef double[3] f
+        cdef double[3] r
+        cdef double[3] s
+        cdef double[3] t
+        cdef double[3] x
+        cdef int iterations = 0
+        cdef double err
+   
+        # initial guess
+        for i in range(3):
+            x[i] = 0.0
+    
+        # initial error norm
+        self.func(f, x, vertices, physical_x)
+        err = maxnorm(f)  
+   
+        # begin Newton iteration
+        while (err > self.tolerance and iterations < self.max_iter):
+            self.jac(r, s, t, x, vertices, physical_x)
+            d = determinant_3x3(r, s, t)
+            x[0] = x[0] - (determinant_3x3(f, s, t)/d)
+            x[1] = x[1] - (determinant_3x3(r, f, t)/d)
+            x[2] = x[2] - (determinant_3x3(r, s, f)/d)
+            self.func(f, x, vertices, physical_x)        
+            err = maxnorm(f)
+            iterations += 1
+
+        for i in range(3):
+            mapped_x[i] = x[i]
+
+
+cdef class Q1Sampler3D(NonlinearSolveSampler):
+
+    ''' 
+
+    This implements sampling inside a 3D, linear, hexahedral mesh element.
+
+    '''
+
+    def __init__(self):
+        super(Q1Sampler3D, self).__init__()
+        self.dim = 3
+        self.func = Q1Function3D
+        self.jac = Q1Jacobian3D
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef double sample_at_unit_point(self, double* coord, double* vals) nogil:
+        cdef double F, rm, rp, sm, sp, tm, tp
+    
+        rm = 1.0 - coord[0]
+        rp = 1.0 + coord[0]
+        sm = 1.0 - coord[1]
+        sp = 1.0 + coord[1]
+        tm = 1.0 - coord[2]
+        tp = 1.0 + coord[2]
+    
+        F = vals[0]*rm*sm*tm + vals[1]*rp*sm*tm + vals[2]*rp*sp*tm + vals[3]*rm*sp*tm + \
+            vals[4]*rm*sm*tp + vals[5]*rp*sm*tp + vals[6]*rp*sp*tp + vals[7]*rm*sp*tp
+        return 0.125*F
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int check_inside(self, double* mapped_coord) nogil:
+        if (fabs(mapped_coord[0]) - 1.0 > self.inclusion_tol or
+            fabs(mapped_coord[1]) - 1.0 > self.inclusion_tol or 
+            fabs(mapped_coord[2]) - 1.0 > self.inclusion_tol):
+            return 0
+        return 1
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void Q1Function3D(double* fx,
+                              double* x, 
+                              double* vertices, 
+                              double* phys_x) nogil:
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        fx[i] = vertices[0 + i]*rm*sm*tm \
+              + vertices[3 + i]*rp*sm*tm \
+              + vertices[6 + i]*rp*sp*tm \
+              + vertices[9 + i]*rm*sp*tm \
+              + vertices[12 + i]*rm*sm*tp \
+              + vertices[15 + i]*rp*sm*tp \
+              + vertices[18 + i]*rp*sp*tp \
+              + vertices[21 + i]*rm*sp*tp \
+              - 8.0*phys_x[i]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef inline void Q1Jacobian3D(double* rcol,
+                              double* scol,
+                              double* tcol,
+                              double* x, 
+                              double* vertices, 
+                              double* phys_x) nogil:    
+    cdef int i
+    cdef double rm, rp, sm, sp, tm, tp
+    
+    rm = 1.0 - x[0]
+    rp = 1.0 + x[0]
+    sm = 1.0 - x[1]
+    sp = 1.0 + x[1]
+    tm = 1.0 - x[2]
+    tp = 1.0 + x[2]
+    
+    for i in range(3):
+        rcol[i] = -sm*tm*vertices[0 + i]  + sm*tm*vertices[3 + i]  + \
+                   sp*tm*vertices[6 + i]  - sp*tm*vertices[9 + i]  - \
+                   sm*tp*vertices[12 + i] + sm*tp*vertices[15 + i] + \
+                   sp*tp*vertices[18 + i] - sp*tp*vertices[21 + i]
+        scol[i] = -rm*tm*vertices[0 + i]  - rp*tm*vertices[3 + i]  + \
+                   rp*tm*vertices[6 + i]  + rm*tm*vertices[9 + i]  - \
+                   rm*tp*vertices[12 + i] - rp*tp*vertices[15 + i] + \
+                   rp*tp*vertices[18 + i] + rm*tp*vertices[21 + i]
+        tcol[i] = -rm*sm*vertices[0 + i]  - rp*sm*vertices[3 + i]  - \
+                   rp*sp*vertices[6 + i]  - rm*sp*vertices[9 + i]  + \
+                   rm*sm*vertices[12 + i] + rp*sm*vertices[15 + i] + \
+                   rp*sp*vertices[18 + i] + rm*sp*vertices[21 + i]
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def test_hex_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                     np.ndarray[np.float64_t, ndim=1] field_values,
+                     np.ndarray[np.float64_t, ndim=1] physical_x):
+
+    cdef double val
+
+    cdef Q1Sampler3D sampler = Q1Sampler3D()
+
+    val = sampler.sample_at_real_point(<double*> vertices.data,
+                                       <double*> field_values.data,
+                                       <double*> physical_x.data)
+    return val
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def test_tetra_sampler(np.ndarray[np.float64_t, ndim=2] vertices,
+                       np.ndarray[np.float64_t, ndim=1] field_values,
+                       np.ndarray[np.float64_t, ndim=1] physical_x):
+
+    cdef double val
+    cdef double[4] mapped_coord
+
+    sampler = P1Sampler3D()
+
+    val = sampler.sample_at_real_point(<double*> vertices.data,
+                                       <double*> field_values.data,
+                                       <double*> physical_x.data)
+
+    return val

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -19,6 +19,46 @@
 cimport cython
 cimport kdtree_utils
 
+cdef struct ImageContainer:
+    np.float64_t *vp_pos
+    np.float64_t *vp_dir
+    np.float64_t *center
+    np.float64_t *image
+    np.float64_t *zbuffer
+    np.float64_t pdx, pdy
+    np.float64_t bounds[4]
+    int nv[2]
+    int vp_strides[3]
+    int im_strides[3]
+    int vd_strides[3]
+    np.float64_t *x_vec
+    np.float64_t *y_vec
+
+ctypedef void sampler_function(
+                VolumeContainer *vc,
+                np.float64_t v_pos[3],
+                np.float64_t v_dir[3],
+                np.float64_t enter_t,
+                np.float64_t exit_t,
+                int index[3],
+                void *data) nogil
+
+
+cdef class ImageSampler:
+    cdef ImageContainer *image
+    cdef sampler_function *sampler
+    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object azbuffer
+    cdef void *supp_data
+    cdef np.float64_t width[3]
+
+    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
+
+    cdef void calculate_extent(self, np.float64_t extrema[4],
+                               VolumeContainer *vc) nogil
+
+    cdef void setup(self, PartitionedGrid pg)
+
 cdef struct VolumeContainer:
     int n_fields
     np.float64_t **data

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -30,15 +30,6 @@
 
 DEF Nch = 4
 
-ctypedef void sampler_function(
-                VolumeContainer *vc,
-                np.float64_t v_pos[3],
-                np.float64_t v_dir[3],
-                np.float64_t enter_t,
-                np.float64_t exit_t,
-                int index[3],
-                void *data) nogil
-
 cdef class PartitionedGrid:
 
     @cython.boundscheck(False)
@@ -183,32 +174,12 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
-cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
-    np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
-    np.float64_t pdx, pdy
-    np.float64_t bounds[4]
-    int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
-    np.float64_t *x_vec
-    np.float64_t *y_vec
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
     void *supp_data
 
 cdef class ImageSampler:
-    cdef ImageContainer *image
-    cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
-    cdef public object azbuffer
-    cdef void *supp_data
-    cdef np.float64_t width[3]
     def __init__(self,
                   np.ndarray vp_pos,
                   np.ndarray vp_dir,

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_construction.h
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.h
@@ -0,0 +1,36 @@
+#define MAX_NUM_TRI 12
+#define HEX_NV 8
+#define HEX_NT 12
+#define TETRA_NV 4
+#define TETRA_NT 4
+
+// This array is used to triangulate the hexahedral mesh elements
+// Each element has six faces with two triangles each.
+// The vertex ordering convention is assumed to follow that used
+// here: http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
+// Note that this is the case for Exodus II data.
+int triangulate_hex[MAX_NUM_TRI][3] = {
+  {0, 2, 1}, {0, 3, 2}, // Face is 3 2 1 0 
+  {4, 5, 6}, {4, 6, 7}, // Face is 4 5 6 7
+  {0, 1, 5}, {0, 5, 4}, // Face is 0 1 5 4
+  {1, 2, 6}, {1, 6, 5}, // Face is 1 2 6 5
+  {0, 7, 3}, {0, 4, 7}, // Face is 3 0 4 7
+  {3, 6, 2}, {3, 7, 6}  // Face is 2 3 7 6
+};
+
+// Similarly, this is used to triangulate the tetrahedral cells
+int triangulate_tetra[MAX_NUM_TRI][3] = {
+  {0, 1, 2}, 
+  {0, 1, 3},
+  {0, 2, 3},
+  {1, 2, 3},
+
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1},
+  {-1, -1, -1}
+};

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_construction.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.pxd
@@ -0,0 +1,12 @@
+from pyembree.rtcore cimport \
+    Vertex, \
+    Triangle, \
+    Vec3f
+
+ctypedef struct MeshDataContainer:
+    Vertex* vertices       # array of triangle vertices
+    Triangle* indices      # which vertices belong to which triangles
+    double* field_data     # the field values at the vertices
+    int* element_indices   # which vertices belong to which *element*
+    int tpe                # the number of triangles per element
+    int vpe                # the number of vertices per element

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_construction.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_construction.pyx
@@ -0,0 +1,190 @@
+"""
+This file contains the ElementMesh, which represents the target that the 
+rays will be cast at when rendering finite element data. This class handles
+the interface between the internal representation of the mesh and the pyembree
+representation.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+cimport cython
+cimport pyembree.rtcore as rtc 
+from mesh_traversal cimport YTEmbreeScene
+cimport pyembree.rtcore_geometry as rtcg
+cimport pyembree.rtcore_ray as rtcr
+cimport pyembree.rtcore_geometry_user as rtcgu
+from mesh_samplers cimport \
+    sample_hex, \
+    sample_tetra
+from pyembree.rtcore cimport \
+    Vertex, \
+    Triangle, \
+    Vec3f
+from libc.stdlib cimport malloc, free
+import numpy as np
+
+cdef extern from "mesh_construction.h":
+    enum:
+        MAX_NUM_TRI
+        
+    int HEX_NV
+    int HEX_NT
+    int TETRA_NV
+    int TETRA_NT
+    int triangulate_hex[MAX_NUM_TRI][3]
+    int triangulate_tetra[MAX_NUM_TRI][3]
+
+
+cdef class ElementMesh:
+    r'''
+
+    Currently, we handle non-triangular mesh types by converting them 
+    to triangular meshes. This class performs this transformation.
+    Currently, this is implemented for hexahedral and tetrahedral
+    meshes.
+
+    Parameters
+    ----------
+
+    scene : EmbreeScene
+        This is the scene to which the constructed polygons will be
+        added.
+    vertices : a np.ndarray of floats. 
+        This specifies the x, y, and z coordinates of the vertices in 
+        the polygon mesh. This should either have the shape 
+        (num_vertices, 3). For example, vertices[2][1] should give the 
+        y-coordinate of the 3rd vertex in the mesh.
+    indices : a np.ndarray of ints
+        This should either have the shape (num_elements, 4) or 
+        (num_elements, 8) for tetrahedral and hexahedral meshes, 
+        respectively. For tetrahedral meshes, each element will 
+        be represented by four triangles in the scene. For hex meshes,
+        each element will be represented by 12 triangles, 2 for each 
+        face. For hex meshes, we assume that the node ordering is as
+        defined here: 
+        http://homepages.cae.wisc.edu/~tautges/papers/cnmev3.pdf
+            
+    '''
+
+    cdef Vertex* vertices
+    cdef Triangle* indices
+    cdef unsigned int mesh
+    cdef double* field_data
+    cdef rtcg.RTCFilterFunc filter_func
+    cdef int tpe, vpe
+    cdef int[MAX_NUM_TRI][3] tri_array
+    cdef int* element_indices
+    cdef MeshDataContainer datac
+
+    def __init__(self, YTEmbreeScene scene,
+                 np.ndarray vertices, 
+                 np.ndarray indices,
+                 np.ndarray data):
+
+        # We now need to figure out whether we've been handed hexahedra or tetrahedra.
+        if indices.shape[1] == 8:
+            self.vpe = HEX_NV
+            self.tpe = HEX_NT
+            self.tri_array = triangulate_hex
+        elif indices.shape[1] == 4:
+            self.vpe = TETRA_NV
+            self.tpe = TETRA_NT
+            self.tri_array = triangulate_tetra
+        else:
+            raise NotImplementedError
+
+        self._build_from_indices(scene, vertices, indices)
+        self._set_field_data(scene, data)
+        self._set_sampler_type(scene)
+
+    cdef void _build_from_indices(self, YTEmbreeScene scene,
+                                  np.ndarray vertices_in,
+                                  np.ndarray indices_in):
+        cdef int i, j, ind
+        cdef int nv = vertices_in.shape[0]
+        cdef int ne = indices_in.shape[0]
+        cdef int nt = self.tpe*ne
+
+        cdef unsigned int mesh = rtcg.rtcNewTriangleMesh(scene.scene_i,
+                    rtcg.RTC_GEOMETRY_STATIC, nt, nv, 1)
+
+        # first just copy over the vertices
+        cdef Vertex* vertices = <Vertex*> malloc(nv * sizeof(Vertex))
+        for i in range(nv):
+            vertices[i].x = vertices_in[i, 0]
+            vertices[i].y = vertices_in[i, 1]
+            vertices[i].z = vertices_in[i, 2]       
+        rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_VERTEX_BUFFER,
+                          vertices, 0, sizeof(Vertex))
+
+        # now build up the triangles
+        cdef Triangle* triangles = <Triangle*> malloc(nt * sizeof(Triangle))
+        for i in range(ne):
+            for j in range(self.tpe):
+                triangles[self.tpe*i+j].v0 = indices_in[i][self.tri_array[j][0]]
+                triangles[self.tpe*i+j].v1 = indices_in[i][self.tri_array[j][1]]
+                triangles[self.tpe*i+j].v2 = indices_in[i][self.tri_array[j][2]]
+        rtcg.rtcSetBuffer(scene.scene_i, mesh, rtcg.RTC_INDEX_BUFFER,
+                          triangles, 0, sizeof(Triangle))
+
+        cdef int* element_indices = <int *> malloc(ne * self.vpe * sizeof(int))    
+        for i in range(ne):
+            for j in range(self.vpe):
+                element_indices[i*self.vpe + j] = indices_in[i][j]
+
+        self.element_indices = element_indices
+        self.vertices = vertices
+        self.indices = triangles
+        self.mesh = mesh
+
+    cdef void _set_field_data(self, YTEmbreeScene scene,
+                              np.ndarray data_in):
+
+        cdef int ne = data_in.shape[0]
+        cdef double* field_data = <double *> malloc(ne * self.vpe * sizeof(double))
+
+        for i in range(ne):
+            for j in range(self.vpe):
+                field_data[i*self.vpe+j] = data_in[i][j]
+
+        self.field_data = field_data
+
+        cdef MeshDataContainer datac
+        datac.vertices = self.vertices
+        datac.indices = self.indices
+        datac.field_data = self.field_data
+        datac.element_indices = self.element_indices
+        datac.tpe = self.tpe
+        datac.vpe = self.vpe
+        self.datac = datac
+        
+        rtcg.rtcSetUserData(scene.scene_i, self.mesh, &self.datac)
+
+    cdef void _set_sampler_type(self, YTEmbreeScene scene):
+
+        if self.vpe == 8:
+            self.filter_func = <rtcg.RTCFilterFunc> sample_hex
+        elif self.vpe == 4:
+            self.filter_func = <rtcg.RTCFilterFunc> sample_tetra
+        else:
+            print "Error - sampler type not implemented."
+            raise NotImplementedError
+
+        rtcg.rtcSetIntersectionFilterFunction(scene.scene_i,
+                                              self.mesh,
+                                              self.filter_func)
+        
+    def __dealloc__(self):
+        free(self.field_data)
+        free(self.element_indices)
+        free(self.vertices)
+        free(self.indices)

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_samplers.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_samplers.pxd
@@ -0,0 +1,9 @@
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+cimport cython
+
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil
+
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_samplers.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_samplers.pyx
@@ -0,0 +1,138 @@
+"""
+This file contains functions that sample a surface mesh at the point hit by
+a ray. These can be used with pyembree in the form of "filter feedback functions."
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+from pyembree.rtcore cimport Vec3f, Triangle, Vertex
+from yt.utilities.lib.mesh_construction cimport MeshDataContainer
+from yt.utilities.lib.element_mappings cimport \
+    ElementSampler, \
+    P1Sampler3D, \
+    Q1Sampler3D
+cimport numpy as np
+cimport cython
+from libc.math cimport fabs, fmax
+
+cdef ElementSampler Q1Sampler = Q1Sampler3D()
+cdef ElementSampler P1Sampler = P1Sampler3D()
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void get_hit_position(double* position,
+                           void* userPtr,
+                           rtcr.RTCRay& ray) nogil:
+    cdef int primID, i
+    cdef double[3][3] vertex_positions
+    cdef Triangle tri
+    cdef MeshDataContainer* data
+
+    primID = ray.primID
+    data = <MeshDataContainer*> userPtr
+    tri = data.indices[primID]
+
+    vertex_positions[0][0] = data.vertices[tri.v0].x
+    vertex_positions[0][1] = data.vertices[tri.v0].y
+    vertex_positions[0][2] = data.vertices[tri.v0].z
+
+    vertex_positions[1][0] = data.vertices[tri.v1].x
+    vertex_positions[1][1] = data.vertices[tri.v1].y
+    vertex_positions[1][2] = data.vertices[tri.v1].z
+
+    vertex_positions[2][0] = data.vertices[tri.v2].x
+    vertex_positions[2][1] = data.vertices[tri.v2].y
+    vertex_positions[2][2] = data.vertices[tri.v2].z
+
+    for i in range(3):
+        position[i] = vertex_positions[0][i]*(1.0 - ray.u - ray.v) + \
+                      vertex_positions[1][i]*ray.u + \
+                      vertex_positions[2][i]*ray.v
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void sample_hex(void* userPtr,
+                     rtcr.RTCRay& ray) nogil:
+    cdef int ray_id, elem_id, i
+    cdef double val
+    cdef double[8] field_data
+    cdef int[8] element_indices
+    cdef double[24] vertices
+    cdef double[3] position
+    cdef MeshDataContainer* data
+
+    data = <MeshDataContainer*> userPtr
+    ray_id = ray.primID
+    if ray_id == -1:
+        return
+
+    # ray_id records the id number of the hit according to
+    # embree, in which the primitives are triangles. Here,
+    # we convert this to the element id by dividing by the
+    # number of triangles per element.
+    elem_id = ray_id / data.tpe
+
+    get_hit_position(position, userPtr, ray)
+    
+    for i in range(8):
+        element_indices[i] = data.element_indices[elem_id*8+i]
+        field_data[i]      = data.field_data[elem_id*8+i]
+
+    for i in range(8):
+        vertices[i*3]     = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
+
+    val = Q1Sampler.sample_at_real_point(vertices, field_data, position)
+    ray.time = val
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+cdef void sample_tetra(void* userPtr,
+                       rtcr.RTCRay& ray) nogil:
+
+    cdef int ray_id, elem_id, i
+    cdef double val
+    cdef double[4] field_data
+    cdef int[4] element_indices
+    cdef double[12] vertices
+    cdef double[3] position
+    cdef MeshDataContainer* data
+
+    data = <MeshDataContainer*> userPtr
+    ray_id = ray.primID
+    if ray_id == -1:
+        return
+
+    get_hit_position(position, userPtr, ray)
+
+    # ray_id records the id number of the hit according to
+    # embree, in which the primitives are triangles. Here,
+    # we convert this to the element id by dividing by the
+    # number of triangles per element.    
+    elem_id = ray_id / data.tpe
+
+    for i in range(4):
+        element_indices[i] = data.element_indices[elem_id*4+i]
+        field_data[i] = data.field_data[elem_id*4+i]
+        vertices[i*3] = data.vertices[element_indices[i]].x
+        vertices[i*3 + 1] = data.vertices[element_indices[i]].y
+        vertices[i*3 + 2] = data.vertices[element_indices[i]].z    
+
+    val = P1Sampler.sample_at_real_point(vertices, field_data, position)
+    ray.time = val

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_traversal.pxd
--- /dev/null
+++ b/yt/utilities/lib/mesh_traversal.pxd
@@ -0,0 +1,7 @@
+cimport pyembree.rtcore
+cimport pyembree.rtcore_scene as rtcs
+cimport pyembree.rtcore_ray
+
+cdef class YTEmbreeScene:
+    cdef rtcs.RTCScene scene_i
+

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/mesh_traversal.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -0,0 +1,127 @@
+"""
+This file contains the MeshSampler class, which handles casting rays at a
+MeshSource using pyembree.
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport cython
+cimport numpy as np
+import numpy as np
+from libc.stdlib cimport malloc, free
+cimport pyembree.rtcore as rtc
+cimport pyembree.rtcore_ray as rtcr
+cimport pyembree.rtcore_geometry as rtcg
+cimport pyembree.rtcore_scene as rtcs
+from grid_traversal cimport ImageSampler, \
+    ImageContainer
+from cython.parallel import prange, parallel, threadid
+
+rtc.rtcInit(NULL)
+rtc.rtcSetErrorFunction(error_printer)
+
+cdef void error_printer(const rtc.RTCError code, const char *_str):
+    print "ERROR CAUGHT IN EMBREE"
+    rtc.print_error(code)
+    print "ERROR MESSAGE:", _str
+
+cdef class YTEmbreeScene:
+
+    def __init__(self):
+        self.scene_i = rtcs.rtcNewScene(rtcs.RTC_SCENE_STATIC, rtcs.RTC_INTERSECT1)
+
+    def __dealloc__(self):
+        rtcs.rtcDeleteScene(self.scene_i)
+
+cdef class MeshSampler(ImageSampler):
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __call__(self, 
+                 YTEmbreeScene scene,
+                 int num_threads = 0):
+        '''
+
+        This function is supposed to cast the rays and return the
+        image.
+
+        '''
+
+        rtcs.rtcCommit(scene.scene_i)
+        cdef int vi, vj, i, j, ni, nj, nn
+        cdef np.int64_t offset
+        cdef ImageContainer *im = self.image
+        cdef np.int64_t elemID
+        cdef np.float64_t value
+        cdef np.float64_t *v_pos
+        cdef np.float64_t *v_dir
+        cdef np.int64_t nx, ny, size
+        cdef np.float64_t px, py
+        cdef np.float64_t width[3]
+        for i in range(3):
+            width[i] = self.width[i]
+        cdef np.ndarray[np.float64_t, ndim=1] data
+        nx = im.nv[0]
+        ny = im.nv[1]
+        size = nx * ny
+        data = np.empty(size, dtype="float64")
+        cdef rtcr.RTCRay ray
+        if im.vd_strides[0] == -1:
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in range(size):
+                vj = j % ny
+                vi = (j - vj) / ny
+                vj = vj
+                # Dynamically calculate the position
+                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                for i in range(3):
+                    ray.org[i] = v_pos[i]
+                    ray.dir[i] = im.vp_dir[i]
+                ray.tnear = 0.0
+                ray.tfar = 1e37
+                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.mask = -1
+                ray.time = 0
+                rtcs.rtcIntersect(scene.scene_i, ray)
+                data[j] = ray.time
+            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+            free(v_pos)
+        else:
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            # If we do not have a simple image plane, we have to cast all
+            # our rays 
+            for j in range(size):
+                offset = j * 3
+                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
+                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                for i in range(3):
+                    ray.org[i] = v_pos[i]
+                    ray.dir[i] = v_dir[i]
+                ray.tnear = 0.0
+                ray.tfar = 1e37
+                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+                ray.mask = -1
+                ray.time = 0
+                rtcs.rtcIntersect(scene.scene_i, ray)
+                data[j] = ray.time
+            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+            free(v_pos)
+            free(v_dir)

diff -r a5fb8a8d19009e8aa9b2c8e0859bc2c7c7949934 -r fceb5ab0e3c639902f1d00e9ad6413367fd8fe27 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -372,9 +372,10 @@
                                 talpha = image[x0, yi0, 3]
                                 image[x0, yi0, 3] = alpha[3] + talpha * (1 - alpha[3])
                                 for i in range(3):
-                                    image[x0, yi0, i] = (alpha[3]*alpha[i] + image[x0, yi0, i]*talpha*(1.0-alpha[3]))/image[x0,yi0,3]
                                     if image[x0, yi0, 3] == 0.0:
                                         image[x0, yi0, i] = 0.0
+                                    else:
+                                        image[x0, yi0, i] = (alpha[3]*alpha[i] + image[x0, yi0, i]*talpha*(1.0-alpha[3]))/image[x0,yi0,3]
                             else:
                                 for i in range(4):
                                     image[x0, yi0, i] = alpha[i]

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list