[yt-svn] commit/yt: 25 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 2 11:58:33 PST 2015


25 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/fc452afb0cd1/
Changeset:   fc452afb0cd1
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 21:37:41+00:00
Summary:     Starting to refactor TransferFunctionHelper.
Affected #:  1 file

diff -r 52b57ac913dd7e334914c7e8985f35ebb1618346 -r fc452afb0cd1d2053fcaa471ca24ab6e2f9f7a71 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -21,11 +21,19 @@
 from matplotlib.figure import Figure
 from yt.extern.six.moves import StringIO
 import numpy as np
+import functools
 
+def invalidate_tf(func):
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs)
+        func(self, *args, **kwargs)
+        self._tf_valid = False
+    return wrapper
 
 class TransferFunctionHelper(object):
 
     profiles = None
+    _tf_valid = False
 
     def __init__(self, ds):
         r"""A transfer function helper.
@@ -51,6 +59,7 @@
         self.grey_opacity = True 
         self.profiles = {}
 
+    @invalidate_tf
     def set_bounds(self, bounds=None):
         """
         Set the bounds of the transfer function.
@@ -75,6 +84,7 @@
             assert(self.bounds[1] > 0.0)
         return
 
+    @invalidate_tf
     def set_field(self, field):
         """
         Set the field to be rendered
@@ -87,6 +97,7 @@
         self.field = field
         self.log = self.ds._get_field_info(self.field).take_log
 
+    @invalidate_tf
     def set_log(self, log):
         """
         Set whether or not the transfer function should be in log or linear
@@ -102,7 +113,8 @@
         self.ds.index
         self.ds._get_field_info(self.field).take_log = log
 
-    def build_transfer_function(self):
+    @property
+    def tf(self):
         """
         Builds the transfer function according to the current state of the
         TransferFunctionHelper.
@@ -117,6 +129,8 @@
         A ColorTransferFunction object.
 
         """
+        if self._tf_valid:
+            return self._tf
         if self.bounds is None:
             mylog.info('Calculating data bounds. This may take a while.' +
                        '  Set the .bounds to avoid this.')
@@ -127,11 +141,13 @@
         else:
             mi, ma = self.bounds
 
-        self.tf = ColorTransferFunction((mi, ma),
+        self._tf = ColorTransferFunction((mi, ma),
                                         grey_opacity=self.grey_opacity,
                                         nbins=512)
-        return self.tf
+        self._tf_valid = True
+        return self._tf
 
+    @invalidate_tf
     def setup_default(self):
         """docstring for setup_default"""
         if self.log:
@@ -157,8 +173,6 @@
         If fn is None, will return an image to an IPython notebook.
 
         """
-        if self.tf is None:
-            self.build_transfer_function()
         tf = self.tf
         if self.log:
             xfunc = np.logspace


https://bitbucket.org/yt_analysis/yt/commits/adeeda4cc7dd/
Changeset:   adeeda4cc7dd
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:24:20+00:00
Summary:     First attempt at speeding up occlusion.  Might not speed things up.
Affected #:  1 file

diff -r fc452afb0cd1d2053fcaa471ca24ab6e2f9f7a71 -r adeeda4cc7dd438b8800c3c6cdd929ae7dd3ef69 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -282,7 +282,7 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef int vi, vj, hit, i, j, k, ni, nj, nn, xi, yi
         cdef np.int64_t offset
         cdef np.int64_t iter[4]
         cdef VolumeContainer *vc = pg.container
@@ -314,6 +314,9 @@
         cdef ImageAccumulator *idata
         cdef np.float64_t px, py
         cdef np.float64_t width[3]
+        cdef np.float64_t delta[3][2], delta0[3]
+        cdef int use_vec
+        cdef np.float64_t dij[3][3]
         for i in range(3):
             width[i] = self.width[i]
         if im.vd_strides[0] == -1:
@@ -356,6 +359,34 @@
                     for i in range(3): v_dir[i] = im.vp_dir[i + offset]
                     if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
                         continue
+                    # Before we do *any* image copying, we will apply an early
+                    # termination step.  This uses the information that in
+                    # order to intersect a block, we need to intersect at least
+                    # one face.  So, we build up a set of derived values that
+                    # help us determine if we *know* we don't intersect.
+                    use_vec = 1
+                    if not ((vc.left_edge[0] <= v_pos[0] <= vc.right_edge[0]) and
+                            (vc.left_edge[1] <= v_pos[1] <= vc.right_edge[1]) and
+                            (vc.left_edge[2] <= v_pos[2] <= vc.right_edge[2])):
+                        for i in range(3):
+                            if v_dir[i] < 0:
+                                delta0[i] = vc.left_edge[i]
+                            else:
+                                delta0[i] = vc.right_edge[i]
+                            delta[i][0] = (vc.left_edge[i] - v_pos[i])/delta0[i]
+                            delta[i][1] = (vc.right_edge[i] - v_pos[i])/delta0[i]
+                            for k in range(3):
+                                if i == k: continue
+                                dij[i][j] = v_dir[i] / v_dir[j]
+                        use_vec = 0
+                        for i in range(3):
+                            xi = (i + 1) % 3
+                            yi = (i + 2) % 3
+                            if delta[xi][0] <= dij[xi][i] <= delta[xi][1] and \
+                               delta[yi][0] <= dij[yi][i] <= delta[yi][1]:
+                               use_vec = 1
+                               break
+                    if use_vec == 0: continue
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
                     for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]


https://bitbucket.org/yt_analysis/yt/commits/6b8d93df3f94/
Changeset:   6b8d93df3f94
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:48:09+00:00
Summary:     Fixing j->k
Affected #:  1 file

diff -r adeeda4cc7dd438b8800c3c6cdd929ae7dd3ef69 -r 6b8d93df3f94a69e9f3db4c7e9a15225b7ca9707 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -377,7 +377,7 @@
                             delta[i][1] = (vc.right_edge[i] - v_pos[i])/delta0[i]
                             for k in range(3):
                                 if i == k: continue
-                                dij[i][j] = v_dir[i] / v_dir[j]
+                                dij[i][k] = v_dir[k] / v_dir[k]
                         use_vec = 0
                         for i in range(3):
                             xi = (i + 1) % 3


https://bitbucket.org/yt_analysis/yt/commits/49823ebad173/
Changeset:   49823ebad173
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:54:45+00:00
Summary:     And, messed this up.
Affected #:  1 file

diff -r 6b8d93df3f94a69e9f3db4c7e9a15225b7ca9707 -r 49823ebad17398ab5ec3131fd995469da8f6bbff yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -377,7 +377,7 @@
                             delta[i][1] = (vc.right_edge[i] - v_pos[i])/delta0[i]
                             for k in range(3):
                                 if i == k: continue
-                                dij[i][k] = v_dir[k] / v_dir[k]
+                                dij[i][j] = v_dir[i] / v_dir[k]
                         use_vec = 0
                         for i in range(3):
                             xi = (i + 1) % 3


https://bitbucket.org/yt_analysis/yt/commits/91632e8cbe02/
Changeset:   91632e8cbe02
Branch:      yt
User:        xarthisius
Date:        2015-10-14 22:35:44+00:00
Summary:     Allow to change lens_type while calling volume_render
Affected #:  1 file

diff -r debebbd57949ab80164ef34d9d88f59990c26813 -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -19,7 +19,8 @@
 from yt.funcs import mylog
 
 
-def volume_render(data_source, field=None, fname=None, sigma_clip=None):
+def volume_render(data_source, field=None, fname=None, sigma_clip=None,
+                  lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
     A helper function that creates a default camera view, transfer
@@ -45,6 +46,11 @@
         The resulting image will be clipped before saving, using a threshold
         based on `sigma_clip` multiplied by the standard deviation of the pixel
         values. Recommended values are between 2 and 6. Default: None
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------
@@ -81,6 +87,6 @@
 
     vol = VolumeSource(data_source, field=field)
     sc.add_source(vol)
-    sc.camera = Camera(data_source)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
     im = sc.render(fname=fname, sigma_clip=sigma_clip)
     return im, sc


https://bitbucket.org/yt_analysis/yt/commits/742f41ae1ae0/
Changeset:   742f41ae1ae0
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:55:50+00:00
Summary:     Merging with Kacper's work
Affected #:  20 files

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -31,6 +31,7 @@
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/element_mappings.c
 yt/utilities/lib/FixedInterpolator.c
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
@@ -38,6 +39,7 @@
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/Interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -48,12 +48,12 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v2.png", clip_ratio=6.0)
+sc.render("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render("v3.png", clip_ratio=6.0)
+sc.render("v3.png", sigma_clip=6.0)
 #
 ## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -61,11 +61,11 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v4.png", clip_ratio=6.0)
+sc.render("v4.png", sigma_clip=6.0)
 #
 ## This looks pretty good, now lets go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render("v5.png", clip_ratio=6.0)
+sc.render("v5.png", sigma_clip=6.0)
 
 # This looks great!

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,15 +14,15 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
+    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
+    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render('camera_movement_%04i.png' % frame, clip_ratio=8.0)
+    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/image_background_colors.py
--- a/doc/source/cookbook/image_background_colors.py
+++ b/doc/source/cookbook/image_background_colors.py
@@ -9,7 +9,7 @@
 
 ds = yt.load("Enzo_64/DD0043/data0043")
 im, sc = yt.volume_render(ds, 'density')
-im.write_png("original.png", clip_ratio=8.0)
+im.write_png("original.png", sigma_clip=8.0)
 
 # Our image array can now be transformed to include different background
 # colors.  By default, the background color is black.  The following
@@ -22,10 +22,10 @@
 # None  (0.,0.,0.,0.) <-- Transparent!
 # any rgba list/array: [r,g,b,a], bounded by 0..1
 
-# We include the clip_ratio=8 keyword here to bring out more contrast between
+# We include the sigma_clip=8 keyword here to bring out more contrast between
 # the background and foreground, but it is entirely optional.
 
-im.write_png('black_bg.png', background='black', clip_ratio=8.0)
-im.write_png('white_bg.png', background='white', clip_ratio=8.0)
-im.write_png('green_bg.png', background=[0.,1.,0.,1.], clip_ratio=8.0)
-im.write_png('transparent_bg.png', background=None, clip_ratio=8.0)
+im.write_png('black_bg.png', background='black', sigma_clip=8.0)
+im.write_png('white_bg.png', background='white', sigma_clip=8.0)
+im.write_png('green_bg.png', background=[0.,1.,0.,1.], sigma_clip=8.0)
+im.write_png('transparent_bg.png', background=None, sigma_clip=8.0)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -5,14 +5,14 @@
 
 # We start by building a default volume rendering scene 
 
-im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", clip_ratio=6.0)
+im, sc = yt.volume_render(ds, field=("gas","density"), fname="v0.png", sigma_clip=6.0)
 
 sc.camera.set_width(ds.arr(0.1,'code_length'))
 tf = sc.get_source(0).transfer_function 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-im = sc.render("v1.png", clip_ratio=6.0)
+im = sc.render("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -22,27 +22,27 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-im = sc.render("v2.png", clip_ratio=6.0)
+im = sc.render("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-im = sc.render("v3.png", clip_ratio=6.0)
+im = sc.render("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v4.png", clip_ratio=6.0)
+im = sc.render("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v5.png", clip_ratio=6.0)
+im = sc.render("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -50,13 +50,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v6.png", clip_ratio=6.0)
+im = sc.render("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-im = sc.render("v7.png", clip_ratio=6.0)
+im = sc.render("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -6,14 +6,14 @@
 
 # Create a volume rendering, which will determine data bounds, use the first
 # acceptable field in the field_list, and set up a default transfer function.
-#im, sc = yt.volume_render(ds, fname="%s_volume_rendered.png" % ds, clip_ratio=8.0)
+#im, sc = yt.volume_render(ds, fname="%s_volume_rendered.png" % ds, sigma_clip=8.0)
 
 # You can easily specify a different field
-im, sc = yt.volume_render(ds, field=('gas','density'), fname="%s_density_volume_rendered.png" % ds, clip_ratio=8.0)
+im, sc = yt.volume_render(ds, field=('gas','density'), fname="%s_density_volume_rendered.png" % ds, sigma_clip=8.0)
 
 # Now increase the resolution
 sc.camera.resolution = (512, 512)
-im = sc.render(fname='big.png', clip_ratio=8.0)
+im = sc.render(fname='big.png', sigma_clip=8.0)
 
 # Now modify the transfer function
 # First get the render source, in this case the entire domain, with field ('gas','density')
@@ -25,4 +25,4 @@
         np.log10(ds.quan(5.0e-31, 'g/cm**3')),
         np.log10(ds.quan(1.0e-29, 'g/cm**3')),
         scale=30.0, colormap='RdBu_r')
-im = sc.render(fname='new_tf.png', clip_ratio=None)
+im = sc.render(fname='new_tf.png', sigma_clip=None)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,7 +34,7 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_plane-parallel.png', clip_ratio=6.0)
+sc.render('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -50,7 +50,7 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_perspective.png', clip_ratio=6.0)
+sc.render('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -65,7 +65,7 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-perspective.png', clip_ratio=6.0)
+sc.render('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -79,7 +79,7 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_fisheye.png', clip_ratio=6.0)
+sc.render('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -96,7 +96,7 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_spherical.png', clip_ratio=6.0)
+sc.render('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,4 +111,4 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-spherical.png', clip_ratio=6.0)
\ No newline at end of file
+sc.render('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c doc/source/quickstart/6)_Volume_Rendering.ipynb
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -56,14 +56,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
+      "If we want to apply a clipping, we can specify the `sigma_clip`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cam.show(clip_ratio=4)"
+      "cam.show(sigma_clip=4)"
      ],
      "language": "python",
      "metadata": {},
@@ -83,7 +83,7 @@
       "tf = yt.ColorTransferFunction((-28, -25))\n",
       "tf.add_layers(4, w=0.03)\n",
       "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
+      "cam.show(sigma_clip=4.0)"
      ],
      "language": "python",
      "metadata": {},
@@ -93,4 +93,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -11,10 +11,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import warnings
 import numpy as np
 from yt.visualization.image_writer import write_bitmap, write_image
 from yt.units.yt_array import YTArray
 
+
 class ImageArray(YTArray):
     r"""A custom Numpy ndarray used for images.
 
@@ -237,15 +239,15 @@
         np.clip(out, 0.0, 1.0, out)
         return out
 
-    def write_png(self, filename, clip_ratio=None, background='black',
-                  rescale=True):
+    def write_png(self, filename, sigma_clip=None, background='black',
+                  rescale=True, clip_ratio=None):
         r"""Writes ImageArray to png file.
 
         Parameters
         ----------
         filename: string
             Note that the filename may be modified (a '.png' extension is
             appended if missing).
-        clip_ratio: float, optional
+        sigma_clip: float, optional
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
             images. Default: None
@@ -293,9 +295,13 @@
             filename += '.png'
 
         if clip_ratio is not None:
+            warnings.warn("'clip_ratio' keyword is deprecated. Use 'sigma_clip' instead")
+            sigma_clip = clip_ratio
+
+        if sigma_clip is not None:
             nz = out[:, :, :3][out[:, :, :3].nonzero()]
             return write_bitmap(out.swapaxes(0, 1), filename,
-                                nz.mean() + clip_ratio*nz.std())
+                                nz.mean() + sigma_clip * nz.std())
         else:
             return write_bitmap(out.swapaxes(0, 1), filename)
 

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -70,7 +70,7 @@
         self.normal_vector = None
         self.light = None
         self._resolution = (512, 512)
-        self._width = 1.0
+        self._width = np.array([1.0, 1.0, 1.0])
         self._focus = np.array([0.0]*3)
         self._position = np.array([1.0]*3)
         self.set_lens(lens_type)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -513,7 +513,7 @@
 
 
 class GridSource(LineSource):
-    def __init__(self, data_source, alpha=0.3, cmap='alage',
+    def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
         r"""A render source for drawing grids in a scene.
 

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -90,7 +90,7 @@
 
         return self
 
-    def render(self, fname=None, clip_ratio=None, camera=None):
+    def render(self, fname=None, sigma_clip=None, camera=None):
         r"""Render all sources in the Scene.
 
         Use the current state of the Scene object to render all sources
@@ -101,9 +101,10 @@
         fname: string, optional
             If specified, save the rendering as a bitmap to the file "fname".
             Default: None
-        clip_ratio: float, optional
-            If supplied, the 'max_val' argument to write_bitmap will be handed
-            clip_ratio * image.std()
+        sigma_clip: float, optional
+            Image will be clipped before saving to the standard deviation
+            of the image multiplied by this value.  Useful for enhancing
+            images. Default: None
         camera: :class:`Camera`, optional
             If specified, use a different :class:`Camera` to render the scene.
 
@@ -125,7 +126,7 @@
         self._validate()
         bmp = self.composite(camera=camera)
         if fname is not None:
-            bmp.write_png(fname, clip_ratio=clip_ratio)
+            bmp.write_png(fname, sigma_clip=sigma_clip)
         return bmp
 
     def _validate(self):

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
@@ -21,4 +21,4 @@
 frames = 10
 for i in range(frames):
     sc.camera.yaw(angle/frames)
-    sc.render('test_rot_%04i.png' % i, clip_ratio=6.0)
+    sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/simple_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/simple_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/simple_volume_rendering.py
@@ -14,4 +14,4 @@
     fake_random_ds
 
 ds = fake_random_ds(32)
-im, sc = yt.volume_render(ds, fname='test.png', clip_ratio=4.0)
+im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -28,7 +28,7 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_perspective_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_perspective_%s.png' % field[1], sigma_clip=6.0)
 
 def test_stereoperspective_lens():
     #ds = fake_random_ds(32, fields = field)
@@ -42,7 +42,7 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_stereoperspective_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_stereoperspective_%s.png' % field[1], sigma_clip=6.0)
 
 def test_fisheye_lens():
     ds = fake_random_ds(32, fields = field)
@@ -60,7 +60,7 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_fisheye_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_fisheye_%s.png' % field[1], sigma_clip=6.0)
 
 def test_plane_lens():
     ds = fake_random_ds(32, fields = field)
@@ -76,7 +76,7 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_plane_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_plane_%s.png' % field[1], sigma_clip=6.0)
 
 def test_spherical_lens():
     #ds = fake_random_ds(32, fields = field)
@@ -90,7 +90,7 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_spherical_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_spherical_%s.png' % field[1], sigma_clip=6.0)
 
 def test_stereospherical_lens():
     #ds = fake_random_ds(32, fields = field)
@@ -106,5 +106,5 @@
     tf.grey_opacity = True
     sc.camera = cam
     sc.add_source(vol)
-    sc.render('test_stereospherical_%s.png' % field[1], clip_ratio=6.0)
+    sc.render('test_stereospherical_%s.png' % field[1], sigma_clip=6.0)
 

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -48,9 +48,9 @@
     mi_bound = ((ma-mi)*(0.10))+mi
     ma_bound = ((ma-mi)*(0.90))+mi
     tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-    sc.render('test_scene.png', clip_ratio=6.0)
+    sc.render('test_scene.png', sigma_clip=6.0)
     
     nrot = 2 
     for i in range(nrot):
         sc.camera.pitch(2*np.pi/nrot)
-        sc.render('test_rot_%04i.png' % i, clip_ratio=6.0)
+        sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/test_simple_vr.py
--- a/yt/visualization/volume_rendering/tests/test_simple_vr.py
+++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py
@@ -15,7 +15,7 @@
 
 def test_simple_vr():
     ds = fake_random_ds(32)
-    im, sc = yt.volume_render(ds, fname='test.png', clip_ratio=4.0)
+    im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     print(sc)
     return im, sc
 

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -10,15 +10,15 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource, LineSource, BoxSource
-from yt.utilities.lib.misc_utilities import lines
-from yt.data_objects.api import ImageArray
+from yt.visualization.volume_rendering.api import \
+    Scene, Camera, ZBuffer, \
+    VolumeSource, OpaqueSource
+from yt.testing import assert_almost_equal
 import numpy as np
 np.random.seed(0)
 
+
 def test_composite_vr():
     ds = fake_random_ds(64)
     dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
@@ -53,5 +53,55 @@
     im.write_png("composite.png")
     return im
 
+
+def test_nonrectangular_add():
+    rgba1 = np.ones((64, 1, 4))
+    z1 = np.expand_dims(np.arange(64.), 1)
+
+    rgba2 = np.zeros((64, 1, 4))
+    z2 = np.expand_dims(np.arange(63., -1., -1.), 1)
+
+    exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
+    exact_rgba = np.expand_dims(exact_rgba, 1)
+    exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))
+
+    exact_z = np.concatenate((np.arange(32.), np.arange(31.,-1.,-1.)))
+    exact_z = np.expand_dims(exact_z, 1)
+
+    buff1 = ZBuffer(rgba1, z1)
+    buff2 = ZBuffer(rgba2, z2)
+
+    buff = buff1 + buff2
+
+    assert_almost_equal(buff.rgba, exact_rgba)
+    assert_almost_equal(buff.z, exact_z)
+
+
+def test_rectangular_add():
+    rgba1 = np.ones((8, 8, 4))
+    z1 = np.arange(64.)
+    z1 = z1.reshape((8, 8))
+    buff1 = ZBuffer(rgba1, z1)
+
+    rgba2 = np.zeros((8, 8, 4))
+    z2 = np.arange(63., -1., -1.)
+    z2 = z2.reshape((8, 8))
+    buff2 = ZBuffer(rgba2, z2)
+
+    buff = buff1 + buff2
+
+    exact_rgba = np.empty((8, 8, 4), dtype=np.float64)
+    exact_rgba[0:4,0:8,:] = 1.0
+    exact_rgba[4:8,0:8,:] = 0.0
+
+    exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
+    exact_z = np.expand_dims(exact_z, 1)
+    exact_z = exact_z.reshape(8, 8)
+
+    assert_almost_equal(buff.rgba, exact_rgba)
+    assert_almost_equal(buff.z, exact_z)
+
 if __name__ == "__main__":
     im = test_composite_vr()
+    test_nonrectangular_add()
+    test_rectangular_add()

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -19,7 +19,8 @@
 from yt.funcs import mylog
 
 
-def volume_render(data_source, field=None, fname=None, clip_ratio=None):
+def volume_render(data_source, field=None, fname=None, sigma_clip=None,
+                  lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
     A helper function that creates a default camera view, transfer
@@ -41,10 +42,15 @@
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename
         in png format.
-    clip_ratio: float, optional
-        If specified, the resulting image will be clipped before saving,
-        using a threshold based on clip_ratio multiplied by the standard
-        deviation of the pixel values. Recommended values are between 2 and 6.
+    sigma_clip: float
+        The resulting image will be clipped before saving, using a threshold
+        based on `sigma_clip` multiplied by the standard deviation of the pixel
+        values. Recommended values are between 2 and 6. Default: None
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------
@@ -58,7 +64,7 @@
     Example:
     >>> import yt
     >>> ds = yt.load("Enzo_64/DD0046/DD0046")
-    >>> im, sc = yt.volume_render(ds, fname='test.png', clip_ratio=4.0)
+    >>> im, sc = yt.volume_render(ds, fname='test.png')
     """
     data_source = data_source_or_all(data_source)
     sc = Scene()
@@ -81,6 +87,6 @@
 
     vol = VolumeSource(data_source, field=field)
     sc.add_source(vol)
-    sc.camera = Camera(data_source)
-    im = sc.render(fname=fname, clip_ratio=clip_ratio)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    im = sc.render(fname=fname, sigma_clip=sigma_clip)
     return im, sc

diff -r 49823ebad17398ab5ec3131fd995469da8f6bbff -r 742f41ae1ae0c668c139f207cddae10e2390e32c yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -28,13 +28,16 @@
 
     def __add__(self, other):
         assert(self.shape == other.shape)
-        f_or_b = self.z < other.z
+        f = self.z < other.z
         if self.z.shape[1] == 1:
             # Non-rectangular
-            rgba = (self.rgba * f_or_b[:,None,:])
-            rgba += (other.rgba * (1.0 - f_or_b)[:,None,:])
+            rgba = (self.rgba * f[:,None,:])
+            rgba += (other.rgba * (1.0 - f)[:,None,:])
         else:
-            rgba = (self.rgba.T * f_or_b).T + (other.rgba.T * (1 - f_or_b)).T
+            b = self.z > other.z
+            rgba = np.empty(self.rgba.shape)
+            rgba[f] = self.rgba[f]
+            rgba[b] = other.rgba[b]
         z = np.min([self.z, other.z], axis=0)
         return ZBuffer(rgba, z)
 


https://bitbucket.org/yt_analysis/yt/commits/23f7bc6c886c/
Changeset:   23f7bc6c886c
Branch:      yt
User:        MatthewTurk
Date:        2015-10-14 23:58:03+00:00
Summary:     Syntax error.
Affected #:  1 file

diff -r 742f41ae1ae0c668c139f207cddae10e2390e32c -r 23f7bc6c886c173ee7bd4d8dc5023c5d0ec3ff7c yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -25,15 +25,17 @@
 
 def invalidate_tf(func):
     @functools.wraps(func)
-    def wrapper(self, *args, **kwargs)
-        func(self, *args, **kwargs)
+    def wrapper(self, *args, **kwargs):
+        rv = func(self, *args, **kwargs)
         self._tf_valid = False
+        return rv
     return wrapper
 
 class TransferFunctionHelper(object):
 
     profiles = None
     _tf_valid = False
+    _tf = None
 
     def __init__(self, ds):
         r"""A transfer function helper.
@@ -54,11 +56,13 @@
         self.ds = ds
         self.field = None
         self.log = False
-        self.tf = None
         self.bounds = None
         self.grey_opacity = True 
         self.profiles = {}
 
+    def build_transfer_function(self, *args, **kwargs):
+        self.tf
+
     @invalidate_tf
     def set_bounds(self, bounds=None):
         """


https://bitbucket.org/yt_analysis/yt/commits/b9b17c8521c6/
Changeset:   b9b17c8521c6
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 00:16:49+00:00
Summary:     Fix this again
Affected #:  1 file

diff -r 23f7bc6c886c173ee7bd4d8dc5023c5d0ec3ff7c -r b9b17c8521c61ca5b571dfdd8e533a66d641cdaa yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -377,7 +377,7 @@
                             delta[i][1] = (vc.right_edge[i] - v_pos[i])/delta0[i]
                             for k in range(3):
                                 if i == k: continue
-                                dij[i][j] = v_dir[i] / v_dir[k]
+                                dij[i][k] = v_dir[i] / v_dir[k]
                         use_vec = 0
                         for i in range(3):
                             xi = (i + 1) % 3


https://bitbucket.org/yt_analysis/yt/commits/e9249da48d08/
Changeset:   e9249da48d08
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 05:04:34+00:00
Summary:     Allocate variables inside loop.
Affected #:  1 file

diff -r b9b17c8521c61ca5b571dfdd8e533a66d641cdaa -r e9249da48d084d1b22ba576e57dc160ace5f1f37 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -314,9 +314,9 @@
         cdef ImageAccumulator *idata
         cdef np.float64_t px, py
         cdef np.float64_t width[3]
-        cdef np.float64_t delta[3][2], delta0[3]
+        cdef np.float64_t *delta, *delta0 # [3][2] and [3]
+        cdef np.float64_t *dij # [3][3]
         cdef int use_vec
-        cdef np.float64_t dij[3][3]
         for i in range(3):
             width[i] = self.width[i]
         if im.vd_strides[0] == -1:
@@ -349,10 +349,14 @@
             with nogil, parallel(num_threads = num_threads):
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
+                delta = <np.float64_t*> malloc(sizeof(np.float64_t) * 6)
+                delta0 = <np.float64_t*> malloc(sizeof(np.float64_t) * 3)
+                dij = <np.float64_t*> malloc(sizeof(np.float64_t) * 9)
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 # If we do not have a simple image plane, we have to cast all
                 # our rays 
+                
                 for j in prange(size, schedule="dynamic", chunksize=100):
                     offset = j * 3
                     for i in range(3): v_pos[i] = im.vp_pos[i + offset]
@@ -373,17 +377,17 @@
                                 delta0[i] = vc.left_edge[i]
                             else:
                                 delta0[i] = vc.right_edge[i]
-                            delta[i][0] = (vc.left_edge[i] - v_pos[i])/delta0[i]
-                            delta[i][1] = (vc.right_edge[i] - v_pos[i])/delta0[i]
+                            delta[i*2 + 0] = (vc.left_edge[i] - v_pos[i])
+                            delta[i*2 + 1] = (vc.right_edge[i] - v_pos[i])
                             for k in range(3):
                                 if i == k: continue
-                                dij[i][k] = v_dir[i] / v_dir[k]
+                                dij[i*3 + k] = v_dir[i] / v_dir[k]
                         use_vec = 0
                         for i in range(3):
                             xi = (i + 1) % 3
                             yi = (i + 2) % 3
-                            if delta[xi][0] <= dij[xi][i] <= delta[xi][1] and \
-                               delta[yi][0] <= dij[yi][i] <= delta[yi][1]:
+                            if delta[xi*2 + 0] <= dij[xi*3+i]*delta0[i] <= delta[xi*2+1] and \
+                               delta[yi*2 + 0] <= dij[yi*3+i]*delta0[i] <= delta[yi*2+1]:
                                use_vec = 1
                                break
                     if use_vec == 0: continue
@@ -400,6 +404,9 @@
                 free(v_dir)
                 free(idata)
                 free(v_pos)
+                free(dij)
+                free(delta)
+                free(delta0)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):


https://bitbucket.org/yt_analysis/yt/commits/1dcf691d529f/
Changeset:   1dcf691d529f
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 15:28:08+00:00
Summary:     Rework conditional slightly.
Affected #:  1 file

diff -r e9249da48d084d1b22ba576e57dc160ace5f1f37 -r 1dcf691d529fcfb36904303bd06cad1833734835 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -369,9 +369,9 @@
                     # one face.  So, we build up a set of derived values that
                     # help us determine if we *know* we don't intersect.
                     use_vec = 1
-                    if not ((vc.left_edge[0] <= v_pos[0] <= vc.right_edge[0]) and
-                            (vc.left_edge[1] <= v_pos[1] <= vc.right_edge[1]) and
-                            (vc.left_edge[2] <= v_pos[2] <= vc.right_edge[2])):
+                    if (vc.left_edge[0] > v_pos[0] or v_pos[0] > vc.right_edge[0] or
+                        vc.left_edge[1] > v_pos[1] or v_pos[1] > vc.right_edge[1] or
+                        vc.left_edge[2] > v_pos[2] or v_pos[2] > vc.right_edge[2]):
                         for i in range(3):
                             if v_dir[i] < 0:
                                 delta0[i] = vc.left_edge[i]


https://bitbucket.org/yt_analysis/yt/commits/4e6d2a2547d6/
Changeset:   4e6d2a2547d6
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 17:31:58+00:00
Summary:     This one works, but provides minimal speedup.
Affected #:  1 file

diff -r 1dcf691d529fcfb36904303bd06cad1833734835 -r 4e6d2a2547d6b9c4b83d278776abeb408f4ead13 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -314,9 +314,7 @@
         cdef ImageAccumulator *idata
         cdef np.float64_t px, py
         cdef np.float64_t width[3]
-        cdef np.float64_t *delta, *delta0 # [3][2] and [3]
-        cdef np.float64_t *dij # [3][3]
-        cdef int use_vec
+        cdef int use_vec, max_i
         for i in range(3):
             width[i] = self.width[i]
         if im.vd_strides[0] == -1:
@@ -349,9 +347,6 @@
             with nogil, parallel(num_threads = num_threads):
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
-                delta = <np.float64_t*> malloc(sizeof(np.float64_t) * 6)
-                delta0 = <np.float64_t*> malloc(sizeof(np.float64_t) * 3)
-                dij = <np.float64_t*> malloc(sizeof(np.float64_t) * 9)
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 # If we do not have a simple image plane, we have to cast all
@@ -368,28 +363,33 @@
                     # order to intersect a block, we need to intersect at least
                     # one face.  So, we build up a set of derived values that
                     # help us determine if we *know* we don't intersect.
-                    use_vec = 1
+                    use_vec = 0
                     if (vc.left_edge[0] > v_pos[0] or v_pos[0] > vc.right_edge[0] or
                         vc.left_edge[1] > v_pos[1] or v_pos[1] > vc.right_edge[1] or
                         vc.left_edge[2] > v_pos[2] or v_pos[2] > vc.right_edge[2]):
+                        # Find largest t of intersection with a face.
+                        max_t = -1e300
+                        max_i = -1
                         for i in range(3):
-                            if v_dir[i] < 0:
-                                delta0[i] = vc.left_edge[i]
-                            else:
-                                delta0[i] = vc.right_edge[i]
-                            delta[i*2 + 0] = (vc.left_edge[i] - v_pos[i])
-                            delta[i*2 + 1] = (vc.right_edge[i] - v_pos[i])
-                            for k in range(3):
-                                if i == k: continue
-                                dij[i*3 + k] = v_dir[i] / v_dir[k]
-                        use_vec = 0
-                        for i in range(3):
-                            xi = (i + 1) % 3
-                            yi = (i + 2) % 3
-                            if delta[xi*2 + 0] <= dij[xi*3+i]*delta0[i] <= delta[xi*2+1] and \
-                               delta[yi*2 + 0] <= dij[yi*3+i]*delta0[i] <= delta[yi*2+1]:
-                               use_vec = 1
-                               break
+                            if v_dir[i] > 0 and \
+                              (vc.left_edge[i] - v_pos[i])/v_dir[i] > max_t:
+                                max_t = (vc.left_edge[i] - v_pos[i])/v_dir[i]
+                                max_i = i
+                            elif v_dir[i] < 0 and \
+                              (vc.right_edge[i] - v_pos[i])/v_dir[i] > max_t:
+                                max_t = (vc.right_edge[i] - v_pos[i])/v_dir[i]
+                                max_i = i
+                        xi = (i + 1) % 3
+                        yi = (i + 2) % 3
+                        if max_t < 0 or max_t > 1:
+                            pass
+                        elif ((vc.left_edge[xi] <= v_pos[xi] + v_dir[xi]*max_t
+                            <= vc.right_edge[xi]) and
+                            (vc.left_edge[yi] <= v_pos[yi] + v_dir[yi]*max_t
+                            <= vc.right_edge[yi])):
+                            use_vec = 1
+                    else:
+                        use_vec = 1
                     if use_vec == 0: continue
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
@@ -404,9 +404,6 @@
                 free(v_dir)
                 free(idata)
                 free(v_pos)
-                free(dij)
-                free(delta)
-                free(delta0)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):


https://bitbucket.org/yt_analysis/yt/commits/612f1ddf649e/
Changeset:   612f1ddf649e
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 18:34:46+00:00
Summary:     Revert all my transfer function differences.
Affected #:  1 file

diff -r 4e6d2a2547d6b9c4b83d278776abeb408f4ead13 -r 612f1ddf649e67756789e34a2673b1d156821644 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -21,21 +21,11 @@
 from matplotlib.figure import Figure
 from yt.extern.six.moves import StringIO
 import numpy as np
-import functools
 
-def invalidate_tf(func):
-    @functools.wraps(func)
-    def wrapper(self, *args, **kwargs):
-        rv = func(self, *args, **kwargs)
-        self._tf_valid = False
-        return rv
-    return wrapper
 
 class TransferFunctionHelper(object):
 
     profiles = None
-    _tf_valid = False
-    _tf = None
 
     def __init__(self, ds):
         r"""A transfer function helper.
@@ -56,14 +46,11 @@
         self.ds = ds
         self.field = None
         self.log = False
+        self.tf = None
         self.bounds = None
         self.grey_opacity = True 
         self.profiles = {}
 
-    def build_transfer_function(self, *args, **kwargs):
-        self.tf
-
-    @invalidate_tf
     def set_bounds(self, bounds=None):
         """
         Set the bounds of the transfer function.
@@ -88,7 +75,6 @@
             assert(self.bounds[1] > 0.0)
         return
 
-    @invalidate_tf
     def set_field(self, field):
         """
         Set the field to be rendered
@@ -101,7 +87,6 @@
         self.field = field
         self.log = self.ds._get_field_info(self.field).take_log
 
-    @invalidate_tf
     def set_log(self, log):
         """
         Set whether or not the transfer function should be in log or linear
@@ -117,8 +102,7 @@
         self.ds.index
         self.ds._get_field_info(self.field).take_log = log
 
-    @property
-    def tf(self):
+    def build_transfer_function(self):
         """
         Builds the transfer function according to the current state of the
         TransferFunctionHelper.
@@ -133,8 +117,6 @@
         A ColorTransferFunction object.
 
         """
-        if self._tf_valid:
-            return self._tf
         if self.bounds is None:
             mylog.info('Calculating data bounds. This may take a while.' +
                        '  Set the .bounds to avoid this.')
@@ -145,13 +127,11 @@
         else:
             mi, ma = self.bounds
 
-        self._tf = ColorTransferFunction((mi, ma),
+        self.tf = ColorTransferFunction((mi, ma),
                                         grey_opacity=self.grey_opacity,
                                         nbins=512)
-        self._tf_valid = True
-        return self._tf
+        return self.tf
 
-    @invalidate_tf
     def setup_default(self):
         """docstring for setup_default"""
         if self.log:
@@ -177,6 +157,8 @@
         If fn is None, will return an image to an IPython notebook.
 
         """
+        if self.tf is None:
+            self.build_transfer_function()
         tf = self.tf
         if self.log:
             xfunc = np.logspace


https://bitbucket.org/yt_analysis/yt/commits/c5d3a162a7fb/
Changeset:   c5d3a162a7fb
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 21:53:23+00:00
Summary:     Refactor out lens type for extent computation.
Affected #:  5 files

diff -r 612f1ddf649e67756789e34a2673b1d156821644 -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -47,6 +47,7 @@
                 include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/oct_visitors.pxd",
                          "yt/geometry/grid_container.pxd",

diff -r 612f1ddf649e67756789e34a2673b1d156821644 -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -43,6 +43,10 @@
                 int index[3],
                 void *data) nogil
 
+ctypedef void calculate_extent_function(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil
+
+cdef calculate_extent_function calculate_extent_plane_parallel
 
 cdef class ImageSampler:
     cdef ImageContainer *image
@@ -51,11 +55,7 @@
     cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
-
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
-
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil
+    cdef public object lens_type
 
     cdef void setup(self, PartitionedGrid pg)
 

diff -r 612f1ddf649e67756789e34a2673b1d156821644 -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -174,6 +174,41 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void calculate_extent_plane_parallel(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    # We do this for all eight corners
+    cdef np.float64_t temp
+    cdef np.float64_t *edges[2]
+    cdef np.float64_t cx, cy
+    cdef np.float64_t extrema[4]
+    cdef int i, j, k
+    edges[0] = vc.left_edge
+    edges[1] = vc.right_edge
+    extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                # This should rotate it into the vector plane
+                temp  = edges[i][0] * image.x_vec[0]
+                temp += edges[j][1] * image.x_vec[1]
+                temp += edges[k][2] * image.x_vec[2]
+                if temp < extrema[0]: extrema[0] = temp
+                if temp > extrema[1]: extrema[1] = temp
+                temp  = edges[i][0] * image.y_vec[0]
+                temp += edges[j][1] * image.y_vec[1]
+                temp += edges[k][2] * image.y_vec[2]
+                if temp < extrema[2]: extrema[2] = temp
+                if temp > extrema[3]: extrema[3] = temp
+    cx = cy = 0.0
+    for i in range(3):
+        cx += image.center[i] * image.x_vec[i]
+        cy += image.center[i] * image.y_vec[i]
+    rv[0] = lrint((extrema[0] - cx - image.bounds[0])/image.pdx)
+    rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
+    rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
+    rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
@@ -194,6 +229,7 @@
         cdef ImageContainer *imagec = self.image
         cdef np.ndarray[np.float64_t, ndim=2] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
+        self.lens_type = kwargs.pop("lens_type", None)
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
@@ -235,49 +271,6 @@
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv):
-        # Extrema need to be re-centered
-        cdef np.float64_t cx, cy
-        cdef ImageContainer *im = self.image
-        cdef int i
-        cx = cy = 0.0
-        for i in range(3):
-            cx += im.center[i] * im.x_vec[i]
-            cy += im.center[i] * im.y_vec[i]
-        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
-        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
-        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
-        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil:
-        # We do this for all eight corners
-        cdef np.float64_t temp
-        cdef np.float64_t *edges[2]
-        edges[0] = vc.left_edge
-        edges[1] = vc.right_edge
-        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
-        cdef int i, j, k
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    # This should rotate it into the vector plane
-                    temp  = edges[i][0] * self.image.x_vec[0]
-                    temp += edges[j][1] * self.image.x_vec[1]
-                    temp += edges[k][2] * self.image.x_vec[2]
-                    if temp < extrema[0]: extrema[0] = temp
-                    if temp > extrema[1]: extrema[1] = temp
-                    temp  = edges[i][0] * self.image.y_vec[0]
-                    temp += edges[j][1] * self.image.y_vec[1]
-                    temp += edges[k][2] * self.image.y_vec[2]
-                    if temp < extrema[2]: extrema[2] = temp
-                    if temp > extrema[3]: extrema[3] = temp
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
     def __call__(self, PartitionedGrid pg, int num_threads = 0):
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
@@ -292,13 +285,11 @@
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.float64_t rgba[6]
-        cdef np.float64_t extrema[4]
         cdef np.float64_t max_t
         hit = 0
         cdef np.int64_t nx, ny, size
-        if im.vd_strides[0] == -1:
-            self.calculate_extent(extrema, vc)
-            self.get_start_stop(extrema, iter)
+        if self.lens_type == "plane-parallel":
+            calculate_extent_plane_parallel(self.image, vc, iter)
             iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
             iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
             iter[2] = i64clip(iter[2]-1, 0, im.nv[1])

diff -r 612f1ddf649e67756789e34a2673b1d156821644 -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -107,7 +107,7 @@
                  x_vec=camera.unit_vectors[0],
                  y_vec=camera.unit_vectors[1],
                  width=np.array(camera.width, dtype='float64'),
-                 image=image)
+                 image=image, lens_type="plane-parallel")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -205,8 +205,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="perspective")
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -308,8 +308,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="stereo-perspective")
 
         return sampler_params
 
@@ -483,8 +483,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="fisheye")
 
         return sampler_params
 
@@ -591,7 +591,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type="spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -704,7 +705,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type = "stereo-spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):

diff -r 612f1ddf649e67756789e34a2673b1d156821644 -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -26,8 +26,8 @@
         params['y_vec'],
         params['width'],
     )
-
-    sampler = mesh_traversal.MeshSampler(*args)
+    kwargs = {'lens_type': params['lens_type']}
+    sampler = mesh_traversal.MeshSampler(*args, **kwargs)
     return sampler
 
 
@@ -48,7 +48,7 @@
         params['transfer_function'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
         args[4][:] = render_source.zbuffer.rgba[:]
@@ -72,10 +72,10 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = InterpolatedProjectionSampler(*args)
+    sampler = InterpolatedProjectionSampler(*args, **kwargs)
     return sampler
 
 
@@ -94,10 +94,10 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = ProjectionSampler(*args)
+    sampler = ProjectionSampler(*args, **kwargs)
     return sampler
 
 


https://bitbucket.org/yt_analysis/yt/commits/dc2a2e943fbf/
Changeset:   dc2a2e943fbf
Branch:      yt
User:        MatthewTurk
Date:        2015-10-15 22:34:26+00:00
Summary:     Refactoring calculating extents.
Affected #:  3 files

diff -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -56,6 +56,7 @@
     cdef void *supp_data
     cdef np.float64_t width[3]
     cdef public object lens_type
+    cdef calculate_extent_function *extent_function
 
     cdef void setup(self, PartitionedGrid pg)
 

diff -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -210,6 +210,41 @@
     rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
     rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
 
+# We do this for a bunch of lenses.  Fallback is to grab them from the vector
+# info supplied.
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void calculate_extent_null(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    rv[0] = 0
+    rv[1] = image.nv[0]
+    rv[2] = 0
+    rv[3] = image.nv[1]
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void generate_vector_info_plane_parallel(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef np.float64_t px, py
+    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
+    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
+    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+
+cdef void generate_vector_info_null(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    for i in range(3): v_pos[i] = im.vp_pos[i + vi*3]
+    for i in range(3): v_dir[i] = im.vp_dir[i + vi*3]
+
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
     void *supp_data
@@ -230,6 +265,10 @@
         cdef np.ndarray[np.float64_t, ndim=2] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
         self.lens_type = kwargs.pop("lens_type", None)
+        if self.lens_type == "plane-parallel":
+            self.extent_function = calculate_extent_plane_parallel
+        else:
+            self.extent_function = calculate_extent_null
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
@@ -290,20 +329,16 @@
         cdef np.int64_t nx, ny, size
         if self.lens_type == "plane-parallel":
             calculate_extent_plane_parallel(self.image, vc, iter)
-            iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
-            iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
-            iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
-            iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
-            nx = (iter[1] - iter[0])
-            ny = (iter[3] - iter[2])
-            size = nx * ny
         else:
-            nx = im.nv[0]
-            ny = 1
-            iter[0] = iter[1] = iter[2] = iter[3] = 0
-            size = nx
+            calculate_extent_null(self.image, vc, iter)
+        iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+        iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+        iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+        iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
+        nx = (iter[1] - iter[0])
+        ny = (iter[3] - iter[2])
+        size = nx * ny
         cdef ImageAccumulator *idata
-        cdef np.float64_t px, py
         cdef np.float64_t width[3]
         cdef int use_vec, max_i
         for i in range(3):
@@ -313,16 +348,14 @@
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
                 idata.supp_data = self.supp_data
                 v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
                 for j in prange(size, schedule="static",chunksize=1):
                     vj = j % ny
                     vi = (j - vj) / ny + iter[0]
                     vj = vj + iter[2]
                     # Dynamically calculate the position
-                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+                    generate_vector_info_plane_parallel(im, vi, vj, width,
+                        v_dir, v_pos)
                     offset = im.im_strides[0] * vi + im.im_strides[1] * vj
                     for i in range(Nch): idata.rgba[i] = im.image[i + offset]
                     if im.zbuffer != NULL:
@@ -334,6 +367,7 @@
                     for i in range(Nch): im.image[i + offset] = idata.rgba[i]
                 free(idata)
                 free(v_pos)
+                free(v_dir)
         else:
             with nogil, parallel(num_threads = num_threads):
                 idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
@@ -344,44 +378,10 @@
                 # our rays 
                 
                 for j in prange(size, schedule="dynamic", chunksize=100):
-                    offset = j * 3
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
+                    generate_vector_info_null(im, j, 0, width,
+                        v_dir, v_pos)
                     if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
                         continue
-                    # Before we do *any* image copying, we will apply an early
-                    # termination step.  This uses the information that in
-                    # order to intersect a block, we need to intersect at least
-                    # one face.  So, we build up a set of derived values that
-                    # help us determine if we *know* we don't intersect.
-                    use_vec = 0
-                    if (vc.left_edge[0] > v_pos[0] or v_pos[0] > vc.right_edge[0] or
-                        vc.left_edge[1] > v_pos[1] or v_pos[1] > vc.right_edge[1] or
-                        vc.left_edge[2] > v_pos[2] or v_pos[2] > vc.right_edge[2]):
-                        # Find largest t of intersection with a face.
-                        max_t = -1e300
-                        max_i = -1
-                        for i in range(3):
-                            if v_dir[i] > 0 and \
-                              (vc.left_edge[i] - v_pos[i])/v_dir[i] > max_t:
-                                max_t = (vc.left_edge[i] - v_pos[i])/v_dir[i]
-                                max_i = i
-                            elif v_dir[i] < 0 and \
-                              (vc.right_edge[i] - v_pos[i])/v_dir[i] > max_t:
-                                max_t = (vc.right_edge[i] - v_pos[i])/v_dir[i]
-                                max_i = i
-                        xi = (i + 1) % 3
-                        yi = (i + 2) % 3
-                        if max_t < 0 or max_t > 1:
-                            pass
-                        elif ((vc.left_edge[xi] <= v_pos[xi] + v_dir[xi]*max_t
-                            <= vc.right_edge[xi]) and
-                            (vc.left_edge[yi] <= v_pos[yi] + v_dir[yi]*max_t
-                            <= vc.right_edge[yi])):
-                            use_vec = 1
-                    else:
-                        use_vec = 1
-                    if use_vec == 0: continue
                     # Note that for Nch != 3 we need a different offset into
                     # the image object than for the vectors!
                     for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]

diff -r c5d3a162a7fb05d0c300b44e4a6d2cf520b7ac2f -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -76,6 +76,7 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/utilities/lib/amr_kdtools.pxd",
                          "yt/utilities/lib/ContourFinding.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
@@ -178,7 +179,8 @@
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/mesh_traversal.pxd"])
+                             depends=["yt/utilities/lib/mesh_traversal.pxd",
+                                      "yt/utilities/lib/grid_traversal.pxd"])
         config.add_extension("mesh_samplers",
                              ["yt/utilities/lib/mesh_samplers.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],


https://bitbucket.org/yt_analysis/yt/commits/5214d592ca7c/
Changeset:   5214d592ca7c
Branch:      yt
User:        MatthewTurk
Date:        2015-10-16 00:37:16+00:00
Summary:     Simplify further the extent and vector computation.
Affected #:  3 files

diff -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -33,6 +33,7 @@
     int vd_strides[3]
     np.float64_t *x_vec
     np.float64_t *y_vec
+    int z_strides[2]
 
 ctypedef void sampler_function(
                 VolumeContainer *vc,
@@ -48,6 +49,14 @@
 
 cdef calculate_extent_function calculate_extent_plane_parallel
 
+ctypedef void generate_vector_info_function(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
+
+cdef generate_vector_info_function generate_vector_info_plane_parallel
+cdef generate_vector_info_function generate_vector_info_null
+
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sampler_function *sampler
@@ -57,6 +66,7 @@
     cdef np.float64_t width[3]
     cdef public object lens_type
     cdef calculate_extent_function *extent_function
+    cdef generate_vector_info_function *vector_function
 
     cdef void setup(self, PartitionedGrid pg)
 

diff -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -229,21 +229,29 @@
             np.float64_t width[2],
             # Now outbound
             np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
     cdef np.float64_t px, py
     px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
     py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
     v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
     v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
     v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
+    for i in range(3): v_dir[i] = im.vp_dir[i]
 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
 cdef void generate_vector_info_null(ImageContainer *im,
             np.int64_t vi, np.int64_t vj,
             np.float64_t width[2],
             # Now outbound
             np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
     cdef int i
-    for i in range(3): v_pos[i] = im.vp_pos[i + vi*3]
-    for i in range(3): v_dir[i] = im.vp_dir[i + vi*3]
+    for i in range(3):
+        # Here's a funny thing: we use vi here because our *image* will be
+        # flattened.  That means that im.nv will be a better one-d offset,
+        # since vp_pos has funny strides.
+        v_pos[i] = im.vp_pos[vi*3 + i]
+        v_dir[i] = im.vp_dir[vi*3 + i]
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
@@ -267,8 +275,10 @@
         self.lens_type = kwargs.pop("lens_type", None)
         if self.lens_type == "plane-parallel":
             self.extent_function = calculate_extent_plane_parallel
+            self.vector_function = generate_vector_info_plane_parallel
         else:
             self.extent_function = calculate_extent_null
+            self.vector_function = generate_vector_info_null
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
@@ -289,6 +299,10 @@
         imagec.zbuffer = NULL
         if zbuffer is not None:
             imagec.zbuffer = <np.float64_t *> zbuffer.data
+            imagec.z_strides[0]
+            # 2D
+            for i in range(2):
+                imagec.z_strides[i] = zbuffer.strides[i] / 8
         imagec.nv[0] = image.shape[0]
         imagec.nv[1] = image.shape[1]
         for i in range(4): imagec.bounds[i] = bounds[i]
@@ -327,10 +341,7 @@
         cdef np.float64_t max_t
         hit = 0
         cdef np.int64_t nx, ny, size
-        if self.lens_type == "plane-parallel":
-            calculate_extent_plane_parallel(self.image, vc, iter)
-        else:
-            calculate_extent_null(self.image, vc, iter)
+        self.extent_function(self.image, vc, iter)
         iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
         iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
         iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
@@ -343,58 +354,33 @@
         cdef int use_vec, max_i
         for i in range(3):
             width[i] = self.width[i]
-        if im.vd_strides[0] == -1:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="static",chunksize=1):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    # Dynamically calculate the position
-                    generate_vector_info_plane_parallel(im, vi, vj, width,
-                        v_dir, v_pos)
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                    for i in range(Nch): idata.rgba[i] = im.image[i + offset]
-                    if im.zbuffer != NULL:
-                        max_t = im.zbuffer[im.nv[0] * vi + vj]
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + offset] = idata.rgba[i]
-                free(idata)
-                free(v_pos)
-                free(v_dir)
-        else:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                # If we do not have a simple image plane, we have to cast all
-                # our rays 
-                
-                for j in prange(size, schedule="dynamic", chunksize=100):
-                    generate_vector_info_null(im, j, 0, width,
-                        v_dir, v_pos)
-                    if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
-                        continue
-                    # Note that for Nch != 3 we need a different offset into
-                    # the image object than for the vectors!
-                    for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
-                    if im.zbuffer != NULL:
-                        max_t = fclip(im.zbuffer[j], 0.0, 1.0)
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
-                free(v_dir)
-                free(idata)
-                free(v_pos)
+        with nogil, parallel(num_threads = num_threads):
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size, schedule="static", chunksize=100):
+                vj = j % ny
+                vi = (j - vj) / ny + iter[0]
+                vj = vj + iter[2]
+                # Dynamically calculate the position
+                self.vector_function(im, vi, vj, width,
+                    v_dir, v_pos)
+                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                for i in range(Nch): idata.rgba[i] = im.image[i + offset]
+                if im.zbuffer != NULL:
+                    offset = im.z_strides[0] * vi + im.z_strides[1] * vj
+                    max_t = im.zbuffer[offset]
+                else:
+                    max_t = 1.0
+                max_t = fclip(max_t, 0.0, 1.0)
+                walk_volume(vc, v_pos, v_dir, self.sampler,
+                            (<void *> idata), NULL, max_t)
+                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
+                for i in range(Nch): im.image[i + offset] = idata.rgba[i]
+            free(idata)
+            free(v_pos)
+            free(v_dir)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):

diff -r dc2a2e943fbfdad32d6ccfa9329163e7f293dfc3 -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -191,7 +191,7 @@
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
         positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+                           .reshape(camera.resolution[0], camera.resolution[1], 3, order='C')
 
         uv = np.ones(3, dtype='float64')
 


https://bitbucket.org/yt_analysis/yt/commits/112628baf1db/
Changeset:   112628baf1db
Branch:      yt
User:        xarthisius
Date:        2015-10-15 18:22:46+00:00
Summary:     Merging
Affected #:  25 files

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,7 +3,7 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-im, sc = yt.volume_render(ds)
+sc = yt.create_scene(ds)
 cam = sc.camera
 cam.resolution = (512, 512)
 cam.set_width(ds.domain_width/20.0)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -196,10 +196,28 @@
 
 In this recipe, we move a camera through a domain and take multiple volume
 rendering snapshots.
-See :ref:`volume_rendering` for more information.
+See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py
 
+Volume Rendering with Custom Camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_camera_volume_rendering.py
+
+Volume Rendering with a Custom Transfer Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_transfer_function_volume_rendering.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -229,7 +247,7 @@
 This recipe demonstrates how to make semi-opaque volume renderings, but also
 how to step through and try different things to identify the type of volume
 rendering you want.
-See :ref:`volume_rendering` for more information.
+See :ref:`opaque_rendering` for more information.
 
 .. yt_cookbook:: opaque_rendering.py
 
@@ -244,18 +262,20 @@
 
 .. yt_cookbook:: amrkdtree_downsampling.py
 
+.. _cookbook-volume_rendering_annotations:
+
 Volume Rendering with Bounding Box and Overlaid Grids
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to overplot a bounding box on a volume rendering
 as well as overplotting grids representing the level of refinement achieved
 in different regions of the code.
-See :ref:`volume_rendering` for more information.
+See :ref:`volume_rendering_annotations` for more information.
 
 .. yt_cookbook:: rendering_with_box_and_grids.py
 
 Volume Rendering with Annotation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/custom_camera_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -0,0 +1,21 @@
+import yt
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Now increase the resolution
+sc.camera.resolution = (1024, 1024)
+
+# Set the camera focus to a position that is offset from the center of
+# the domain
+sc.camera.focus = ds.arr([0.3, 0.3, 0.3], 'unitary')
+
+# Move the camera position to the other side of the dataset
+sc.camera.position = ds.arr([0, 0, 0], 'unitary')
+
+# save to disk with a custom filename and apply sigma clipping to eliminate
+# very bright pixels, producing an image with better contrast.
+sc.render(fname='custom.png', sigma_clip=4)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -0,0 +1,24 @@
+import yt
+import numpy as np
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Modify the transfer function
+
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+
+# Clear the transfer function
+render_source.transfer_function.clear()
+
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+    np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+    np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+    scale=30.0, colormap='RdBu_r')
+
+im = sc.render(fname='new_tf.png', sigma_clip=None)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -4,7 +4,7 @@
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-im, sc = yt.volume_render(ds, ('gas','density'))
+sc = yt.create_scene(ds, ('gas','density'))
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -1,28 +1,10 @@
 import yt
-import numpy as np
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
 # Create a volume rendering, which will determine data bounds, use the first
 # acceptable field in the field_list, and set up a default transfer function.
-#im, sc = yt.volume_render(ds, fname="%s_volume_rendered.png" % ds, sigma_clip=8.0)
 
-# You can easily specify a different field
-im, sc = yt.volume_render(ds, field=('gas','density'), fname="%s_density_volume_rendered.png" % ds, sigma_clip=8.0)
-
-# Now increase the resolution
-sc.camera.resolution = (512, 512)
-im = sc.render(fname='big.png', sigma_clip=8.0)
-
-# Now modify the transfer function
-# First get the render source, in this case the entire domain, with field ('gas','density')
-render_source = sc.get_source(0)
-# Clear the transfer function
-render_source.transfer_function.clear()
-# Map a range of density values (in log space) to the Reds_r colormap
-render_source.transfer_function.map_to_colormap(
-        np.log10(ds.quan(5.0e-31, 'g/cm**3')),
-        np.log10(ds.quan(1.0e-29, 'g/cm**3')),
-        scale=30.0, colormap='RdBu_r')
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+# This will save a file named 'data0043_density_volume_rendered.png' to disk.
+im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -592,6 +592,7 @@
    :toctree: generated/
 
    ~yt.visualization.volume_rendering.volume_rendering.volume_render
+   ~yt.visualization.volume_rendering.volume_rendering.create_scene
    ~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection
    ~yt.visualization.volume_rendering.scene.Scene
    ~yt.visualization.volume_rendering.camera.Camera

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -60,6 +60,7 @@
 Here is a working example for rendering the IsolatedGalaxy dataset.
 
 .. python-script::
+
   import yt
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
@@ -70,10 +71,10 @@
   # sc is an instance of a Scene object, which allows you to further refine
   # your renderings.
 
-When the volume_render function is called, first an empty
-:class:`~yt.visualization.volume_rendering.scene.Scene` object is
-created. Next, a 
-:class:`~yt.visualization.volume_rendering.api.VolumeSource`
+When the :func:`~yt.visualization.volume_rendering.volume_render` function 
+is called, first an empty 
+:class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
+Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
 into a tree structure to provide back-to-front rendering of fixed-resolution
 blocks of data.  (If the volume elements are grids, this uses a
@@ -106,6 +107,21 @@
 In this example, we don't add on any non-volume rendering sources; however, if
 such sources are added, they will be integrated as well.
 
+Alternatively, if you don't want to immediately generate an image of your
+volume rendering, and you just want access to the default scene object, 
+you can skip this expensive operation by just running the
+:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+
+.. python-script::
+
+  import yt
+  # load the data
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  # volume render the 'density' field 
+  sc = yt.create_scene(ds, 'density')
+
+
 Modifying the Scene
 -------------------
 
@@ -233,6 +249,8 @@
 vertices.  For instance, lines can be used to draw outlines of regions or
 continents.
 
+.. _volume_rendering_annotations:
+
 Annotations
 +++++++++++
 
@@ -244,6 +262,9 @@
 annotations will operate in data space and can draw boxes, grid information,
 and also provide a vector orientation within the image.
 
+For example scripts using these features, 
+see :ref:`cookbook-volume_rendering_annotations`.
+
 Care and Usage of the Camera
 ----------------------------
 
@@ -464,6 +485,8 @@
 
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
+.. _opaque_rendering:
+
 Opacity
 -------
 

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,7 +154,7 @@
     ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
 
 from yt.visualization.volume_rendering.api import \
-    volume_render, ColorTransferFunction, TransferFunction, \
+    volume_render, create_scene, ColorTransferFunction, TransferFunction, \
     off_axis_projection
 import yt.visualization.volume_rendering.api as volume_rendering
 #    TransferFunctionHelper, MultiVariateTransferFunction

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -112,6 +112,7 @@
 class Dataset(object):
 
     default_fluid_type = "gas"
+    default_field = ("gas", "density")
     fluid_types = ("gas", "deposit", "index")
     particle_types = ("io",) # By default we have an 'all'
     particle_types_raw = ("io",)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -67,6 +67,9 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.ds)
 
+class YTSceneFieldNotFound(YTException):
+    pass
+
 class YTCouldNotGenerateField(YTFieldNotFound):
     def __str__(self):
         return "Could field '%s' in %s could not be generated." % (self.fname, self.ds)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -41,12 +41,6 @@
 
         """
 
-        # Make sure vectors are unitless
-        if north_vector is not None:
-            north_vector = YTArray(north_vector, "", dtype='float64')
-        if normal_vector is not None:
-            normal_vector = YTArray(normal_vector, "", dtype='float64')
-
         self.steady_north = steady_north
         if not np.dot(normal_vector, normal_vector) > 0:
             mylog.error("Normal vector is null")
@@ -63,6 +57,13 @@
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         mylog.debug('Setting normalized vectors' + str(normal_vector)
                     + str(north_vector))
+
+        # Make sure vectors are unitless
+        if north_vector is not None:
+            north_vector = YTArray(north_vector, "", dtype='float64')
+        if normal_vector is not None:
+            normal_vector = YTArray(normal_vector, "", dtype='float64')
+
         # Now we set up our various vectors
         normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
         if north_vector is None:

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -27,7 +27,7 @@
 #    SphericalCamera, StereoSphericalCamera
 from .camera import Camera
 from .transfer_function_helper import TransferFunctionHelper
-from .volume_rendering import volume_render
+from .volume_rendering import volume_render, create_scene
 from .off_axis_projection import off_axis_projection
 from .scene import Scene
 from .render_source import VolumeSource, OpaqueSource, LineSource, \

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/simple_scene_creation.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/simple_scene_creation.py
@@ -0,0 +1,17 @@
+"""
+Create a simple scene object
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import yt
+from yt.testing import \
+    fake_random_ds
+
+ds = fake_random_ds(32)
+sc = yt.create_scene(ds)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -10,55 +10,82 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource, LineSource, BoxSource
-from yt.utilities.lib.misc_utilities import lines
+from yt.visualization.volume_rendering.api import Scene, Camera, \
+    VolumeSource, LineSource, BoxSource
 from yt.data_objects.api import ImageArray
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_composite_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=True
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap="Reds")
-    sc.add_source(vr)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
 
-    # DRAW SOME LINES
-    npoints = 100
-    vertices = np.random.random([npoints, 2, 3])
-    colors = np.random.random([npoints, 4])
-    colors[:, 3] = 0.10
+class CompositeVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-    box_source = BoxSource(ds.domain_left_edge, ds.domain_right_edge, color=[1.,1.,1.,1.0])
-    sc.add_source(box_source)
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
-    box_source = BoxSource(ds.domain_left_edge + np.array([0.1,0.,0.3])*ds.domain_left_edge.uq,
-            ds.domain_right_edge-np.array([0.1,0.2,0.3])*ds.domain_left_edge.uq,
-            color=np.array([0.0, 1.0, 0.0, 0.10]))
-    sc.add_source(box_source)
+    def test_composite_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
 
-    line_source = LineSource(vertices, colors)
-    sc.add_source(line_source)
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512, 512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=True
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap="Reds")
+        sc.add_source(vr)
 
-    im = sc.render()
-    im = ImageArray(im.d)
-    im.write_png("composite.png")
-    return im
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
 
-if __name__ == "__main__":
-    im = test_composite_vr()
+        # DRAW SOME LINES
+        npoints = 100
+        vertices = np.random.random([npoints, 2, 3])
+        colors = np.random.random([npoints, 4])
+        colors[:, 3] = 0.10
+
+        box_source = BoxSource(ds.domain_left_edge, 
+                               ds.domain_right_edge, 
+                               color=[1.0, 1.0, 1.0, 1.0])
+        sc.add_source(box_source)
+
+        LE = ds.domain_left_edge + np.array([0.1,0.,0.3])*ds.domain_left_edge.uq
+        RE = ds.domain_right_edge-np.array([0.1,0.2,0.3])*ds.domain_left_edge.uq
+        color = np.array([0.0, 1.0, 0.0, 0.10])
+        box_source = BoxSource(LE, RE, color=color)
+        sc.add_source(box_source)
+
+        line_source = LineSource(vertices, colors)
+        sc.add_source(line_source)
+
+        im = sc.render()
+        im = ImageArray(im.d)
+        im.write_png("composite.png")
+        return im

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -9,102 +9,125 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import yt
+
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
 from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
-from time import time
 import numpy as np
+from unittest import TestCase
 
-field = ("gas", "density")
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_perspective_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='perspective')
-    cam.position = ds.arr(np.array([1.0, 1.0, 1.0]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_perspective_%s.png' % field[1], sigma_clip=6.0)
 
-def test_stereoperspective_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='stereo-perspective')
-    cam.resolution = [1024, 512]
-    cam.position = ds.arr(np.array([0.7, 0.7, 0.7]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_stereoperspective_%s.png' % field[1], sigma_clip=6.0)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-def test_fisheye_lens():
-    ds = fake_random_ds(32, fields = field)
-    #ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-    sc = Scene()
-    cam = Camera(dd, lens_type='fisheye')
-    cam.lens.fov = 360.0
-    cam.set_width(ds.domain_width)
-    v, c = ds.find_max('density')
-    p = ds.domain_center.copy()
-    cam.set_position(c-0.0005*ds.domain_width)
-    vol = VolumeSource(dd, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_fisheye_%s.png' % field[1], sigma_clip=6.0)
 
-def test_plane_lens():
-    ds = fake_random_ds(32, fields = field)
-    #ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-    sc = Scene()
-    cam = Camera(dd, lens_type='plane-parallel')
-    cam.set_width(ds.domain_width*1e-2)
-    v, c = ds.find_max('density')
-    p = ds.domain_center.copy()
-    vol = VolumeSource(dd, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_plane_%s.png' % field[1], sigma_clip=6.0)
+class LensTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-def test_spherical_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='spherical')
-    cam.resolution = [512, 256]
-    cam.position = ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_spherical_%s.png' % field[1], sigma_clip=6.0)
+        self.field = ("gas", "density")
+        self.ds = fake_random_ds(32, fields=self.field)
 
-def test_stereospherical_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    w = (ds.domain_width).in_units('code_length')
-    w = ds.arr(w, 'code_length')
-    sc = Scene()
-    cam = Camera(ds, lens_type='stereo-spherical')
-    cam.resolution = [1024, 256]
-    cam.position = ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_stereospherical_%s.png' % field[1], sigma_clip=6.0)
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
+    def test_perspective_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='perspective')
+        cam.position = self.ds.arr(np.array([1.0, 1.0, 1.0]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
+
+    def test_stereoperspective_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='stereo-perspective')
+        cam.resolution = [1024, 512]
+        cam.position = self.ds.arr(np.array([0.7, 0.7, 0.7]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_stereoperspective_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_fisheye_lens(self):
+        dd = self.ds.sphere(self.ds.domain_center,
+                            self.ds.domain_width[0] / 10)
+        sc = Scene()
+        cam = Camera(dd, lens_type='fisheye')
+        cam.lens.fov = 360.0
+        cam.set_width(self.ds.domain_width)
+        v, c = self.ds.find_max('density')
+        p = self.ds.domain_center.copy()
+        cam.set_position(c-0.0005*self.ds.domain_width)
+        vol = VolumeSource(dd, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_fisheye_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_plane_lens(self):
+        dd = self.ds.sphere(self.ds.domain_center,
+                            self.ds.domain_width[0] / 10)
+        sc = Scene()
+        cam = Camera(dd, lens_type='plane-parallel')
+        cam.set_width(self.ds.domain_width*1e-2)
+        v, c = self.ds.find_max('density')
+        p = self.ds.domain_center.copy()
+        vol = VolumeSource(dd, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_plane_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_spherical_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='spherical')
+        cam.resolution = [512, 256]
+        cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_spherical_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_stereospherical_lens(self):
+        w = (self.ds.domain_width).in_units('code_length')
+        w = self.ds.arr(w, 'code_length')
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='stereo-spherical')
+        cam.resolution = [1024, 256]
+        cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_stereospherical_%s.png' % self.field[1],
+                  sigma_clip=6.0)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_points.py
--- a/yt/visualization/volume_rendering/tests/test_points.py
+++ b/yt/visualization/volume_rendering/tests/test_points.py
@@ -10,44 +10,68 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource, LineSource, BoxSource, PointSource
-from yt.utilities.lib.misc_utilities import lines
-from yt.data_objects.api import ImageArray
+from yt.visualization.volume_rendering.api import Scene, Camera, \
+    VolumeSource, PointSource
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
-def test_points_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=False
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10., colormap="Reds")
-    sc.add_source(vr)
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    # DRAW SOME POINTS
-    npoints = 1000
-    vertices = np.random.random([npoints, 3])
-    colors = np.random.random([npoints, 4])
-    colors[:,3] = 0.10
 
-    points_source = PointSource(vertices, colors=colors)
-    sc.add_source(points_source)
-    im = sc.render()
-    im.write_png("points.png")
-    return im
+class PointsVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-if __name__ == "__main__":
-    im = test_points_vr()
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_points_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
+
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512,512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=False
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10., colormap="Reds")
+        sc.add_source(vr)
+
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
+
+        # DRAW SOME POINTS
+        npoints = 1000
+        vertices = np.random.random([npoints, 3])
+        colors = np.random.random([npoints, 4])
+        colors[:,3] = 0.10
+
+        points_source = PointSource(vertices, colors=colors)
+        sc.add_source(points_source)
+        im = sc.render()
+        im.write_png("points.png")
+        return im

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -12,45 +12,73 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import yt
+
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, \
-    volume_render, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import volume_render, VolumeSource
 import numpy as np
+from unittest import TestCase
 
-def test_rotation():
-    ds = fake_random_ds(64)
-    ds2 = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
-    dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
-    
-    im, sc = volume_render(dd, field=('gas', 'density'))
-    im.write_png('test.png')
-    
-    vol = sc.get_source(0)
-    tf = vol.transfer_function
-    tf.clear()
-    mi, ma = dd.quantities.extrema('density')
-    mi = np.log10(mi)
-    ma = np.log10(ma)
-    mi_bound = ((ma-mi)*(0.10))+mi
-    ma_bound = ((ma-mi)*(0.90))+mi
-    tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap='Blues_r')
-    
-    vol2 = VolumeSource(dd2, field=('gas', 'density'))
-    sc.add_source(vol2)
-    
-    tf = vol2.transfer_function
-    tf.clear()
-    mi, ma = dd2.quantities.extrema('density')
-    mi = np.log10(mi)
-    ma = np.log10(ma)
-    mi_bound = ((ma-mi)*(0.10))+mi
-    ma_bound = ((ma-mi)*(0.90))+mi
-    tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-    sc.render('test_scene.png', sigma_clip=6.0)
-    
-    nrot = 2 
-    for i in range(nrot):
-        sc.camera.pitch(2*np.pi/nrot)
-        sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+class RotationTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
+
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_rotation(self):
+        ds = fake_random_ds(64)
+        ds2 = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
+        dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
+
+        im, sc = volume_render(dd, field=('gas', 'density'))
+        im.write_png('test.png')
+
+        vol = sc.get_source(0)
+        tf = vol.transfer_function
+        tf.clear()
+        mi, ma = dd.quantities.extrema('density')
+        mi = np.log10(mi)
+        ma = np.log10(ma)
+        mi_bound = ((ma-mi)*(0.10))+mi
+        ma_bound = ((ma-mi)*(0.90))+mi
+        tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap='Blues_r')
+
+        vol2 = VolumeSource(dd2, field=('gas', 'density'))
+        sc.add_source(vol2)
+
+        tf = vol2.transfer_function
+        tf.clear()
+        mi, ma = dd2.quantities.extrema('density')
+        mi = np.log10(mi)
+        ma = np.log10(ma)
+        mi_bound = ((ma-mi)*(0.10))+mi
+        ma_bound = ((ma-mi)*(0.90))+mi
+        tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
+        sc.render('test_scene.png', sigma_clip=6.0)
+
+        nrot = 2 
+        for i in range(nrot):
+            sc.camera.pitch(2*np.pi/nrot)
+            sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_simple_vr.py
--- a/yt/visualization/volume_rendering/tests/test_simple_vr.py
+++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py
@@ -10,14 +10,41 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import os
+import tempfile
+import shutil
 import yt
 from yt.testing import fake_random_ds
+from unittest import TestCase
 
-def test_simple_vr():
-    ds = fake_random_ds(32)
-    im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
-    print(sc)
-    return im, sc
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-if __name__ == "__main__":
-    im, sc = test_simple_vr()
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+class SimpleVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
+
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_simple_vr(self):
+        ds = fake_random_ds(32)
+        im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
+        print(sc)
+        return im, sc

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -23,7 +23,8 @@
 from yt.visualization.volume_rendering.old_camera import \
     PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
     FisheyeCamera
-from yt.visualization.volume_rendering.api import ColorTransferFunction, ProjectionTransferFunction
+from yt.visualization.volume_rendering.api import ColorTransferFunction, \
+    ProjectionTransferFunction
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase
 

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -10,98 +10,121 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
 from yt.visualization.volume_rendering.api import \
     Scene, Camera, ZBuffer, \
     VolumeSource, OpaqueSource
 from yt.testing import assert_almost_equal
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_composite_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=True
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds")
-    sc.add_source(vr)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
 
-    # Create Arbitrary Z-buffer
-    empty = cam.lens.new_image(cam)
-    z = np.empty(empty.shape[:2], dtype='float64')
-    # Let's put a blue plane right through the center
-    z[:] = cam.width[2] / 2.
-    empty[:,:,2] = 1.0 # Set blue to 1's
-    empty[:,:,3] = 1.0 # Set alpha to 1's
-    zbuffer = ZBuffer(empty, z)
-    zsource = OpaqueSource()
-    zsource.set_zbuffer(zbuffer)
-    sc.add_source(zsource)
+class ZBufferTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-    im = sc.render()
-    im.write_png("composite.png")
-    return im
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
 
-def test_nonrectangular_add():
-    rgba1 = np.ones((64, 1, 4))
-    z1 = np.expand_dims(np.arange(64.), 1)
+    def test_composite_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
 
-    rgba2 = np.zeros((64, 1, 4))
-    z2 = np.expand_dims(np.arange(63., -1., -1.), 1)
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512,512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=True
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds")
+        sc.add_source(vr)
 
-    exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
-    exact_rgba = np.expand_dims(exact_rgba, 1)
-    exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
 
-    exact_z = np.concatenate((np.arange(32.), np.arange(31.,-1.,-1.)))
-    exact_z = np.expand_dims(exact_z, 1)
+        # Create Arbitrary Z-buffer
+        empty = cam.lens.new_image(cam)
+        z = np.empty(empty.shape[:2], dtype='float64')
+        # Let's put a blue plane right through the center
+        z[:] = cam.width[2] / 2.
+        empty[:,:,2] = 1.0 # Set blue to 1's
+        empty[:,:,3] = 1.0 # Set alpha to 1's
+        zbuffer = ZBuffer(empty, z)
+        zsource = OpaqueSource()
+        zsource.set_zbuffer(zbuffer)
+        sc.add_source(zsource)
 
-    buff1 = ZBuffer(rgba1, z1)
-    buff2 = ZBuffer(rgba2, z2)
+        im = sc.render()
+        im.write_png("composite.png")
+        return im
 
-    buff = buff1 + buff2
+    def test_nonrectangular_add(self):
+        rgba1 = np.ones((64, 1, 4))
+        z1 = np.expand_dims(np.arange(64.), 1)
 
-    assert_almost_equal(buff.rgba, exact_rgba)
-    assert_almost_equal(buff.z, exact_z)
+        rgba2 = np.zeros((64, 1, 4))
+        z2 = np.expand_dims(np.arange(63., -1., -1.), 1)
 
+        exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
+        exact_rgba = np.expand_dims(exact_rgba, 1)
+        exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))
+        
+        exact_z = np.concatenate((np.arange(32.), np.arange(31.,-1.,-1.)))
+        exact_z = np.expand_dims(exact_z, 1)
+        
+        buff1 = ZBuffer(rgba1, z1)
+        buff2 = ZBuffer(rgba2, z2)
+        
+        buff = buff1 + buff2
+        
+        assert_almost_equal(buff.rgba, exact_rgba)
+        assert_almost_equal(buff.z, exact_z)
 
-def test_rectangular_add():
-    rgba1 = np.ones((8, 8, 4))
-    z1 = np.arange(64.)
-    z1 = z1.reshape((8, 8))
-    buff1 = ZBuffer(rgba1, z1)
+    def test_rectangular_add(self):
+        rgba1 = np.ones((8, 8, 4))
+        z1 = np.arange(64.)
+        z1 = z1.reshape((8, 8))
+        buff1 = ZBuffer(rgba1, z1)
 
-    rgba2 = np.zeros((8, 8, 4))
-    z2 = np.arange(63., -1., -1.)
-    z2 = z2.reshape((8, 8))
-    buff2 = ZBuffer(rgba2, z2)
+        rgba2 = np.zeros((8, 8, 4))
+        z2 = np.arange(63., -1., -1.)
+        z2 = z2.reshape((8, 8))
+        buff2 = ZBuffer(rgba2, z2)
 
-    buff = buff1 + buff2
+        buff = buff1 + buff2
 
-    exact_rgba = np.empty((8, 8, 4), dtype=np.float64)
-    exact_rgba[0:4,0:8,:] = 1.0
-    exact_rgba[4:8,0:8,:] = 0.0
+        exact_rgba = np.empty((8, 8, 4), dtype=np.float64)
+        exact_rgba[0:4,0:8,:] = 1.0
+        exact_rgba[4:8,0:8,:] = 0.0
+        
+        exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
+        exact_z = np.expand_dims(exact_z, 1)
+        exact_z = exact_z.reshape(8, 8)
 
-    exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
-    exact_z = np.expand_dims(exact_z, 1)
-    exact_z = exact_z.reshape(8, 8)
-
-    assert_almost_equal(buff.rgba, exact_rgba)
-    assert_almost_equal(buff.z, exact_z)
-
-if __name__ == "__main__":
-    im = test_composite_vr()
-    test_nonrectangular_add()
-    test_rectangular_add()
+        assert_almost_equal(buff.rgba, exact_rgba)
+        assert_almost_equal(buff.z, exact_z)

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -139,6 +139,8 @@
         else:
             mi, ma = self.bounds
         self.tf.add_layers(10, colormap='spectral')
+        factor = self.tf.funcs[-1].y.size / self.tf.funcs[-1].y.sum()
+        self.tf.funcs[-1].y *= 2*factor
 
     def plot(self, fn=None, profile_field=None, profile_weight=None):
         """

diff -r 91632e8cbe0242c494c784b5d14d90d30dcd20ce -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -17,9 +17,65 @@
 from .render_source import VolumeSource
 from .utils import data_source_or_all
 from yt.funcs import mylog
+from yt.utilities.exceptions import YTSceneFieldNotFound
 
 
-def volume_render(data_source, field=None, fname=None, sigma_clip=None,
+def create_scene(data_source, field=None, lens_type='plane-parallel'):
+    r""" Set up a scene object with sensible defaults for use in volume 
+    rendering.
+
+    A helper function that creates a default camera view, transfer
+    function, and image size. Using these, it returns an instance 
+    of the Scene class, allowing one to further modify their rendering.
+
+    This function is the same as volume_render() except it doesn't render
+    the image.
+
+    Parameters
+    ----------
+    data_source : :class:`yt.data_objects.data_containers.AMR3DData`
+        This is the source to be rendered, which can be any arbitrary yt
+        3D object
+    field: string, tuple, optional
+        The field to be rendered. If unspecified, this will use the 
+        default_field for your dataset's frontend--usually ('gas', 'density').
+        A default transfer function will be built that spans the range of 
+        values for that given field, and the field will be logarithmically 
+        scaled if the field_info object specifies as such.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
+
+    Returns
+    -------
+    sc: Scene
+        A :class:`yt.visualization.volume_rendering.scene.Scene` object
+        that was constructed during the rendering. Useful for further
+        modifications, rotations, etc.
+
+    Example:
+    >>> import yt
+    >>> ds = yt.load("Enzo_64/DD0046/DD0046")
+    >>> sc = yt.create_scene(ds)
+    """
+    data_source = data_source_or_all(data_source)
+    sc = Scene()
+    if field is None:
+        field = data_source.ds.default_field
+        if field not in data_source.ds.derived_field_list:
+            raise YTSceneFieldNotFound("""Could not find field '%s' in %s. 
+                  Please specify a field in create_scene()""" % \
+                  (field, data_source.ds))
+        mylog.info('Setting default field to %s' % field.__repr__())
+
+    vol = VolumeSource(data_source, field=field)
+    sc.add_source(vol)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    return sc
+
+def volume_render(data_source, field=None, fname=None, sigma_clip=None
                   lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
@@ -34,18 +90,19 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. By default, this will use the first
-        field in data_source.ds.field_list.  A default transfer function
-        will be built that spans the range of values for that given field,
-        and the field will be logarithmically scaled if the field_info
-        object specifies as such.
+        The field to be rendered. If unspecified, this will use the 
+        default_field for your dataset's frontend--usually ('gas', 'density').
+        A default transfer function will be built that spans the range of 
+        values for that given field, and the field will be logarithmically 
+        scaled if the field_info object specifies as such.
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename
         in png format.
-    sigma_clip: float
-        The resulting image will be clipped before saving, using a threshold
-        based on `sigma_clip` multiplied by the standard deviation of the pixel
-        values. Recommended values are between 2 and 6. Default: None
+    sigma_clip: float, optional
+        If specified, the resulting image will be clipped before saving,
+        using a threshold based on sigma_clip multiplied by the standard
+        deviation of the pixel values. Recommended values are between 2 and 6.
+        Default: None
     lens_type: string, optional
         This specifies the type of lens to use for rendering. Current
         options are 'plane-parallel', 'perspective', and 'fisheye'. See
@@ -64,29 +121,8 @@
     Example:
     >>> import yt
     >>> ds = yt.load("Enzo_64/DD0046/DD0046")
-    >>> im, sc = yt.volume_render(ds, fname='test.png')
+    >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
-    data_source = data_source_or_all(data_source)
-    sc = Scene()
-    if field is None:
-        data_source.ds.index
-        for ftype, f in sorted(data_source.ds.field_list):
-            if ftype == "all":
-                continue
-            if f == 'Density':
-                field = (ftype, f)
-            elif f == 'density':
-                field = (ftype, f)
-            elif ftype != 'index' and 'particle' not in f:
-                field = (ftype, f)
-                break
-        else:
-            raise RuntimeError("Could not find default field." +
-                               " Please set explicitly in volume_render call")
-        mylog.info('Setting default field to %s' % field.__repr__())
-
-    vol = VolumeSource(data_source, field=field)
-    sc.add_source(vol)
-    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    sc = create_scene(data_source, field=field, lens_type=lens_type)
     im = sc.render(fname=fname, sigma_clip=sigma_clip)
     return im, sc


https://bitbucket.org/yt_analysis/yt/commits/8b4e7658b5ec/
Changeset:   8b4e7658b5ec
Branch:      yt
User:        xarthisius
Date:        2015-10-15 18:25:58+00:00
Summary:     pep8 fixes, syntax error fix
Affected #:  1 file

diff -r 112628baf1dba54179d9995a2a3bf5cb5b58a920 -r 8b4e7658b5ec0c0179429542a6147715c39768fc yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -21,11 +21,11 @@
 
 
 def create_scene(data_source, field=None, lens_type='plane-parallel'):
-    r""" Set up a scene object with sensible defaults for use in volume 
+    r""" Set up a scene object with sensible defaults for use in volume
     rendering.
 
     A helper function that creates a default camera view, transfer
-    function, and image size. Using these, it returns an instance 
+    function, and image size. Using these, it returns an instance
     of the Scene class, allowing one to further modify their rendering.
 
     This function is the same as volume_render() except it doesn't render
@@ -37,10 +37,10 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
     lens_type: string, optional
         This specifies the type of lens to use for rendering. Current
@@ -65,9 +65,8 @@
     if field is None:
         field = data_source.ds.default_field
         if field not in data_source.ds.derived_field_list:
-            raise YTSceneFieldNotFound("""Could not find field '%s' in %s. 
-                  Please specify a field in create_scene()""" % \
-                  (field, data_source.ds))
+            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
+                  Please specify a field in create_scene()""" % (field, data_source.ds))
         mylog.info('Setting default field to %s' % field.__repr__())
 
     vol = VolumeSource(data_source, field=field)
@@ -75,7 +74,8 @@
     sc.camera = Camera(data_source=data_source, lens_type=lens_type)
     return sc
 
-def volume_render(data_source, field=None, fname=None, sigma_clip=None
+
+def volume_render(data_source, field=None, fname=None, sigma_clip=None,
                   lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
@@ -90,10 +90,10 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename


https://bitbucket.org/yt_analysis/yt/commits/f9bb10e58798/
Changeset:   f9bb10e58798
Branch:      yt
User:        MatthewTurk
Date:        2015-10-16 00:38:52+00:00
Summary:     Merging with Kacper
Affected #:  25 files

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -3,7 +3,7 @@
 
 # Follow the simple_volume_rendering cookbook for the first part of this.
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
-im, sc = yt.volume_render(ds)
+sc = yt.create_scene(ds)
 cam = sc.camera
 cam.resolution = (512, 512)
 cam.set_width(ds.domain_width/20.0)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -196,10 +196,28 @@
 
 In this recipe, we move a camera through a domain and take multiple volume
 rendering snapshots.
-See :ref:`volume_rendering` for more information.
+See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py
 
+Volume Rendering with Custom Camera
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use customized camera properties. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_camera_volume_rendering.py
+
+Volume Rendering with a Custom Transfer Function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we modify the :ref:`cookbook-simple_volume_rendering` recipe to
+use a customized transfer function. See :ref:`volume_rendering` for more
+information.
+
+.. yt_cookbook:: custom_transfer_function_volume_rendering.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -229,7 +247,7 @@
 This recipe demonstrates how to make semi-opaque volume renderings, but also
 how to step through and try different things to identify the type of volume
 rendering you want.
-See :ref:`volume_rendering` for more information.
+See :ref:`opaque_rendering` for more information.
 
 .. yt_cookbook:: opaque_rendering.py
 
@@ -244,18 +262,20 @@
 
 .. yt_cookbook:: amrkdtree_downsampling.py
 
+.. _cookbook-volume_rendering_annotations:
+
 Volume Rendering with Bounding Box and Overlaid Grids
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to overplot a bounding box on a volume rendering
 as well as overplotting grids representing the level of refinement achieved
 in different regions of the code.
-See :ref:`volume_rendering` for more information.
+See :ref:`volume_rendering_annotations` for more information.
 
 .. yt_cookbook:: rendering_with_box_and_grids.py
 
 Volume Rendering with Annotation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 This recipe demonstrates how to write the simulation time, show an
 axis triad indicating the direction of the coordinate system, and show

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/custom_camera_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -0,0 +1,21 @@
+import yt
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Now increase the resolution
+sc.camera.resolution = (1024, 1024)
+
+# Set the camera focus to a position that is offset from the center of
+# the domain
+sc.camera.focus = ds.arr([0.3, 0.3, 0.3], 'unitary')
+
+# Move the camera position to the other side of the dataset
+sc.camera.position = ds.arr([0, 0, 0], 'unitary')
+
+# save to disk with a custom filename and apply sigma clipping to eliminate
+# very bright pixels, producing an image with better contrast.
+sc.render(fname='custom.png', sigma_clip=4)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- /dev/null
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -0,0 +1,24 @@
+import yt
+import numpy as np
+
+# Load the dataset
+ds = yt.load("Enzo_64/DD0043/data0043")
+
+# Create a volume rendering
+sc = yt.create_scene(ds, field=('gas', 'density'))
+
+# Modify the transfer function
+
+# First get the render source, in this case the entire domain, with field ('gas','density')
+render_source = sc.get_source(0)
+
+# Clear the transfer function
+render_source.transfer_function.clear()
+
+# Map a range of density values (in log space) to the Reds_r colormap
+render_source.transfer_function.map_to_colormap(
+    np.log10(ds.quan(5.0e-31, 'g/cm**3')),
+    np.log10(ds.quan(1.0e-29, 'g/cm**3')),
+    scale=30.0, colormap='RdBu_r')
+
+im = sc.render(fname='new_tf.png', sigma_clip=None)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -4,7 +4,7 @@
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-im, sc = yt.volume_render(ds, ('gas','density'))
+sc = yt.create_scene(ds, ('gas','density'))
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -1,28 +1,10 @@
 import yt
-import numpy as np
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
 
 # Create a volume rendering, which will determine data bounds, use the first
 # acceptable field in the field_list, and set up a default transfer function.
-#im, sc = yt.volume_render(ds, fname="%s_volume_rendered.png" % ds, sigma_clip=8.0)
 
-# You can easily specify a different field
-im, sc = yt.volume_render(ds, field=('gas','density'), fname="%s_density_volume_rendered.png" % ds, sigma_clip=8.0)
-
-# Now increase the resolution
-sc.camera.resolution = (512, 512)
-im = sc.render(fname='big.png', sigma_clip=8.0)
-
-# Now modify the transfer function
-# First get the render source, in this case the entire domain, with field ('gas','density')
-render_source = sc.get_source(0)
-# Clear the transfer function
-render_source.transfer_function.clear()
-# Map a range of density values (in log space) to the Reds_r colormap
-render_source.transfer_function.map_to_colormap(
-        np.log10(ds.quan(5.0e-31, 'g/cm**3')),
-        np.log10(ds.quan(1.0e-29, 'g/cm**3')),
-        scale=30.0, colormap='RdBu_r')
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+# This will save a file named 'data0043_density_volume_rendered.png' to disk.
+im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -592,6 +592,7 @@
    :toctree: generated/
 
    ~yt.visualization.volume_rendering.volume_rendering.volume_render
+   ~yt.visualization.volume_rendering.volume_rendering.create_scene
    ~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection
    ~yt.visualization.volume_rendering.scene.Scene
    ~yt.visualization.volume_rendering.camera.Camera

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -60,6 +60,7 @@
 Here is a working example for rendering the IsolatedGalaxy dataset.
 
 .. python-script::
+
   import yt
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
@@ -70,10 +71,10 @@
   # sc is an instance of a Scene object, which allows you to further refine
   # your renderings.
 
-When the volume_render function is called, first an empty
-:class:`~yt.visualization.volume_rendering.scene.Scene` object is
-created. Next, a 
-:class:`~yt.visualization.volume_rendering.api.VolumeSource`
+When the :func:`~yt.visualization.volume_rendering.volume_render` function 
+is called, first an empty 
+:class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
+Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
 into a tree structure to provide back-to-front rendering of fixed-resolution
 blocks of data.  (If the volume elements are grids, this uses a
@@ -106,6 +107,21 @@
 In this example, we don't add on any non-volume rendering sources; however, if
 such sources are added, they will be integrated as well.
 
+Alternatively, if you don't want to immediately generate an image of your
+volume rendering, and you just want access to the default scene object, 
+you can skip this expensive operation by just running the
+:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+
+.. python-script::
+
+  import yt
+  # load the data
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  # volume render the 'density' field 
+  sc = yt.create_scene(ds, 'density')
+
+
 Modifying the Scene
 -------------------
 
@@ -233,6 +249,8 @@
 vertices.  For instance, lines can be used to draw outlines of regions or
 continents.
 
+.. _volume_rendering_annotations:
+
 Annotations
 +++++++++++
 
@@ -244,6 +262,9 @@
 annotations will operate in data space and can draw boxes, grid information,
 and also provide a vector orientation within the image.
 
+For example scripts using these features, 
+see :ref:`cookbook-volume_rendering_annotations`.
+
 Care and Usage of the Camera
 ----------------------------
 
@@ -464,6 +485,8 @@
 
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
+.. _opaque_rendering:
+
 Opacity
 -------
 

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,7 +154,7 @@
     ParticleProjectionPlot, ParticleImageBuffer, ParticlePlot
 
 from yt.visualization.volume_rendering.api import \
-    volume_render, ColorTransferFunction, TransferFunction, \
+    volume_render, create_scene, ColorTransferFunction, TransferFunction, \
     off_axis_projection
 import yt.visualization.volume_rendering.api as volume_rendering
 #    TransferFunctionHelper, MultiVariateTransferFunction

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -112,6 +112,7 @@
 class Dataset(object):
 
     default_fluid_type = "gas"
+    default_field = ("gas", "density")
     fluid_types = ("gas", "deposit", "index")
     particle_types = ("io",) # By default we have an 'all'
     particle_types_raw = ("io",)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -67,6 +67,9 @@
     def __str__(self):
         return "Could not find field '%s' in %s." % (self.fname, self.ds)
 
+class YTSceneFieldNotFound(YTException):
+    pass
+
 class YTCouldNotGenerateField(YTFieldNotFound):
     def __str__(self):
         return "Could field '%s' in %s could not be generated." % (self.fname, self.ds)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -41,12 +41,6 @@
 
         """
 
-        # Make sure vectors are unitless
-        if north_vector is not None:
-            north_vector = YTArray(north_vector, "", dtype='float64')
-        if normal_vector is not None:
-            normal_vector = YTArray(normal_vector, "", dtype='float64')
-
         self.steady_north = steady_north
         if not np.dot(normal_vector, normal_vector) > 0:
             mylog.error("Normal vector is null")
@@ -63,6 +57,13 @@
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         mylog.debug('Setting normalized vectors' + str(normal_vector)
                     + str(north_vector))
+
+        # Make sure vectors are unitless
+        if north_vector is not None:
+            north_vector = YTArray(north_vector, "", dtype='float64')
+        if normal_vector is not None:
+            normal_vector = YTArray(normal_vector, "", dtype='float64')
+
         # Now we set up our various vectors
         normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector))
         if north_vector is None:

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -27,7 +27,7 @@
 #    SphericalCamera, StereoSphericalCamera
 from .camera import Camera
 from .transfer_function_helper import TransferFunctionHelper
-from .volume_rendering import volume_render
+from .volume_rendering import volume_render, create_scene
 from .off_axis_projection import off_axis_projection
 from .scene import Scene
 from .render_source import VolumeSource, OpaqueSource, LineSource, \

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/simple_scene_creation.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/simple_scene_creation.py
@@ -0,0 +1,17 @@
+"""
+Create a simple scene object
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import yt
+from yt.testing import \
+    fake_random_ds
+
+ds = fake_random_ds(32)
+sc = yt.create_scene(ds)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -10,55 +10,82 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource, LineSource, BoxSource
-from yt.utilities.lib.misc_utilities import lines
+from yt.visualization.volume_rendering.api import Scene, Camera, \
+    VolumeSource, LineSource, BoxSource
 from yt.data_objects.api import ImageArray
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_composite_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=True
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap="Reds")
-    sc.add_source(vr)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
 
-    # DRAW SOME LINES
-    npoints = 100
-    vertices = np.random.random([npoints, 2, 3])
-    colors = np.random.random([npoints, 4])
-    colors[:, 3] = 0.10
+class CompositeVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-    box_source = BoxSource(ds.domain_left_edge, ds.domain_right_edge, color=[1.,1.,1.,1.0])
-    sc.add_source(box_source)
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
-    box_source = BoxSource(ds.domain_left_edge + np.array([0.1,0.,0.3])*ds.domain_left_edge.uq,
-            ds.domain_right_edge-np.array([0.1,0.2,0.3])*ds.domain_left_edge.uq,
-            color=np.array([0.0, 1.0, 0.0, 0.10]))
-    sc.add_source(box_source)
+    def test_composite_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
 
-    line_source = LineSource(vertices, colors)
-    sc.add_source(line_source)
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512, 512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=True
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap="Reds")
+        sc.add_source(vr)
 
-    im = sc.render()
-    im = ImageArray(im.d)
-    im.write_png("composite.png")
-    return im
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
 
-if __name__ == "__main__":
-    im = test_composite_vr()
+        # DRAW SOME LINES
+        npoints = 100
+        vertices = np.random.random([npoints, 2, 3])
+        colors = np.random.random([npoints, 4])
+        colors[:, 3] = 0.10
+
+        box_source = BoxSource(ds.domain_left_edge, 
+                               ds.domain_right_edge, 
+                               color=[1.0, 1.0, 1.0, 1.0])
+        sc.add_source(box_source)
+
+        LE = ds.domain_left_edge + np.array([0.1,0.,0.3])*ds.domain_left_edge.uq
+        RE = ds.domain_right_edge-np.array([0.1,0.2,0.3])*ds.domain_left_edge.uq
+        color = np.array([0.0, 1.0, 0.0, 0.10])
+        box_source = BoxSource(LE, RE, color=color)
+        sc.add_source(box_source)
+
+        line_source = LineSource(vertices, colors)
+        sc.add_source(line_source)
+
+        im = sc.render()
+        im = ImageArray(im.d)
+        im.write_png("composite.png")
+        return im

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -9,102 +9,125 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import yt
+
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
 from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
-from time import time
 import numpy as np
+from unittest import TestCase
 
-field = ("gas", "density")
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_perspective_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='perspective')
-    cam.position = ds.arr(np.array([1.0, 1.0, 1.0]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_perspective_%s.png' % field[1], sigma_clip=6.0)
 
-def test_stereoperspective_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='stereo-perspective')
-    cam.resolution = [1024, 512]
-    cam.position = ds.arr(np.array([0.7, 0.7, 0.7]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_stereoperspective_%s.png' % field[1], sigma_clip=6.0)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-def test_fisheye_lens():
-    ds = fake_random_ds(32, fields = field)
-    #ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-    sc = Scene()
-    cam = Camera(dd, lens_type='fisheye')
-    cam.lens.fov = 360.0
-    cam.set_width(ds.domain_width)
-    v, c = ds.find_max('density')
-    p = ds.domain_center.copy()
-    cam.set_position(c-0.0005*ds.domain_width)
-    vol = VolumeSource(dd, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_fisheye_%s.png' % field[1], sigma_clip=6.0)
 
-def test_plane_lens():
-    ds = fake_random_ds(32, fields = field)
-    #ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-    sc = Scene()
-    cam = Camera(dd, lens_type='plane-parallel')
-    cam.set_width(ds.domain_width*1e-2)
-    v, c = ds.find_max('density')
-    p = ds.domain_center.copy()
-    vol = VolumeSource(dd, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_plane_%s.png' % field[1], sigma_clip=6.0)
+class LensTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-def test_spherical_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    sc = Scene()
-    cam = Camera(ds, lens_type='spherical')
-    cam.resolution = [512, 256]
-    cam.position = ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_spherical_%s.png' % field[1], sigma_clip=6.0)
+        self.field = ("gas", "density")
+        self.ds = fake_random_ds(32, fields=self.field)
 
-def test_stereospherical_lens():
-    #ds = fake_random_ds(32, fields = field)
-    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-    w = (ds.domain_width).in_units('code_length')
-    w = ds.arr(w, 'code_length')
-    sc = Scene()
-    cam = Camera(ds, lens_type='stereo-spherical')
-    cam.resolution = [1024, 256]
-    cam.position = ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
-    vol = VolumeSource(ds, field=field)
-    tf = vol.transfer_function
-    tf.grey_opacity = True
-    sc.camera = cam
-    sc.add_source(vol)
-    sc.render('test_stereospherical_%s.png' % field[1], sigma_clip=6.0)
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
+    def test_perspective_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='perspective')
+        cam.position = self.ds.arr(np.array([1.0, 1.0, 1.0]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
+
+    def test_stereoperspective_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='stereo-perspective')
+        cam.resolution = [1024, 512]
+        cam.position = self.ds.arr(np.array([0.7, 0.7, 0.7]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_stereoperspective_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_fisheye_lens(self):
+        dd = self.ds.sphere(self.ds.domain_center,
+                            self.ds.domain_width[0] / 10)
+        sc = Scene()
+        cam = Camera(dd, lens_type='fisheye')
+        cam.lens.fov = 360.0
+        cam.set_width(self.ds.domain_width)
+        v, c = self.ds.find_max('density')
+        p = self.ds.domain_center.copy()
+        cam.set_position(c-0.0005*self.ds.domain_width)
+        vol = VolumeSource(dd, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_fisheye_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_plane_lens(self):
+        dd = self.ds.sphere(self.ds.domain_center,
+                            self.ds.domain_width[0] / 10)
+        sc = Scene()
+        cam = Camera(dd, lens_type='plane-parallel')
+        cam.set_width(self.ds.domain_width*1e-2)
+        v, c = self.ds.find_max('density')
+        p = self.ds.domain_center.copy()
+        vol = VolumeSource(dd, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_plane_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_spherical_lens(self):
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='spherical')
+        cam.resolution = [512, 256]
+        cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_spherical_%s.png' % self.field[1],
+                  sigma_clip=6.0)
+
+    def test_stereospherical_lens(self):
+        w = (self.ds.domain_width).in_units('code_length')
+        w = self.ds.arr(w, 'code_length')
+        sc = Scene()
+        cam = Camera(self.ds, lens_type='stereo-spherical')
+        cam.resolution = [1024, 256]
+        cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
+        vol = VolumeSource(self.ds, field=self.field)
+        tf = vol.transfer_function
+        tf.grey_opacity = True
+        sc.camera = cam
+        sc.add_source(vol)
+        sc.render('test_stereospherical_%s.png' % self.field[1],
+                  sigma_clip=6.0)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_points.py
--- a/yt/visualization/volume_rendering/tests/test_points.py
+++ b/yt/visualization/volume_rendering/tests/test_points.py
@@ -10,44 +10,68 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import yt
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource, LineSource, BoxSource, PointSource
-from yt.utilities.lib.misc_utilities import lines
-from yt.data_objects.api import ImageArray
+from yt.visualization.volume_rendering.api import Scene, Camera, \
+    VolumeSource, PointSource
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
-def test_points_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=False
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10., colormap="Reds")
-    sc.add_source(vr)
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    # DRAW SOME POINTS
-    npoints = 1000
-    vertices = np.random.random([npoints, 3])
-    colors = np.random.random([npoints, 4])
-    colors[:,3] = 0.10
 
-    points_source = PointSource(vertices, colors=colors)
-    sc.add_source(points_source)
-    im = sc.render()
-    im.write_png("points.png")
-    return im
+class PointsVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-if __name__ == "__main__":
-    im = test_points_vr()
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_points_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
+
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512,512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=False
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10., colormap="Reds")
+        sc.add_source(vr)
+
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
+
+        # DRAW SOME POINTS
+        npoints = 1000
+        vertices = np.random.random([npoints, 3])
+        colors = np.random.random([npoints, 4])
+        colors[:,3] = 0.10
+
+        points_source = PointSource(vertices, colors=colors)
+        sc.add_source(points_source)
+        im = sc.render()
+        im.write_png("points.png")
+        return im

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -12,45 +12,73 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
-import yt
+
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, \
-    volume_render, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import volume_render, VolumeSource
 import numpy as np
+from unittest import TestCase
 
-def test_rotation():
-    ds = fake_random_ds(64)
-    ds2 = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
-    dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
-    
-    im, sc = volume_render(dd, field=('gas', 'density'))
-    im.write_png('test.png')
-    
-    vol = sc.get_source(0)
-    tf = vol.transfer_function
-    tf.clear()
-    mi, ma = dd.quantities.extrema('density')
-    mi = np.log10(mi)
-    ma = np.log10(ma)
-    mi_bound = ((ma-mi)*(0.10))+mi
-    ma_bound = ((ma-mi)*(0.90))+mi
-    tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap='Blues_r')
-    
-    vol2 = VolumeSource(dd2, field=('gas', 'density'))
-    sc.add_source(vol2)
-    
-    tf = vol2.transfer_function
-    tf.clear()
-    mi, ma = dd2.quantities.extrema('density')
-    mi = np.log10(mi)
-    ma = np.log10(ma)
-    mi_bound = ((ma-mi)*(0.10))+mi
-    ma_bound = ((ma-mi)*(0.90))+mi
-    tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-    sc.render('test_scene.png', sigma_clip=6.0)
-    
-    nrot = 2 
-    for i in range(nrot):
-        sc.camera.pitch(2*np.pi/nrot)
-        sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+class RotationTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
+
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_rotation(self):
+        ds = fake_random_ds(64)
+        ds2 = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 2)
+        dd2 = ds2.sphere(ds2.domain_center, ds2.domain_width[0] / 2)
+
+        im, sc = volume_render(dd, field=('gas', 'density'))
+        im.write_png('test.png')
+
+        vol = sc.get_source(0)
+        tf = vol.transfer_function
+        tf.clear()
+        mi, ma = dd.quantities.extrema('density')
+        mi = np.log10(mi)
+        ma = np.log10(ma)
+        mi_bound = ((ma-mi)*(0.10))+mi
+        ma_bound = ((ma-mi)*(0.90))+mi
+        tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap='Blues_r')
+
+        vol2 = VolumeSource(dd2, field=('gas', 'density'))
+        sc.add_source(vol2)
+
+        tf = vol2.transfer_function
+        tf.clear()
+        mi, ma = dd2.quantities.extrema('density')
+        mi = np.log10(mi)
+        ma = np.log10(ma)
+        mi_bound = ((ma-mi)*(0.10))+mi
+        ma_bound = ((ma-mi)*(0.90))+mi
+        tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
+        sc.render('test_scene.png', sigma_clip=6.0)
+
+        nrot = 2 
+        for i in range(nrot):
+            sc.camera.pitch(2*np.pi/nrot)
+            sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_simple_vr.py
--- a/yt/visualization/volume_rendering/tests/test_simple_vr.py
+++ b/yt/visualization/volume_rendering/tests/test_simple_vr.py
@@ -10,14 +10,41 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import os
+import tempfile
+import shutil
 import yt
 from yt.testing import fake_random_ds
+from unittest import TestCase
 
-def test_simple_vr():
-    ds = fake_random_ds(32)
-    im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
-    print(sc)
-    return im, sc
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-if __name__ == "__main__":
-    im, sc = test_simple_vr()
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+class SimpleVRTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
+
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_simple_vr(self):
+        ds = fake_random_ds(32)
+        im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
+        print(sc)
+        return im, sc

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- a/yt/visualization/volume_rendering/tests/test_vr_cameras.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -23,7 +23,8 @@
 from yt.visualization.volume_rendering.old_camera import \
     PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera, \
     FisheyeCamera
-from yt.visualization.volume_rendering.api import ColorTransferFunction, ProjectionTransferFunction
+from yt.visualization.volume_rendering.api import ColorTransferFunction, \
+    ProjectionTransferFunction
 from yt.visualization.tests.test_plotwindow import assert_fname
 from unittest import TestCase
 

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -10,98 +10,121 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import os
+import tempfile
+import shutil
 from yt.testing import fake_random_ds
 from yt.visualization.volume_rendering.api import \
     Scene, Camera, ZBuffer, \
     VolumeSource, OpaqueSource
 from yt.testing import assert_almost_equal
 import numpy as np
+from unittest import TestCase
+
 np.random.seed(0)
 
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
 
-def test_composite_vr():
-    ds = fake_random_ds(64)
-    dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
-    ds.field_info[ds.field_list[0]].take_log=False
 
-    sc = Scene()
-    cam = Camera(ds)
-    cam.resolution = (512,512)
-    sc.camera = cam
-    vr = VolumeSource(dd, field=ds.field_list[0])
-    vr.transfer_function.clear()
-    vr.transfer_function.grey_opacity=True
-    vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds")
-    sc.add_source(vr)
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
 
-    cam.set_width( 1.8*ds.domain_width )
-    cam.lens.setup_box_properties(cam)
 
-    # Create Arbitrary Z-buffer
-    empty = cam.lens.new_image(cam)
-    z = np.empty(empty.shape[:2], dtype='float64')
-    # Let's put a blue plane right through the center
-    z[:] = cam.width[2] / 2.
-    empty[:,:,2] = 1.0 # Set blue to 1's
-    empty[:,:,3] = 1.0 # Set alpha to 1's
-    zbuffer = ZBuffer(empty, z)
-    zsource = OpaqueSource()
-    zsource.set_zbuffer(zbuffer)
-    sc.add_source(zsource)
+class ZBufferTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in safe place instead of yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
 
-    im = sc.render()
-    im.write_png("composite.png")
-    return im
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
 
 
-def test_nonrectangular_add():
-    rgba1 = np.ones((64, 1, 4))
-    z1 = np.expand_dims(np.arange(64.), 1)
+    def test_composite_vr(self):
+        ds = fake_random_ds(64)
+        dd = ds.sphere(ds.domain_center, 0.45*ds.domain_width[0])
+        ds.field_info[ds.field_list[0]].take_log=False
 
-    rgba2 = np.zeros((64, 1, 4))
-    z2 = np.expand_dims(np.arange(63., -1., -1.), 1)
+        sc = Scene()
+        cam = Camera(ds)
+        cam.resolution = (512,512)
+        sc.camera = cam
+        vr = VolumeSource(dd, field=ds.field_list[0])
+        vr.transfer_function.clear()
+        vr.transfer_function.grey_opacity=True
+        vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds")
+        sc.add_source(vr)
 
-    exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
-    exact_rgba = np.expand_dims(exact_rgba, 1)
-    exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))
+        cam.set_width( 1.8*ds.domain_width )
+        cam.lens.setup_box_properties(cam)
 
-    exact_z = np.concatenate((np.arange(32.), np.arange(31.,-1.,-1.)))
-    exact_z = np.expand_dims(exact_z, 1)
+        # Create Arbitrary Z-buffer
+        empty = cam.lens.new_image(cam)
+        z = np.empty(empty.shape[:2], dtype='float64')
+        # Let's put a blue plane right through the center
+        z[:] = cam.width[2] / 2.
+        empty[:,:,2] = 1.0 # Set blue to 1's
+        empty[:,:,3] = 1.0 # Set alpha to 1's
+        zbuffer = ZBuffer(empty, z)
+        zsource = OpaqueSource()
+        zsource.set_zbuffer(zbuffer)
+        sc.add_source(zsource)
 
-    buff1 = ZBuffer(rgba1, z1)
-    buff2 = ZBuffer(rgba2, z2)
+        im = sc.render()
+        im.write_png("composite.png")
+        return im
 
-    buff = buff1 + buff2
+    def test_nonrectangular_add(self):
+        rgba1 = np.ones((64, 1, 4))
+        z1 = np.expand_dims(np.arange(64.), 1)
 
-    assert_almost_equal(buff.rgba, exact_rgba)
-    assert_almost_equal(buff.z, exact_z)
+        rgba2 = np.zeros((64, 1, 4))
+        z2 = np.expand_dims(np.arange(63., -1., -1.), 1)
 
+        exact_rgba = np.concatenate((np.ones(32), np.zeros(32)))
+        exact_rgba = np.expand_dims(exact_rgba, 1)
+        exact_rgba = np.dstack((exact_rgba, exact_rgba, exact_rgba, exact_rgba))
+        
+        exact_z = np.concatenate((np.arange(32.), np.arange(31.,-1.,-1.)))
+        exact_z = np.expand_dims(exact_z, 1)
+        
+        buff1 = ZBuffer(rgba1, z1)
+        buff2 = ZBuffer(rgba2, z2)
+        
+        buff = buff1 + buff2
+        
+        assert_almost_equal(buff.rgba, exact_rgba)
+        assert_almost_equal(buff.z, exact_z)
 
-def test_rectangular_add():
-    rgba1 = np.ones((8, 8, 4))
-    z1 = np.arange(64.)
-    z1 = z1.reshape((8, 8))
-    buff1 = ZBuffer(rgba1, z1)
+    def test_rectangular_add(self):
+        rgba1 = np.ones((8, 8, 4))
+        z1 = np.arange(64.)
+        z1 = z1.reshape((8, 8))
+        buff1 = ZBuffer(rgba1, z1)
 
-    rgba2 = np.zeros((8, 8, 4))
-    z2 = np.arange(63., -1., -1.)
-    z2 = z2.reshape((8, 8))
-    buff2 = ZBuffer(rgba2, z2)
+        rgba2 = np.zeros((8, 8, 4))
+        z2 = np.arange(63., -1., -1.)
+        z2 = z2.reshape((8, 8))
+        buff2 = ZBuffer(rgba2, z2)
 
-    buff = buff1 + buff2
+        buff = buff1 + buff2
 
-    exact_rgba = np.empty((8, 8, 4), dtype=np.float64)
-    exact_rgba[0:4,0:8,:] = 1.0
-    exact_rgba[4:8,0:8,:] = 0.0
+        exact_rgba = np.empty((8, 8, 4), dtype=np.float64)
+        exact_rgba[0:4,0:8,:] = 1.0
+        exact_rgba[4:8,0:8,:] = 0.0
+        
+        exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
+        exact_z = np.expand_dims(exact_z, 1)
+        exact_z = exact_z.reshape(8, 8)
 
-    exact_z = np.concatenate((np.arange(32.), np.arange(31., -1., -1.)))
-    exact_z = np.expand_dims(exact_z, 1)
-    exact_z = exact_z.reshape(8, 8)
-
-    assert_almost_equal(buff.rgba, exact_rgba)
-    assert_almost_equal(buff.z, exact_z)
-
-if __name__ == "__main__":
-    im = test_composite_vr()
-    test_nonrectangular_add()
-    test_rectangular_add()
+        assert_almost_equal(buff.rgba, exact_rgba)
+        assert_almost_equal(buff.z, exact_z)

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -139,6 +139,8 @@
         else:
             mi, ma = self.bounds
         self.tf.add_layers(10, colormap='spectral')
+        factor = self.tf.funcs[-1].y.size / self.tf.funcs[-1].y.sum()
+        self.tf.funcs[-1].y *= 2*factor
 
     def plot(self, fn=None, profile_field=None, profile_weight=None):
         """

diff -r 5214d592ca7ca214b3a0b6204bec2bf31aa14549 -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -17,6 +17,62 @@
 from .render_source import VolumeSource
 from .utils import data_source_or_all
 from yt.funcs import mylog
+from yt.utilities.exceptions import YTSceneFieldNotFound
+
+
+def create_scene(data_source, field=None, lens_type='plane-parallel'):
+    r""" Set up a scene object with sensible defaults for use in volume
+    rendering.
+
+    A helper function that creates a default camera view, transfer
+    function, and image size. Using these, it returns an instance
+    of the Scene class, allowing one to further modify their rendering.
+
+    This function is the same as volume_render() except it doesn't render
+    the image.
+
+    Parameters
+    ----------
+    data_source : :class:`yt.data_objects.data_containers.AMR3DData`
+        This is the source to be rendered, which can be any arbitrary yt
+        3D object
+    field: string, tuple, optional
+        The field to be rendered. If unspecified, this will use the
+        default_field for your dataset's frontend--usually ('gas', 'density').
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
+        scaled if the field_info object specifies as such.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
+
+    Returns
+    -------
+    sc: Scene
+        A :class:`yt.visualization.volume_rendering.scene.Scene` object
+        that was constructed during the rendering. Useful for further
+        modifications, rotations, etc.
+
+    Example:
+    >>> import yt
+    >>> ds = yt.load("Enzo_64/DD0046/DD0046")
+    >>> sc = yt.create_scene(ds)
+    """
+    data_source = data_source_or_all(data_source)
+    sc = Scene()
+    if field is None:
+        field = data_source.ds.default_field
+        if field not in data_source.ds.derived_field_list:
+            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
+                  Please specify a field in create_scene()""" % (field, data_source.ds))
+        mylog.info('Setting default field to %s' % field.__repr__())
+
+    vol = VolumeSource(data_source, field=field)
+    sc.add_source(vol)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    return sc
 
 
 def volume_render(data_source, field=None, fname=None, sigma_clip=None,
@@ -34,18 +90,19 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. By default, this will use the first
-        field in data_source.ds.field_list.  A default transfer function
-        will be built that spans the range of values for that given field,
-        and the field will be logarithmically scaled if the field_info
-        object specifies as such.
+        The field to be rendered. If unspecified, this will use the
+        default_field for your dataset's frontend--usually ('gas', 'density').
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
+        scaled if the field_info object specifies as such.
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename
         in png format.
-    sigma_clip: float
-        The resulting image will be clipped before saving, using a threshold
-        based on `sigma_clip` multiplied by the standard deviation of the pixel
-        values. Recommended values are between 2 and 6. Default: None
+    sigma_clip: float, optional
+        If specified, the resulting image will be clipped before saving,
+        using a threshold based on sigma_clip multiplied by the standard
+        deviation of the pixel values. Recommended values are between 2 and 6.
+        Default: None
     lens_type: string, optional
         This specifies the type of lens to use for rendering. Current
         options are 'plane-parallel', 'perspective', and 'fisheye'. See
@@ -64,29 +121,8 @@
     Example:
     >>> import yt
     >>> ds = yt.load("Enzo_64/DD0046/DD0046")
-    >>> im, sc = yt.volume_render(ds, fname='test.png')
+    >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
-    data_source = data_source_or_all(data_source)
-    sc = Scene()
-    if field is None:
-        data_source.ds.index
-        for ftype, f in sorted(data_source.ds.field_list):
-            if ftype == "all":
-                continue
-            if f == 'Density':
-                field = (ftype, f)
-            elif f == 'density':
-                field = (ftype, f)
-            elif ftype != 'index' and 'particle' not in f:
-                field = (ftype, f)
-                break
-        else:
-            raise RuntimeError("Could not find default field." +
-                               " Please set explicitly in volume_render call")
-        mylog.info('Setting default field to %s' % field.__repr__())
-
-    vol = VolumeSource(data_source, field=field)
-    sc.add_source(vol)
-    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    sc = create_scene(data_source, field=field, lens_type=lens_type)
     im = sc.render(fname=fname, sigma_clip=sigma_clip)
     return im, sc


https://bitbucket.org/yt_analysis/yt/commits/d6bc569beb2f/
Changeset:   d6bc569beb2f
Branch:      yt
User:        MatthewTurk
Date:        2015-10-16 00:39:03+00:00
Summary:     Merging with upstream
Affected #:  5 files

diff -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -304,7 +304,10 @@
 :meth:`~yt.visualization.volume_rendering.camera.Camera.pitch`,
 :meth:`~yt.visualization.volume_rendering.camera.Camera.yaw`, and
 :meth:`~yt.visualization.volume_rendering.camera.Camera.roll` can rotate the
-camera in space.
+camera in space. The center around which the camera rotates can be specified by
+the optional parameter `rot_center` (very useful for perspective and spherical
+lenses), or by default `rot_center` is set to be at camera location (i.e. the 
+camera will rotate about its current position).
 
 When examining a particular point in space, 
 :meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` can be of

diff -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -75,7 +75,7 @@
             # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
-            if self.steady_north:
+            if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):
                 north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
             east_vector = np.cross(north_vector, normal_vector).ravel()
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))

diff -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -299,7 +299,7 @@
                                 north_vector=north_vector)
         self._moved = True
 
-    def rotate(self, theta, rot_vector=None):
+    def rotate(self, theta, rot_vector=None, rot_center=None):
         r"""Rotate by a given angle
 
         Rotate the view.  If `rot_vector` is None, rotation will occur
@@ -313,6 +313,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around
             `north_vector`
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change)
 
         Examples
         --------
@@ -323,12 +327,19 @@
         rotate_all = rot_vector is not None
         if rot_vector is None:
             rot_vector = self.north_vector
+        if rot_center is None:
+            rot_center = self._position
         rot_vector = ensure_numpy_array(rot_vector)
         rot_vector = rot_vector/np.linalg.norm(rot_vector)
 
+        new_position = self._position - rot_center
         R = get_rotation_matrix(theta, rot_vector)
+        new_position = np.dot(R, new_position) + rot_center
 
-        normal_vector = self.unit_vectors[2]
+        if (new_position == self._position).all():
+            normal_vector = self.unit_vectors[2]
+        else:
+            normal_vector = rot_center - new_position
         normal_vector = normal_vector/np.sqrt((normal_vector**2).sum())
 
         if rotate_all:
@@ -337,8 +348,9 @@
                 north_vector=np.dot(R, self.unit_vectors[1]))
         else:
             self.switch_view(normal_vector=np.dot(R, normal_vector))
+        if (new_position != self._position).any(): self.set_position(new_position)
 
-    def pitch(self, theta):
+    def pitch(self, theta, rot_center=None):
         r"""Rotate by a given angle about the horizontal axis
 
         Pitch the view.
@@ -347,6 +359,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to pitch the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -354,9 +368,9 @@
         >>> cam = Camera()
         >>> cam.pitch(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[0])
+        self.rotate(theta, rot_vector=self.unit_vectors[0], rot_center=rot_center)
 
-    def yaw(self, theta):
+    def yaw(self, theta, rot_center=None):
         r"""Rotate by a given angle about the vertical axis
 
         Yaw the view.
@@ -365,6 +379,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to yaw the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -372,9 +388,9 @@
         >>> cam = Camera()
         >>> cam.yaw(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[1])
+        self.rotate(theta, rot_vector=self.unit_vectors[1], rot_center=rot_center)
 
-    def roll(self, theta):
+    def roll(self, theta, rot_center=None):
         r"""Rotate by a given angle about the view normal axis
 
         Roll the view.
@@ -383,6 +399,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to roll the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -390,9 +408,9 @@
         >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[2])
+        self.rotate(theta, rot_vector=self.unit_vectors[2], rot_center=rot_center)
 
-    def iter_rotate(self, theta, n_steps, rot_vector=None):
+    def iter_rotate(self, theta, n_steps, rot_vector=None, rot_center=None):
         r"""Loop over rotate, creating a rotation
 
         This will rotate `n_steps` until the current view has been
@@ -408,6 +426,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around the
             original `north_vector`
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change)
 
         Examples
         --------
@@ -418,7 +440,7 @@
 
         dtheta = (1.0*theta)/n_steps
         for i in xrange(n_steps):
-            self.rotate(dtheta, rot_vector=rot_vector)
+            self.rotate(dtheta, rot_vector=rot_vector, rot_center=rot_center)
             yield i
 
     def iter_move(self, final, n_steps, exponential=False):

diff -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -152,6 +152,7 @@
     camera.set_width(width)
     camera.switch_orientation(normal_vector=normal_vector,
                               north_vector=north_vector)
+    camera.position = center - width[2]*camera.normal_vector
     camera.focus = center
     sc.camera = camera
     sc.add_source(vol)

diff -r f9bb10e5879858cc676cf21650ffeaedd8d6ccf4 -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -204,7 +204,7 @@
         """
         self.camera = camera
 
-    def get_camera(self, camera):
+    def get_camera(self):
         r"""
 
         Get the camera currently used by this scene.


https://bitbucket.org/yt_analysis/yt/commits/61acd3400aed/
Changeset:   61acd3400aed
Branch:      yt
User:        MatthewTurk
Date:        2015-10-16 21:56:04+00:00
Summary:     This fixes the various memoryview segfaults.

Memoryviews are pretty great, it turns out.
Affected #:  5 files

diff -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 -r 61acd3400aed31c252d69b55bd364a7b7257d7df yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -20,20 +20,16 @@
 cimport kdtree_utils
 
 cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
+    np.float64_t[:,:,:] vp_pos
+    np.float64_t[:,:,:] vp_dir
     np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
+    np.float64_t[:,:,:] image
+    np.float64_t[:,:] zbuffer
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
     np.float64_t *x_vec
     np.float64_t *y_vec
-    int z_strides[2]
 
 ctypedef void sampler_function(
                 VolumeContainer *vc,
@@ -60,7 +56,7 @@
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]

diff -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 -r 61acd3400aed31c252d69b55bd364a7b7257d7df yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -17,7 +17,7 @@
 cimport numpy as np
 cimport cython
 #cimport healpix_interface
-from libc.stdlib cimport malloc, free, abs
+from libc.stdlib cimport malloc, calloc, free, abs
 from libc.math cimport exp, floor, log2, \
     lrint, fabs, atan, atan2, asin, cos, sin, sqrt
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -233,10 +233,12 @@
     cdef np.float64_t px, py
     px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
     py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-    for i in range(3): v_dir[i] = im.vp_dir[i]
+    # Note we skip 1,0,*
+    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,1,0]*py + im.vp_pos[1,1,0]
+    v_pos[1] = im.vp_pos[0,0,1]*px + im.vp_pos[0,1,1]*py + im.vp_pos[1,1,1]
+    v_pos[2] = im.vp_pos[0,0,2]*px + im.vp_pos[0,1,2]*py + im.vp_pos[1,1,2]
+    # atleast_3d will add to beginning and end
+    for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -250,8 +252,8 @@
         # Here's a funny thing: we use vi here because our *image* will be
         # flattened.  That means that im.nv will be a better one-d offset,
         # since vp_pos has funny strides.
-        v_pos[i] = im.vp_pos[vi*3 + i]
-        v_dir[i] = im.vp_dir[vi*3 + i]
+        v_pos[i] = im.vp_pos[vi, vj, i]
+        v_dir[i] = im.vp_dir[vi, vj, i]
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
@@ -259,8 +261,8 @@
 
 cdef class ImageSampler:
     def __init__(self,
-                  np.ndarray vp_pos,
-                  np.ndarray vp_dir,
+                  np.float64_t[:,:,:] vp_pos,
+                  np.float64_t[:,:,:] vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
@@ -268,10 +270,11 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   *args, **kwargs):
-        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
-        cdef ImageContainer *imagec = self.image
-        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
+        cdef np.float64_t[:,:] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
+        if zbuffer is None:
+            zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
         self.lens_type = kwargs.pop("lens_type", None)
         if self.lens_type == "plane-parallel":
             self.extent_function = calculate_extent_plane_parallel
@@ -282,45 +285,27 @@
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
-        # de-allocation from reference counts.
-        self.avp_pos = vp_pos
-        self.avp_dir = vp_dir
+        # de-allocation from reference counts.  Note that we do this to the
+        # "atleast_3d" versions.  Also, note that we re-assign the input
+        # arguments.
+        self.image.vp_pos = vp_pos
+        self.image.vp_dir = vp_dir
+        self.image.image = self.aimage = image
         self.acenter = center
-        self.aimage = image
+        self.image.center = <np.float64_t *> center.data
         self.ax_vec = x_vec
+        self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
-        self.azbuffer = zbuffer
-        imagec.vp_pos = <np.float64_t *> vp_pos.data
-        imagec.vp_dir = <np.float64_t *> vp_dir.data
-        imagec.center = <np.float64_t *> center.data
-        imagec.image = <np.float64_t *> image.data
-        imagec.x_vec = <np.float64_t *> x_vec.data
-        imagec.y_vec = <np.float64_t *> y_vec.data
-        imagec.zbuffer = NULL
-        if zbuffer is not None:
-            imagec.zbuffer = <np.float64_t *> zbuffer.data
-            imagec.z_strides[0]
-            # 2D
-            for i in range(2):
-                imagec.z_strides[i] = zbuffer.strides[i] / 8
-        imagec.nv[0] = image.shape[0]
-        imagec.nv[1] = image.shape[1]
-        for i in range(4): imagec.bounds[i] = bounds[i]
-        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
-        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
+        self.image.y_vec = <np.float64_t *> y_vec.data
+        self.image.zbuffer = zbuffer
+        self.image.nv[0] = image.shape[0]
+        self.image.nv[1] = image.shape[1]
+        for i in range(4): self.image.bounds[i] = bounds[i]
+        self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
+        self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
         for i in range(3):
-            imagec.vp_strides[i] = vp_pos.strides[i] / 8
-            imagec.im_strides[i] = image.strides[i] / 8
             self.width[i] = width[i]
 
-        if vp_dir.ndim > 1:
-            for i in range(3):
-                imagec.vd_strides[i] = vp_dir.strides[i] / 8
-        elif vp_pos.ndim == 1:
-            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
-        else:
-            raise RuntimeError
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -364,20 +349,14 @@
                 vi = (j - vj) / ny + iter[0]
                 vj = vj + iter[2]
                 # Dynamically calculate the position
-                self.vector_function(im, vi, vj, width,
-                    v_dir, v_pos)
-                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                for i in range(Nch): idata.rgba[i] = im.image[i + offset]
-                if im.zbuffer != NULL:
-                    offset = im.z_strides[0] * vi + im.z_strides[1] * vj
-                    max_t = im.zbuffer[offset]
-                else:
-                    max_t = 1.0
-                max_t = fclip(max_t, 0.0, 1.0)
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(Nch):
+                    idata.rgba[i] = im.image[vi, vj, i]
+                max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
                 walk_volume(vc, v_pos, v_dir, self.sampler,
                             (<void *> idata), NULL, max_t)
-                offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                for i in range(Nch): im.image[i + offset] = idata.rgba[i]
+                for i in range(Nch):
+                    im.image[vi, vj, i] = idata.rgba[i]
             free(idata)
             free(v_pos)
             free(v_dir)

diff -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 -r 61acd3400aed31c252d69b55bd364a7b7257d7df yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -75,53 +75,25 @@
         size = nx * ny
         data = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
-        if im.vd_strides[0] == -1:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny
-                vj = vj
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = im.vp_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-        else:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = v_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-            free(v_dir)
+        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        for j in range(size):
+            vj = j % ny
+            vi = (j - vj) / ny
+            vj = vj
+            self.vector_function(im, vi, vj, width, v_dir, v_pos)
+            for i in range(3):
+                ray.org[i] = v_pos[i]
+                ray.dir[i] = v_dir[i]
+            ray.tnear = 0.0
+            ray.tfar = 1e37
+            ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.mask = -1
+            ray.time = 0
+            rtcs.rtcIntersect(scene.scene_i, ray)
+            data[j] = ray.time
+        self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+        free(v_pos)
+        free(v_dir)

diff -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 -r 61acd3400aed31c252d69b55bd364a7b7257d7df yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -149,7 +149,7 @@
 
     def new_image(self, camera):
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
+            np.zeros((camera.resolution[0], camera.resolution[1], 
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image

diff -r d6bc569beb2f4a83c3ca721ea4cbfea70a9e41b5 -r 61acd3400aed31c252d69b55bd364a7b7257d7df yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -17,11 +17,11 @@
 def new_mesh_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
-        params['image'],
+        np.atleast_3d(params['image']),
         params['x_vec'],
         params['y_vec'],
         params['width'],
@@ -37,8 +37,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -52,6 +52,8 @@
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
         args[4][:] = render_source.zbuffer.rgba[:]
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
 
     sampler = VolumeRenderSampler(*args, **kwargs)
     return sampler
@@ -62,8 +64,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -75,6 +77,8 @@
     kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
     sampler = InterpolatedProjectionSampler(*args, **kwargs)
     return sampler
 
@@ -84,8 +88,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -97,6 +101,8 @@
     kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
     sampler = ProjectionSampler(*args, **kwargs)
     return sampler
 


https://bitbucket.org/yt_analysis/yt/commits/733495213214/
Changeset:   733495213214
Branch:      yt
User:        MatthewTurk
Date:        2015-10-17 03:42:49+00:00
Summary:     Merging with upstream
Affected #:  26 files

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -39,7 +39,8 @@
 
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
-sc.render("v1.png")
+sc.render()
+sc.save("v1.png")
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
@@ -48,12 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 #
 ## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -61,11 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 #
 ## This looks pretty good, now lets go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.render("v5.png")
 
 # This looks great!

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,15 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,4 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(fname='custom.png', sigma_clip=4)
+sc.render(sigma_clip=4)
+sc.save('custom.png')

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -21,4 +21,4 @@
     np.log10(ds.quan(1.0e-29, 'g/cm**3')),
     scale=30.0, colormap='RdBu_r')
 
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+sc.save('new_tf.png')

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -45,6 +45,7 @@
    embedded_webm_animation
    gadget_notebook
    owls_notebook
+   ../visualizing/transfer_function_helper
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,7 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-im = sc.render("v1.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v1.png")
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -22,27 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-im = sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-im = sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -50,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v6.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v6.png")
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-im = sc.render("v7.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v7.png")
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -8,15 +8,15 @@
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)
-im = sc.render()
-im.write_png("%s_vr_domain.png" % ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
 sc.annotate_grids(ds)
-im = sc.render()
-im.write_png("%s_vr_grids.png" % ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
 sc.annotate_axes()
-im = sc.render()
-im.write_png("%s_vr_coords.png" % ds)
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,7 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_plane-parallel.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_plane-parallel.png')
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -50,7 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_perspective.png')
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -65,7 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-perspective.png')
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -79,7 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_fisheye.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_fisheye.png')
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -96,7 +100,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_spherical.png')
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,4 +116,5 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-spherical.png')

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/visualizing/transfer_function_helper.rst
--- /dev/null
+++ b/doc/source/visualizing/transfer_function_helper.rst
@@ -0,0 +1,6 @@
+.. _transfer-function-helper-tutorial:
+
+Transfer Function Helper Tutorial
+=================================
+
+.. notebook:: TransferFunctionHelper_Tutorial.ipynb

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,11 +31,11 @@
    :align: center
    :alt: Diagram of a 3D Scene
 
-In versions of yt prior to 3.2, the only volume rendering interface accessible
+In versions of yt prior to 3.3, the only volume rendering interface accessible
 was through the "camera" object.  This presented a number of problems,
 principal of which was the inability to describe new scene elements or to
 develop complex visualizations that were independent of the specific elements
-being rendered.  The new "scene" based interface present in yt 3.2 and beyond
+being rendered.  The new "scene" based interface present in yt 3.3 and beyond
 enables both more complex visualizations to be constructed as well as a new,
 more intuitive interface for very simple 3D visualizations.
 
@@ -65,14 +65,15 @@
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_render(ds, 'density', fname='test_rendering.png')
+  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
 
-  # im is the image that was generated.
+  # im is the image array generated. It is also saved to 'rendering.png'.
   # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings.
+  # your renderings, and later save them.
 
-When the :func:`~yt.visualization.volume_rendering.volume_render` function 
-is called, first an empty 
+When the 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+function is called, first an empty 
 :class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
 Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
@@ -96,9 +97,10 @@
 lenses can be swapped in and out.  For example, this might include a fisheye
 lens, a spherical lens, or some other method of describing the direction and
 origin of rays for rendering. Once the camera is added to the scene object, we
-call the main method of the
+call the main methods of the
 :class:`~yt.visualization.volume_rendering.scene.Scene` class,
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render`.  When called,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` and 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When called,
 the scene will loop through all of the
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
 that have been added and integrate the radiative transfer equation through the
@@ -110,20 +112,17 @@
 Alternatively, if you don't want to immediately generate an image of your
 volume rendering, and you just want access to the default scene object, 
 you can skip this expensive operation by just running the
-:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
-:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` function. Example:
 
 .. python-script::
 
   import yt
-  # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field 
   sc = yt.create_scene(ds, 'density')
 
-
-Modifying the Scene
--------------------
+Modifying and Saving the Scene
+------------------------------
 
 Once a basic scene has been created with default render sources and
 camera operations, deeper modifications are possible. These
@@ -133,6 +132,56 @@
 present in the scene.  Below, we describe a few of the aspects of tuning a
 scene to create a visualization that is communicative and pleasing.
 
+.. _rendering_scene:
+
+Rendering and Saving
+++++++++++++++++++++
+
+Whenever you want a rendering of your current scene configuration, use the
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` method to
+trigger the scene to actually do the ray-tracing step.  After that, you can
+use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
+to save it to disk.  Alternatively, 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
+:class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
+process it in Python (potentially writing it out with 
+:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
+modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
+and render it as you make changes to see how those changes affect the resulting
+image.  
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render() 
+  sc.save()
+  # ... make changes to the scene here ...
+  sc.render()
+  sc.save('changes.png')
+
+.. _sigma_clip:
+
+Improving Image Contrast with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your images appear to be too dark, you can try using the ``sigma_clip``
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
+Because the brightness range in an image is scaled to match the range of 
+emissivity values of the underlying rendering, if you have a few really 
+high-emissivity points, they will scale the rest of your image to be quite 
+dark.  ``sigma_clip = N`` can address this by removing values that are more
+than ``N`` standard deviations brighter than the mean of your image.  
+Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render(sigma_clip=4)
+  sc.save()
+
 .. _transfer_functions:
 
 Transfer Functions
@@ -210,7 +259,11 @@
 TransferFunctionHelper
 ----------------------
 
-.. notebook:: TransferFunctionHelper_Tutorial.ipynb
+Because good transfer functions can be difficult to generate, the 
+TransferFunctionHelper exists in order to help create and modify transfer
+functions with smart defaults for your datasets.  To follow a full example
+on how to use this interface, follow the
+:ref:`transfer-function-helper-tutorial`.
 
 Adding New Sources
 ++++++++++++++++++
@@ -325,7 +378,8 @@
 .. python-script::
 
    for i in sc.camera.zoomin(100, 5):
-       sc.render("frame_%03i.png" % i)
+       sc.render()
+       sc.save("frame_%03i.png" % i)
 
 The variable ``i`` is the frame number in the particular loop being called.  In
 this case, this will zoom in by a factor of 100 over the course of 5 frames.

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -29,16 +29,15 @@
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
     mks_base_units, latex_prefixes, yt_base_units
-from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_registry import \
+    UnitRegistry, \
+    UnitParseError
 from yt.utilities.exceptions import YTUnitsNotReducible
 
 import copy
 import string
 import token
 
-class UnitParseError(Exception):
-    pass
-
 class InvalidUnitOperation(Exception):
     pass
 
@@ -545,8 +544,13 @@
             return (unit_data[0] * prefix_value, unit_data[1])
 
     # no dice
-    raise UnitParseError("Could not find unit symbol '%s' in the provided " \
-                         "symbols." % symbol_str)
+    if symbol_str.startswith('code_'):
+        raise UnitParseError(
+            "Code units have not been defined. \n"
+            "Try creating the array or quantity using ds.arr or ds.quan instead.")
+    else:
+        raise UnitParseError("Could not find unit symbol '%s' in the provided " \
+                             "symbols." % symbol_str)
 
 def validate_dimensions(dimensions):
     if isinstance(dimensions, Mul):

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -305,7 +305,9 @@
     result_storage = None
     prefix = ""
     def __init__(self, ds_fn):
-        if isinstance(ds_fn, Dataset):
+        if ds_fn is None:
+            self.ds = None
+        elif isinstance(ds_fn, Dataset):
             self.ds = ds_fn
         else:
             self.ds = data_dir_load(ds_fn)
@@ -315,7 +317,8 @@
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None or self.description not in dd:
-                raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
+                raise YTNoOldAnswer(
+                    "%s : %s" % (self.storage_name, self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
@@ -660,6 +663,30 @@
         assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
         for fn in fns: os.remove(fn)
 
+class VRImageComparisonTest(AnswerTestingTest):
+    _type_name = "VRImageComparison"
+    _attrs = ('desc',)
+
+    def __init__(self, scene, ds, desc, decimals):
+        super(VRImageComparisonTest, self).__init__(None)
+        self.obj_type = ('vr',)
+        self.ds = ds
+        self.scene = scene
+        self.desc = desc
+        self.decimals = decimals
+
+    def run(self):
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        self.scene.render(sigma_clip=1.0)
+        self.scene.save(tmpname)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args',
@@ -774,6 +801,16 @@
     else:
         return ftrue
 
+def requires_answer_testing():
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if AnswerTestingTest.result_storage is not None:
+        return ftrue
+    else:
+        return ffalse
+    
 def requires_ds(ds_fn, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -41,6 +41,12 @@
 
         """
 
+        # Make sure vectors are unitless
+        if north_vector is not None:
+            north_vector = YTArray(north_vector, "", dtype='float64')
+        if normal_vector is not None:
+            normal_vector = YTArray(normal_vector, "", dtype='float64')
+
         self.steady_north = steady_north
         if not np.dot(normal_vector, normal_vector) > 0:
             mylog.error("Normal vector is null")

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -14,6 +14,7 @@
 from yt.funcs import iterable, mylog, ensure_numpy_array
 from yt.utilities.orientation import Orientation
 from yt.units.yt_array import YTArray
+from yt.units.unit_registry import UnitParseError
 from yt.utilities.math_utils import get_rotation_matrix
 from .utils import data_source_or_all
 from .lens import lenses
@@ -22,16 +23,34 @@
 
 class Camera(Orientation):
 
-    r"""
+    r"""A representation of a point of view into a Scene.
 
-    The Camera class. A Camera represents of point of view into a
-    Scene. It is defined by a position (the location of the camera
+    It is defined by a position (the location of the camera
     in the simulation domain,), a focus (the point at which the
     camera is pointed), a width (the width of the snapshot that will
     be taken, a resolution (the number of pixels in the image), and
     a north_vector (the "up" direction in the resulting image). A
     camera can use a variety of different Lens objects.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
+    auto: boolean
+        If True, build smart defaults using the data source extent. This
+        can be time-consuming, as it requires iterating over the entire
+        dataset to find the positional bounds. Default: False
+
+    Examples
+    --------
+    >>> cam = Camera(ds)
+
     """
 
     _moved = True
@@ -42,29 +61,7 @@
 
     def __init__(self, data_source=None, lens_type='plane-parallel',
                  auto=False):
-        """
-        Initialize a Camera Instance
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        lens_type: string, optional
-            This specifies the type of lens to use for rendering. Current
-            options are 'plane-parallel', 'perspective', and 'fisheye'. See
-            :class:`yt.visualization.volume_rendering.lens.Lens` for details.
-            Default: 'plane-parallel'
-        auto: boolean
-            If True, build smart defaults using the data source extent. This
-            can be time-consuming to iterate over the entire dataset to find
-            the positional bounds. Default: False
-
-        Examples
-        --------
-        >>> cam = Camera(ds)
-
-        """
+        """Initialize a Camera Instance"""
         self.lens = None
         self.north_vector = None
         self.normal_vector = None
@@ -90,12 +87,18 @@
 
     def position():
         doc = '''The position is the location of the camera in
-               the coordinate system of the simulation.'''
+               the coordinate system of the simulation. This needs
+               to be either a YTArray or a numpy array. If it is a 
+               numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units 
+               automatically. '''
 
         def fget(self):
             return self._position
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._position = value
             self.switch_orientation()
 
@@ -105,12 +108,17 @@
     position = property(**position())
 
     def width():
-        doc = '''The width of the image that will be produced. '''
+        doc = '''The width of the region that will be seen in the image. 
+               This needs to be either a YTArray or a numpy array. If it 
+               is a numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units automatically. '''
 
         def fget(self):
             return self._width
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._width = value
             self.switch_orientation()
 
@@ -121,12 +129,18 @@
     width = property(**width())
 
     def focus():
-        doc = '''The focus defines the point the Camera is pointed at. '''
+        doc = '''The focus defines the point the Camera is pointed at. This needs
+               to be either a YTArray or a numpy array. If it is a 
+               numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units 
+               automatically. '''
 
         def fget(self):
             return self._focus
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._focus = value
             self.switch_orientation()
 
@@ -161,9 +175,7 @@
         return lens_params
 
     def set_lens(self, lens_type):
-        r'''
-
-        Set the lens to be used with this camera. 
+        r"""Set the lens to be used with this camera.
 
         Parameters
         ----------
@@ -177,7 +189,7 @@
             'spherical'
             'stereo-spherical'
 
-        '''
+        """
         if lens_type not in lenses:
             mylog.error("Lens type not available")
             raise RuntimeError()
@@ -185,6 +197,7 @@
         self.lens.camera = self
 
     def set_defaults_from_data_source(self, data_source):
+        """Resets the camera attributes to their default values"""
         self.position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
@@ -215,20 +228,22 @@
         self._moved = True
 
     def set_width(self, width):
-        r"""
-
-        Set the width of the image that will be produced by this camera.
-        This must be a YTQuantity.
+        r"""Set the width of the image that will be produced by this camera.
 
         Parameters
         ----------
 
-        width : :class:`yt.units.yt_array.YTQuantity`
-
+        width : YTQuantity or 3 element YTArray
+            The width of the volume rendering in the horizontal, vertical, and
+            depth directions. If a scalar, assumes that the width is the same in
+            all three directions.
         """
-        assert isinstance(width, YTArray), 'Width must be created with ds.arr'
-        if isinstance(width, YTArray):
+        try:
             width = width.in_units('code_length')
+        except (AttributeError, UnitParseError):
+            raise ValueError(
+                'Volume rendering width must be a YTArray that can be '
+                'converted to code units')
 
         if not iterable(width):
             width = YTArray([width.d]*3, width.units)  # Can't get code units.
@@ -236,9 +251,7 @@
         self.switch_orientation()
 
     def set_position(self, position, north_vector=None):
-        r"""
-
-        Set the position of the camera.
+        r"""Set the position of the camera.
 
         Parameters
         ----------
@@ -256,8 +269,7 @@
                                 north_vector=north_vector)
 
     def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""
-        Change the view direction based on any of the orientation parameters.
+        r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes
         related to an orientable object.
@@ -435,7 +447,8 @@
         --------
 
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ...     im = sc.render("rotation_%04i.png" % i)
+        ...     im = sc.render()
+        ...     sc.save('rotation_%04i.png' % i)
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -463,7 +476,8 @@
         --------
 
         >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
-        ...     sc.render("move_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("move_%04i.png" % i)
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -490,11 +504,6 @@
         factor : float
             The factor by which to reduce the distance to the focal point.
 
-
-        Notes
-        -----
-
-        You will need to call snapshot() again to get a new image.
         """
         self.set_width(self.width / factor)
 
@@ -516,7 +525,8 @@
         --------
 
         >>> for i in cam.iter_zoom(100.0, 10):
-        ...     sc.render("zoom_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("zoom_%04i.png" % i)
         """
         f = final**(1.0/n_steps)
         for i in xrange(n_steps):

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -26,15 +26,7 @@
 
 
 class Lens(ParallelAnalysisInterface):
-
-    """
-
-    A base class for setting up Lens objects. A Lens,
-    along with a Camera, is used to defined the set of
-    rays that will be used for rendering.
-
-    """
-
+    """A Lens is used to define the set of rays for rendering."""
     def __init__(self, ):
         super(Lens, self).__init__()
         self.viewpoint = None
@@ -48,9 +40,14 @@
         self.sampler = None
 
     def set_camera(self, camera):
+        """Set the properties of the lens based on the camera.
+
+        This is a proxy for setup_box_properties
+        """
         self.setup_box_properties(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0], camera.resolution[1],
                       4), dtype='float64', order='C'),
@@ -58,6 +55,7 @@
         return self.current_image
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         unit_vectors = camera.unit_vectors
         width = camera.width
         center = camera.focus
@@ -80,13 +78,12 @@
 
 
 class PlaneParallelLens(Lens):
+    r"""The lens for orthographic projections.
 
-    r'''
-
-    This lens type is the standard type used for orthographic projections. 
     All rays emerge parallel to each other, arranged along a plane.
 
-    '''
+    The initializer takes no parameters.
+    """
 
     def __init__(self, ):
         super(PlaneParallelLens, self).__init__()
@@ -111,6 +108,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
+        """Set the viewpoint based on the camera"""
         # This is a hack that should be replaced by an alternate plane-parallel
         # traversal. Put the camera really far away so that the effective
         # viewpoint is infinitely far away, making for parallel rays.
@@ -135,17 +133,14 @@
 
 
 class PerspectiveLens(Lens):
+    r"""A lens for viewing a scene with a set of rays within an opening angle.
 
-    r'''
-
-    This lens type adjusts for an opening view angle, so that the scene will 
-    have an element of perspective to it.
-
-    '''
+    The scene will have an element of perspective to it since the rays are not
+    parallel.
+    """
 
     def __init__(self):
         super(PerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
 
     def new_image(self, camera):
         self.current_image = ImageArray(
@@ -155,13 +150,6 @@
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
         else:
@@ -174,24 +162,30 @@
         px = np.mat(np.linspace(-.5, .5, camera.resolution[0]))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(camera.resolution[0],1,3), \
+        sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              camera.resolution[0], axis=0)
 
-        normal_vecs = np.tile(normal_vec, camera.resolution[0] * camera.resolution[1])\
-                             .reshape(camera.resolution[0], camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec, camera.resolution[0] * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3, order='C')
+        positions = np.tile(
+            camera.position, camera.resolution[0] * camera.resolution[1])
+        positions = positions.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         uv = np.ones(3, dtype='float64')
 
@@ -234,16 +228,18 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is behind the camera, we place it outside of
+                # the image. It cannot simply be removed, because it may
+                # connect to another corner within the image, which would
+                # produce a visible domain boundary line.
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera.position + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
         dy = np.dot(pos1 - sight_center.d, camera.unit_vectors[1])
-        dz = np.dot(pos1 - sight_center.d, camera.unit_vectors[2])
+        dz = np.dot(pos - camera.position.d, camera.unit_vectors[2])
+
         # Transpose into image coords.
         px = (res[0] * 0.5 + res[0] / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -256,15 +252,14 @@
 
 
 class StereoPerspectiveLens(Lens):
-
-    """docstring for StereoPerspectiveLens"""
+    """A lens that includes two sources for perspective rays, for 3D viewing"""
 
     def __init__(self):
         super(StereoPerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
         self.disparity = None
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]*camera.resolution[1], 1,
                       4), dtype='float64', order='C'),
@@ -275,10 +270,6 @@
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
         # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
@@ -287,8 +278,10 @@
         else:
             image = self.new_image(camera)
 
-        vectors_left, positions_left = self._get_positions_vectors(camera, -self.disparity)
-        vectors_right, positions_right = self._get_positions_vectors(camera, self.disparity)
+        vectors_left, positions_left = self._get_positions_vectors(
+            camera, -self.disparity)
+        vectors_right, positions_right = self._get_positions_vectors(
+            camera, self.disparity)
 
         uv = np.ones(3, dtype='float64')
 
@@ -330,28 +323,37 @@
         px = np.mat(np.linspace(-.5, .5, single_resolution_x))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(single_resolution_x,1,3), \
+        sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              single_resolution_x, axis=0)
 
-        normal_vecs = np.tile(normal_vec_rot, single_resolution_x * camera.resolution[1])\
-                             .reshape(single_resolution_x, camera.resolution[1], 3)
-        east_vecs = np.tile(east_vec_rot, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec_rot, single_resolution_x * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
+        east_vecs = np.tile(
+            east_vec_rot, single_resolution_x * camera.resolution[1])
+        east_vecs = east_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
-        positions = positions + east_vecs * disparity # Here the east_vecs is non-rotated one
+        # Here the east_vecs is non-rotated one
+        positions = positions + east_vecs * disparity
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -365,8 +367,10 @@
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
-        px_left, py_left, dz_left = self._get_px_py_dz(camera, pos, res, -self.disparity)
-        px_right, py_right, dz_right = self._get_px_py_dz(camera, pos, res, self.disparity)
+        px_left, py_left, dz_left = self._get_px_py_dz(
+            camera, pos, res, -self.disparity)
+        px_right, py_right, dz_right = self._get_px_py_dz(
+            camera, pos, res, self.disparity)
 
         px = np.hstack([px_left, px_right])
         py = np.hstack([py_left, py_right])
@@ -402,16 +406,18 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is behind the camera, we put it outside of
+                # the image. It cannot simply be removed, because it may
+                # connect to another corner within the image, which would
+                # produce a visible domain boundary line.
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera_position_shift + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, east_vec_rot)
         dy = np.dot(pos1 - sight_center.d, north_vec)
-        dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
+        dz = np.dot(pos - camera_position_shift, normal_vec_rot)
+        
         # Transpose into image coords.
         px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -431,14 +437,13 @@
 
 
 class FisheyeLens(Lens):
+    r"""A lens for dome-based renderings
 
-    r"""
-
-    This lens type accepts a field-of-view property, fov, that describes how wide 
-    an angle the fisheye can see. Fisheye images are typically used for dome-based 
-    presentations; the Hayden planetarium for instance has a field of view of 194.6. 
-    The images returned by this camera will be flat pixel images that can and should 
-    be reshaped to the resolution.    
+    This lens type accepts a field-of-view property, fov, that describes how
+    wide an angle the fisheye can see. Fisheye images are typically used for
+    dome-based presentations; the Hayden planetarium for instance has a field of
+    view of 194.6.  The images returned by this camera will be flat pixel images
+    that can and should be reshaped to the resolution.
 
     """
 
@@ -450,11 +455,13 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(FisheyeLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]**2, 1,
                       4), dtype='float64', order='C'),
@@ -489,9 +496,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a FisheyeLens, the viewpoint is the front center.
-        """
+        """For a FisheyeLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def __repr__(self):
@@ -508,7 +513,7 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         # First, we transform lpos into *relative to the camera* coordinates.
-        lpos = camera.position - pos
+        lpos = camera.position.d - pos
         lpos = lpos.dot(self.rotation_matrix)
         # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
@@ -521,21 +526,22 @@
         px = r * np.cos(phi)
         py = r * np.sin(phi)
         u = camera.focus.uq
+        length_unit = u / u.d
         # dz is distance the ray would travel
         px = (px + 1.0) * res[0] / 2.0
         py = (py + 1.0) * res[1] / 2.0
-        px = (u * np.rint(px)).astype("int64")
-        py = (u * np.rint(py)).astype("int64")
+        # px and py should be dimensionless
+        px = (u * np.rint(px) / length_unit).astype("int64")
+        py = (u * np.rint(py) / length_unit).astype("int64")
         return px, py, dz
 
 
 class SphericalLens(Lens):
+    r"""A lens for cylindrical-spherical projection.
 
-    r"""
+    Movies rendered in this way can be displayed in head-tracking devices or
+    in YouTube 360 view.
 
-    This is a cylindrical-spherical projection. Movies rendered in this way 
-    can be displayed in head-tracking devices or in YouTube 360 view.
-    
     """
 
     def __init__(self):
@@ -545,6 +551,7 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(SphericalLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
@@ -562,11 +569,13 @@
         vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position,
+            camera.resolution[0] * camera.resolution[1]).reshape(
+                camera.resolution[0], camera.resolution[1], 3)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((camera.resolution[0]*camera.resolution[1], 3))
@@ -596,9 +605,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a PerspectiveLens, the viewpoint is the front center.
-        """
+        """For a SphericalLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def project_to_plane(self, camera, pos, res=None):
@@ -606,7 +613,7 @@
             res = camera.resolution
         # Much of our setup here is the same as in the fisheye, except for the
         # actual conversion back to the px, py values.
-        lpos = camera.position - pos
+        lpos = camera.position.d - pos
         # inv_mat = np.linalg.inv(self.rotation_matrix)
         # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
@@ -623,17 +630,22 @@
         py = np.arcsin(lpos[:, 2])
         dz = mag / self.radius
         u = camera.focus.uq
+        length_unit = u / u.d
         # dz is distance the ray would travel
         px = ((-px + np.pi) / (2.0*np.pi)) * res[0]
         py = ((-py + np.pi/2.0) / np.pi) * res[1]
-        px = (u * np.rint(px)).astype("int64")
-        py = (u * np.rint(py)).astype("int64")
+        # px and py should be dimensionless
+        px = (u * np.rint(px) / length_unit).astype("int64")
+        py = (u * np.rint(py) / length_unit).astype("int64")
         return px, py, dz
 
 
 class StereoSphericalLens(Lens):
+    r"""A lens for a stereo cylindrical-spherical projection.
 
-    """docstring for StereoSphericalLens"""
+    Movies rendered in this way can be displayed in VR devices or as stereo
+    YouTube 360 degree movies.
+    """
 
     def __init__(self):
         super(StereoSphericalLens, self).__init__()
@@ -652,31 +664,35 @@
             self.disparity = camera.width[0] / 1000.
 
         single_resolution_x = np.floor(camera.resolution[0])/2
-        px = np.linspace(-np.pi, np.pi, single_resolution_x, endpoint=True)[:,None]
-        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1], endpoint=True)[None,:]
+        px = np.linspace(-np.pi, np.pi, single_resolution_x,
+                         endpoint=True)[:, None]
+        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+                         endpoint=True)[None, :]
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
-        vectors[:,:,0] = np.cos(px) * np.cos(py)
-        vectors[:,:,1] = np.sin(px) * np.cos(py)
-        vectors[:,:,2] = np.sin(py)
+        vectors[:, :, 0] = np.cos(px) * np.cos(py)
+        vectors[:, :, 1] = np.sin(px) * np.cos(py)
+        vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
         vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
                             dtype='float64', order='C')
-        vectors2[:,:,0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,1] = np.cos(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,2] = 0
+        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 2] = 0
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         # The left and right are switched here since VR is in LHS.
         positions_left = positions + vectors2 * self.disparity
         positions_right = positions + vectors2 * (-self.disparity)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((single_resolution_x*camera.resolution[1], 3))

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -152,6 +152,8 @@
     camera.set_width(width)
     camera.switch_orientation(normal_vector=normal_vector,
                               north_vector=north_vector)
+    if not iterable(width):
+        width = data_source.ds.arr([width]*3)
     camera.position = center - width[2]*camera.normal_vector
     camera.focus = center
     sc.camera = camera
@@ -174,9 +176,6 @@
     east_vector = camera.unit_vectors[1]
     normal_vector = camera.unit_vectors[2]
     fields = vol.field
-    if not iterable(width):
-        width = data_source.ds.arr([width]*3)
-
     mi = ds.domain_right_edge.copy()
     ma = ds.domain_left_edge.copy()
     for off1 in [-1, 1]:

diff -r 61acd3400aed31c252d69b55bd364a7b7257d7df -r 7334952132141b8e115bae89196d6734f14f06b7 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -39,10 +39,9 @@
 
 class RenderSource(ParallelAnalysisInterface):
 
-    """
+    """Base Class for Render Sources.
 
-    Base Class for Render Sources. Will be inherited for volumes,
-    streamlines, etc.
+    Will be inherited for volumes, streamlines, etc.
 
     """
 
@@ -59,10 +58,9 @@
 
 
 class OpaqueSource(RenderSource):
-    """
+    """A base class for opaque render sources.
 
-    A base class for opaque render sources. Will be inherited from
-    for LineSources, BoxSources, etc.
+    Will be inherited from for LineSources, BoxSources, etc.
 
     """
     def __init__(self):
@@ -72,50 +70,36 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
-    def render(self, camera, zbuffer=None):
-        # This is definitely wrong for now
-        if zbuffer is not None and self.zbuffer is not None:
-            zbuffer.rgba = self.zbuffer.rgba
-            zbuffer.z = self.zbuffer.z
-            self.zbuffer = zbuffer
-        return self.zbuffer
+class VolumeSource(RenderSource):
+    """A class for rendering data from a volumetric data source
 
+    Examples of such sources include a sphere, cylinder, or the
+    entire computational domain.
 
-class VolumeSource(RenderSource):
+    A :class:`VolumeSource` provides the framework to decompose an arbitrary
+    yt data source into bricks that can be traversed and volume rendered.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    fields : string
+        The name of the field(s) to be rendered.
+    auto: bool, optional
+        If True, will build a default AMRKDTree and transfer function based
+        on the data.
+
+    Examples
+    --------
+    >>> source = VolumeSource(ds.all_data(), 'density')
     """
 
-    A VolumeSource is a class for rendering data from
-    an arbitrary volumetric data source, e.g. a sphere,
-    cylinder, or the entire computational domain.
-
-
-    """
     _image = None
     data_source = None
 
     def __init__(self, data_source, field, auto=True):
-        r"""Initialize a new volumetric source for rendering.
-
-        A :class:`VolumeSource` provides the framework to decompose an arbitrary
-        yt data source into bricks that can be traversed and volume rendered.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field(s) to be rendered.
-        auto: bool, optional
-            If True, will build a default AMRKDTree and transfer function based
-            on the data.
-
-        Examples
-        --------
-        >>> source = RenderSource(ds, 'density')
-
-        """
+        r"""Initialize a new volumetric source for rendering."""
         super(VolumeSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -138,13 +122,14 @@
             self.build_defaults()
 
     def build_defaults(self):
+        """Sets a default volume and transfer function"""
+        mylog.info("Creating default volume")
         self.build_default_volume()
+        mylog.info("Creating default transfer function")
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):
-        """
-        Set transfer function for this source
-        """
+        """Set transfer function for this source"""
         if not isinstance(transfer_function,
                           (TransferFunction, ColorTransferFunction,
                            ProjectionTransferFunction)):
@@ -167,6 +152,7 @@
             raise RuntimeError("Transfer Function not Supplied")
 
     def build_default_transfer_function(self):
+        """Sets up a transfer function"""
         self.tfh = \
             TransferFunctionHelper(self.data_source.pf)
         self.tfh.set_field(self.field)
@@ -175,6 +161,7 @@
         self.transfer_function = self.tfh.tf
 
     def build_default_volume(self):
+        """Sets up an AMRKDTree based on the VolumeSource's field"""
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
@@ -182,17 +169,23 @@
         self.volume.set_fields([self.field], log_fields, True)
 
     def set_volume(self, volume):
+        """Associates an AMRKDTree with the VolumeSource"""
         assert(isinstance(volume, AMRKDTree))
         del self.volume
         self.volume = volume
 
-    def set_field(self, field, no_ghost=True):
-        field = self.data_source._determine_fields(field)[0]
-        log_field = self.data_source.pf.field_info[field].take_log
-        self.volume.set_fields(field, [log_field], no_ghost)
-        self.field = field
+    def set_fields(self, fields, no_ghost=True):
+        """Set the source's fields to render
 
-    def set_fields(self, fields, no_ghost=True):
+        Parameters
+        ----------
+        fields: field name or list of field names
+            The field or fields to render
+        no_ghost: boolean
+            If False, the AMRKDTree estimates vertex centered data using ghost
+            zones, which can eliminate seams in the resulting volume rendering.
+            Defaults to True for performance reasons.
+        """
         fields = self.data_source._determine_fields(fields)
         log_fields = [self.data_source.ds.field_info[f].take_log
                       for f in fields]
@@ -200,7 +193,12 @@
         self.field = fields
 
     def set_sampler(self, camera):
-        """docstring for add_sampler"""
+        """Sets a volume render sampler
+
+        The type of sampler is determined based on the ``sampler_type`` attribute
+        of the VolumeSource. Currently the ``volume_render`` and ``projection``
+        sampler types are supported.
+        """
         if self.sampler_type == 'volume-render':
             sampler = new_volume_render_sampler(camera, self)
         elif self.sampler_type == 'projection':
@@ -211,6 +209,24 @@
         assert(self.sampler is not None)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         self.zbuffer = zbuffer
         self.set_sampler(camera)
         assert (self.sampler is not None)
@@ -238,11 +254,24 @@
         return self.current_image
 
     def finalize_image(self, camera, image, call_from_VR=False):
-        image = self.volume.reduce_tree_images(image,
-                                               camera.lens.viewpoint)
+        """Parallel reduce the image.
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            The camera used to produce the volume rendering image.
+        image: :class:`yt.data_objects.image_array.ImageArray` instance
+            A reference to an image to fill
+        call_from_vr: boolean, optional
+            Whether or not this is being called from a higher level in the VR
+            interface. Used to set the correct orientation.
+        """
+        image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
-        # If the call is from VR, the image is rotated by 180 to get correct up dir
-        if call_from_VR: image = np.rot90(image, k=2)
+        # If the call is from VR, the image is rotated by 180 to get correct
+        # up direction
+        if call_from_VR is True: 
+            image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
         return image
@@ -254,38 +283,33 @@
 
 
 class MeshSource(RenderSource):
+    """A source for unstructured mesh data
 
-    """
+    This functionality requires the embree ray-tracing engine and the
+    associated pyembree python bindings to be installed in order to
+    function.
 
-    MeshSource is a class for volume rendering unstructured mesh
-    data. This functionality requires the embree ray-tracing
-    engine and the associated pyembree python bindings to be
-    installed in order to function.
+    A :class:`MeshSource` provides the framework to volume render
+    unstructured mesh data.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    field : string
+        The name of the field to be rendered.
+
+    Examples
+    --------
+    >>> source = MeshSource(ds, ('all', 'convected'))
     """
 
     _image = None
     data_source = None
 
     def __init__(self, data_source, field):
-        r"""Initialize a new unstructured source for rendering.
-
-        A :class:`MeshSource` provides the framework to volume render
-        unstructured mesh data.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field to be rendered.
-
-        Examples
-        --------
-        >>> source = MeshSource(ds, ('all', 'convected'))
-
-        """
+        r"""Initialize a new unstructured source for rendering."""
         super(MeshSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -323,7 +347,25 @@
                                                   field_data.d)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
 
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
+ 
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
@@ -340,33 +382,34 @@
 
 
 class PointSource(OpaqueSource):
+    r"""A rendering source of opaque points in the scene.
+
+    This class provides a mechanism for adding points to a scene; these
+    points will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 3)
+        These positions, in data-space coordinates, are the points to be
+        added to the scene.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = PointSource(particle_positions)
+
+    """
+
 
     _image = None
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A rendering source of opaque points in the scene.
-
-        This class provides a mechanism for adding points to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 3)
-            These positions, in data-space coordinates, are the points to be
-            added to the scene.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = PointSource(particle_positions)
-
-        """
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -376,6 +419,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -396,44 +457,43 @@
         return zbuffer
 
     def __repr__(self):
-        disp = "<Points Source>"
+        disp = "<Point Source>"
         return disp
 
 
 class LineSource(OpaqueSource):
+    r"""A render source for a sequence of opaque line segments.
+
+    This class provides a mechanism for adding lines to a scene; these
+    lines will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 2, 3)
+        These positions, in data-space coordinates, are the starting and
+        stopping points for each pair of lines. For example,
+        positions[0][0] and positions[0][1] would give the (x, y, z)
+        coordinates of the beginning and end points of the first line,
+        respectively.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.  Note that they correspond to the line
+        segment succeeding each point; this means that strictly speaking
+        they need only be (N-1) in length.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = LineSource(np.random.random((10, 3)))
+
+    """
 
     _image = None
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A render source for a sequence of opaque line segments.
-
-        This class provides a mechanism for adding lines to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 2, 3)
-            These positions, in data-space coordinates, are the starting and
-            stopping points for each pair of lines. For example,
-            positions[0][0] and positions[0][1] would give the (x, y, z)
-            coordinates of the beginning and end points of the first line,
-            respectively.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.  Note that they correspond to the line
-            segment succeeding each point; this means that strictly speaking
-            they need only be (N-1) in length.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = LineSource(np.random.random((10, 3)))
-
-        """
-
         super(LineSource, self).__init__()
 
         assert(positions.shape[1] == 2)
@@ -451,6 +511,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -476,26 +554,25 @@
 
 
 class BoxSource(LineSource):
+    r"""A render source for a box drawn with line segments.
+    This render source will draw a box, with transparent faces, in data
+    space coordinates.  This is useful for annotations.
+
+    Parameters
+    ----------
+    left_edge: array-like, shape (3,), float
+        The left edge coordinates of the box.
+    right_edge : array-like, shape (3,), float
+        The right edge coordinates of the box.
+    color : array-like, shape (4,), float, optional
+        The colors (including alpha) to use for the lines.
+
+    Examples
+    --------
+    >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+    """
     def __init__(self, left_edge, right_edge, color=None):
-        r"""A render source for a box drawn with line segments.
-
-        This render source will draw a box, with transparent faces, in data
-        space coordinates.  This is useful for annotations.
-
-        Parameters
-        ----------
-        left_edge: array-like, shape (3,), float
-            The left edge coordinates of the box.
-        right_edge : array-like, shape (3,), float
-            The right edge coordinates of the box.
-        color : array-like, shape (4,), float, optional
-            The colors (including alpha) to use for the lines.
-
-        Examples
-        --------
-        >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
-
-        """
         if color is None:
             color = np.array([1.0, 1.0, 1.0, 1.0])
         color = ensure_numpy_array(color)
@@ -513,32 +590,32 @@
 
 
 class GridSource(LineSource):
+    r"""A render source for drawing grids in a scene.
+
+    This render source will draw blocks that are within a given data
+    source, by default coloring them by their level of resolution.
+
+    Parameters
+    ----------
+    data_source: :class:`~yt.data_objects.api.DataContainer`
+        The data container that will be used to identify grids to draw.
+    alpha : float
+        The opacity of the grids to draw.
+    cmap : color map name
+        The color map to use to map resolution levels to color.
+    min_level : int, optional
+        Minimum level to draw
+    max_level : int, optional
+        Maximum level to draw
+
+    Examples
+    --------
+    >>> dd = ds.sphere("c", (0.1, "unitary"))
+    >>> source = GridSource(dd, alpha=1.0)
+
+    """
     def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
-        r"""A render source for drawing grids in a scene.
-
-        This render source will draw blocks that are within a given data
-        source, by default coloring them by their level of resolution.
-
-        Parameters
-        ----------
-        data_source: :class:`~yt.data_objects.api.DataContainer`
-            The data container that will be used to identify grids to draw.
-        alpha : float
-            The opacity of the grids to draw.
-        cmap : color map name
-            The color map to use to map resolution levels to color.
-        min_level : int, optional
-            Minimum level to draw
-        max_level : int, optional
-            Maximum level to draw
-
-        Examples
-        --------
-        >>> dd = ds.sphere("c", (0.1, "unitary"))
-        >>> source = GridSource(dd, alpha=1.0)
-
-        """
         data_source = data_source_or_all(data_source)
         corners = []
         levels = []
@@ -586,24 +663,24 @@
 
 
 class CoordinateVectorSource(OpaqueSource):
+    r"""Draw coordinate vectors on the scene.
+
+    This will draw a set of coordinate vectors on the camera image.  They
+    will appear in the lower right of the image.
+
+    Parameters
+    ----------
+    colors: array-like, shape (3,4), optional
+        The x, y, z RGBA values to use to draw the vectors.
+    alpha : float, optional
+        The opacity of the vectors.
+
+    Examples
+    --------
+    >>> source = CoordinateVectorSource()
+    """
+
     def __init__(self, colors=None, alpha=1.0):
-        r"""Draw coordinate vectors on the scene.
-
-        This will draw a set of coordinate vectors on the camera image.  They
-        will appear in the lower right of the image.
-
-        Parameters
-        ----------
-        colors: array-like, shape (3,4), optional
-            The x, y, z RGBA values to use to draw the vectors.
-        alpha : float, optional
-            The opacity of the vectors.
-
-        Examples
-        --------
-        >>> source = CoordinateVectorSource()
-
-        """
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -616,6 +693,24 @@
         self.color_stride = 2
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         camera.lens.setup_box_properties(camera)
         center = camera.focus
         # Get positions at the focus

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/7f2b8807626c/
Changeset:   7f2b8807626c
Branch:      yt
User:        MatthewTurk
Date:        2015-10-17 04:27:34+00:00
Summary:     Fixing old cameras and new to have correct res and lens types.
Affected #:  2 files

diff -r 7334952132141b8e115bae89196d6734f14f06b7 -r 7f2b8807626c0b0143e5188642a45ceac0d82566 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -261,8 +261,8 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
-                      4), dtype='float64', order='C'),
+            np.zeros((camera.resolution[0], camera.resolution[1], 4),
+                     dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
@@ -289,9 +289,9 @@
         vectors_comb = np.vstack([vectors_left, vectors_right])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params =\
             dict(vp_pos=positions_comb,
@@ -588,9 +588,9 @@
             image = self.new_image(camera)
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions,
@@ -709,9 +709,9 @@
         vectors_comb = np.vstack([vectors, vectors])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions_comb,

diff -r 7334952132141b8e115bae89196d6734f14f06b7 -r 7f2b8807626c0b0143e5188642a45ceac0d82566 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -598,16 +598,16 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     star_trees = None
-    def get_sampler(self, args):
-        kwargs = {}
+    def get_sampler(self, args, kwargs):
         if self.star_trees is not None:
             kwargs = {'star_list': self.star_trees}
         if self.use_light:
@@ -781,8 +781,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1248,14 +1248,14 @@
         positions = self.ds.arr(positions, input_units="code_length")
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (self.resolution[0]*self.resolution[1],1,4)
+        image.shape = (self.resolution[0], self.resolution[1],4)
 
         args = (positions, vectors, self.back_center,
                 (0.0,1.0,0.0,1.0),
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'perspective'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -1430,7 +1430,7 @@
         if self._needs_tf:
             args += (self.transfer_function,)
         args += (self.sub_samples,)
-        return args
+        return args, {}
 
     def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
@@ -1492,8 +1492,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.volume.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1653,7 +1653,7 @@
                 image, uv, uv,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {}
 
 
     def finalize_image(self, image):
@@ -1799,8 +1799,8 @@
             mylog.debug('Working on: %i %i' % (self.imi, self.imj))
             self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
             image = self.new_image()
-            args = self.get_sampler_args(image)
-            sampler = self.get_sampler(args)
+            args, kwargs = self.get_sampler_args(image)
+            sampler = self.get_sampler(args, kwargs)
             image = self._render(double_check, num_threads, image, sampler)
             sto.id = self.imj*self.nimx + self.imi
             sto.result = image
@@ -2405,11 +2405,11 @@
         except AttributeError:
             pass
 
-    def get_sampler(self, args):
+    def get_sampler(self, args, kwargs):
         if self.interpolated:
-            sampler = InterpolatedProjectionSampler(*args)
+            sampler = InterpolatedProjectionSampler(*args, **kwargs)
         else:
-            sampler = ProjectionSampler(*args)
+            sampler = ProjectionSampler(*args, **kwargs)
         return sampler
 
     def initialize_source(self):
@@ -2420,12 +2420,13 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
             (-self.width[0]/2., self.width[0]/2.,
              -self.width[1]/2., self.width[1]/2.),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     def finalize_image(self,image):
         ds = self.ds
@@ -2506,9 +2507,9 @@
 
         image = self.new_image()
 
-        args = self.get_sampler_args(image)
+        args, kwargs = self.get_sampler_args(image)
 
-        sampler = self.get_sampler(args)
+        sampler = self.get_sampler(args, kwargs)
 
         self.initialize_source()
 
@@ -2559,7 +2560,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'spherical'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -2639,7 +2640,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'stereo-spherical'}
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, transparent=False):
@@ -2649,16 +2650,16 @@
 
         self.disparity_s = self.disparity
         image1 = self.new_image()
-        args1 = self.get_sampler_args(image1)
-        sampler1 = self.get_sampler(args1)
+        args1, kwargs1 = self.get_sampler_args(image1)
+        sampler1 = self.get_sampler(args1, kwargs1)
         self.initialize_source()
         image1 = self._render(double_check, num_threads,
                               image1, sampler1, '(Left) ')
 
         self.disparity_s = -self.disparity
         image2 = self.new_image()
-        args2 = self.get_sampler_args(image2)
-        sampler2 = self.get_sampler(args2)
+        args2, kwargs2 = self.get_sampler_args(image2)
+        sampler2 = self.get_sampler(args2, kwargs2)
         self.initialize_source()
         image2 = self._render(double_check, num_threads,
                               image2, sampler2, '(Right)')


https://bitbucket.org/yt_analysis/yt/commits/bd1c4713e9e1/
Changeset:   bd1c4713e9e1
Branch:      yt
User:        MatthewTurk
Date:        2015-10-17 20:25:02+00:00
Summary:     Fix indexing for plane parallel.  Add assert.
Affected #:  1 file

diff -r 7f2b8807626c0b0143e5188642a45ceac0d82566 -r bd1c4713e9e1f8114d964e500d33e4516d791508 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -233,11 +233,10 @@
     cdef np.float64_t px, py
     px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
     py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-    # Note we skip 1,0,*
-    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,1,0]*py + im.vp_pos[1,1,0]
-    v_pos[1] = im.vp_pos[0,0,1]*px + im.vp_pos[0,1,1]*py + im.vp_pos[1,1,1]
-    v_pos[2] = im.vp_pos[0,0,2]*px + im.vp_pos[0,1,2]*py + im.vp_pos[1,1,2]
     # atleast_3d will add to beginning and end
+    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,3,0]*py + im.vp_pos[0,9,0]
+    v_pos[1] = im.vp_pos[0,1,0]*px + im.vp_pos[0,4,0]*py + im.vp_pos[0,10,0]
+    v_pos[2] = im.vp_pos[0,2,0]*px + im.vp_pos[0,5,0]*py + im.vp_pos[0,11,0]
     for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
 
 @cython.boundscheck(False)
@@ -280,6 +279,8 @@
             self.extent_function = calculate_extent_plane_parallel
             self.vector_function = generate_vector_info_plane_parallel
         else:
+            assert(vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0])
+            assert(vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1])
             self.extent_function = calculate_extent_null
             self.vector_function = generate_vector_info_null
         self.sampler = NULL


https://bitbucket.org/yt_analysis/yt/commits/9ed36c76aac9/
Changeset:   9ed36c76aac9
Branch:      yt
User:        MatthewTurk
Date:        2015-10-21 18:34:54+00:00
Summary:     This fixes fisheye, and adds better errors.
Affected #:  2 files

diff -r bd1c4713e9e1f8114d964e500d33e4516d791508 -r 9ed36c76aac91ce04714c97d22e74e88e43691ac yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -279,8 +279,13 @@
             self.extent_function = calculate_extent_plane_parallel
             self.vector_function = generate_vector_info_plane_parallel
         else:
-            assert(vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0])
-            assert(vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1])
+            if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
+               not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
+                print "Bad lense shape / direction for %s" % (self.lens_type)
+                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                    vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
+                    vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
+                raise RuntimeError
             self.extent_function = calculate_extent_null
             self.vector_function = generate_vector_info_null
         self.sampler = NULL

diff -r bd1c4713e9e1f8114d964e500d33e4516d791508 -r 9ed36c76aac91ce04714c97d22e74e88e43691ac yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -463,19 +463,19 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]**2, 1,
+            np.zeros((camera.resolution[0], camera.resolution[0],
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
         vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
-        vp.shape = (camera.resolution[0]**2, 1, 3)
+        vp.shape = (camera.resolution[0], camera.resolution[0], 3)
         vp = vp.dot(np.linalg.inv(self.rotation_matrix))
         vp *= self.radius
         uv = np.ones(3, dtype='float64')
-        positions = np.ones((camera.resolution[0]**2, 1, 3),
-                            dtype='float64') * camera.position
+        positions = np.ones((camera.resolution[0], camera.resolution[0], 3),
+            dtype='float64') * camera.position
 
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba


https://bitbucket.org/yt_analysis/yt/commits/f65cfa6e4a8a/
Changeset:   f65cfa6e4a8a
Branch:      yt
User:        ngoldbaum
Date:        2015-11-02 19:58:23+00:00
Summary:     Merged in MatthewTurk/yt (pull request #1809)

Refactor lens vector computation
Affected #:  10 files

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -47,6 +47,7 @@
                 include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/oct_visitors.pxd",
                          "yt/geometry/grid_container.pxd",

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -20,17 +20,14 @@
 cimport kdtree_utils
 
 cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
+    np.float64_t[:,:,:] vp_pos
+    np.float64_t[:,:,:] vp_dir
     np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
+    np.float64_t[:,:,:] image
+    np.float64_t[:,:] zbuffer
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
     np.float64_t *x_vec
     np.float64_t *y_vec
 
@@ -43,19 +40,29 @@
                 int index[3],
                 void *data) nogil
 
+ctypedef void calculate_extent_function(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil
+
+cdef calculate_extent_function calculate_extent_plane_parallel
+
+ctypedef void generate_vector_info_function(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
+
+cdef generate_vector_info_function generate_vector_info_plane_parallel
+cdef generate_vector_info_function generate_vector_info_null
 
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
-
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
-
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil
+    cdef public object lens_type
+    cdef calculate_extent_function *extent_function
+    cdef generate_vector_info_function *vector_function
 
     cdef void setup(self, PartitionedGrid pg)
 

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -17,7 +17,7 @@
 cimport numpy as np
 cimport cython
 #cimport healpix_interface
-from libc.stdlib cimport malloc, free, abs
+from libc.stdlib cimport malloc, calloc, free, abs
 from libc.math cimport exp, floor, log2, \
     lrint, fabs, atan, atan2, asin, cos, sin, sqrt
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -174,6 +174,85 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void calculate_extent_plane_parallel(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    # We do this for all eight corners
+    cdef np.float64_t temp
+    cdef np.float64_t *edges[2]
+    cdef np.float64_t cx, cy
+    cdef np.float64_t extrema[4]
+    cdef int i, j, k
+    edges[0] = vc.left_edge
+    edges[1] = vc.right_edge
+    extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                # This should rotate it into the vector plane
+                temp  = edges[i][0] * image.x_vec[0]
+                temp += edges[j][1] * image.x_vec[1]
+                temp += edges[k][2] * image.x_vec[2]
+                if temp < extrema[0]: extrema[0] = temp
+                if temp > extrema[1]: extrema[1] = temp
+                temp  = edges[i][0] * image.y_vec[0]
+                temp += edges[j][1] * image.y_vec[1]
+                temp += edges[k][2] * image.y_vec[2]
+                if temp < extrema[2]: extrema[2] = temp
+                if temp > extrema[3]: extrema[3] = temp
+    cx = cy = 0.0
+    for i in range(3):
+        cx += image.center[i] * image.x_vec[i]
+        cy += image.center[i] * image.y_vec[i]
+    rv[0] = lrint((extrema[0] - cx - image.bounds[0])/image.pdx)
+    rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
+    rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
+    rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
+
+# We do this for a bunch of lenses.  Fallback is to grab them from the vector
+# info supplied.
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void calculate_extent_null(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    rv[0] = 0
+    rv[1] = image.nv[0]
+    rv[2] = 0
+    rv[3] = image.nv[1]
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void generate_vector_info_plane_parallel(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    cdef np.float64_t px, py
+    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+    # atleast_3d will add to beginning and end
+    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,3,0]*py + im.vp_pos[0,9,0]
+    v_pos[1] = im.vp_pos[0,1,0]*px + im.vp_pos[0,4,0]*py + im.vp_pos[0,10,0]
+    v_pos[2] = im.vp_pos[0,2,0]*px + im.vp_pos[0,5,0]*py + im.vp_pos[0,11,0]
+    for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+cdef void generate_vector_info_null(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    for i in range(3):
+        # Here's a funny thing: we use vi here because our *image* will be
+        # flattened.  That means that im.nv will be a better one-d offset,
+        # since vp_pos has funny strides.
+        v_pos[i] = im.vp_pos[vi, vj, i]
+        v_dir[i] = im.vp_dir[vi, vj, i]
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
@@ -181,8 +260,8 @@
 
 cdef class ImageSampler:
     def __init__(self,
-                  np.ndarray vp_pos,
-                  np.ndarray vp_dir,
+                  np.float64_t[:,:,:] vp_pos,
+                  np.float64_t[:,:,:] vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
@@ -190,91 +269,49 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   *args, **kwargs):
-        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
-        cdef ImageContainer *imagec = self.image
-        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
+        cdef np.float64_t[:,:] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
+        if zbuffer is None:
+            zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+        self.lens_type = kwargs.pop("lens_type", None)
+        if self.lens_type == "plane-parallel":
+            self.extent_function = calculate_extent_plane_parallel
+            self.vector_function = generate_vector_info_plane_parallel
+        else:
+            if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
+               not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
+                print "Bad lense shape / direction for %s" % (self.lens_type)
+                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                    vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
+                    vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
+                raise RuntimeError
+            self.extent_function = calculate_extent_null
+            self.vector_function = generate_vector_info_null
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
-        # de-allocation from reference counts.
-        self.avp_pos = vp_pos
-        self.avp_dir = vp_dir
+        # de-allocation from reference counts.  Note that we do this to the
+        # "atleast_3d" versions.  Also, note that we re-assign the input
+        # arguments.
+        self.image.vp_pos = vp_pos
+        self.image.vp_dir = vp_dir
+        self.image.image = self.aimage = image
         self.acenter = center
-        self.aimage = image
+        self.image.center = <np.float64_t *> center.data
         self.ax_vec = x_vec
+        self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
-        self.azbuffer = zbuffer
-        imagec.vp_pos = <np.float64_t *> vp_pos.data
-        imagec.vp_dir = <np.float64_t *> vp_dir.data
-        imagec.center = <np.float64_t *> center.data
-        imagec.image = <np.float64_t *> image.data
-        imagec.x_vec = <np.float64_t *> x_vec.data
-        imagec.y_vec = <np.float64_t *> y_vec.data
-        imagec.zbuffer = NULL
-        if zbuffer is not None:
-            imagec.zbuffer = <np.float64_t *> zbuffer.data
-        imagec.nv[0] = image.shape[0]
-        imagec.nv[1] = image.shape[1]
-        for i in range(4): imagec.bounds[i] = bounds[i]
-        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
-        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
+        self.image.y_vec = <np.float64_t *> y_vec.data
+        self.image.zbuffer = zbuffer
+        self.image.nv[0] = image.shape[0]
+        self.image.nv[1] = image.shape[1]
+        for i in range(4): self.image.bounds[i] = bounds[i]
+        self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
+        self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
         for i in range(3):
-            imagec.vp_strides[i] = vp_pos.strides[i] / 8
-            imagec.im_strides[i] = image.strides[i] / 8
             self.width[i] = width[i]
 
-        if vp_dir.ndim > 1:
-            for i in range(3):
-                imagec.vd_strides[i] = vp_dir.strides[i] / 8
-        elif vp_pos.ndim == 1:
-            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
-        else:
-            raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv):
-        # Extrema need to be re-centered
-        cdef np.float64_t cx, cy
-        cdef ImageContainer *im = self.image
-        cdef int i
-        cx = cy = 0.0
-        for i in range(3):
-            cx += im.center[i] * im.x_vec[i]
-            cy += im.center[i] * im.y_vec[i]
-        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
-        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
-        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
-        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil:
-        # We do this for all eight corners
-        cdef np.float64_t temp
-        cdef np.float64_t *edges[2]
-        edges[0] = vc.left_edge
-        edges[1] = vc.right_edge
-        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
-        cdef int i, j, k
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    # This should rotate it into the vector plane
-                    temp  = edges[i][0] * self.image.x_vec[0]
-                    temp += edges[j][1] * self.image.x_vec[1]
-                    temp += edges[k][2] * self.image.x_vec[2]
-                    if temp < extrema[0]: extrema[0] = temp
-                    if temp > extrema[1]: extrema[1] = temp
-                    temp  = edges[i][0] * self.image.y_vec[0]
-                    temp += edges[j][1] * self.image.y_vec[1]
-                    temp += edges[k][2] * self.image.y_vec[2]
-                    if temp < extrema[2]: extrema[2] = temp
-                    if temp > extrema[3]: extrema[3] = temp
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -282,7 +319,7 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef int vi, vj, hit, i, j, k, ni, nj, nn, xi, yi
         cdef np.int64_t offset
         cdef np.int64_t iter[4]
         cdef VolumeContainer *vc = pg.container
@@ -292,83 +329,43 @@
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.float64_t rgba[6]
-        cdef np.float64_t extrema[4]
         cdef np.float64_t max_t
         hit = 0
         cdef np.int64_t nx, ny, size
-        if im.vd_strides[0] == -1:
-            self.calculate_extent(extrema, vc)
-            self.get_start_stop(extrema, iter)
-            iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
-            iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
-            iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
-            iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
-            nx = (iter[1] - iter[0])
-            ny = (iter[3] - iter[2])
-            size = nx * ny
-        else:
-            nx = im.nv[0]
-            ny = 1
-            iter[0] = iter[1] = iter[2] = iter[3] = 0
-            size = nx
+        self.extent_function(self.image, vc, iter)
+        iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+        iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+        iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+        iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
+        nx = (iter[1] - iter[0])
+        ny = (iter[3] - iter[2])
+        size = nx * ny
         cdef ImageAccumulator *idata
-        cdef np.float64_t px, py
         cdef np.float64_t width[3]
+        cdef int use_vec, max_i
         for i in range(3):
             width[i] = self.width[i]
-        if im.vd_strides[0] == -1:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="static",chunksize=1):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    # Dynamically calculate the position
-                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                    for i in range(Nch): idata.rgba[i] = im.image[i + offset]
-                    if im.zbuffer != NULL:
-                        max_t = im.zbuffer[im.nv[0] * vi + vj]
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + offset] = idata.rgba[i]
-                free(idata)
-                free(v_pos)
-        else:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                # If we do not have a simple image plane, we have to cast all
-                # our rays 
-                for j in prange(size, schedule="dynamic", chunksize=100):
-                    offset = j * 3
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                    if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
-                        continue
-                    # Note that for Nch != 3 we need a different offset into
-                    # the image object than for the vectors!
-                    for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
-                    if im.zbuffer != NULL:
-                        max_t = fclip(im.zbuffer[j], 0.0, 1.0)
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
-                free(v_dir)
-                free(idata)
-                free(v_pos)
+        with nogil, parallel(num_threads = num_threads):
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size, schedule="static", chunksize=100):
+                vj = j % ny
+                vi = (j - vj) / ny + iter[0]
+                vj = vj + iter[2]
+                # Dynamically calculate the position
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(Nch):
+                    idata.rgba[i] = im.image[vi, vj, i]
+                max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
+                walk_volume(vc, v_pos, v_dir, self.sampler,
+                            (<void *> idata), NULL, max_t)
+                for i in range(Nch):
+                    im.image[vi, vj, i] = idata.rgba[i]
+            free(idata)
+            free(v_pos)
+            free(v_dir)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -75,53 +75,25 @@
         size = nx * ny
         data = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
-        if im.vd_strides[0] == -1:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny
-                vj = vj
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = im.vp_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-        else:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = v_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-            free(v_dir)
+        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        for j in range(size):
+            vj = j % ny
+            vi = (j - vj) / ny
+            vj = vj
+            self.vector_function(im, vi, vj, width, v_dir, v_pos)
+            for i in range(3):
+                ray.org[i] = v_pos[i]
+                ray.dir[i] = v_dir[i]
+            ray.tnear = 0.0
+            ray.tfar = 1e37
+            ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.mask = -1
+            ray.time = 0
+            rtcs.rtcIntersect(scene.scene_i, ray)
+            data[j] = ray.time
+        self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+        free(v_pos)
+        free(v_dir)

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -76,6 +76,7 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/utilities/lib/amr_kdtools.pxd",
                          "yt/utilities/lib/ContourFinding.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
@@ -178,7 +179,8 @@
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/mesh_traversal.pxd"])
+                             depends=["yt/utilities/lib/mesh_traversal.pxd",
+                                      "yt/utilities/lib/grid_traversal.pxd"])
         config.add_extension("mesh_samplers",
                              ["yt/utilities/lib/mesh_samplers.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -104,7 +104,7 @@
                  x_vec=camera.unit_vectors[0],
                  y_vec=camera.unit_vectors[1],
                  width=np.array(camera.width, dtype='float64'),
-                 image=image)
+                 image=image, lens_type="plane-parallel")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -144,7 +144,7 @@
 
     def new_image(self, camera):
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
+            np.zeros((camera.resolution[0], camera.resolution[1], 
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
@@ -204,8 +204,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="perspective")
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -266,8 +266,8 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]*camera.resolution[1], 1,
-                      4), dtype='float64', order='C'),
+            np.zeros((camera.resolution[0], camera.resolution[1], 4),
+                     dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
@@ -294,9 +294,9 @@
         vectors_comb = np.vstack([vectors_left, vectors_right])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params =\
             dict(vp_pos=positions_comb,
@@ -306,8 +306,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="stereo-perspective")
 
         return sampler_params
 
@@ -476,19 +476,19 @@
     def new_image(self, camera):
         """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
-            np.zeros((camera.resolution[0]**2, 1,
+            np.zeros((camera.resolution[0], camera.resolution[0],
                       4), dtype='float64', order='C'),
             info={'imtype': 'rendering'})
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
         vp = -arr_fisheye_vectors(camera.resolution[0], self.fov)
-        vp.shape = (camera.resolution[0]**2, 1, 3)
+        vp.shape = (camera.resolution[0], camera.resolution[0], 3)
         vp = vp.dot(np.linalg.inv(self.rotation_matrix))
         vp *= self.radius
         uv = np.ones(3, dtype='float64')
-        positions = np.ones((camera.resolution[0]**2, 1, 3),
-                            dtype='float64') * camera.position
+        positions = np.ones((camera.resolution[0], camera.resolution[0], 3),
+            dtype='float64') * camera.position
 
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
@@ -503,8 +503,8 @@
                  x_vec=uv,
                  y_vec=uv,
                  width=np.zeros(3, dtype='float64'),
-                 image=image
-                 )
+                 image=image,
+                 lens_type="fisheye")
 
         return sampler_params
 
@@ -606,9 +606,9 @@
             image = self.new_image(camera)
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions,
@@ -618,7 +618,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type="spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):
@@ -731,9 +732,9 @@
         vectors_comb = np.vstack([vectors, vectors])
         positions_comb = np.vstack([positions_left, positions_right])
 
-        image.shape = (camera.resolution[0]*camera.resolution[1], 1, 4)
-        vectors_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
-        positions_comb.shape = (camera.resolution[0]*camera.resolution[1], 1, 3)
+        image.shape = (camera.resolution[0], camera.resolution[1], 4)
+        vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
+        positions_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
 
         sampler_params = dict(
             vp_pos=positions_comb,
@@ -743,7 +744,8 @@
             x_vec=dummy,
             y_vec=dummy,
             width=np.zeros(3, dtype="float64"),
-            image=image)
+            image=image,
+            lens_type = "stereo-spherical")
         return sampler_params
 
     def set_viewpoint(self, camera):

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -598,16 +598,16 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
                 (-self.width[0]/2.0, self.width[0]/2.0,
                  -self.width[1]/2.0, self.width[1]/2.0),
                 image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     star_trees = None
-    def get_sampler(self, args):
-        kwargs = {}
+    def get_sampler(self, args, kwargs):
         if self.star_trees is not None:
             kwargs = {'star_list': self.star_trees}
         if self.use_light:
@@ -781,8 +781,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1248,14 +1248,14 @@
         positions = self.ds.arr(positions, input_units="code_length")
 
         dummy = np.ones(3, dtype='float64')
-        image.shape = (self.resolution[0]*self.resolution[1],1,4)
+        image.shape = (self.resolution[0], self.resolution[1],4)
 
         args = (positions, vectors, self.back_center,
                 (0.0,1.0,0.0,1.0),
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'perspective'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -1430,7 +1430,7 @@
         if self._needs_tf:
             args += (self.transfer_function,)
         args += (self.sub_samples,)
-        return args
+        return args, {}
 
     def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
@@ -1492,8 +1492,8 @@
         if num_threads is None:
             num_threads=get_num_threads()
         image = self.new_image()
-        args = self.get_sampler_args(image)
-        sampler = self.get_sampler(args)
+        args, kwargs = self.get_sampler_args(image)
+        sampler = self.get_sampler(args, kwargs)
         self.volume.initialize_source()
         image = ImageArray(self._render(double_check, num_threads, 
                                         image, sampler),
@@ -1653,7 +1653,7 @@
                 image, uv, uv,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {}
 
 
     def finalize_image(self, image):
@@ -1799,8 +1799,8 @@
             mylog.debug('Working on: %i %i' % (self.imi, self.imj))
             self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
             image = self.new_image()
-            args = self.get_sampler_args(image)
-            sampler = self.get_sampler(args)
+            args, kwargs = self.get_sampler_args(image)
+            sampler = self.get_sampler(args, kwargs)
             image = self._render(double_check, num_threads, image, sampler)
             sto.id = self.imj*self.nimx + self.imi
             sto.result = image
@@ -2405,11 +2405,11 @@
         except AttributeError:
             pass
 
-    def get_sampler(self, args):
+    def get_sampler(self, args, kwargs):
         if self.interpolated:
-            sampler = InterpolatedProjectionSampler(*args)
+            sampler = InterpolatedProjectionSampler(*args, **kwargs)
         else:
-            sampler = ProjectionSampler(*args)
+            sampler = ProjectionSampler(*args, **kwargs)
         return sampler
 
     def initialize_source(self):
@@ -2420,12 +2420,13 @@
 
     def get_sampler_args(self, image):
         rotp = np.concatenate([self.orienter.inv_mat.ravel('F'), self.back_center.ravel()])
-        args = (rotp, self.box_vectors[2], self.back_center,
+        args = (np.atleast_3d(rotp), np.atleast_3d(self.box_vectors[2]),
+                self.back_center,
             (-self.width[0]/2., self.width[0]/2.,
              -self.width[1]/2., self.width[1]/2.),
             image, self.orienter.unit_vectors[0], self.orienter.unit_vectors[1],
                 np.array(self.width, dtype='float64'), self.sub_samples)
-        return args
+        return args, {'lens_type': 'plane-parallel'}
 
     def finalize_image(self,image):
         ds = self.ds
@@ -2506,9 +2507,9 @@
 
         image = self.new_image()
 
-        args = self.get_sampler_args(image)
+        args, kwargs = self.get_sampler_args(image)
 
-        sampler = self.get_sampler(args)
+        sampler = self.get_sampler(args, kwargs)
 
         self.initialize_source()
 
@@ -2559,7 +2560,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'spherical'}
 
     def _render(self, double_check, num_threads, image, sampler):
         ncells = sum(b.source_mask.size for b in self.volume.bricks)
@@ -2639,7 +2640,7 @@
                 image, dummy, dummy,
                 np.zeros(3, dtype='float64'),
                 self.transfer_function, self.sub_samples)
-        return args
+        return args, {'lens_type': 'stereo-spherical'}
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False,
                  num_threads = 0, transparent=False):
@@ -2649,16 +2650,16 @@
 
         self.disparity_s = self.disparity
         image1 = self.new_image()
-        args1 = self.get_sampler_args(image1)
-        sampler1 = self.get_sampler(args1)
+        args1, kwargs1 = self.get_sampler_args(image1)
+        sampler1 = self.get_sampler(args1, kwargs1)
         self.initialize_source()
         image1 = self._render(double_check, num_threads,
                               image1, sampler1, '(Left) ')
 
         self.disparity_s = -self.disparity
         image2 = self.new_image()
-        args2 = self.get_sampler_args(image2)
-        sampler2 = self.get_sampler(args2)
+        args2, kwargs2 = self.get_sampler_args(image2)
+        sampler2 = self.get_sampler(args2, kwargs2)
         self.initialize_source()
         image2 = self._render(double_check, num_threads,
                               image2, sampler2, '(Right)')

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -17,17 +17,17 @@
 def new_mesh_sampler(camera, render_source):
     params = camera._get_sampler_params(render_source)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
-        params['image'],
+        np.atleast_3d(params['image']),
         params['x_vec'],
         params['y_vec'],
         params['width'],
     )
-
-    sampler = mesh_traversal.MeshSampler(*args)
+    kwargs = {'lens_type': params['lens_type']}
+    sampler = mesh_traversal.MeshSampler(*args, **kwargs)
     return sampler
 
 
@@ -37,8 +37,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -48,10 +48,12 @@
         params['transfer_function'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
         args[4][:] = render_source.zbuffer.rgba[:]
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
 
     sampler = VolumeRenderSampler(*args, **kwargs)
     return sampler
@@ -62,8 +64,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -72,10 +74,12 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = InterpolatedProjectionSampler(*args)
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
+    sampler = InterpolatedProjectionSampler(*args, **kwargs)
     return sampler
 
 
@@ -84,8 +88,8 @@
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
-        params['vp_pos'],
-        params['vp_dir'],
+        np.atleast_3d(params['vp_pos']),
+        np.atleast_3d(params['vp_dir']),
         params['center'],
         params['bounds'],
         params['image'],
@@ -94,10 +98,12 @@
         params['width'],
         params['num_samples'],
     )
-    kwargs = {}
+    kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-    sampler = ProjectionSampler(*args)
+    else:
+        kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
+    sampler = ProjectionSampler(*args, **kwargs)
     return sampler
 
 

diff -r d19ea599033ec9e76ff3bcfd6634ea675b94d7f5 -r f65cfa6e4a8a54f1f89e0f64d1e4c1837c417179 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -20,12 +20,12 @@
 from yt.utilities.exceptions import YTSceneFieldNotFound
 
 
-def create_scene(data_source, field=None):
-    r""" Set up a scene object with sensible defaults for use in volume 
+def create_scene(data_source, field=None, lens_type='plane-parallel'):
+    r""" Set up a scene object with sensible defaults for use in volume
     rendering.
 
     A helper function that creates a default camera view, transfer
-    function, and image size. Using these, it returns an instance 
+    function, and image size. Using these, it returns an instance
     of the Scene class, allowing one to further modify their rendering.
 
     This function is the same as volume_render() except it doesn't render
@@ -37,11 +37,16 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------
@@ -60,17 +65,18 @@
     if field is None:
         field = data_source.ds.default_field
         if field not in data_source.ds.derived_field_list:
-            raise YTSceneFieldNotFound("""Could not find field '%s' in %s. 
-                  Please specify a field in create_scene()""" % \
-                  (field, data_source.ds))
+            raise YTSceneFieldNotFound("""Could not find field '%s' in %s.
+                  Please specify a field in create_scene()""" % (field, data_source.ds))
         mylog.info('Setting default field to %s' % field.__repr__())
 
     vol = VolumeSource(data_source, field=field)
     sc.add_source(vol)
-    sc.camera = Camera(data_source)
+    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
     return sc
 
-def volume_render(data_source, field=None, fname=None, sigma_clip=None):
+
+def volume_render(data_source, field=None, fname=None, sigma_clip=None,
+                  lens_type='plane-parallel'):
     r""" Create a simple volume rendering of a data source.
 
     A helper function that creates a default camera view, transfer
@@ -84,10 +90,10 @@
         This is the source to be rendered, which can be any arbitrary yt
         3D object
     field: string, tuple, optional
-        The field to be rendered. If unspecified, this will use the 
+        The field to be rendered. If unspecified, this will use the
         default_field for your dataset's frontend--usually ('gas', 'density').
-        A default transfer function will be built that spans the range of 
-        values for that given field, and the field will be logarithmically 
+        A default transfer function will be built that spans the range of
+        values for that given field, and the field will be logarithmically
         scaled if the field_info object specifies as such.
     fname: string, optional
         If specified, the resulting rendering will be saved to this filename
@@ -97,6 +103,11 @@
         using a threshold based on sigma_clip multiplied by the standard
         deviation of the pixel values. Recommended values are between 2 and 6.
         Default: None
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
 
     Returns
     -------

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because you have the notification service enabled for the
account addressed by this email.



More information about the yt-svn mailing list