[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Mon Nov 2 12:58:57 PST 2015


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/cf01d56ef1bb/
Changeset:   cf01d56ef1bb
Branch:      yt
User:        atmyers
Date:        2015-10-20 21:56:31+00:00
Summary:     fixing auto=True option for Camera constructor
Affected #:  1 file

diff -r 4148cdf287775b38e7ad9807459680cb1370daf1 -r cf01d56ef1bb4977facede05ea90d0f8750043e5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -198,7 +198,7 @@
 
     def set_defaults_from_data_source(self, data_source):
         """Resets the camera attributes to their default values"""
-        self.position = data_source.pf.domain_right_edge
+        self._position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
         (xmi, xma), (ymi, yma), (zmi, zma) = \
@@ -220,8 +220,8 @@
         if not isinstance(focus, YTArray):
             focus = self.pf.arr(focus, input_units="code_length")
 
-        self.set_width(width)
-        self.focus = focus
+        self._width = width
+        self._focus = focus
 
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=False)


https://bitbucket.org/yt_analysis/yt/commits/19538c5ef25f/
Changeset:   19538c5ef25f
Branch:      yt
User:        atmyers
Date:        2015-10-21 06:01:55+00:00
Summary:     adding an explanatory note
Affected #:  1 file

diff -r cf01d56ef1bb4977facede05ea90d0f8750043e5 -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -198,7 +198,8 @@
 
     def set_defaults_from_data_source(self, data_source):
         """Resets the camera attributes to their default values"""
-        self._position = data_source.pf.domain_right_edge
+        
+        position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
         (xmi, xma), (ymi, yma), (zmi, zma) = \
@@ -220,8 +221,12 @@
         if not isinstance(focus, YTArray):
             focus = self.pf.arr(focus, input_units="code_length")
 
+        # We can't use the property setters yet, since they rely on attributes
+        # that will not be set up until the base class initializer is called.
+        # See Issue #1131.
         self._width = width
         self._focus = focus
+        self._position = position
 
         super(Camera, self).__init__(self.focus - self.position,
                                      self.north_vector, steady_north=False)
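
For context, a minimal sketch of the pitfall the note above describes, using
hypothetical, simplified names (not part of this changeset): a property setter
that reads state created by the base-class initializer cannot be used before
that initializer has run.

    class Orientation(object):
        def __init__(self, normal_vector):
            self.normal_vector = normal_vector

    class Camera(Orientation):
        def __init__(self, position, focus):
            # Assigning through the properties here would invoke the setters
            # below, which read self.normal_vector -- state that does not
            # exist until Orientation.__init__ has run.  Writing the
            # underlying attributes directly sidesteps the problem.
            self._position = position
            self._focus = focus
            super(Camera, self).__init__(self._focus - self._position)

        @property
        def position(self):
            return self._position

        @position.setter
        def position(self, value):
            self._position = value
            # Safe only after the base-class initializer has completed.
            self.normal_vector = self._focus - self._position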


https://bitbucket.org/yt_analysis/yt/commits/9f560b7f279c/
Changeset:   9f560b7f279c
Branch:      yt
User:        ngoldbaum
Date:        2015-11-02 20:57:44+00:00
Summary:     Merging
Affected #:  29 files

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -40,7 +40,7 @@
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
 sc.render()
-sc.save("v1.png")
+sc.save("v1.png", sigma_clip=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
@@ -49,14 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v2.png")
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render(sigma_clip=6.0)
-sc.save("v3.png")
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 #
 ## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -64,13 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v4.png")
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 #
 ## This looks pretty good, now let's go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render(sigma_clip=6.0)
-sc.render("v5.png")
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # This looks great!
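
The pattern applied throughout this changeset moves sigma_clip from
Scene.render() to Scene.save(), so that a single (expensive) render can be
saved at several contrast levels.  A minimal before/after sketch, assuming a
scene sc has already been set up:

    # Old pattern: clipping was baked into the render call, so each
    # contrast level required a full re-render:
    #     sc.render(sigma_clip=6.0)
    #     sc.save("image.png")

    # New pattern: render once, then save with whatever clipping you like.
    sc.render()
    sc.save("image_noclip.png")
    sc.save("image_clip6.png", sigma_clip=6.0)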

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,18 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -209,6 +209,8 @@
 
 .. yt_cookbook:: custom_camera_volume_rendering.py
 
+.. _cookbook-custom-transfer-function:
+
 Volume Rendering with a Custom Transfer Function
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -218,6 +220,17 @@
 
 .. yt_cookbook:: custom_transfer_function_volume_rendering.py
 
+.. _cookbook-sigma_clip:
+
+Volume Rendering with Sigma Clipping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we output several images with different values of sigma_clip
+set in order to change the contrast of the resulting image.  See 
+:ref:`sigma_clip` for more information.
+
+.. yt_cookbook:: sigma_clip.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,5 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(sigma_clip=4)
-sc.save('custom.png')
+sc.render()
+sc.save('custom.png', sigma_clip=4)

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,8 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v1.png")
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -23,31 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v2.png")
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set grey_opacity to True.  This should make the inner portions
 # start to be obscured.
 
 tf.grey_opacity = True
-sc.render(sigma_clip=6.0)
-sc.save("v3.png")
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v4.png")
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v5.png")
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -55,15 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v6.png")
+sc.render()
+sc.save("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-sc.render(sigma_clip=6.0)
-sc.save("v7.png")
+sc.render()
+sc.save("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/sigma_clip.py
--- /dev/null
+++ b/doc/source/cookbook/sigma_clip.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
+
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+
+# Render and save output images with different levels of sigma clipping.
+# Sigma clipping removes the highest intensity pixels in a volume render, 
+# which affects the overall contrast of the image.
+sc = yt.create_scene(ds, field=('gas', 'density'))
+sc.render()
+sc.save('clip_0.png')
+sc.save('clip_2.png', sigma_clip=2)
+sc.save('clip_4.png', sigma_clip=4)
+sc.save('clip_6.png', sigma_clip=6)

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -6,5 +6,5 @@
 # Create a volume rendering, which will determine data bounds, use the first
 # acceptable field in the field_list, and set up a default transfer function.
 
-# This will save a file named 'data0043_density_volume_rendered.png' to disk.
+# This will save a file named 'data0043_Render_density.png' to disk.
 im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,8 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_plane-parallel.png')
+sc.render()
+sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -51,8 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_perspective.png')
+sc.render()
+sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -67,8 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_stereo-perspective.png')
+sc.render()
+sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -82,8 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_fisheye.png')
+sc.render()
+sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -95,13 +95,12 @@
 cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
 cam.switch_orientation(normal_vector=normal_vector,
                        north_vector=north_vector)
-# In (stereo)spherical camera, width[0] specifies the radius of the sphere (the
-# depth of your line of sight), while width[1] or width[2] are not used.
-cam.set_width(ds.domain_width * 0.5)
+# In the (stereo)spherical camera, the camera width is not used, since the
+# entire volume will be rendered.
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_spherical.png')
+sc.render()
+sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,10 +110,11 @@
 cam.position = ds.arr([0.4, 0.5, 0.5], 'code_length')
 cam.switch_orientation(normal_vector=normal_vector,
                        north_vector=north_vector)
-cam.set_width(ds.domain_width * 0.5)
+# In the (stereo)spherical camera, the camera width is not used, since the
+# entire volume will be rendered.
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_stereo-spherical.png')
+sc.render()
+sc.save('lens_stereo-spherical.png', sigma_clip=6.0)
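
The same scene can also be rendered through several lenses without rebuilding
the camera each time.  A hedged sketch, assuming Camera.set_lens accepts the
same lens_type strings used above:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds, 'density')

    # Swap lenses on the scene's default camera and save one image per lens.
    for lens_type in ['plane-parallel', 'perspective', 'fisheye']:
        sc.camera.set_lens(lens_type)
        sc.render()
        sc.save('lens_%s.png' % lens_type, sigma_clip=6.0)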

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -586,7 +586,8 @@
 
 See also :ref:`volume_rendering`.
 
-Here are the primary entry points:
+Here are the primary entry points and the main classes involved in the 
+Scene infrastructure:
 
 .. autosummary::
    :toctree: generated/
@@ -596,45 +597,46 @@
    ~yt.visualization.volume_rendering.off_axis_projection.off_axis_projection
    ~yt.visualization.volume_rendering.scene.Scene
    ~yt.visualization.volume_rendering.camera.Camera
-   ~yt.visualization.volume_rendering.lens.Lens
-   ~yt.visualization.volume_rendering.render_source.RenderSource
+   ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
 
-These objects set up the way the image looks:
+The different kinds of sources:
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.PlanckTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.ProjectionTransferFunction
-   ~yt.visualization.volume_rendering.transfer_functions.TransferFunction
+   ~yt.visualization.volume_rendering.render_source.RenderSource
+   ~yt.visualization.volume_rendering.render_source.VolumeSource
+   ~yt.visualization.volume_rendering.render_source.PointSource
+   ~yt.visualization.volume_rendering.render_source.LineSource
+   ~yt.visualization.volume_rendering.render_source.BoxSource
+   ~yt.visualization.volume_rendering.render_source.GridSource
+   ~yt.visualization.volume_rendering.render_source.CoordinateVectorSource
+   ~yt.visualization.volume_rendering.render_source.MeshSource
 
-There are also advanced objects for particular use cases:
+The different kinds of transfer functions:
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.camera.MosaicFisheyeCamera
-   ~yt.visualization.volume_rendering.camera.FisheyeCamera
-   ~yt.visualization.volume_rendering.camera.MosaicCamera
-   ~yt.visualization.volume_rendering.camera.plot_allsky_healpix
-   ~yt.visualization.volume_rendering.camera.PerspectiveCamera
-   ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
-   ~yt.visualization.volume_rendering.camera.StereoPairCamera
-
-Additional sources can be added to a scene:
+   ~yt.visualization.volume_rendering.transfer_functions.TransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.ProjectionTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.PlanckTransferFunction
+   ~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction
+   ~yt.visualization.volume_rendering.transfer_function_helper.TransferFunctionHelper
+ 
+The different kinds of lenses:
 
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.api.VolumeSource
-   ~yt.visualization.volume_rendering.api.PointSource
-   ~yt.visualization.volume_rendering.api.LineSource
-   ~yt.visualization.volume_rendering.api.BoxSource
-   ~yt.visualization.volume_rendering.api.GridSource
-   ~yt.visualization.volume_rendering.api.CoordinateVectorSource
-   ~yt.visualization.volume_rendering.render_source.MeshSource
+   ~yt.visualization.volume_rendering.lens.Lens
+   ~yt.visualization.volume_rendering.lens.PlaneParallelLens
+   ~yt.visualization.volume_rendering.lens.PerspectiveLens
+   ~yt.visualization.volume_rendering.lens.StereoPerspectiveLens
+   ~yt.visualization.volume_rendering.lens.FisheyeLens
+   ~yt.visualization.volume_rendering.lens.SphericalLens
+   ~yt.visualization.volume_rendering.lens.StereoSphericalLens
 
 Streamlining
 ^^^^^^^^^^^^

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -3,10 +3,18 @@
 3D Visualization and Volume Rendering
 =====================================
 
-yt has the ability to create 3D visualizations, using a process known as volume
-rendering.  Currently all of the rendering capabilities are implemented in
-software, requiring no specialized hardware. Optimized versions implemented
-with OpenGL and utilizing graphics processors are being actively developed.
+yt has the ability to create 3D visualizations using a process known as *volume
+rendering* (oftentimes abbreviated VR).  This volume rendering code differs 
+from the standard yt infrastructure for generating :ref:`simple-inspection` 
+in that it evaluates the radiative transfer equations through the volume with 
+user-defined transfer functions for each ray.  Thus it can accommodate both
+opaque and transparent structures appropriately.  Currently all of the 
+rendering capabilities are implemented in software, requiring no specialized 
+hardware. Optimized versions implemented with OpenGL and utilizing graphics 
+processors are being actively developed.
+
+Volume Rendering Introduction
+-----------------------------
 
 Constructing a 3D visualization is a process of describing the "scene" that
 will be rendered.  This includes the location of the viewing point (i.e., where
@@ -15,7 +23,7 @@
 on) and the components that will be rendered (render "sources," such as volume
 elements, lines, annotations, and opaque surfaces).  The 3D plotting
 infrastructure then develops a resultant image from this scene, which can be
-saved to a file or viewed inline.
+saved to a file or viewed inline.  
 
 By constructing the scene in this programmatic way, full control can be had
 over each component in the scene as well as the method by which the scene is
@@ -23,53 +31,336 @@
 as grid or continent lines, and then to render a production-quality
 visualization.  By changing the "lens" used, a single camera path can output
 images suitable for planetarium domes, immersive and head tracking systems
-(such as the Occulus Rift or recent "spherical" movie viewers such as the
-mobile YouTube app), as well as standard screens.
+(such as the Oculus Rift or recent "spherical" movie viewers such as the
+mobile YouTube app), as well as standard screens.  
 
 .. image:: _images/scene_diagram.svg
    :width: 50%
    :align: center
    :alt: Diagram of a 3D Scene
 
-In versions of yt prior to 3.3, the only volume rendering interface accessible
-was through the "camera" object.  This presented a number of problems,
-principle of which was the inability to describe new scene elements or to
-develop complex visualizations that were independent of the specific elements
-being rendered.  The new "scene" based interface present in yt 3.3 and beyond
-enables both more complex visualizations to be constructed as well as a new,
-more intuitive interface for very simple 3D visualizations.
+.. _scene-description:
+
+Volume Rendering Components
+---------------------------
+
+The Scene class and its subcomponents are organized as follows.  Indented
+objects *hang* off of their parent object.  
+
+* :ref:`Scene <scene>` - container object describing a volume and its contents
+    * :ref:`Sources <render-sources>` - objects to be rendered
+        * :ref:`VolumeSource <volume-sources>` - simulation volume tied to a dataset
+            * :ref:`TransferFunction <transfer_functions>` - mapping of simulation field values to color, brightness, and transparency
+        * :ref:`OpaqueSource <opaque-sources>` - Opaque structures like lines, dots, etc.
+        * :ref:`Annotations <volume_rendering_annotations>` - Annotated structures like grid cells, simulation boundaries, etc.
+    * :ref:`Camera <camera>` - object for rendering; consists of a location, focus, orientation, and resolution
+        * :ref:`Lens <lenses>` - object describing method for distributing rays through Sources
+
+.. _scene:
+
+Scene
+^^^^^
+
+The :class:`~yt.visualization.volume_rendering.scene.Scene`
+is the container class which encompasses the whole of the volume
+rendering interface.  At its base level, it describes an infinite volume, 
+with a series of 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
+hanging off of it that describe the contents
+of that volume.  It also contains a 
+:class:`~yt.visualization.volume_rendering.camera.Camera` for rendering that
+volume.  All of its classes can be
+accessed and modified as properties hanging off of the scene.
+The scene's most important functions are 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` for 
+casting rays through the scene and 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save` for saving the
+resulting rendered image to disk.
+
+The easiest way to create a scene with sensible defaults is to use the 
+functions:
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` 
+(creates the scene) or 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render`
+(creates the scene and then triggers ray tracing to produce an image).
+See the :ref:`annotated-vr-example` for details.
+
+.. _render-sources:
+
+RenderSources
+^^^^^^^^^^^^^
+
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
+comprise the contents of what is actually *rendered*.  One can add several 
+different RenderSources to a Scene and the ray-tracing step will pass rays
+through all of them to produce the final rendered image.  
+
+.. _volume-sources:
+
+VolumeSources
++++++++++++++
+
+:class:`~yt.visualization.volume_rendering.render_source.VolumeSource` objects
+are 3D :ref:`geometric-objects` of individual datasets placed into the scene 
+for rendering.  Each VolumeSource requires a 
+:ref:`TransferFunction <transfer_functions>` to describe how the fields in 
+the VolumeSource dataset produce different colors and brightnesses in the
+resulting image.
+
+.. _opaque-sources:
+
+OpaqueSources
++++++++++++++
+
+In addition to semi-transparent objects, fully opaque structures can be added 
+to a scene as 
+:class:`~yt.visualization.volume_rendering.render_source.OpaqueSource` objects 
+including 
+:class:`~yt.visualization.volume_rendering.render_source.LineSource` objects 
+and 
+:class:`~yt.visualization.volume_rendering.render_source.PointSource` objects.
+These are useful if you want to annotate locations or particles in an image, 
+or if you want to draw lines connecting different regions or
+vertices.  For instance, lines can be used to draw outlines of regions or
+continents.
+
+.. _volume_rendering_annotations:
+
+Annotations
++++++++++++
+
+Similar to OpaqueSources, annotations enable the user to highlight 
+certain information with opaque structures.  Examples include 
+:class:`~yt.visualization.volume_rendering.api.BoxSource`,
+:class:`~yt.visualization.volume_rendering.api.GridSource`, and
+:class:`~yt.visualization.volume_rendering.api.CoordinateVectorSource`.  These
+annotations will operate in data space and can draw boxes, grid information,
+and also provide a vector orientation within the image.
+
+For example scripts using these features, 
+see :ref:`cookbook-volume_rendering_annotations`.
+
+.. _transfer_functions:
+
+Transfer Functions
+^^^^^^^^^^^^^^^^^^
+
+A transfer function describes how rays that pass through the domain of a
+:class:`~yt.visualization.volume_rendering.render_source.VolumeSource` are
+mapped from simulation field values to color, brightness, and opacity in the
+resulting rendered image.  A transfer function consists of an array over 
+the x and y dimensions.  The x dimension typically represents field values in 
+your underlying dataset to which you want your rendering to be sensitive (e.g. 
+density from 1e20 to 1e23).  The y dimension consists of 4 channels for red, 
+green, blue, and alpha (opacity).  A transfer function starts with all zeros 
+for its y dimension values, implying that rays traversing the VolumeSource 
+will not show up at all in the final image.  However, you can add features to 
+the transfer function that will highlight certain field values in your 
+rendering.
+
+.. _transfer-function-helper:
+
+TransferFunctionHelper
+++++++++++++++++++++++
+
+Because good transfer functions can be difficult to generate, the 
+:class:`~yt.visualization.volume_rendering.transfer_function_helper.TransferFunctionHelper`
+exists in order to help create and modify transfer functions with smart 
+defaults for your datasets.  To see a full example on how to use this 
+interface, follow the annotated :ref:`transfer-function-helper-tutorial`.
+
+Color Transfer Functions
+++++++++++++++++++++++++
+
+A :class:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction`
+is the standard way to map dataset field values to colors, brightnesses, 
+and opacities in the rendered rays.  One can add discrete features to the
+transfer function, which will render isocontours in the field data and 
+works well for visualizing nested structures in a simulation.  Alternatively,
+one can add continuous features to the transfer function, which tends to 
+produce better results for most datasets.
+
+In order to modify a 
+:class:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction`
+use 
+:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_layers`,
+which will add evenly spaced isocontours along the transfer
+function; use 
+:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.sample_colormap`,
+which will sample a colormap at a given value; 
+use
+:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_gaussian`,
+which will allow you to specify the colors directly on the transfer function,
+and use 
+:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.map_to_colormap`,
+where you can map a segment of the transfer function space to an entire
+colormap at a single alpha value.  
+
+See :ref:`cookbook-custom-transfer-function` for an example usage.
+
+Projection Transfer Function
+++++++++++++++++++++++++++++
+
+This is designed to allow you to generate projections like what you obtain
+from the standard :ref:`projection-plots`, and it forms the basis of 
+:ref:`off-axis-projections`.  See :ref:`cookbook-offaxis_projection` for a 
+simple example.  Note that the integration here is scaled to a width of 1.0; 
+this means that if you want to apply a colorbar, you will have to multiply by 
+the integration width (specified when you initialize the volume renderer) in 
+whatever units are appropriate.
+
+Planck Transfer Function
+++++++++++++++++++++++++
+
+This transfer function is designed to apply a semi-realistic color field based
+on temperature, emission weighted by density, and approximate scattering based
+on the density.  This class is currently under-documented, and it may be best
+to examine the source code to use it.
+
+More Complicated Transfer Functions
++++++++++++++++++++++++++++++++++++
+
+For more complicated transfer functions, you can use the
+:class:`~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction`
+object.  This allows for a set of weightings, linkages and so on.
+All of the information about how all transfer functions are used and values are
+extracted is contained in the source file ``utilities/lib/grid_traversal.pyx``.
+For more information on how the transfer function is actually applied, look
+over the source code there.
+
+.. _camera:
+
+Camera
+^^^^^^
+
+The :class:`~yt.visualization.volume_rendering.camera.Camera` object
+is what it sounds like, a camera within the Scene.  It possesses the
+following quantities:
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
+ * ``north_vector`` - a vector defining the "up" direction in an image
+ * :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
+
+.. _camera_movement:
+
+Moving and Orienting the Camera
++++++++++++++++++++++++++++++++
+
+There are multiple ways to manipulate the camera viewpoint and orientation.
+One can set the properties listed above explicitly, or one can use the
+:class:`~yt.visualization.volume_rendering.camera.Camera` helper methods.  
+In either case, any change triggers an update of all of the other properties.
+Note that the camera exists in a right-handed coordinate system centered on
+the camera.
+
+Rotation-related methods
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.pitch` - rotate about the lateral axis
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.yaw` - rotate about the vertical axis (i.e. ``north_vector``)
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.roll` - rotate about the longitudinal axis (i.e. ``normal_vector``)
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.rotate` - rotate about an arbitrary axis
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.iter_rotate` - iteratively rotate about an arbitrary axis
+
+For the rotation methods, the camera pivots around the ``rot_center`` rotation 
+center.  By default, this is the camera position, which means that the 
+camera doesn't change its position at all; it just changes its orientation.
+
+Zoom-related methods
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.set_width` - change the width of the FOV
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` - change the width of the FOV by a given factor
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.iter_zoom` - iteratively change the width of the FOV
+
+Perhaps counterintuitively, the camera does not get closer to the focus
+during a zoom; it simply reduces the width of the field of view.
+
+Translation-related methods
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.set_position` - change the location of the camera keeping the focus fixed
+ * :meth:`~yt.visualization.volume_rendering.camera.Camera.iter_move` - iteratively change the location of the camera keeping the focus fixed
+
+The iterative methods provide iteration over a series of changes in the 
+position or orientation of the camera.  These can be used within a loop.
+For an example on how to use all of these camera movement functions, see 
+:ref:`cookbook-camera_movement`.  
+
+.. _lenses:
+
+Camera Lenses
+^^^^^^^^^^^^^
+
+Cameras possess :class:`~yt.visualization.volume_rendering.lens.Lens` objects, 
+which control the geometric path in which rays travel to the camera.  These
+lenses can be swapped in and out of an existing camera to produce different
+views of the same Scene.  For a full demonstration of a Scene object 
+rendered with different lenses, see :ref:`cookbook-various_lens`.
+
+Plane Parallel
+++++++++++++++
+
+The :class:`~yt.visualization.volume_rendering.lens.PlaneParallelLens` is the
+standard lens type used for orthographic projections.  All rays emerge 
+parallel to each other, arranged along a plane.
+
+Perspective and Stereo Perspective
+++++++++++++++++++++++++++++++++++
+
+The :class:`~yt.visualization.volume_rendering.lens.PerspectiveLens` 
+adjusts for an opening view angle, so that the scene will have an 
+element of perspective to it.
+:class:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens`
+is identical to PerspectiveLens, but it produces two images from nearby 
+camera positions for use in 3D viewing.
+
+Fisheye or Dome
++++++++++++++++
+
+The :class:`~yt.visualization.volume_rendering.lens.FisheyeLens` 
+is appropriate for viewing an arbitrary field of view.  Fisheye images 
+are typically used for dome-based presentations; the Hayden planetarium 
+for instance has a field of view of 194.6 degrees.  The images returned by
+this camera will be flat pixel images that can and should be reshaped to the
+camera's resolution.
+
+Spherical and Stereo Spherical
+++++++++++++++++++++++++++++++
+
+The :class:`~yt.visualization.volume_rendering.lens.SphericalLens` produces
+a cylindrical-spherical projection.  Movies rendered in this way can be 
+displayed in head-tracking devices (e.g. Oculus Rift) or in YouTube 360 view
+(for more information see `the YouTube help
+<https://support.google.com/youtube/answer/6178631?hl=en>`_, but it's a
+simple matter of running a script on an encoded movie file).
+:class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens` 
+is identical to :class:`~yt.visualization.volume_rendering.lens.SphericalLens` 
+but it produces two images from nearby camera positions for use in 3D viewing.
+
+.. _annotated-vr-example:
+
+Annotated Examples
+------------------
 
 .. warning:: 3D visualizations can be fun but frustrating!  Tuning the
              parameters to both look nice and convey useful scientific
              information can be hard.  We've provided information about best
              practices and tried to make the interface easy to develop nice
              visualizations, but getting them *just right* is often
-             time-consuming.
+             time-consuming.  It's usually best to start out simple with the 
+             built-in helper interface, and expand on that as needed.
 
-Tutorial
---------
-
-The scene interface provides a more modular interface for creating renderings
-of arbitrary data sources. As such, manual composition of a scene can require a
-bit more work, but we will also provide several helper functions that attempt
+The scene interface provides a modular interface for creating renderings
+of arbitrary data sources. As such, manual composition of a scene can require 
+a bit more work, but we will also provide several helper functions that attempt
 to create satisfactory default volume renderings.
 
-.. note:: It's usually best to start out simple with the built-in helper
-          interface, and expand on that if you need to.
-
-Here is a working example for rendering the IsolatedGalaxy dataset.
-
-.. python-script::
-
-  import yt
-  # load the data
-  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
-
-  # im is the image array generated. it is also saved to 'rendering.png'.
-  # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings, and later save them.
+***needs ipython notebook with two examples here: one high-level one using the 
+yt.volume_render() functionality and the other detailed example using 
+yt.create_scene() to generate a base scene, then modifying all of the 
+components, adding some opaque sources, changing the camera position, 
+transfer function, lens, etc.  Then using "print scene" to display the
+useful __repr__ output for the scene and other VR classes.  The text below
+could be useful too.  Right now, we cannot use ipython notebooks with
+the VR infrastructure because there is no Scene.show() method.  Once it is
+introduced we should be able to do this.***
 
 When the 
 :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
@@ -83,15 +374,18 @@
 :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, by default it will create a transfer function
 based on the extrema of the field that you are rendering. The transfer function
-describes how rays that pass through the domain are "transfered" and thus how
+describes how rays that pass through the domain are "transferred" and thus how
 brightness and color correlate to the field values.  Modifying and adjusting
 the transfer function is the primary way to modify the appearance of an image
 based on volumes.
 
-Once the basic set of objects to be rendered is constructed, a
-:class:`~yt.visualization.volume_rendering.camera.Camera` object is created and
+Once the basic set of objects to be rendered is constructed (e.g. 
+:class:`~yt.visualization.volume_rendering.scene.Scene`, 
+:class:`~yt.visualization.volume_rendering.render_source.RenderSource`, and
+:class:`~yt.visualization.volume_rendering.api.VolumeSource` objects), a
+:class:`~yt.visualization.volume_rendering.camera.Camera` is created and
 added to the scene.  By default the creation of a camera also creates a
-default, plane-parallel :class:`~yt.visualization.volume_rendering.lens.Lens`
+plane-parallel :class:`~yt.visualization.volume_rendering.lens.Lens`
 object. The analog to a real camera is intentional -- a camera can take a
 picture of a scene from a particular point in time and space, but different
 lenses can be swapped in and out.  For example, this might include a fisheye
@@ -100,10 +394,10 @@
 call the main methods of the
 :class:`~yt.visualization.volume_rendering.scene.Scene` class,
 :meth:`~yt.visualization.volume_rendering.scene.Scene.render` and 
-:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When called,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When rendered,
 the scene will loop through all of the
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
-that have been added and integrate the radiative transfer equation through the
+that have been added and integrate the radiative transfer equations through the
 volume. Finally, the image and scene object are returned to the user.
 
 In this example, we don't add on any non-volume rendering sources; however, if
@@ -111,9 +405,11 @@
 
 Alternatively, if you don't want to immediately generate an image of your
 volume rendering, and you just want access to the default scene object, 
-you can skip this expensive operation by just running the
-:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` function in lieu of the
-:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` function. Example:
+you can skip the expensive operation of rendering by just running the
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` 
+function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+function. Example:
 
 .. python-script::
 
@@ -121,297 +417,30 @@
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sc = yt.create_scene(ds, 'density')
 
-Modifying and Saving the Scene
-------------------------------
 
-Once a basic scene has been created with default render sources and
-camera operations, deeper modifications are possible. These
-modifications can tune the appearance of the render sources (such as which
-colors correspond to which values in the data) as well as the shape of the
-rendered image, the position of the camera in the scene, and other elements
-present in the scene.  Below, we describe a few of the aspects of tuning a
-scene to create a visualization that is communicative and pleasing.
 
-.. _rendering_scene:
-
-Rendering and Saving
-++++++++++++++++++++
-
-Whenever you want a rendering of your current scene configuration, use the
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render` method to
-trigger the scene to actually do the ray-tracing step.  After that, you can
-use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
-to save it to disk.  Alternatively, 
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
-:class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
-process it in Python (potentially writing it out with 
-:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
-modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
-and render it as you make changes to see how those changes affect the resulting
-image.  
+If you're eager to just see what a volume rendering of your simulation looks
+like, there is the high-level function 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+that can set up several defaults and provide you with a rendered image
+quickly:
 
 .. python-script::
 
   import yt
+  # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  sc = yt.create_scene(ds, 'density')
-  sc.render() 
-  sc.save()
-  <make changes to scene>
-  sc.render()
-  sc.save('changes.png')
 
-.. _sigma_clip:
+  # volume render the 'density' field, and save the resulting image
+  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
 
-Improving Image Contrast with Sigma Clipping
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  # im is the image array generated. it is also saved to 'rendering.png'.
+  # sc is an instance of a Scene object, which allows you to further refine
+  # your renderings and later save them.
 
-If your images appear to be too dark, you can try using the ``sigma_clip``
-keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
-or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
-Because the brightness range in an image is scaled to match the range of 
-emissivity values of underlying rendering, if you have a few really 
-high-emissivity points, they will scale the rest of your image to be quite 
-dark.  ``sigma_clip = N`` can address this by removing values that are more
-than ``N`` standard deviations brighter than the mean of your image.  
-Typically, a choice of 4 to 6 will help dramatically with your resulting image.
 
-.. python-script::
 
-  sc = yt.create_scene(ds, 'density')
-  sc.render(sigma_clip=4)
-  sc.save()
-
-.. _transfer_functions:
-
-Transfer Functions
-++++++++++++++++++
-
-Transfer functions are the most essential component of a rendering that
-includes volume sources.  Several different fundamental types have been
-provided, but there are many different ways to construct complicated
-expressions that produce visualizations and images using the underlying
-machinery.
-
-.. note::
-   All of the information about how transfer functions are used and values
-   extracted is contained in the functions `TransferFunctionProxy.eval_transfer`
-   and `FIT_get_value` in the file `yt/_amr_utils/VolumeIntegrator.pyx`.  If
-   you're curious about how to construct your own, or why you get the values
-   you do, you should read the source!
-
-There are three ready-to-go transfer functions implemented in yt.
-:class:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction`,
-:class:`~yt.visualization.volume_rendering.transfer_functions.ProjectionTransferFunction`,
-and
-:class:`~yt.visualization.volume_rendering.transfer_functions.PlanckTransferFunction`.
-
-Color Transfer Functions
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-These transfer functions are the standard way to apply colors to specific
-values in the field being rendered.  For instance, applying isocontours at
-specific densities.  They have several different mechanisms that can be used.
-The easiest mechanism is to use
-:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_layers`,
-which will add evenly spaced isocontours between the bounds of the transfer
-function.  However, you can also use
-:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.sample_colormap`,
-which will sample a colormap at a given value.  Additionally, you can directly
-call
-:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.add_gaussian`,
-which will allow you to specify the colors directly.
-
-An alternate method for modifying the colormap is
-:meth:`~yt.visualization.volume_rendering.transfer_functions.ColorTransferFunction.map_to_colormap`,
-where you can map a segment of the transfer function space to an entire
-colormap at a single alpha value.  This is sometimes useful for very opaque
-renderings.
-
-See :ref:`cookbook-simple_volume_rendering` for an example usage.
-
-Projection Transfer Function
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-This is designed to allow you to very easily project off-axis through a region.
-See :ref:`cookbook-offaxis_projection` for a simple example.  Note that the
-integration here is scaled to a width of 1.0; this means that if you want to
-apply a colorbar, you will have to multiply by the integration width (specified
-when you initialize the volume renderer) in whatever units are appropriate.
-
-Planck Transfer Function
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-This transfer function is designed to apply a semi-realistic color field based
-on temperature, emission weighted by density, and approximate scattering based
-on the density.  This class is currently under-documented, and it may be best
-to examine the source code to use it.
-
-More Complicated Transfer Functions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-For more complicated transfer functions, you can use the
-:class:`~yt.visualization.volume_rendering.transfer_functions.MultiVariateTransferFunction`
-object.  This allows for a set of weightings, linkages and so on.
-
-.. _transfer-function-helper:
-
-TransferFunctionHelper
-----------------------
-
-Because good transfer functions can be difficult to generate, the 
-TransferFunctionHelper exists in order to help create and modify transfer
-functions with smart defaults for your datasets.  To follow a full example
-on how to use this interface, follow the
-:ref:`transfer-function-helper-tutorial`.
-
-Adding New Sources
-++++++++++++++++++
-
-The resulting image of a rendering process is a combination of the different
-sources present in a scene.  While at present there are only a few sources
-available, in principle new sources can be defined and added to yt over time.
-
-By default, the scene will construct a volume object that includes the fluid
-components of a data source. 
-
-Volume Objects
-++++++++++++++
-
-When a volume object is added to a scene, rays that cross it will be
-integrated.  The volume object is affiliated with a transfer function, a set of
-voxels (drawn from a data source) and is integrated in a front-to-back manner.
-Depending on whether or not other opaque objects are in the scene, the volume
-may or may not be traversed in its entirety.
-
-.. note:: Behavior is undefined for volume sources that overlap that are added
-          to a scene.
-
-Hard and Opaque Objects
-+++++++++++++++++++++++
-
-In addition to semi-transparent objects, hard surfaces can be added to a scene.
-Currently these surfaces are limited to lines and annotations, but in future
-versions of yt surfaces and texture mapped objects will be included.
-
-The primary objects now available for hard and opaque objects are 
-:class:`~yt.visualization.volume_rendering.api.PointSource` and
-:class:`~yt.visualization.volume_rendering.api.LineSource`.  These are useful
-if you want to annotate points, for instance by splatting a set of particles
-onto an image, or if you want to draw lines connecting different regions or
-vertices.  For instance, lines can be used to draw outlines of regions or
-continents.
-
-.. _volume_rendering_annotations:
-
-Annotations
-+++++++++++
-
-By annotating a visualization, additional information can be drawn out.  yt
-provides three annotations:
-:class:`~yt.visualization.volume_rendering.api.BoxSource`,
-:class:`~yt.visualization.volume_rendering.api.GridSource`, and
-:class:`~yt.visualization.volume_rendering.api.CoordinateVectorSource`.  These
-annotations will operate in data space and can draw boxes, grid information,
-and also provide a vector orientation within the image.
-
-For example scripts using these features, 
-see :ref:`cookbook-volume_rendering_annotations`.
-
-Care and Usage of the Camera
-----------------------------
-
-When constructing a movie or utilizing volume rendering to visualize particular
-objects or phenomena, control over the exact position of the camera is
-necessary for both aesthetic and scientific reasons.
-
-yt provides methods for moving the camera by altering its position and
-orientation in space.  There are helper methods that can provide easier ways if
-you are guiding visualization based on quantities in the data.
-
-Cameras also posses "lens" objects, which control the manner in which rays are
-shot out of the camera.  Some of these make some camera properties
-(specifically the width property) irrelevant.
-
-.. _camera_movement:
-
-Moving and Orienting the Camera
-+++++++++++++++++++++++++++++++
-
-There are multiple ways to manipulate the camera viewpoint to create a series of
-renderings.  For an example, see this cookbook:
-:ref:`cookbook-camera_movement`.  For a current list of
-motion helper functions, see the docstrings associated with
-:class:`~yt.visualization.volume_rendering.camera.Camera`.  In short, the
-camera possesses a number of properties and methods that make changing its
-position easy.  These properties can be set, and will automatically trigger an
-update of the other properties of the camera:
-
- * `position` - the position of the camera in scene-space
- * `width` - the width of the plane the camera can see
- * `focus` - the point in space the camera is looking at
- * `resolution` - the image resolution
-
-In addition to this, methods such as
-:meth:`~yt.visualization.volume_rendering.camera.Camera.rotate`,
-:meth:`~yt.visualization.volume_rendering.camera.Camera.pitch`,
-:meth:`~yt.visualization.volume_rendering.camera.Camera.yaw`, and
-:meth:`~yt.visualization.volume_rendering.camera.Camera.roll` can rotate the
-camera in space. The center around which the camera rotates can be specified by
-the optional parameter `rot_center` (very useful for perspective and spherical
-lenses), or by default `rot_center` is set to be at camera location (i.e. the 
-camera will rotate about its current position).
-
-When examining a particular point in space, 
-:meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` can be of
-assistance, as it will move the camera toward the focal point by a factor
-related to the current distance between them.
-
-In addition to manual control, the camera also has iteration methods that help
-with moving and rotating.  The 
-:meth:`~yt.visualization.volume_rendering.camera.Camera.rotation`,
-:meth:`~yt.visualization.volume_rendering.camera.Camera.zoomin`, and
-:meth:`~yt.visualization.volume_rendering.camera.Camera.move_to` methods
-provide iteration over a sequence of positions and orientations.  These can be
-used within a loop:
-
-.. python-script::
-
-   for i in sc.camera.zoomin(100, 5):
-       sc.render()
-       sc.save("frame_%03i.png" % i)
-
-The variable ``i`` is the frame number in the particular loop being called.  In
-this case, this will zoom in by a factor of 100 over the course of 5 frames.
-
-Changing Lenses
-+++++++++++++++
-
-Setting a lens on a camera changes the resulting image.  These lenses can be
-changed at run time or at the time when a camera is initialized by specifying
-the `lens_type` argument with a string.
-
-At the present time, there are a few cameras that can be used:
-`plane-parallel`, `(stereo)perspective`, `fisheye`, and `(stereo)spherical`.
-
- * Plane parallel: This lens type is the standard type used for orthographic
-   projections.  All rays emerge parallel to each other, arranged along a
-   plane.
- * Perspective: This lens type adjusts for an opening view angle, so that the
-   scene will have an element of perspective to it.
- * Fisheye: This lens type accepts a field-of-view property, `fov`, that
-   describes how wide an angle the fisheye can see.  Fisheye images are
-   typically used for dome-based presentations; the Hayden planetarium for
-   instance has a field of view of 194.6.  The images returned by this camera
-   will be flat pixel images that can and should be reshaped to the resolution.
- * Spherical: This is a cylindrical-spherical projection.  Movies rendered in
-   this way can be displayed in head-tracking devices or in YouTube 360 view
-   (for more information see `the YouTube help
-   <https://support.google.com/youtube/answer/6178631?hl=en>`, but it's a
-   simple matter of running a script on an encoded movie file.)
-
-For more information on the usage of different lenses and their features, see the
-cookbook example :ref:`cookbook-various_lens`.
+.. _volume-rendering-method:
 
 Volume Rendering Method
 -----------------------
@@ -542,10 +571,15 @@
 
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
+.. _vr-faq:
+
+Volume Rendering Frequently Asked Questions
+-------------------------------------------
+
 .. _opaque_rendering:
 
 Opacity
--------
+^^^^^^^
 
 There are currently two models for opacity when rendering a volume, which are
 controlled in the ColorTransferFunction with the keyword
@@ -559,3 +593,19 @@
 
 For an in-depth example, please see the cookbook example on opaque renders here: 
 :ref:`cookbook-opaque_rendering`.
+
+.. _sigma_clip:
+
+Improving Image Contrast with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your images appear to be too dark, you can try using the ``sigma_clip``
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` 
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+functions.  Because the brightness range in an image is scaled to match the 
+range of emissivity values of the underlying rendering, if you have a few really
+high-emissivity points, they will scale the rest of your image to be quite 
+dark.  ``sigma_clip = N`` can address this by removing values that are more
+than ``N`` standard deviations brighter than the mean of your image.  
+Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+See the cookbook recipe :ref:`cookbook-sigma_clip` for a demonstration.
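
Tying the new documentation together, a short sketch patterned on the cookbook
recipes above; it assumes the scene's first volume source can be fetched with
get_source(0).  It builds a default scene, replaces the transfer function
layers, and saves the result at two contrast levels:

    import numpy as np
    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds, 'density')

    # Replace the default transfer function layers, as in opaque_rendering.py.
    render_source = sc.get_source(0)
    tf = render_source.transfer_function
    tf.clear()
    tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
                  alpha=np.logspace(-3, 0, 4), colormap='RdBu_r')

    # Render once, then save with and without sigma clipping.
    sc.render()
    sc.save('galaxy_noclip.png')
    sc.save('galaxy_clip4.png', sigma_clip=4)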

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -246,7 +246,8 @@
         Parameters
         ----------
         filename: string
-            Note filename not be modified.
+            Filename to save to.  If None, PNG contents will be returned as a
+            string.
         sigma_clip: float, optional
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
@@ -291,7 +292,7 @@
         else:
             out = scaled
 
-        if filename[-4:] != '.png':
+        if filename is not None and filename[-4:] != '.png':
             filename += '.png'
 
         if clip_ratio is not None:
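
A hedged usage sketch of the two call modes the updated docstring describes
(a filename, or None to get the PNG contents back):

    import numpy as np
    from yt.data_objects.image_array import ImageArray

    im = ImageArray(np.random.random((256, 256, 4)))
    im.write_png("rendering.png", sigma_clip=4.0)  # clipped image written to disk
    png_string = im.write_png(None)                # PNG contents returned as a string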

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -47,6 +47,7 @@
                 include_dirs=["yt/utilities/lib/"],
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd",
                          "yt/geometry/oct_visitors.pxd",
                          "yt/geometry/grid_container.pxd",

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -678,8 +678,8 @@
     def run(self):
         tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
         os.close(tmpfd)
-        self.scene.render(sigma_clip=1.0)
-        self.scene.save(tmpname)
+        self.scene.render()
+        self.scene.save(tmpname, sigma_clip=1.0)
         image = mpimg.imread(tmpname)
         os.remove(tmpname)
         return [zlib.compress(image.dumps())]

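The test-harness change reflects the new calling convention: ``sigma_clip``
now belongs to ``save()`` rather than ``render()``, so a scene rendered once
can be written out at several clip levels without re-casting any rays.  A
sketch, assuming an existing Scene instance ``sc``:

    # Render once, then save with different clip levels; only the image
    # scaling step is repeated, not the ray casting.
    sc.render()
    sc.save("clip_1.png", sigma_clip=1.0)
    sc.save("clip_6.png", sigma_clip=6.0)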
diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -20,17 +20,14 @@
 cimport kdtree_utils
 
 cdef struct ImageContainer:
-    np.float64_t *vp_pos
-    np.float64_t *vp_dir
+    np.float64_t[:,:,:] vp_pos
+    np.float64_t[:,:,:] vp_dir
     np.float64_t *center
-    np.float64_t *image
-    np.float64_t *zbuffer
+    np.float64_t[:,:,:] image
+    np.float64_t[:,:] zbuffer
     np.float64_t pdx, pdy
     np.float64_t bounds[4]
     int nv[2]
-    int vp_strides[3]
-    int im_strides[3]
-    int vd_strides[3]
     np.float64_t *x_vec
     np.float64_t *y_vec
 
@@ -43,19 +40,29 @@
                 int index[3],
                 void *data) nogil
 
+ctypedef void calculate_extent_function(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil
+
+cdef calculate_extent_function calculate_extent_plane_parallel
+
+ctypedef void generate_vector_info_function(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil
+
+cdef generate_vector_info_function generate_vector_info_plane_parallel
+cdef generate_vector_info_function generate_vector_info_null
 
 cdef class ImageSampler:
     cdef ImageContainer *image
     cdef sampler_function *sampler
-    cdef public object avp_pos, avp_dir, acenter, aimage, ax_vec, ay_vec
+    cdef public object acenter, aimage, ax_vec, ay_vec
     cdef public object azbuffer
     cdef void *supp_data
     cdef np.float64_t width[3]
-
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv)
-
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil
+    cdef public object lens_type
+    cdef calculate_extent_function *extent_function
+    cdef generate_vector_info_function *vector_function
 
     cdef void setup(self, PartitionedGrid pg)
 

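The typedefs above replace per-ray stride checks with extent and vector-info
callbacks chosen once per lens.  A minimal pure-Python sketch of that
dispatch (names and structures are simplified stand-ins, not the actual
Cython API):

    import numpy as np

    def calculate_extent_null(image, brick):
        # Null lens: any brick may touch every pixel of the image.
        return 0, image["nv"][0], 0, image["nv"][1]

    def generate_vector_info_null(image, vi, vj):
        # Null lens: per-pixel ray origin and direction are read straight
        # from the precomputed (nx, ny, 3) arrays.
        return image["vp_pos"][vi, vj], image["vp_dir"][vi, vj]

    # Bind the pair once at construction, as the patched ImageSampler does,
    # instead of re-testing strides inside the casting loop.
    nx, ny = 4, 3
    image = {"nv": (nx, ny),
             "vp_pos": np.zeros((nx, ny, 3)),
             "vp_dir": np.ones((nx, ny, 3))}
    extent_function, vector_function = (calculate_extent_null,
                                        generate_vector_info_null)
    print(extent_function(image, brick=None))   # (0, 4, 0, 3)
    print(vector_function(image, 1, 2))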
diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -17,7 +17,7 @@
 cimport numpy as np
 cimport cython
 #cimport healpix_interface
-from libc.stdlib cimport malloc, free, abs
+from libc.stdlib cimport malloc, calloc, free, abs
 from libc.math cimport exp, floor, log2, \
     lrint, fabs, atan, atan2, asin, cos, sin, sqrt
 from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
@@ -174,6 +174,85 @@
             for i in range(3):
                 vel[i] /= vel_mag[0]
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void calculate_extent_plane_parallel(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    # We do this for all eight corners
+    cdef np.float64_t temp
+    cdef np.float64_t *edges[2]
+    cdef np.float64_t cx, cy
+    cdef np.float64_t extrema[4]
+    cdef int i, j, k
+    edges[0] = vc.left_edge
+    edges[1] = vc.right_edge
+    extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                # This should rotate it into the vector plane
+                temp  = edges[i][0] * image.x_vec[0]
+                temp += edges[j][1] * image.x_vec[1]
+                temp += edges[k][2] * image.x_vec[2]
+                if temp < extrema[0]: extrema[0] = temp
+                if temp > extrema[1]: extrema[1] = temp
+                temp  = edges[i][0] * image.y_vec[0]
+                temp += edges[j][1] * image.y_vec[1]
+                temp += edges[k][2] * image.y_vec[2]
+                if temp < extrema[2]: extrema[2] = temp
+                if temp > extrema[3]: extrema[3] = temp
+    cx = cy = 0.0
+    for i in range(3):
+        cx += image.center[i] * image.x_vec[i]
+        cy += image.center[i] * image.y_vec[i]
+    rv[0] = lrint((extrema[0] - cx - image.bounds[0])/image.pdx)
+    rv[1] = rv[0] + lrint((extrema[1] - extrema[0])/image.pdx)
+    rv[2] = lrint((extrema[2] - cy - image.bounds[2])/image.pdy)
+    rv[3] = rv[2] + lrint((extrema[3] - extrema[2])/image.pdy)
+
+# We do this for a bunch of lenses.  Fallback is to grab them from the vector
+# info supplied.
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void calculate_extent_null(ImageContainer *image,
+            VolumeContainer *vc, np.int64_t rv[4]) nogil:
+    rv[0] = 0
+    rv[1] = image.nv[0]
+    rv[2] = 0
+    rv[3] = image.nv[1]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void generate_vector_info_plane_parallel(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    cdef np.float64_t px, py
+    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
+    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
+    # atleast_3d will add to beginning and end
+    v_pos[0] = im.vp_pos[0,0,0]*px + im.vp_pos[0,3,0]*py + im.vp_pos[0,9,0]
+    v_pos[1] = im.vp_pos[0,1,0]*px + im.vp_pos[0,4,0]*py + im.vp_pos[0,10,0]
+    v_pos[2] = im.vp_pos[0,2,0]*px + im.vp_pos[0,5,0]*py + im.vp_pos[0,11,0]
+    for i in range(3): v_dir[i] = im.vp_dir[0,i,0]
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void generate_vector_info_null(ImageContainer *im,
+            np.int64_t vi, np.int64_t vj,
+            np.float64_t width[2],
+            # Now outbound
+            np.float64_t v_dir[3], np.float64_t v_pos[3]) nogil:
+    cdef int i
+    for i in range(3):
+        # Here's a funny thing: we use vi here because our *image* will be
+        # flattened.  That means that im.nv will be a better one-d offset,
+        # since vp_pos has funny strides.
+        v_pos[i] = im.vp_pos[vi, vj, i]
+        v_dir[i] = im.vp_dir[vi, vj, i]
 
 cdef struct ImageAccumulator:
     np.float64_t rgba[Nch]
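``calculate_extent_plane_parallel`` above projects the eight corners of a
brick's bounding box onto the image-plane basis vectors and keeps the
min/max of each projection before converting to pixel indices.  An
equivalent NumPy sketch of the projection step (pixel-index conversion
omitted):

    import numpy as np
    from itertools import product

    def extent_plane_parallel(left_edge, right_edge, x_vec, y_vec):
        # Build the eight AABB corners from the two edge vectors, exactly
        # as the i/j/k loops do, then dot each corner with the basis.
        edges = np.vstack([left_edge, right_edge])          # shape (2, 3)
        corners = np.array([[edges[i, 0], edges[j, 1], edges[k, 2]]
                            for i, j, k in product((0, 1), repeat=3)])
        px = corners.dot(np.asarray(x_vec, dtype="float64"))
        py = corners.dot(np.asarray(y_vec, dtype="float64"))
        return px.min(), px.max(), py.min(), py.max()

    # Unit cube viewed along z: extents match its footprint in x and y.
    print(extent_plane_parallel([0, 0, 0], [1, 1, 1], [1, 0, 0], [0, 1, 0]))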
@@ -181,8 +260,8 @@
 
 cdef class ImageSampler:
     def __init__(self,
-                  np.ndarray vp_pos,
-                  np.ndarray vp_dir,
+                  np.float64_t[:,:,:] vp_pos,
+                  np.float64_t[:,:,:] vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
@@ -190,91 +269,49 @@
                   np.ndarray[np.float64_t, ndim=1] y_vec,
                   np.ndarray[np.float64_t, ndim=1] width,
                   *args, **kwargs):
-        self.image = <ImageContainer *> malloc(sizeof(ImageContainer))
-        cdef ImageContainer *imagec = self.image
-        cdef np.ndarray[np.float64_t, ndim=2] zbuffer
+        self.image = <ImageContainer *> calloc(sizeof(ImageContainer), 1)
+        cdef np.float64_t[:,:] zbuffer
         zbuffer = kwargs.pop("zbuffer", None)
+        if zbuffer is None:
+            zbuffer = np.ones((image.shape[0], image.shape[1]), "float64")
+        self.lens_type = kwargs.pop("lens_type", None)
+        if self.lens_type == "plane-parallel":
+            self.extent_function = calculate_extent_plane_parallel
+            self.vector_function = generate_vector_info_plane_parallel
+        else:
+            if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
+               not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
+                print "Bad lens shape / direction for %s" % (self.lens_type)
+                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                    vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
+                    vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
+                raise RuntimeError
+            self.extent_function = calculate_extent_null
+            self.vector_function = generate_vector_info_null
         self.sampler = NULL
         cdef int i, j
         # These assignments are so we can track the objects and prevent their
-        # de-allocation from reference counts.
-        self.avp_pos = vp_pos
-        self.avp_dir = vp_dir
+        # de-allocation from reference counts.  Note that we do this to the
+        # "atleast_3d" versions.  Also, note that we re-assign the input
+        # arguments.
+        self.image.vp_pos = vp_pos
+        self.image.vp_dir = vp_dir
+        self.image.image = self.aimage = image
         self.acenter = center
-        self.aimage = image
+        self.image.center = <np.float64_t *> center.data
         self.ax_vec = x_vec
+        self.image.x_vec = <np.float64_t *> x_vec.data
         self.ay_vec = y_vec
-        self.azbuffer = zbuffer
-        imagec.vp_pos = <np.float64_t *> vp_pos.data
-        imagec.vp_dir = <np.float64_t *> vp_dir.data
-        imagec.center = <np.float64_t *> center.data
-        imagec.image = <np.float64_t *> image.data
-        imagec.x_vec = <np.float64_t *> x_vec.data
-        imagec.y_vec = <np.float64_t *> y_vec.data
-        imagec.zbuffer = NULL
-        if zbuffer is not None:
-            imagec.zbuffer = <np.float64_t *> zbuffer.data
-        imagec.nv[0] = image.shape[0]
-        imagec.nv[1] = image.shape[1]
-        for i in range(4): imagec.bounds[i] = bounds[i]
-        imagec.pdx = (bounds[1] - bounds[0])/imagec.nv[0]
-        imagec.pdy = (bounds[3] - bounds[2])/imagec.nv[1]
+        self.image.y_vec = <np.float64_t *> y_vec.data
+        self.image.zbuffer = zbuffer
+        self.image.nv[0] = image.shape[0]
+        self.image.nv[1] = image.shape[1]
+        for i in range(4): self.image.bounds[i] = bounds[i]
+        self.image.pdx = (bounds[1] - bounds[0])/self.image.nv[0]
+        self.image.pdy = (bounds[3] - bounds[2])/self.image.nv[1]
         for i in range(3):
-            imagec.vp_strides[i] = vp_pos.strides[i] / 8
-            imagec.im_strides[i] = image.strides[i] / 8
             self.width[i] = width[i]
 
-        if vp_dir.ndim > 1:
-            for i in range(3):
-                imagec.vd_strides[i] = vp_dir.strides[i] / 8
-        elif vp_pos.ndim == 1:
-            imagec.vd_strides[0] = imagec.vd_strides[1] = imagec.vd_strides[2] = -1
-        else:
-            raise RuntimeError
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef void get_start_stop(self, np.float64_t *ex, np.int64_t *rv):
-        # Extrema need to be re-centered
-        cdef np.float64_t cx, cy
-        cdef ImageContainer *im = self.image
-        cdef int i
-        cx = cy = 0.0
-        for i in range(3):
-            cx += im.center[i] * im.x_vec[i]
-            cy += im.center[i] * im.y_vec[i]
-        rv[0] = lrint((ex[0] - cx - im.bounds[0])/im.pdx)
-        rv[1] = rv[0] + lrint((ex[1] - ex[0])/im.pdx)
-        rv[2] = lrint((ex[2] - cy - im.bounds[2])/im.pdy)
-        rv[3] = rv[2] + lrint((ex[3] - ex[2])/im.pdy)
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef void calculate_extent(self, np.float64_t extrema[4],
-                               VolumeContainer *vc) nogil:
-        # We do this for all eight corners
-        cdef np.float64_t temp
-        cdef np.float64_t *edges[2]
-        edges[0] = vc.left_edge
-        edges[1] = vc.right_edge
-        extrema[0] = extrema[2] = 1e300; extrema[1] = extrema[3] = -1e300
-        cdef int i, j, k
-        for i in range(2):
-            for j in range(2):
-                for k in range(2):
-                    # This should rotate it into the vector plane
-                    temp  = edges[i][0] * self.image.x_vec[0]
-                    temp += edges[j][1] * self.image.x_vec[1]
-                    temp += edges[k][2] * self.image.x_vec[2]
-                    if temp < extrema[0]: extrema[0] = temp
-                    if temp > extrema[1]: extrema[1] = temp
-                    temp  = edges[i][0] * self.image.y_vec[0]
-                    temp += edges[j][1] * self.image.y_vec[1]
-                    temp += edges[k][2] * self.image.y_vec[2]
-                    if temp < extrema[2]: extrema[2] = temp
-                    if temp > extrema[3]: extrema[3] = temp
-
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
@@ -282,7 +319,7 @@
         # This routine will iterate over all of the vectors and cast each in
         # turn.  Might benefit from a more sophisticated intersection check,
         # like http://courses.csusm.edu/cs697exz/ray_box.htm
-        cdef int vi, vj, hit, i, j, ni, nj, nn
+        cdef int vi, vj, hit, i, j, k, ni, nj, nn, xi, yi
         cdef np.int64_t offset
         cdef np.int64_t iter[4]
         cdef VolumeContainer *vc = pg.container
@@ -292,83 +329,43 @@
         cdef np.float64_t *v_pos
         cdef np.float64_t *v_dir
         cdef np.float64_t rgba[6]
-        cdef np.float64_t extrema[4]
         cdef np.float64_t max_t
         hit = 0
         cdef np.int64_t nx, ny, size
-        if im.vd_strides[0] == -1:
-            self.calculate_extent(extrema, vc)
-            self.get_start_stop(extrema, iter)
-            iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
-            iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
-            iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
-            iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
-            nx = (iter[1] - iter[0])
-            ny = (iter[3] - iter[2])
-            size = nx * ny
-        else:
-            nx = im.nv[0]
-            ny = 1
-            iter[0] = iter[1] = iter[2] = iter[3] = 0
-            size = nx
+        self.extent_function(self.image, vc, iter)
+        iter[0] = i64clip(iter[0]-1, 0, im.nv[0])
+        iter[1] = i64clip(iter[1]+1, 0, im.nv[0])
+        iter[2] = i64clip(iter[2]-1, 0, im.nv[1])
+        iter[3] = i64clip(iter[3]+1, 0, im.nv[1])
+        nx = (iter[1] - iter[0])
+        ny = (iter[3] - iter[2])
+        size = nx * ny
         cdef ImageAccumulator *idata
-        cdef np.float64_t px, py
         cdef np.float64_t width[3]
+        cdef int use_vec, max_i
         for i in range(3):
             width[i] = self.width[i]
-        if im.vd_strides[0] == -1:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                for j in prange(size, schedule="static",chunksize=1):
-                    vj = j % ny
-                    vi = (j - vj) / ny + iter[0]
-                    vj = vj + iter[2]
-                    # Dynamically calculate the position
-                    px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                    py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                    v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                    v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                    v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                    offset = im.im_strides[0] * vi + im.im_strides[1] * vj
-                    for i in range(Nch): idata.rgba[i] = im.image[i + offset]
-                    if im.zbuffer != NULL:
-                        max_t = im.zbuffer[im.nv[0] * vi + vj]
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, im.vp_dir, self.sampler,
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + offset] = idata.rgba[i]
-                free(idata)
-                free(v_pos)
-        else:
-            with nogil, parallel(num_threads = num_threads):
-                idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
-                idata.supp_data = self.supp_data
-                v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-                # If we do not have a simple image plane, we have to cast all
-                # our rays 
-                for j in prange(size, schedule="dynamic", chunksize=100):
-                    offset = j * 3
-                    for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                    for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                    if v_dir[0] == v_dir[1] == v_dir[2] == 0.0:
-                        continue
-                    # Note that for Nch != 3 we need a different offset into
-                    # the image object than for the vectors!
-                    for i in range(Nch): idata.rgba[i] = im.image[i + Nch*j]
-                    if im.zbuffer != NULL:
-                        max_t = fclip(im.zbuffer[j], 0.0, 1.0)
-                    else:
-                        max_t = 1.0
-                    walk_volume(vc, v_pos, v_dir, self.sampler, 
-                                (<void *> idata), NULL, max_t)
-                    for i in range(Nch): im.image[i + Nch*j] = idata.rgba[i]
-                free(v_dir)
-                free(idata)
-                free(v_pos)
+        with nogil, parallel(num_threads = num_threads):
+            idata = <ImageAccumulator *> malloc(sizeof(ImageAccumulator))
+            idata.supp_data = self.supp_data
+            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+            for j in prange(size, schedule="static", chunksize=100):
+                vj = j % ny
+                vi = (j - vj) / ny + iter[0]
+                vj = vj + iter[2]
+                # Dynamically calculate the position
+                self.vector_function(im, vi, vj, width, v_dir, v_pos)
+                for i in range(Nch):
+                    idata.rgba[i] = im.image[vi, vj, i]
+                max_t = fclip(im.zbuffer[vi, vj], 0.0, 1.0)
+                walk_volume(vc, v_pos, v_dir, self.sampler,
+                            (<void *> idata), NULL, max_t)
+                for i in range(Nch):
+                    im.image[vi, vj, i] = idata.rgba[i]
+            free(idata)
+            free(v_pos)
+            free(v_dir)
         return hit
 
     cdef void setup(self, PartitionedGrid pg):

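Note how the unified casting loop decomposes the flattened loop variable:
``j`` enumerates only the clipped ``nx * ny`` sub-window, and ``(vi, vj)``
are recovered with the ``iter`` offsets.  A tiny sketch of that
decomposition (values illustrative):

    # iter0/iter2 are the clipped window's lower corner, as in the prange
    # body; C integer division becomes // here.
    ny, iter0, iter2 = 3, 10, 20
    for j in range(2 * ny):          # nx = 2 for this illustration
        vj = j % ny
        vi = (j - vj) // ny + iter0
        vj = vj + iter2
        print(j, "->", (vi, vj))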
diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/utilities/lib/mesh_traversal.pyx
--- a/yt/utilities/lib/mesh_traversal.pyx
+++ b/yt/utilities/lib/mesh_traversal.pyx
@@ -75,53 +75,25 @@
         size = nx * ny
         data = np.empty(size, dtype="float64")
         cdef rtcr.RTCRay ray
-        if im.vd_strides[0] == -1:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            for j in range(size):
-                vj = j % ny
-                vi = (j - vj) / ny
-                vj = vj
-                # Dynamically calculate the position
-                px = width[0] * (<np.float64_t>vi)/(<np.float64_t>im.nv[0]-1) - width[0]/2.0
-                py = width[1] * (<np.float64_t>vj)/(<np.float64_t>im.nv[1]-1) - width[1]/2.0
-                v_pos[0] = im.vp_pos[0]*px + im.vp_pos[3]*py + im.vp_pos[9]
-                v_pos[1] = im.vp_pos[1]*px + im.vp_pos[4]*py + im.vp_pos[10]
-                v_pos[2] = im.vp_pos[2]*px + im.vp_pos[5]*py + im.vp_pos[11]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = im.vp_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-        else:
-            v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-            # If we do not have a simple image plane, we have to cast all
-            # our rays 
-            for j in range(size):
-                offset = j * 3
-                for i in range(3): v_pos[i] = im.vp_pos[i + offset]
-                for i in range(3): v_dir[i] = im.vp_dir[i + offset]
-                for i in range(3):
-                    ray.org[i] = v_pos[i]
-                    ray.dir[i] = v_dir[i]
-                ray.tnear = 0.0
-                ray.tfar = 1e37
-                ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
-                ray.mask = -1
-                ray.time = 0
-                rtcs.rtcIntersect(scene.scene_i, ray)
-                data[j] = ray.time
-            self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
-            free(v_pos)
-            free(v_dir)
+        v_pos = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        v_dir = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        for j in range(size):
+            vj = j % ny
+            vi = (j - vj) / ny
+            vj = vj
+            self.vector_function(im, vi, vj, width, v_dir, v_pos)
+            for i in range(3):
+                ray.org[i] = v_pos[i]
+                ray.dir[i] = v_dir[i]
+            ray.tnear = 0.0
+            ray.tfar = 1e37
+            ray.geomID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.primID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.instID = rtcg.RTC_INVALID_GEOMETRY_ID
+            ray.mask = -1
+            ray.time = 0
+            rtcs.rtcIntersect(scene.scene_i, ray)
+            data[j] = ray.time
+        self.aimage = data.reshape(self.image.nv[0], self.image.nv[1])
+        free(v_pos)
+        free(v_dir)

diff -r 19538c5ef25f7bfdc7b64442e13462cf285e00d6 -r 9f560b7f279cd6e3abffb105bd64b80946333c94 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -76,6 +76,7 @@
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/utilities/lib/amr_kdtools.pxd",
                          "yt/utilities/lib/ContourFinding.pxd",
+                         "yt/utilities/lib/grid_traversal.pxd",
                          "yt/geometry/oct_container.pxd"])
     config.add_extension("DepthFirstOctree", 
                 ["yt/utilities/lib/DepthFirstOctree.pyx"],
@@ -178,7 +179,8 @@
                              ["yt/utilities/lib/mesh_traversal.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],
                              libraries=["m", "embree"], language="c++",
-                             depends=["yt/utilities/lib/mesh_traversal.pxd"])
+                             depends=["yt/utilities/lib/mesh_traversal.pxd",
+                                      "yt/utilities/lib/grid_traversal.pxd"])
         config.add_extension("mesh_samplers",
                              ["yt/utilities/lib/mesh_samplers.pyx"],
                              include_dirs=["yt/utilities/lib", include_dirs],

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


