[yt-svn] commit/yt: 17 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Fri Oct 16 16:10:59 PDT 2015


17 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/b789e08a17f8/
Changeset:   b789e08a17f8
Branch:      yt
User:        chummels
Date:        2015-10-15 21:32:44+00:00
Summary:     Adding scene.save() function which saves an image to disk after it has been rendered.
Affected #:  1 file

diff -r 562b253b734a3abc182b712fb9009034ea3b36c6 -r b789e08a17f86ef7282d4252d09b2f8de4eedd1d yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -18,7 +18,7 @@
 from yt.extern.six import iteritems, itervalues
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
-    GridSource
+    GridSource, RenderSource
 from .zbuffer_array import ZBuffer
 
 
@@ -55,6 +55,8 @@
         super(Scene, self).__init__()
         self.sources = OrderedDict()
         self.camera = None
+        # An image array containing the last rendered image of the scene
+        self.last_render = None
 
     def get_source(self, source_num):
         return list(itervalues(self.sources))[source_num]
@@ -90,17 +92,15 @@
 
         return self
 
-    def render(self, fname=None, sigma_clip=None, camera=None):
+    def render(self, sigma_clip=None, camera=None):
         r"""Render all sources in the Scene.
 
         Use the current state of the Scene object to render all sources
-        currently in the scene.
+        currently in the scene.  Returns the image array.  If you want to
+        save the output to a file, call the save() function.
 
         Parameters
         ----------
-        fname: string, optional
-            If specified, save the rendering as a bitmap to the file "fname".
-            Default: None
         sigma_clip: float, optional
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
@@ -110,14 +110,14 @@
 
         Returns
         -------
-        bmp: :class:`ImageArray`
-            ImageArray instance of the current rendering image.
+        ImageArray instance of the current rendering image.
 
         Examples
         --------
         >>> sc = Scene()
         >>> # Add sources/camera/etc
-        >>> im = sc.render('rendering.png')
+        >>> im = sc.render(sigma_clip=4)
+        >>> sc.save()
 
         """
         if camera is None:
@@ -125,10 +125,51 @@
         assert(camera is not None)
         self._validate()
         bmp = self.composite(camera=camera)
-        if fname is not None:
-            bmp.write_png(fname, sigma_clip=sigma_clip)
+        self.last_render = bmp
         return bmp
 
+    def save(self, fname=None):
+        r"""Saves the most recently rendered image of the Scene to disk.
+
+        Once you have created a scene and rendered that scene to an image 
+        array, this saves that image array to disk with an optional filename.
+
+        Parameters
+        ----------
+        fname: string, optional
+            If specified, save the rendering as a bitmap to the file "fname".
+            If unspecified, it creates a default based on the dataset filename.
+            Default: None
+
+        Returns
+        -------
+            Nothing
+
+        Examples
+        --------
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.render()
+        >>> sc.save('test.png')
+
+        """
+        if fname is None:
+            sources = list(itervalues(self.sources))
+            rensources = [s for s in sources if isinstance(s, RenderSource)]
+            # if a render source is present, use its affiliated ds for fname
+            if len(rensources) > 0:
+                rs = rensources[0]
+                basename = rs.data_source.ds.basename
+                if isinstance(rs.field, basestring):
+                    field = rs.field
+                else:
+                    field = rs.field[-1]
+                fname = "%s_Render_%s.png" % (basename, field)
+            # if no render source present, use a default filename
+            else:
+                fname = "Render.png"   
+        self.last_render.write_png(fname)
+ 
     def _validate(self):
         r"""Validate the current state of the scene."""
         for k, source in iteritems(self.sources):


https://bitbucket.org/yt_analysis/yt/commits/3cffeb176560/
Changeset:   3cffeb176560
Branch:      yt
User:        chummels
Date:        2015-10-15 22:17:25+00:00
Summary:     Adding in automatic recognition of png files in save() method on scene
Affected #:  1 file

diff -r b789e08a17f86ef7282d4252d09b2f8de4eedd1d -r 3cffeb176560b7c675c3c37fbff6a2d1390c135c yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -20,7 +20,7 @@
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
     GridSource, RenderSource
 from .zbuffer_array import ZBuffer
-
+from yt.funcs import get_image_suffix
 
 class Scene(object):
 
@@ -123,6 +123,7 @@
         if camera is None:
             camera = self.camera
         assert(camera is not None)
+        mylog.info("Rendering scene")
         self._validate()
         bmp = self.composite(camera=camera)
         self.last_render = bmp
@@ -133,6 +134,8 @@
 
         Once you have created a scene and rendered that scene to an image 
         array, this saves that image array to disk with an optional filename.
+        If an image has not yet been rendered for the current scene object,
+        it forces one and writes it out.
 
         Parameters
         ----------
@@ -152,6 +155,11 @@
         >>> sc.render()
         >>> sc.save('test.png')
 
+        # Or alternatively
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.save('test.png')
+
         """
         if fname is None:
             sources = list(itervalues(self.sources))
@@ -168,6 +176,15 @@
             # if no render source present, use a default filename
             else:
                 fname = "Render.png"   
+        suffix = get_image_suffix(fname)
+        if suffix == '':
+            suffix = '.png'
+            fname = '%s%s' % (fname, suffix)
+
+        if self.last_render is None:
+            self.render()
+
+        mylog.info("Saving render %s", fname)
         self.last_render.write_png(fname)
  
     def _validate(self):


https://bitbucket.org/yt_analysis/yt/commits/e3e5fde43e59/
Changeset:   e3e5fde43e59
Branch:      yt
User:        chummels
Date:        2015-10-15 23:04:57+00:00
Summary:     Adding log messages to renderings.
Affected #:  3 files

diff -r 3cffeb176560b7c675c3c37fbff6a2d1390c135c -r e3e5fde43e597ea4bf24388ab8329134708ae25a yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -138,7 +138,9 @@
             self.build_defaults()
 
     def build_defaults(self):
+        mylog.info("Creating default volume")
         self.build_default_volume()
+        mylog.info("Creating default transfer function")
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):

diff -r 3cffeb176560b7c675c3c37fbff6a2d1390c135c -r e3e5fde43e597ea4bf24388ab8329134708ae25a yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -14,13 +14,13 @@
 
 import numpy as np
 from collections import OrderedDict
-from yt.funcs import mylog
+from yt.funcs import mylog, get_image_suffix, get_pbar
 from yt.extern.six import iteritems, itervalues
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
     GridSource, RenderSource
 from .zbuffer_array import ZBuffer
-from yt.funcs import get_image_suffix
+
 
 class Scene(object):
 
@@ -123,7 +123,6 @@
         if camera is None:
             camera = self.camera
         assert(camera is not None)
-        mylog.info("Rendering scene")
         self._validate()
         bmp = self.composite(camera=camera)
         self.last_render = bmp
@@ -189,8 +188,12 @@
  
     def _validate(self):
         r"""Validate the current state of the scene."""
-        for k, source in iteritems(self.sources):
+
+        pbar = get_pbar("Rendering scene: ", len(self.sources))
+        for i, (k, source) in enumerate(iteritems(self.sources)):
             source._validate()
+            pbar.update(i)
+        pbar.finish()
         return
 
     def composite(self, camera=None):

diff -r 3cffeb176560b7c675c3c37fbff6a2d1390c135c -r e3e5fde43e597ea4bf24388ab8329134708ae25a yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -113,5 +113,6 @@
     >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
     sc = create_scene(data_source, field=field)
-    im = sc.render(fname=fname, sigma_clip=sigma_clip)
+    im = sc.render(sigma_clip=sigma_clip)
+    sc.save(fname=fname)
     return im, sc


https://bitbucket.org/yt_analysis/yt/commits/bc1d744300a4/
Changeset:   bc1d744300a4
Branch:      yt
User:        chummels
Date:        2015-10-15 23:18:36+00:00
Summary:     Updating use of sc.render() to incorporate sc.save() when required.
Affected #:  6 files

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -413,7 +413,8 @@
         --------
 
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ...     im = sc.render("rotation_%04i.png" % i)
+        ...     im = sc.render()
+        ...     sc.save('rotation_%04i.png' % i)
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -441,7 +442,8 @@
         --------
 
         >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
-        ...     sc.render("move_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("move_%04i.png" % i)
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -494,7 +496,8 @@
         --------
 
         >>> for i in cam.iter_zoom(100.0, 10):
-        ...     sc.render("zoom_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("zoom_%04i.png" % i)
         """
         f = final**(1.0/n_steps)
         for i in xrange(n_steps):

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/tests/modify_transfer_function.py
--- a/yt/visualization/volume_rendering/tests/modify_transfer_function.py
+++ b/yt/visualization/volume_rendering/tests/modify_transfer_function.py
@@ -22,5 +22,5 @@
 tf.clear()
 tf.grey_opacity=True
 tf.add_layers(3, colormap='RdBu')
-sc.render("new_tf.png")
-
+sc.render()
+sc.save("new_tf.png")

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/tests/multiple_fields.py
--- a/yt/visualization/volume_rendering/tests/multiple_fields.py
+++ b/yt/visualization/volume_rendering/tests/multiple_fields.py
@@ -20,5 +20,6 @@
 volume_source = sc.get_source(0)
 volume_source.set_field(('gas','velocity_x'))
 volume_source.build_default_transfer_function()
-sc.render("render_x.png")
+sc.render()
+sc.save("render_x.png")
 

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
@@ -21,4 +21,5 @@
 frames = 10
 for i in range(frames):
     sc.camera.yaw(angle/frames)
-    sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+    sc.render(sigma_clip=6.0)
+    sc.save('test_rot_%04i.png' % i)

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -55,7 +55,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_perspective_%s.png' % self.field[1])
 
     def test_stereoperspective_lens(self):
         sc = Scene()
@@ -67,8 +68,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereoperspective_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereoperspective_%s.png' % self.field[1])
 
     def test_fisheye_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -85,8 +86,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_fisheye_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_fisheye_%s.png' % self.field[1])
 
     def test_plane_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -101,8 +102,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_plane_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_plane_%s.png' % self.field[1])
 
     def test_spherical_lens(self):
         sc = Scene()
@@ -114,8 +115,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_spherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_spherical_%s.png' % self.field[1])
 
     def test_stereospherical_lens(self):
         w = (self.ds.domain_width).in_units('code_length')
@@ -129,5 +130,5 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereospherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereospherical_%s.png' % self.field[1])

diff -r e3e5fde43e597ea4bf24388ab8329134708ae25a -r bc1d744300a45da54b5717c610ea5a03d30b526e yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -76,9 +76,11 @@
         mi_bound = ((ma-mi)*(0.10))+mi
         ma_bound = ((ma-mi)*(0.90))+mi
         tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-        sc.render('test_scene.png', sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_scene.png')
 
         nrot = 2 
         for i in range(nrot):
             sc.camera.pitch(2*np.pi/nrot)
-            sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+            sc.render(sigma_clip=6.0)
+            sc.save('test_rot_%04i.png' % i)


https://bitbucket.org/yt_analysis/yt/commits/530c4e8135a4/
Changeset:   530c4e8135a4
Branch:      yt
User:        chummels
Date:        2015-10-15 23:45:41+00:00
Summary:     Updating docs to use the new sc.render(); sc.save(fname) paradigm.
Affected #:  8 files

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -39,7 +39,8 @@
 
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
-sc.render("v1.png")
+sc.render()
+sc.save("v1.png")
 
# This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
@@ -48,12 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 #
## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -61,11 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 #
 ## This looks pretty good, now lets go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # This looks great!

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,15 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,4 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(fname='custom.png', sigma_clip=4)
+sc.render(sigma_clip=4)
+sc.save('custom.png')

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -21,4 +21,4 @@
     np.log10(ds.quan(1.0e-29, 'g/cm**3')),
     scale=30.0, colormap='RdBu_r')
 
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+sc.save('new_tf.png')

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,7 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-im = sc.render("v1.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v1.png")
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -22,27 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-im = sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-im = sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -50,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v6.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v6.png")
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-im = sc.render("v7.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v7.png")
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -8,15 +8,15 @@
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)
-im = sc.render()
-im.write_png("%s_vr_domain.png" % ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
 sc.annotate_grids(ds)
-im = sc.render()
-im.write_png("%s_vr_grids.png" % ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
 sc.annotate_axes()
-im = sc.render()
-im.write_png("%s_vr_coords.png" % ds)
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,7 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_plane-parallel.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_plane-parallel.png')
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -50,7 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_perspective.png')
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -65,7 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-perspective.png')
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -79,7 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_fisheye.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_fisheye.png')
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -96,7 +100,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_spherical.png')
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,4 +116,5 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-spherical.png')

diff -r bc1d744300a45da54b5717c610ea5a03d30b526e -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,11 +31,11 @@
    :align: center
    :alt: Diagram of a 3D Scene
 
-In versions of yt prior to 3.2, the only volume rendering interface accessible
+In versions of yt prior to 3.3, the only volume rendering interface accessible
 was through the "camera" object.  This presented a number of problems,
 principle of which was the inability to describe new scene elements or to
 develop complex visualizations that were independent of the specific elements
-being rendered.  The new "scene" based interface present in yt 3.2 and beyond
+being rendered.  The new "scene" based interface present in yt 3.3 and beyond
 enables both more complex visualizations to be constructed as well as a new,
 more intuitive interface for very simple 3D visualizations.
 
@@ -96,9 +96,10 @@
 lenses can be swapped in and out.  For example, this might include a fisheye
 lens, a spherical lens, or some other method of describing the direction and
 origin of rays for rendering. Once the camera is added to the scene object, we
-call the main method of the
+call the main methods of the
 :class:`~yt.visualization.volume_rendering.scene.Scene` class,
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render`.  When called,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` and 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When called,
 the scene will loop through all of the
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
 that have been added and integrate the radiative transfer equation through the
@@ -322,7 +323,7 @@
 .. python-script::
 
    for i in sc.camera.zoomin(100, 5):
-       sc.render("frame_%03i.png" % i)
+       sc.save("frame_%03i.png" % i)
 
 The variable ``i`` is the frame number in the particular loop being called.  In
 this case, this will zoom in by a factor of 100 over the course of 5 frames.


https://bitbucket.org/yt_analysis/yt/commits/ab275fdb1e94/
Changeset:   ab275fdb1e94
Branch:      yt
User:        chummels
Date:        2015-10-15 23:47:49+00:00
Summary:     Merging.
Affected #:  5 files

diff -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 -r ab275fdb1e942234b327f05f6723e56db3ff25cd doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -305,7 +305,10 @@
 :meth:`~yt.visualization.volume_rendering.camera.Camera.pitch`,
 :meth:`~yt.visualization.volume_rendering.camera.Camera.yaw`, and
 :meth:`~yt.visualization.volume_rendering.camera.Camera.roll` can rotate the
-camera in space.
+camera in space. The center around which the camera rotates can be specified by
+the optional parameter `rot_center` (very useful for perspective and spherical
+lenses), or by default `rot_center` is set to be at camera location (i.e. the 
+camera will rotate about its current position).
 
 When examining a particular point in space, 
 :meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` can be of

diff -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 -r ab275fdb1e942234b327f05f6723e56db3ff25cd yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -75,7 +75,7 @@
             # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
-            if self.steady_north:
+            if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):
                 north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
             east_vector = np.cross(north_vector, normal_vector).ravel()
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))

diff -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 -r ab275fdb1e942234b327f05f6723e56db3ff25cd yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -299,7 +299,7 @@
                                 north_vector=north_vector)
         self._moved = True
 
-    def rotate(self, theta, rot_vector=None):
+    def rotate(self, theta, rot_vector=None, rot_center=None):
         r"""Rotate by a given angle
 
         Rotate the view.  If `rot_vector` is None, rotation will occur
@@ -313,6 +313,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around
             `north_vector`
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change)
 
         Examples
         --------
@@ -323,12 +327,19 @@
         rotate_all = rot_vector is not None
         if rot_vector is None:
             rot_vector = self.north_vector
+        if rot_center is None:
+            rot_center = self._position
         rot_vector = ensure_numpy_array(rot_vector)
         rot_vector = rot_vector/np.linalg.norm(rot_vector)
 
+        new_position = self._position - rot_center
         R = get_rotation_matrix(theta, rot_vector)
+        new_position = np.dot(R, new_position) + rot_center
 
-        normal_vector = self.unit_vectors[2]
+        if (new_position == self._position).all():
+            normal_vector = self.unit_vectors[2]
+        else:
+            normal_vector = rot_center - new_position
         normal_vector = normal_vector/np.sqrt((normal_vector**2).sum())
 
         if rotate_all:
@@ -337,8 +348,9 @@
                 north_vector=np.dot(R, self.unit_vectors[1]))
         else:
             self.switch_view(normal_vector=np.dot(R, normal_vector))
+        if (new_position != self._position).any(): self.set_position(new_position)
 
-    def pitch(self, theta):
+    def pitch(self, theta, rot_center=None):
         r"""Rotate by a given angle about the horizontal axis
 
         Pitch the view.
@@ -347,6 +359,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to pitch the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -354,9 +368,9 @@
         >>> cam = Camera()
         >>> cam.pitch(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[0])
+        self.rotate(theta, rot_vector=self.unit_vectors[0], rot_center=rot_center)
 
-    def yaw(self, theta):
+    def yaw(self, theta, rot_center=None):
         r"""Rotate by a given angle about the vertical axis
 
         Yaw the view.
@@ -365,6 +379,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to yaw the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -372,9 +388,9 @@
         >>> cam = Camera()
         >>> cam.yaw(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[1])
+        self.rotate(theta, rot_vector=self.unit_vectors[1], rot_center=rot_center)
 
-    def roll(self, theta):
+    def roll(self, theta, rot_center=None):
         r"""Rotate by a given angle about the view normal axis
 
         Roll the view.
@@ -383,6 +399,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to roll the view.
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -390,9 +408,9 @@
         >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[2])
+        self.rotate(theta, rot_vector=self.unit_vectors[2], rot_center=rot_center)
 
-    def iter_rotate(self, theta, n_steps, rot_vector=None):
+    def iter_rotate(self, theta, n_steps, rot_vector=None, rot_center=None):
         r"""Loop over rotate, creating a rotation
 
         This will rotate `n_steps` until the current view has been
@@ -408,6 +426,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around the
             original `north_vector`
+        rot_center  : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change)
 
         Examples
         --------
@@ -419,7 +441,7 @@
 
         dtheta = (1.0*theta)/n_steps
         for i in xrange(n_steps):
-            self.rotate(dtheta, rot_vector=rot_vector)
+            self.rotate(dtheta, rot_vector=rot_vector, rot_center=rot_center)
             yield i
 
     def iter_move(self, final, n_steps, exponential=False):

diff -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 -r ab275fdb1e942234b327f05f6723e56db3ff25cd yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -152,6 +152,7 @@
     camera.set_width(width)
     camera.switch_orientation(normal_vector=normal_vector,
                               north_vector=north_vector)
+    camera.position = center - width[2]*camera.normal_vector
     camera.focus = center
     sc.camera = camera
     sc.add_source(vol)

diff -r 530c4e8135a47b1fa75ecd780e18f0594faf05e0 -r ab275fdb1e942234b327f05f6723e56db3ff25cd yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -265,7 +265,7 @@
         """
         self.camera = camera
 
-    def get_camera(self, camera):
+    def get_camera(self):
         r"""
 
         Get the camera currently used by this scene.


https://bitbucket.org/yt_analysis/yt/commits/4cf19eac329a/
Changeset:   4cf19eac329a
Branch:      yt
User:        chummels
Date:        2015-10-16 00:19:38+00:00
Summary:     Updating VR narrative docs to explicitly tell user how to render and save.  Also discusses sigma_clip
Affected #:  1 file

diff -r ab275fdb1e942234b327f05f6723e56db3ff25cd -r 4cf19eac329ae514e1e1bc020b02564fe7cda4ca doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -65,11 +65,11 @@
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_render(ds, 'density', fname='test_rendering.png')
+  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
 
-  # im is the image that was generated.
+  # im is the image array generated. it is also saved to 'rendering.png'.
   # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings.
+  # your renderings, and later save them.
 
 When the :func:`~yt.visualization.volume_rendering.volume_render` function 
 is called, first an empty 
@@ -119,12 +119,10 @@
   import yt
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field 
   sc = yt.create_scene(ds, 'density')
 
-
-Modifying the Scene
--------------------
+Modifying and Saving the Scene
+------------------------------
 
 Once a basic scene has been created with default render sources and
 camera operations, deeper modifications are possible. These
@@ -134,6 +132,37 @@
 present in the scene.  Below, we describe a few of the aspects of tuning a
 scene to create a visualization that is communicative and pleasing.
 
+.. _rendering_scene:
+
+Rendering and Saving
+++++++++++++++++++++
+
+Whenever you want a rendering of your current scene configuration, use the
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` method to
+trigger the scene to actually do the ray-tracing step.  After that, you can
+use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
+to save it to disk.  Alternatively, 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
+image array if you want to further process it in Python.  You can continue 
+modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
+and render it as you make changes to see how those changes affect the resulting
+image.
+
+.. _sigma_clip:
+
+Brightening an Image with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your images appear to be too dark, you can try using the ``sigma_clip``
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
+or :func:`~yt.visualization.volume_rendering.volume_render` functions.  
+Because the brightness range in an image is scaled to match the range of 
+emissivity values of underlying rendering, if you have a few really 
+high-emissivity points, they will scale the rest of your image to be quite 
+dark.  ``sigma_clip = N`` can address this by removing values that are more
+than ``N`` standard deviations brighter than the mean of your image.  
+Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+
 .. _transfer_functions:
 
 Transfer Functions


https://bitbucket.org/yt_analysis/yt/commits/ead950467b8a/
Changeset:   ead950467b8a
Branch:      yt
User:        chummels
Date:        2015-10-16 00:29:19+00:00
Summary:     Adding examples to volume_rendering narrative docs for saving images.
Affected #:  1 file

diff -r 4cf19eac329ae514e1e1bc020b02564fe7cda4ca -r ead950467b8ae57cff26182cf9b11f05d3466f2f doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -117,7 +117,6 @@
 .. python-script::
 
   import yt
-  # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sc = yt.create_scene(ds, 'density')
 
@@ -143,10 +142,23 @@
 use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
 to save it to disk.  Alternatively, 
 :meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
-image array if you want to further process it in Python.  You can continue 
+:class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
+process it in Python (potentially writing it out with 
+:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
 modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
 and render it as you make changes to see how those changes affect the resulting
-image.
+image.  
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render() 
+  sc.save()
+  # ... make changes to the scene here ...
+  sc.render()
+  sc.save('changes.png')
 
 .. _sigma_clip:
 
@@ -163,6 +175,12 @@
 than ``N`` standard deviations brighter than the mean of your image.  
 Typically, a choice of 4 to 6 will help dramatically with your resulting image.
 
+.. python-script::
+
+  sc = yt.create_scene(ds, 'density')
+  sc.render(sigma_clip=4)
+  sc.save()
+
 .. _transfer_functions:
 
 Transfer Functions


https://bitbucket.org/yt_analysis/yt/commits/872c02132d64/
Changeset:   872c02132d64
Branch:      yt
User:        chummels
Date:        2015-10-16 00:40:08+00:00
Summary:     Correcting bug in cookbook recipe.
Affected #:  1 file

diff -r ead950467b8ae57cff26182cf9b11f05d3466f2f -r 872c02132d6464286b231e5a08928535e5284a84 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -21,4 +21,4 @@
     np.log10(ds.quan(1.0e-29, 'g/cm**3')),
     scale=30.0, colormap='RdBu_r')
 
-im = sc.save('new_tf.png')
+sc.save('new_tf.png')


https://bitbucket.org/yt_analysis/yt/commits/ba4ecabbaeb6/
Changeset:   ba4ecabbaeb6
Branch:      yt
User:        chummels
Date:        2015-10-16 06:47:58+00:00
Summary:     Merging.
Affected #:  9 files

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -29,16 +29,15 @@
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
     mks_base_units, latex_prefixes, yt_base_units
-from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_registry import \
+    UnitRegistry, \
+    UnitParseError
 from yt.utilities.exceptions import YTUnitsNotReducible
 
 import copy
 import string
 import token
 
-class UnitParseError(Exception):
-    pass
-
 class InvalidUnitOperation(Exception):
     pass
 
@@ -545,8 +544,13 @@
             return (unit_data[0] * prefix_value, unit_data[1])
 
     # no dice
-    raise UnitParseError("Could not find unit symbol '%s' in the provided " \
-                         "symbols." % symbol_str)
+    if symbol_str.startswith('code_'):
+        raise UnitParseError(
+            "Code units have not been defined. \n"
+            "Try creating the array or quantity using ds.arr or ds.quan instead.")
+    else:
+        raise UnitParseError("Could not find unit symbol '%s' in the provided " \
+                             "symbols." % symbol_str)
 
 def validate_dimensions(dimensions):
     if isinstance(dimensions, Mul):

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -305,7 +305,9 @@
     result_storage = None
     prefix = ""
     def __init__(self, ds_fn):
-        if isinstance(ds_fn, Dataset):
+        if ds_fn is None:
+            self.ds = None
+        elif isinstance(ds_fn, Dataset):
             self.ds = ds_fn
         else:
             self.ds = data_dir_load(ds_fn)
@@ -315,7 +317,8 @@
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None or self.description not in dd:
-                raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
+                raise YTNoOldAnswer(
+                    "%s : %s" % (self.storage_name, self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
@@ -660,6 +663,29 @@
         assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
         for fn in fns: os.remove(fn)
 
+class VRImageComparisonTest(AnswerTestingTest):
+    _type_name = "VRImageComparison"
+    _attrs = ('desc',)
+
+    def __init__(self, scene, ds, desc, decimals):
+        super(VRImageComparisonTest, self).__init__(None)
+        self.obj_type = ('vr',)
+        self.ds = ds
+        self.scene = scene
+        self.desc = desc
+        self.decimals = decimals
+
+    def run(self):
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        self.scene.render(tmpname, sigma_clip=1.0)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args',
@@ -774,6 +800,16 @@
     else:
         return ftrue
 
+def requires_answer_testing():
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if AnswerTestingTest.result_storage is not None:
+        return ftrue
+    else:
+        return ffalse
+    
 def requires_ds(ds_fn, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -41,6 +41,12 @@
 
         """
 
+        # Make sure vectors are unitless
+        if north_vector is not None:
+            north_vector = YTArray(north_vector, "", dtype='float64')
+        if normal_vector is not None:
+            normal_vector = YTArray(normal_vector, "", dtype='float64')
+
         self.steady_north = steady_north
         if not np.dot(normal_vector, normal_vector) > 0:
             mylog.error("Normal vector is null")

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -14,6 +14,7 @@
 from yt.funcs import iterable, mylog, ensure_numpy_array
 from yt.utilities.orientation import Orientation
 from yt.units.yt_array import YTArray
+from yt.units.unit_registry import UnitParseError
 from yt.utilities.math_utils import get_rotation_matrix
 from .utils import data_source_or_all
 from .lens import lenses
@@ -22,16 +23,34 @@
 
 class Camera(Orientation):
 
-    r"""
+    r"""A representation of a point of view into a Scene.
 
-    The Camera class. A Camera represents of point of view into a
-    Scene. It is defined by a position (the location of the camera
+    It is defined by a position (the location of the camera
     in the simulation domain,), a focus (the point at which the
     camera is pointed), a width (the width of the snapshot that will
     be taken), a resolution (the number of pixels in the image), and
     a north_vector (the "up" direction in the resulting image). A
     camera can use a variety of different Lens objects.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
+    auto: boolean
+        If True, build smart defaults using the data source extent. This
+        can be time-consuming to iterate over the entire dataset to find
+        the positional bounds. Default: False
+
+    Examples
+    --------
+    >>> cam = Camera(ds)
+
     """
 
     _moved = True
@@ -42,29 +61,7 @@
 
     def __init__(self, data_source=None, lens_type='plane-parallel',
                  auto=False):
-        """
-        Initialize a Camera Instance
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        lens_type: string, optional
-            This specifies the type of lens to use for rendering. Current
-            options are 'plane-parallel', 'perspective', and 'fisheye'. See
-            :class:`yt.visualization.volume_rendering.lens.Lens` for details.
-            Default: 'plane-parallel'
-        auto: boolean
-            If True, build smart defaults using the data source extent. This
-            can be time-consuming to iterate over the entire dataset to find
-            the positional bounds. Default: False
-
-        Examples
-        --------
-        >>> cam = Camera(ds)
-
-        """
+        """Initialize a Camera Instance"""
         self.lens = None
         self.north_vector = None
         self.normal_vector = None
@@ -90,12 +87,18 @@
 
     def position():
         doc = '''The position is the location of the camera in
-               the coordinate system of the simulation.'''
+               the coordinate system of the simulation. This needs
+               to be either a YTArray or a numpy array. If it is a 
+               numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units 
+               automatically. '''
 
         def fget(self):
             return self._position
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._position = value
             self.switch_orientation()
 
@@ -105,12 +108,17 @@
     position = property(**position())
 
     def width():
-        doc = '''The width of the image that will be produced. '''
+        doc = '''The width of the region that will be seen in the image. 
+               This needs to be either a YTArray or a numpy array. If it 
+               is a numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units automatically. '''
 
         def fget(self):
             return self._width
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._width = value
             self.switch_orientation()
 
@@ -121,12 +129,18 @@
     width = property(**width())
 
     def focus():
-        doc = '''The focus defines the point the Camera is pointed at. '''
+        doc = '''The focus defines the point the Camera is pointed at. This needs
+               to be either a YTArray or a numpy array. If it is a 
+               numpy array, it is assumed to be in code units. If it
+               is a YTArray, it will be converted to code units 
+               automatically. '''
 
         def fget(self):
             return self._focus
 
         def fset(self, value):
+            if isinstance(value, YTArray):
+                value = value.in_units("code_length")
             self._focus = value
             self.switch_orientation()
 
@@ -161,9 +175,7 @@
         return lens_params
 
     def set_lens(self, lens_type):
-        r'''
-
-        Set the lens to be used with this camera. 
+        r"""Set the lens to be used with this camera.
 
         Parameters
         ----------
@@ -177,7 +189,7 @@
             'spherical'
             'stereo-spherical'
 
-        '''
+        """
         if lens_type not in lenses:
             mylog.error("Lens type not available")
             raise RuntimeError()
@@ -185,6 +197,7 @@
         self.lens.camera = self
 
     def set_defaults_from_data_source(self, data_source):
+        """Resets the camera attributes to their default values"""
         self.position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
@@ -215,20 +228,22 @@
         self._moved = True
 
     def set_width(self, width):
-        r"""
-
-        Set the width of the image that will be produced by this camera.
-        This must be a YTQuantity.
+        r"""Set the width of the image that will be produced by this camera.
 
         Parameters
         ----------
 
-        width : :class:`yt.units.yt_array.YTQuantity`
-
+        width : YTQuantity or 3 element YTArray
+            The width of the volume rendering in the horizontal, vertical, and
+            depth directions. If a scalar, assumes that the width is the same in
+            all three directions.
         """
-        assert isinstance(width, YTArray), 'Width must be created with ds.arr'
-        if isinstance(width, YTArray):
+        try:
             width = width.in_units('code_length')
+        except (AttributeError, UnitParseError):
+            raise ValueError(
+                'Volume rendering width must be a YTArray that can be '
+                'converted to code units')
 
         if not iterable(width):
             width = YTArray([width.d]*3, width.units)  # Can't get code units.
@@ -236,9 +251,7 @@
         self.switch_orientation()
 
     def set_position(self, position, north_vector=None):
-        r"""
-
-        Set the position of the camera.
+        r"""Set the position of the camera.
 
         Parameters
         ----------
@@ -256,8 +269,7 @@
                                 north_vector=north_vector)
 
     def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""
-        Change the view direction based on any of the orientation parameters.
+        r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes
         related to an orientable object.
@@ -492,11 +504,6 @@
         factor : float
             The factor by which to reduce the distance to the focal point.
 
-
-        Notes
-        -----
-
-        You will need to call snapshot() again to get a new image.
         """
         self.set_width(self.width / factor)
 

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -26,15 +26,7 @@
 
 
 class Lens(ParallelAnalysisInterface):
-
-    """
-
-    A base class for setting up Lens objects. A Lens,
-    along with a Camera, is used to defined the set of
-    rays that will be used for rendering.
-
-    """
-
+    """A Lens is used to define the set of rays for rendering."""
     def __init__(self, ):
         super(Lens, self).__init__()
         self.viewpoint = None
@@ -48,9 +40,14 @@
         self.sampler = None
 
     def set_camera(self, camera):
+        """Set the properties of the lens based on the camera.
+
+        This is a proxy for setup_box_properties
+        """
         self.setup_box_properties(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0], camera.resolution[1],
                       4), dtype='float64', order='C'),
@@ -58,6 +55,7 @@
         return self.current_image
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         unit_vectors = camera.unit_vectors
         width = camera.width
         center = camera.focus
@@ -80,13 +78,12 @@
 
 
 class PlaneParallelLens(Lens):
+    r"""The lens for orthographic projections.
 
-    r'''
-
-    This lens type is the standard type used for orthographic projections. 
     All rays emerge parallel to each other, arranged along a plane.
 
-    '''
+    The initializer takes no parameters.
+    """
 
     def __init__(self, ):
         super(PlaneParallelLens, self).__init__()
@@ -111,6 +108,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
+        """Set the viewpoint based on the camera"""
         # This is a hack that should be replaced by an alternate plane-parallel
         # traversal. Put the camera really far away so that the effective
         # viewpoint is infinitely far away, making for parallel rays.
@@ -135,17 +133,14 @@
 
 
 class PerspectiveLens(Lens):
+    r"""A lens for viewing a scene with a set of rays within an opening angle.
 
-    r'''
-
-    This lens type adjusts for an opening view angle, so that the scene will 
-    have an element of perspective to it.
-
-    '''
+    The scene will have an element of perspective to it since the rays are not
+    parallel.
+    """
 
     def __init__(self):
         super(PerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
 
     def new_image(self, camera):
         self.current_image = ImageArray(
@@ -155,13 +150,6 @@
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
         else:
@@ -174,24 +162,30 @@
         px = np.mat(np.linspace(-.5, .5, camera.resolution[0]))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(camera.resolution[0],1,3), \
+        sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              camera.resolution[0], axis=0)
 
-        normal_vecs = np.tile(normal_vec, camera.resolution[0] * camera.resolution[1])\
-                             .reshape(camera.resolution[0], camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec, camera.resolution[0] * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, camera.resolution[0] * camera.resolution[1])
+        positions = positions.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         uv = np.ones(3, dtype='float64')
 
@@ -234,11 +228,12 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is on the backwards, then we put it outside of
+                # the image It can not be simply removed because it may connect
+                # to other corner within the image, which produces visible
+                # domain boundary line
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera.position + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
@@ -256,15 +251,14 @@
 
 
 class StereoPerspectiveLens(Lens):
-
-    """docstring for StereoPerspectiveLens"""
+    """A lens that includes two sources for perspective rays, for 3D viewing"""
 
     def __init__(self):
         super(StereoPerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
         self.disparity = None
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]*camera.resolution[1], 1,
                       4), dtype='float64', order='C'),
@@ -275,10 +269,6 @@
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
         # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
@@ -287,8 +277,10 @@
         else:
             image = self.new_image(camera)
 
-        vectors_left, positions_left = self._get_positions_vectors(camera, -self.disparity)
-        vectors_right, positions_right = self._get_positions_vectors(camera, self.disparity)
+        vectors_left, positions_left = self._get_positions_vectors(
+            camera, -self.disparity)
+        vectors_right, positions_right = self._get_positions_vectors(
+            camera, self.disparity)
 
         uv = np.ones(3, dtype='float64')
 
@@ -330,28 +322,37 @@
         px = np.mat(np.linspace(-.5, .5, single_resolution_x))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(single_resolution_x,1,3), \
+        sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              single_resolution_x, axis=0)
 
-        normal_vecs = np.tile(normal_vec_rot, single_resolution_x * camera.resolution[1])\
-                             .reshape(single_resolution_x, camera.resolution[1], 3)
-        east_vecs = np.tile(east_vec_rot, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec_rot, single_resolution_x * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
+        east_vecs = np.tile(
+            east_vec_rot, single_resolution_x * camera.resolution[1])
+        east_vecs = east_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
-        positions = positions + east_vecs * disparity # Here the east_vecs is non-rotated one
+        # Here the east_vecs is non-rotated one
+        positions = positions + east_vecs * disparity
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -365,8 +366,10 @@
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
-        px_left, py_left, dz_left = self._get_px_py_dz(camera, pos, res, -self.disparity)
-        px_right, py_right, dz_right = self._get_px_py_dz(camera, pos, res, self.disparity)
+        px_left, py_left, dz_left = self._get_px_py_dz(
+            camera, pos, res, -self.disparity)
+        px_right, py_right, dz_right = self._get_px_py_dz(
+            camera, pos, res, self.disparity)
 
         px = np.hstack([px_left, px_right])
         py = np.hstack([py_left, py_right])
@@ -402,16 +405,18 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is behind the camera, we put it outside of
+                # the image. It cannot simply be removed, because it may
+                # connect to another corner within the image, which would
+                # produce a visible domain-boundary line.
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera_position_shift + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, east_vec_rot)
         dy = np.dot(pos1 - sight_center.d, north_vec)
         dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
+
         # Transpose into image coords.
         px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -431,14 +436,13 @@
 
 
 class FisheyeLens(Lens):
+    r"""A lens for dome-based renderings
 
-    r"""
-
-    This lens type accepts a field-of-view property, fov, that describes how wide 
-    an angle the fisheye can see. Fisheye images are typically used for dome-based 
-    presentations; the Hayden planetarium for instance has a field of view of 194.6. 
-    The images returned by this camera will be flat pixel images that can and should 
-    be reshaped to the resolution.    
+    This lens type accepts a field-of-view property, fov, that describes how
+    wide an angle the fisheye can see. Fisheye images are typically used for
+    dome-based presentations; the Hayden planetarium for instance has a field of
+    view of 194.6.  The images returned by this camera will be flat pixel images
+    that can and should be reshaped to the resolution.
 
     """
 
@@ -450,11 +454,13 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(FisheyeLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]**2, 1,
                       4), dtype='float64', order='C'),
@@ -489,9 +495,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a FisheyeLens, the viewpoint is the front center.
-        """
+        """For a FisheyeLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def __repr__(self):
@@ -530,12 +534,11 @@
 
 
 class SphericalLens(Lens):
+    r"""A lens for cylindrical-spherical projection.
 
-    r"""
+    Movies rendered in this way can be displayed in head-tracking devices or
+    in YouTube 360 view.
 
-    This is a cylindrical-spherical projection. Movies rendered in this way 
-    can be displayed in head-tracking devices or in YouTube 360 view.
-    
     """
 
     def __init__(self):
@@ -545,6 +548,7 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(SphericalLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
@@ -562,11 +566,13 @@
         vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position,
+            camera.resolution[0] * camera.resolution[1]).reshape(
+                camera.resolution[0], camera.resolution[1], 3)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((camera.resolution[0]*camera.resolution[1], 3))
@@ -595,9 +601,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a PerspectiveLens, the viewpoint is the front center.
-        """
+        """For a SphericalLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def project_to_plane(self, camera, pos, res=None):
@@ -631,8 +635,11 @@
 
 
 class StereoSphericalLens(Lens):
+    r"""A lens for a stereo cylindrical-spherical projection.
 
-    """docstring for StereoSphericalLens"""
+    Movies rendered in this way can be displayed in VR devices or as stereo
+    YouTube 360-degree movies.
+    """
 
     def __init__(self):
         super(StereoSphericalLens, self).__init__()
@@ -651,31 +658,35 @@
             self.disparity = camera.width[0] / 1000.
 
         single_resolution_x = np.floor(camera.resolution[0])/2
-        px = np.linspace(-np.pi, np.pi, single_resolution_x, endpoint=True)[:,None]
-        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1], endpoint=True)[None,:]
+        px = np.linspace(-np.pi, np.pi, single_resolution_x,
+                         endpoint=True)[:, None]
+        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+                         endpoint=True)[None, :]
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
-        vectors[:,:,0] = np.cos(px) * np.cos(py)
-        vectors[:,:,1] = np.sin(px) * np.cos(py)
-        vectors[:,:,2] = np.sin(py)
+        vectors[:, :, 0] = np.cos(px) * np.cos(py)
+        vectors[:, :, 1] = np.sin(px) * np.cos(py)
+        vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
         vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
                             dtype='float64', order='C')
-        vectors2[:,:,0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,1] = np.cos(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,2] = 0
+        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 2] = 0
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         # The left and right are switched here since VR is in LHS.
         positions_left = positions + vectors2 * self.disparity
         positions_right = positions + vectors2 * (-self.disparity)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((single_resolution_x*camera.resolution[1], 3))

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -39,10 +39,9 @@
 
 class RenderSource(ParallelAnalysisInterface):
 
-    """
+    """Base Class for Render Sources.
 
-    Base Class for Render Sources. Will be inherited for volumes,
-    streamlines, etc.
+    Will be inherited for volumes, streamlines, etc.
 
     """
 
@@ -59,10 +58,9 @@
 
 
 class OpaqueSource(RenderSource):
-    """
+    """A base class for opaque render sources.
 
-    A base class for opaque render sources. Will be inherited from
-    for LineSources, BoxSources, etc.
+    Will be inherited from for LineSources, BoxSources, etc.
 
     """
     def __init__(self):
@@ -82,40 +80,39 @@
 
 
 class VolumeSource(RenderSource):
+    """A class for rendering data from a volumetric data source
+
+    Examples of such sources include a sphere, cylinder, or the
+    entire computational domain.
+
+    A :class:`VolumeSource` provides the framework to decompose an arbitrary
+    yt data source into bricks that can be traversed and volume rendered.
+
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    fields : string
+        The name of the field(s) to be rendered.
+    auto: bool, optional
+        If True, will build a default AMRKDTree and transfer function based
+        on the data.
+
+    Examples
+    --------
+    >>> source = VolumeSource(ds.all_data(), 'density')
+
 
     """
 
-    A VolumeSource is a class for rendering data from
-    an arbitrary volumetric data source, e.g. a sphere,
-    cylinder, or the entire computational domain.
 
 
-    """
     _image = None
     data_source = None
 
     def __init__(self, data_source, field, auto=True):
-        r"""Initialize a new volumetric source for rendering.
-
-        A :class:`VolumeSource` provides the framework to decompose an arbitrary
-        yt data source into bricks that can be traversed and volume rendered.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field(s) to be rendered.
-        auto: bool, optional
-            If True, will build a default AMRKDTree and transfer function based
-            on the data.
-
-        Examples
-        --------
-        >>> source = RenderSource(ds, 'density')
-
-        """
+        r"""Initialize a new volumetric source for rendering."""
         super(VolumeSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -138,15 +135,14 @@
             self.build_defaults()
 
     def build_defaults(self):
+        """Sets a default volume and transfer function"""
         mylog.info("Creating default volume")
         self.build_default_volume()
         mylog.info("Creating default transfer function")
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):
-        """
-        Set transfer function for this source
-        """
+        """Set transfer function for this source"""
         if not isinstance(transfer_function,
                           (TransferFunction, ColorTransferFunction,
                            ProjectionTransferFunction)):
@@ -169,6 +165,7 @@
             raise RuntimeError("Transfer Function not Supplied")
 
     def build_default_transfer_function(self):
+        """Sets up a transfer function"""
         self.tfh = \
             TransferFunctionHelper(self.data_source.pf)
         self.tfh.set_field(self.field)
@@ -177,6 +174,7 @@
         self.transfer_function = self.tfh.tf
 
     def build_default_volume(self):
+        """Sets up an AMRKDTree based on the VolumeSource's field"""
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
@@ -184,25 +182,36 @@
         self.volume.set_fields([self.field], log_fields, True)
 
     def set_volume(self, volume):
+        """Associates an AMRKDTree with the VolumeSource"""
         assert(isinstance(volume, AMRKDTree))
         del self.volume
         self.volume = volume
 
-    def set_field(self, field, no_ghost=True):
-        field = self.data_source._determine_fields(field)[0]
-        log_field = self.data_source.pf.field_info[field].take_log
-        self.volume.set_fields(field, [log_field], no_ghost)
-        self.field = field
+    def set_fields(self, fields, no_ghost=True):
+        """Set the source's fields to render
 
-    def set_fields(self, fields, no_ghost=True):
-        fields = self.data_source._determine_fields(fields)
+        Parameters
+        ----------
+        fields: field name or list of field names
+            The field or fields to render
+        no_ghost: boolean
+            If False, the AMRKDTree estimates vertex centered data using ghost
+            zones, which can eliminate seams in the resulting volume rendering.
+            Defaults to True for performance reasons.
+        """
+        fields = self.data_source._determine_fields(fields)
         log_fields = [self.data_source.ds.field_info[f].take_log
                       for f in fields]
         self.volume.set_fields(fields, log_fields, no_ghost)
         self.field = fields
 
     def set_sampler(self, camera):
-        """docstring for add_sampler"""
+        """Sets a volume render sampler
+
+        The type of sampler is determined based on the ``sampler_type`` attribute
+        of the VolumeSource. Currently the ``volume_render`` and ``projection``
+        sampler types are supported.
+        """
         if self.sampler_type == 'volume-render':
             sampler = new_volume_render_sampler(camera, self)
         elif self.sampler_type == 'projection':
@@ -213,6 +222,25 @@
         assert(self.sampler is not None)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
+ 
         self.zbuffer = zbuffer
         self.set_sampler(camera)
         assert (self.sampler is not None)
@@ -240,10 +268,23 @@
         return self.current_image
 
     def finalize_image(self, camera, image, call_from_VR=False):
-        image = self.volume.reduce_tree_images(image,
+        """Parallel reduce the image.
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            The camera used to produce the volume rendering image.
+        image: :class:`yt.data_objects.image_array.ImageArray` instance
+            A reference to an image to fill
+        call_from_vr: boolean, optional
+            Whether or not this is being called from a higher level in the VR
+            interface. Used to set the correct orientation.
+        """
+        image = self.volume.reduce_tree_images(image,
                                                camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
-        # If the call is from VR, the image is rotated by 180 to get correct up dir
+        # If the call is from VR, the image is rotated by 180 to get correct
+        # up direction
         if call_from_VR: image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
@@ -256,38 +297,33 @@
 
 
 class MeshSource(RenderSource):
+    """A mesh for unstructured mesh data
 
-    """
+    This functionality requires the embree ray-tracing engine and the
+    associated pyembree python bindings to be installed in order to
+    function.
 
-    MeshSource is a class for volume rendering unstructured mesh
-    data. This functionality requires the embree ray-tracing
-    engine and the associated pyembree python bindings to be
-    installed in order to function.
+    A :class:`MeshSource` provides the framework to volume render
+    unstructured mesh data.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    field : string
+        The name of the field to be rendered.
+
+    Examples
+    --------
+    >>> source = MeshSource(ds, ('all', 'convected'))
     """
 
     _image = None
     data_source = None
 
     def __init__(self, data_source, field):
-        r"""Initialize a new unstructured source for rendering.
-
-        A :class:`MeshSource` provides the framework to volume render
-        unstructured mesh data.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field to be rendered.
-
-        Examples
-        --------
-        >>> source = MeshSource(ds, ('all', 'convected'))
-
-        """
+        r"""Initialize a new unstructured source for rendering."""
         super(MeshSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -325,7 +361,25 @@
                                                   field_data.d)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
 
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
+ 
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
@@ -342,6 +396,29 @@
 
 
 class PointSource(OpaqueSource):
+    r"""A rendering source of opaque points in the scene.
+
+    This class provides a mechanism for adding points to a scene; these
+    points will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 3)
+        These positions, in data-space coordinates, are the points to be
+        added to the scene.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = PointSource(particle_positions)
+
+    """
+
 
     _image = None
     data_source = None
@@ -369,6 +446,7 @@
         >>> source = PointSource(particle_positions)
 
         """
+    def __init__(self, positions, colors=None, color_stride=1):
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -378,6 +456,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -403,39 +499,38 @@
 
 
 class LineSource(OpaqueSource):
+    r"""A render source for a sequence of opaque line segments.
+
+    This class provides a mechanism for adding lines to a scene; these
+    points will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 2, 3)
+        These positions, in data-space coordinates, are the starting and
+        stopping points for each pair of lines. For example,
+        positions[0][0] and positions[0][1] would give the (x, y, z)
+        coordinates of the beginning and end points of the first line,
+        respectively.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.  Note that they correspond to the line
+        segment succeeding each point; this means that strictly speaking
+        they need only be (N-1) in length.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = LineSource(np.random.random((10, 3)))
+
+    """
 
     _image = None
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A render source for a sequence of opaque line segments.
-
-        This class provides a mechanism for adding lines to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 2, 3)
-            These positions, in data-space coordinates, are the starting and
-            stopping points for each pair of lines. For example,
-            positions[0][0] and positions[0][1] would give the (x, y, z)
-            coordinates of the beginning and end points of the first line,
-            respectively.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.  Note that they correspond to the line
-            segment succeeding each point; this means that strictly speaking
-            they need only be (N-1) in length.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = LineSource(np.random.random((10, 3)))
-
-        """
-
         super(LineSource, self).__init__()
 
         assert(positions.shape[1] == 2)
@@ -453,6 +548,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -478,26 +591,26 @@
 
 
 class BoxSource(LineSource):
+    r"""A render source for a box drawn with line segments.
+    This render source will draw a box, with transparent faces, in data
+    space coordinates.  This is useful for annotations.
+
+    Parameters
+    ----------
+    left_edge: array-like, shape (3,), float
+        The left edge coordinates of the box.
+    right_edge : array-like, shape (3,), float
+        The right edge coordinates of the box.
+    color : array-like, shape (4,), float, optional
+        The colors (including alpha) to use for the lines.
+
+    Examples
+    --------
+    >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+    """
     def __init__(self, left_edge, right_edge, color=None):
-        r"""A render source for a box drawn with line segments.
-
-        This render source will draw a box, with transparent faces, in data
-        space coordinates.  This is useful for annotations.
-
-        Parameters
-        ----------
-        left_edge: array-like, shape (3,), float
-            The left edge coordinates of the box.
-        right_edge : array-like, shape (3,), float
-            The right edge coordinates of the box.
-        color : array-like, shape (4,), float, optional
-            The colors (including alpha) to use for the lines.
-
-        Examples
-        --------
-        >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
-
-        """
+        r"""A render source for a box drawn with line segments."""
         if color is None:
             color = np.array([1.0, 1.0, 1.0, 1.0])
         color = ensure_numpy_array(color)
@@ -515,32 +628,32 @@
 
 
 class GridSource(LineSource):
+    r"""A render source for drawing grids in a scene.
+
+    This render source will draw blocks that are within a given data
+    source, by default coloring them by their level of resolution.
+
+    Parameters
+    ----------
+    data_source: :class:`~yt.data_objects.api.DataContainer`
+        The data container that will be used to identify grids to draw.
+    alpha : float
+        The opacity of the grids to draw.
+    cmap : color map name
+        The color map to use to map resolution levels to color.
+    min_level : int, optional
+        Minimum level to draw
+    max_level : int, optional
+        Maximum level to draw
+
+    Examples
+    --------
+    >>> dd = ds.sphere("c", (0.1, "unitary"))
+    >>> source = GridSource(dd, alpha=1.0)
+
+    """
     def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
-        r"""A render source for drawing grids in a scene.
-
-        This render source will draw blocks that are within a given data
-        source, by default coloring them by their level of resolution.
-
-        Parameters
-        ----------
-        data_source: :class:`~yt.data_objects.api.DataContainer`
-            The data container that will be used to identify grids to draw.
-        alpha : float
-            The opacity of the grids to draw.
-        cmap : color map name
-            The color map to use to map resolution levels to color.
-        min_level : int, optional
-            Minimum level to draw
-        max_level : int, optional
-            Maximum level to draw
-
-        Examples
-        --------
-        >>> dd = ds.sphere("c", (0.1, "unitary"))
-        >>> source = GridSource(dd, alpha=1.0)
-
-        """
         data_source = data_source_or_all(data_source)
         corners = []
         levels = []
@@ -588,24 +701,25 @@
 
 
 class CoordinateVectorSource(OpaqueSource):
+    r"""Draw coordinate vectors on the scene.
+
+    This will draw a set of coordinate vectors on the camera image.  They
+    will appear in the lower right of the image.
+
+    Parameters
+    ----------
+    colors: array-like, shape (3,4), optional
+        The x, y, z RGBA values to use to draw the vectors.
+    alpha : float, optional
+        The opacity of the vectors.
+
+    Examples
+    --------
+    >>> source = CoordinateVectorSource()
+    """
+
     def __init__(self, colors=None, alpha=1.0):
-        r"""Draw coordinate vectors on the scene.
-
-        This will draw a set of coordinate vectors on the camera image.  They
-        will appear in the lower right of the image.
-
-        Parameters
-        ----------
-        colors: array-like, shape (3,4), optional
-            The x, y, z RGBA values to use to draw the vectors.
-        alpha : float, optional
-            The opacity of the vectors.
-
-        Examples
-        --------
-        >>> source = CoordinateVectorSource()
-
-        """
+        r"""Draw coordinate vectors on the scene."""
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -618,6 +732,24 @@
         self.color_stride = 2
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         camera.lens.setup_box_properties(camera)
         center = camera.focus
         # Get positions at the focus

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -24,34 +24,32 @@
 
 class Scene(object):
 
-    """The Scene Class
+    """A virtual landscape for a volume rendering.
 
     The Scene class is meant to be the primary container for the
     new volume rendering framework. A single scene may contain
     several Camera and RenderSource instances, and is the primary
     driver behind creating a volume rendering.
 
+    This sets up the basics needed to add sources and cameras.
+    This does very little setup, and requires additional input
+    to do anything useful.
+
+    Parameters
+    ----------
+    None
+
+    Examples
+    --------
+    >>> sc = Scene()
+
     """
 
     _current = None
     _camera = None
 
     def __init__(self):
-        r"""Create a new Scene instance.
-
-        This sets up the basics needed to add sources and cameras.
-        This does very little setup, and requires additional input
-        to do anything useful.
-
-        Parameters
-        ----------
-        None
-
-        Examples
-        --------
-        >>> sc = Scene()
-
-        """
+        r"""Create a new Scene instance"""
         super(Scene, self).__init__()
         self.sources = OrderedDict()
         self.camera = None
@@ -59,6 +57,7 @@
         self.last_render = None
 
     def get_source(self, source_num):
+        """Returns the volume rendering source indexed by ``source_num``"""
         return list(itervalues(self.sources))[source_num]
 
     def _iter_opaque_sources(self):
@@ -81,9 +80,18 @@
                 yield k, source
 
     def add_source(self, render_source, keyname=None):
-        """
-        Add a render source to the scene.  This will autodetect the
-        type of source.
+        """Add a render source to the scene.
+
+        This will autodetect the type of source.
+
+        Parameters
+        ----------
+        render_source: an instance of :class:`yt.visualization.volume_rendering.render_source.RenderSource`
+            A source to contribute to the volume rendering scene.
+
+        keyname: string (optional)
+            The dictionary key used to reference the source in the sources
+            dictionary.
         """
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -0,0 +1,151 @@
+"""
+Answer test to verify VR orientation and rotation is correct
+"""
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+
+import numpy as np
+
+from yt import load_uniform_grid
+from yt.utilities.answer_testing.framework import \
+    requires_answer_testing, \
+    VRImageComparisonTest
+from yt.visualization.volume_rendering.api import \
+    Scene, \
+    Camera, \
+    VolumeSource, \
+    ColorTransferFunction
+
+
+def setup_ds():
+
+    N = 96
+
+    xmin = ymin = zmin = -1.0
+    xmax = ymax = zmax = 1.0
+
+    dcoord = (xmax - xmin)/N
+
+    arr = np.zeros((N, N, N), dtype=np.float64)
+    arr[:, :, :] = 1.e-4
+
+    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
+
+    # coordinates -- in the notation data[i, j, k]
+    x = (np.arange(N) + 0.5)*(xmax - xmin)/N + xmin
+    y = (np.arange(N) + 0.5)*(ymax - ymin)/N + ymin
+    z = (np.arange(N) + 0.5)*(zmax - zmin)/N + zmin
+
+    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
+
+    # sphere at the origin
+    c = np.array([0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax)])
+
+    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
+    arr[r < 0.05] = 1.0
+
+    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
+    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
+    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
+
+    # single cube on +x
+    xc = 0.75
+    dx = 0.05
+    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
+                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
+                                        np.logical_and(z3d > -dx, z3d < dx)))
+
+    arr[idx] = 1.0
+
+    # two cubes on +y
+    dy = 0.05
+    for yc in [0.65, 0.85]:
+
+        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
+                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
+                                            np.logical_and(z3d > -dy, z3d < dy)))
+
+        arr[idx] = 0.8
+
+    # three cubes on +z
+    dz = 0.05
+    for zc in [0.5, 0.7, 0.9]:
+
+        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
+                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
+                                            np.logical_and(y3d > -dz, y3d < dz)))
+
+        arr[idx] = 0.6
+
+    data = dict(Density=arr)
+    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
+
+    return ds
+
+
+ at requires_answer_testing()
+def test_orientation():
+    ds = setup_ds()
+
+    sc = Scene()
+
+    vol = VolumeSource(ds, field=('gas', 'Density'))
+
+    tf = vol.transfer_function
+    tf = ColorTransferFunction((0.1, 1.0))
+    tf.sample_colormap(1.0, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.8, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.6, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.3, 0.01, colormap="coolwarm")
+
+    n_frames = 5
+    theta = np.pi / n_frames
+    decimals = 3
+
+    for lens_type in ['plane-parallel', 'perspective']:
+        frame = 0
+
+        cam = Camera(ds, lens_type=lens_type)
+        cam.resolution = (1000, 1000)
+        cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
+        cam.switch_orientation(normal_vector=[1., 0., 0.],
+                               north_vector=[0., 0., 1.])
+        cam.set_width(ds.domain_width*2.)
+
+        sc.camera = cam
+        sc.add_source(vol)
+        yield VRImageComparisonTest(
+            sc, ds, '%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.yaw(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'yaw_%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            theta = np.pi / n_frames
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.pitch(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'pitch_%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            theta = np.pi / n_frames
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.roll(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'roll_%s_%04d' % (lens_type, frame), decimals)

diff -r 872c02132d6464286b231e5a08928535e5284a84 -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -12,13 +12,40 @@
 #-----------------------------------------------------------------------------
 
 
-from yt.funcs import mylog
-from yt.data_objects.api import ImageArray
 import numpy as np
 
 
 class ZBuffer(object):
-    """docstring for ZBuffer"""
+    """A container object for z-buffer arrays
+
+    A zbuffer is a companion array for an image that allows the volume rendering
+    infrastructure to determine whether one opaque source is in front of another
+    opaque source.  The z buffer encodes the distance to the opaque source
+    relative to the camera position.
+
+    Parameters
+    ----------
+    rgba: MxNx4 image
+        The image the z buffer corresponds to
+    z: MxN image
+        The z depth of each pixel in the image. The shape of the image must be
+        the same as each RGBA channel in the original image.
+    
+    Examples
+    --------
+>>> import numpy as np
+>>> shape = (64, 64)
+>>> b1 = ZBuffer(np.random.random(shape), np.ones(shape))
+>>> b2 = ZBuffer(np.random.random(shape), np.zeros(shape))
+>>> c = b1 + b2
+>>> np.all(c.rgba == b2.rgba)
+True
+>>> np.all(c.z == b2.z)
+True
+>>> np.all(c == b2)
+True
+
+    """
     def __init__(self, rgba, z):
         super(ZBuffer, self).__init__()
         assert(rgba.shape[:len(z.shape)] == z.shape)
@@ -31,8 +58,8 @@
         f = self.z < other.z
         if self.z.shape[1] == 1:
             # Non-rectangular
-            rgba = (self.rgba * f[:,None,:])
-            rgba += (other.rgba * (1.0 - f)[:,None,:])
+            rgba = (self.rgba * f[:, None, :])
+            rgba += (other.rgba * (1.0 - f)[:, None, :])
         else:
             b = self.z > other.z
             rgba = np.empty(self.rgba.shape)


https://bitbucket.org/yt_analysis/yt/commits/074abc3c5e5a/
Changeset:   074abc3c5e5a
Branch:      yt
User:        chummels
Date:        2015-10-16 06:56:22+00:00
Summary:     Fixing merge.
Affected #:  1 file

diff -r ba4ecabbaeb6abff7646683906d3793b0afcedd2 -r 074abc3c5e5a5a122f7a2508acc5fdcbdee3ab51 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -102,12 +102,8 @@
     Examples
     --------
     >>> source = VolumeSource(ds.all_data(), 'density')
-
-
     """
 
-
-
     _image = None
     data_source = None
 
@@ -198,12 +194,12 @@
             If False, the AMRKDTree estimates vertex centered data using ghost
             zones, which can eliminate seams in the resulting volume rendering.
             Defaults to True for performance reasons.
-             fields = self.data_source._determine_fields(fields)
+    """
+        fields = self.data_source._determine_fields(fields)
         log_fields = [self.data_source.ds.field_info[f].take_log
                       for f in fields]
         self.volume.set_fields(fields, log_fields, no_ghost)
         self.field = fields
-    """
 
     def set_sampler(self, camera):
         """Sets a volume render sampler
@@ -424,29 +420,8 @@
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A rendering source of opaque points in the scene.
-
-        This class provides a mechanism for adding points to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 3)
-            These positions, in data-space coordinates, are the points to be
-            added to the scene.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = PointSource(particle_positions)
-
+        """Construct a PointSource object.
         """
-    def __init__(self, positions, colors=None, color_stride=1):
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:


https://bitbucket.org/yt_analysis/yt/commits/8508410f7230/
Changeset:   8508410f7230
Branch:      yt
User:        chummels
Date:        2015-10-16 19:22:48+00:00
Summary:     Fixing a few mistakes in my merge.
Affected #:  3 files

diff -r 074abc3c5e5a5a122f7a2508acc5fdcbdee3ab51 -r 8508410f7230a944b78897816b3922bdc47fd6c6 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -373,6 +373,7 @@
 .. python-script::
 
    for i in sc.camera.zoomin(100, 5):
+       sc.render()
        sc.save("frame_%03i.png" % i)
 
 The variable ``i`` is the frame number in the particular loop being called.  In

diff -r 074abc3c5e5a5a122f7a2508acc5fdcbdee3ab51 -r 8508410f7230a944b78897816b3922bdc47fd6c6 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -70,15 +70,6 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
-    def render(self, camera, zbuffer=None):
-        # This is definitely wrong for now
-        if zbuffer is not None and self.zbuffer is not None:
-            zbuffer.rgba = self.zbuffer.rgba
-            zbuffer.z = self.zbuffer.z
-            self.zbuffer = zbuffer
-        return self.zbuffer
-
-
 class VolumeSource(RenderSource):
     """A class for rendering data from a volumetric data source
 
@@ -184,7 +175,7 @@
         self.volume = volume
 
     def set_fields(self, fields, no_ghost=True):
-    """Set the source's fields to render
+        """Set the source's fields to render
 
         Parameters
         ---------
@@ -194,7 +185,7 @@
             If False, the AMRKDTree estimates vertex centered data using ghost
             zones, which can eliminate seams in the resulting volume rendering.
             Defaults to True for performance reasons.
-    """
+        """
         fields = self.data_source._determine_fields(fields)
         log_fields = [self.data_source.ds.field_info[f].take_log
                       for f in fields]
@@ -236,7 +227,6 @@
         the rendered image.
 
         """
- 
         self.zbuffer = zbuffer
         self.set_sampler(camera)
         assert (self.sampler is not None)
@@ -281,7 +271,8 @@
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
         # up direction
-        if call_from_VR: image = np.rot90(image, k=2)
+        if call_from_VR is True: 
+            image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
         return image
@@ -293,7 +284,7 @@
 
 
 class MeshSource(RenderSource):
-    """A mesh for unstructured mesh data
+    """A source for unstructured mesh data
 
     This functionality requires the embree ray-tracing engine and the
     associated pyembree python bindings to be installed in order to
@@ -420,8 +411,6 @@
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        """Construct a PointSource object.
-        """
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -469,7 +458,7 @@
         return zbuffer
 
     def __repr__(self):
-        disp = "<Points Source>"
+        disp = "<Point Source>"
         return disp
 
 
@@ -585,7 +574,6 @@
 
     """
     def __init__(self, left_edge, right_edge, color=None):
-        r"""A render source for a box drawn with line segments."""
         if color is None:
             color = np.array([1.0, 1.0, 1.0, 1.0])
         color = ensure_numpy_array(color)
@@ -694,7 +682,6 @@
     """
 
     def __init__(self, colors=None, alpha=1.0):
-        r"""Draw coordinate vectors on the scene."""
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity
         if colors is None:

diff -r 074abc3c5e5a5a122f7a2508acc5fdcbdee3ab51 -r 8508410f7230a944b78897816b3922bdc47fd6c6 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -118,7 +118,8 @@
 
         Returns
         -------
-        ImageArray instance of the current rendering image.
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the current rendering image.
 
         Examples
         --------


https://bitbucket.org/yt_analysis/yt/commits/0a26d5e14323/
Changeset:   0a26d5e14323
Branch:      yt
User:        chummels
Date:        2015-10-16 20:49:15+00:00
Summary:     Correcting errant indent
Affected #:  1 file

diff -r 8508410f7230a944b78897816b3922bdc47fd6c6 -r 0a26d5e143230f0f4f4822bf76f8a3157257b875 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -266,8 +266,7 @@
             Whether or not this is being called from a higher level in the VR
             interface. Used to set the correct orientation.
         """
-         image = self.volume.reduce_tree_images(image,
-                                               camera.lens.viewpoint)
+        image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
         # up direction


https://bitbucket.org/yt_analysis/yt/commits/bc7e473a9d9f/
Changeset:   bc7e473a9d9f
Branch:      yt
User:        chummels
Date:        2015-10-16 21:20:22+00:00
Summary:     Updating function source for volume rendering in narrative docs.
Affected #:  1 file

diff -r 0a26d5e143230f0f4f4822bf76f8a3157257b875 -r bc7e473a9d9f5ed5c37de03946d6f193f583ad46 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -71,8 +71,9 @@
   # sc is an instance of a Scene object, which allows you to further refine
   # your renderings, and later save them.
 
-When the :func:`~yt.visualization.volume_rendering.volume_render` function 
-is called, first an empty 
+When the 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+function is called, first an empty 
 :class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
 Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
@@ -111,8 +112,8 @@
 Alternatively, if you don't want to immediately generate an image of your
 volume rendering, and you just want access to the default scene object, 
 you can skip this expensive operation by just running the
-:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
-:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` function. Example:
 
 .. python-script::
 
@@ -144,7 +145,7 @@
 :meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
 :class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
 process it in Python (potentially writing it out with 
-:meth:`~yt.data_objects.image_array.ImageArray.write_png`.  You can continue 
+:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
 modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
 and render it as you make changes to see how those changes affect the resulting
 image.  
@@ -162,12 +163,12 @@
 
 .. _sigma_clip:
 
-Brightening an Image with Sigma Clipping
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Improving Image Contrast with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 If your images appear to be too dark, you can try using the ``sigma_clip``
 keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
-:func:`~yt.visualization.volume_rendering.volume_render` functions.  
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
 Because the brightness range in an image is scaled to match the range of 
 emissivity values of underlying rendering, if you have a few really 
 high-emissivity points, they will scale the rest of your image to be quite 


https://bitbucket.org/yt_analysis/yt/commits/c8ad792bc6e9/
Changeset:   c8ad792bc6e9
Branch:      yt
User:        chummels
Date:        2015-10-16 21:40:49+00:00
Summary:     Removing progress bar for render of scene.
Affected #:  1 file

diff -r bc7e473a9d9f5ed5c37de03946d6f193f583ad46 -r c8ad792bc6e969ddf2ac72dd567e87f48ad492b5 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -14,7 +14,7 @@
 
 import numpy as np
 from collections import OrderedDict
-from yt.funcs import mylog, get_image_suffix, get_pbar
+from yt.funcs import mylog, get_image_suffix
 from yt.extern.six import iteritems, itervalues
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
@@ -129,6 +129,7 @@
         >>> sc.save()
 
         """
+        mylog.info("Rendering scene (Can take a while).")
         if camera is None:
             camera = self.camera
         assert(camera is not None)
@@ -198,11 +199,8 @@
     def _validate(self):
         r"""Validate the current state of the scene."""
 
-        pbar = get_pbar("Rendering scene: ", len(self.sources))
-        for i, (k, source) in enumerate(iteritems(self.sources)):
+        for k, source in iteritems(self.sources):
             source._validate()
-            pbar.update(i)
-        pbar.finish()
         return
 
     def composite(self, camera=None):


https://bitbucket.org/yt_analysis/yt/commits/8cdb4e128a46/
Changeset:   8cdb4e128a46
Branch:      yt
User:        chummels
Date:        2015-10-16 21:46:50+00:00
Summary:     Gave a more detailed filename for scene.save() when only opaque sources are specified.
Affected #:  1 file

diff -r c8ad792bc6e969ddf2ac72dd567e87f48ad492b5 -r 8cdb4e128a46af9e6590e12e39f1ad8cf0c90e44 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -173,7 +173,7 @@
         if fname is None:
             sources = list(itervalues(self.sources))
             rensources = [s for s in sources if isinstance(s, RenderSource)]
-            # if a render source present, use its affiliated ds for fname
+            # if a volume source present, use its affiliated ds for fname
             if len(rensources) > 0:
                 rs = rensources[0]
                 basename = rs.data_source.ds.basename
@@ -182,9 +182,9 @@
                 else:
                     field = rs.field[-1]
                 fname = "%s_Render_%s.png" % (basename, field)
-            # if no render source present, use a default filename
+            # if no volume source present, use a default filename
             else:
-                fname = "Render.png"   
+                fname = "Render_opaque.png"   
         suffix = get_image_suffix(fname)
         if suffix == '':
             suffix = '.png'


https://bitbucket.org/yt_analysis/yt/commits/6facab7ff325/
Changeset:   6facab7ff325
Branch:      yt
User:        ngoldbaum
Date:        2015-10-16 23:10:51+00:00
Summary:     Merged in chummels/yt (pull request #1808)

[experimental] Modifying scene.render() function to not save to disk and instead requiring sc.save()
Affected #:  17 files

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -39,7 +39,8 @@
 
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
-sc.render("v1.png")
+sc.render()
+sc.save("v1.png")
 
 # This operation was substantiall faster.  Now lets modify the low resolution
 # rendering until we find something we like.
@@ -48,12 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 #
 ## That seemed to pick out som interesting structures.  Now let's bump up the
 ## opacity.
@@ -61,11 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 #
 ## This looks pretty good, now lets go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # This looks great!

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,15 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,4 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(fname='custom.png', sigma_clip=4)
+sc.render(sigma_clip=4)
+sc.save('custom.png')

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -21,4 +21,4 @@
     np.log10(ds.quan(1.0e-29, 'g/cm**3')),
     scale=30.0, colormap='RdBu_r')
 
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+sc.save('new_tf.png')

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,7 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-im = sc.render("v1.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v1.png")
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -22,27 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-im = sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obcured
 
 tf.grey_opacity = True
-im = sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -50,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v6.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v6.png")
 
 # That is very opaque!  Now lets go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-im = sc.render("v7.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v7.png")
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -8,15 +8,15 @@
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)
-im = sc.render()
-im.write_png("%s_vr_domain.png" % ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
 sc.annotate_grids(ds)
-im = sc.render()
-im.write_png("%s_vr_grids.png" % ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
 sc.annotate_axes()
-im = sc.render()
-im.write_png("%s_vr_coords.png" % ds)
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,7 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_plane-parallel.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_plane-parallel.png')
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -50,7 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_perspective.png')
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -65,7 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-perspective.png')
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -79,7 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_fisheye.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_fisheye.png')
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -96,7 +100,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_spherical.png')
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,4 +116,5 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-spherical.png')

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,11 +31,11 @@
    :align: center
    :alt: Diagram of a 3D Scene
 
-In versions of yt prior to 3.2, the only volume rendering interface accessible
+In versions of yt prior to 3.3, the only volume rendering interface accessible
 was through the "camera" object.  This presented a number of problems,
the principal of which was the inability to describe new scene elements or to
 develop complex visualizations that were independent of the specific elements
-being rendered.  The new "scene" based interface present in yt 3.2 and beyond
+being rendered.  The new "scene" based interface present in yt 3.3 and beyond
 enables both more complex visualizations to be constructed as well as a new,
 more intuitive interface for very simple 3D visualizations.
 
@@ -65,14 +65,15 @@
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_render(ds, 'density', fname='test_rendering.png')
+  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
 
-  # im is the image that was generated.
+  # im is the image array generated. it is also saved to 'rendering.png'.
   # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings.
+  # your renderings, and later save them.
 
-When the :func:`~yt.visualization.volume_rendering.volume_render` function 
-is called, first an empty 
+When the 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+function is called, first an empty 
 :class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
 Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
@@ -96,9 +97,10 @@
 lenses can be swapped in and out.  For example, this might include a fisheye
 lens, a spherical lens, or some other method of describing the direction and
 origin of rays for rendering. Once the camera is added to the scene object, we
-call the main method of the
+call the main methods of the
 :class:`~yt.visualization.volume_rendering.scene.Scene` class,
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render`.  When called,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` and 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When called,
 the scene will loop through all of the
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
 that have been added and integrate the radiative transfer equation through the
@@ -110,20 +112,17 @@
 Alternatively, if you don't want to immediately generate an image of your
 volume rendering, and you just want access to the default scene object, 
 you can skip this expensive operation by just running the
-:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
-:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` function. Example:
 
 .. python-script::
 
   import yt
-  # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field 
   sc = yt.create_scene(ds, 'density')
 
-
-Modifying the Scene
--------------------
+Modifying and Saving the Scene
+------------------------------
 
 Once a basic scene has been created with default render sources and
 camera operations, deeper modifications are possible. These
@@ -133,6 +132,56 @@
 present in the scene.  Below, we describe a few of the aspects of tuning a
 scene to create a visualization that is communicative and pleasing.
 
+.. _rendering_scene:
+
+Rendering and Saving
+++++++++++++++++++++
+
+Whenever you want a rendering of your current scene configuration, use the
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` method to
+trigger the scene to actually do the ray-tracing step.  After that, you can
+use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
+to save it to disk.  Alternatively, 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
+:class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
+process it in Python (potentially writing it out with 
+:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
+modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
+and render it as you make changes to see how those changes affect the resulting
+image.  
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render() 
+  sc.save()
+  # ... make changes to the scene here ...
+  sc.render()
+  sc.save('changes.png')
+
+.. _sigma_clip:
+
+Improving Image Contrast with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your images appear to be too dark, you can try using the ``sigma_clip``
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
+Because the brightness range in an image is scaled to match the range of 
+emissivity values of the underlying rendering, if you have a few really 
+high-emissivity points, they will scale the rest of your image to be quite 
+dark.  ``sigma_clip = N`` can address this by removing values that are more
+than ``N`` standard deviations brighter than the mean of your image.  
+Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render(sigma_clip=4)
+  sc.save()
+
 .. _transfer_functions:
 
 Transfer Functions
@@ -329,7 +378,8 @@
 .. python-script::
 
    for i in sc.camera.zoomin(100, 5):
-       sc.render("frame_%03i.png" % i)
+       sc.render()
+       sc.save("frame_%03i.png" % i)
 
 The variable ``i`` is the frame number in the particular loop being called.  In
 this case, this will zoom in by a factor of 100 over the course of 5 frames.

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -447,7 +447,8 @@
         --------
 
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ...     im = sc.render("rotation_%04i.png" % i)
+        ...     im = sc.render()
+        ...     sc.save('rotation_%04i.png' % i)
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -475,7 +476,8 @@
         --------
 
         >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
-        ...     sc.render("move_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("move_%04i.png" % i)
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -523,7 +525,8 @@
         --------
 
         >>> for i in cam.iter_zoom(100.0, 10):
-        ...     sc.render("zoom_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("zoom_%04i.png" % i)
         """
         f = final**(1.0/n_steps)
         for i in xrange(n_steps):

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -70,7 +70,6 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
-
 class VolumeSource(RenderSource):
     """A class for rendering data from a volumetric data source
 
@@ -94,8 +93,8 @@
     Examples
     --------
     >>> source = VolumeSource(ds.all_data(), 'density')
+    """
 
-    """
     _image = None
     data_source = None
 
@@ -124,7 +123,9 @@
 
     def build_defaults(self):
         """Sets a default volume and transfer function"""
+        mylog.info("Creating default volume")
         self.build_default_volume()
+        mylog.info("Creating default transfer function")
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):
@@ -265,12 +266,11 @@
             Whether or not this is being called from a higher level in the VR
             interface. Used to set the correct orientation.
         """
-        image = self.volume.reduce_tree_images(image,
-                                               camera.lens.viewpoint)
+        image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
-        # up dirirection
-        if call_from_VR is True:
+        # up direction
+        if call_from_VR is True: 
             image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
@@ -365,6 +365,7 @@
         the rendered image.
 
         """
+ 
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
@@ -404,6 +405,7 @@
 
     """
 
+
     _image = None
     data_source = None
 
@@ -455,7 +457,7 @@
         return zbuffer
 
     def __repr__(self):
-        disp = "<Points Source>"
+        disp = "<Point Source>"
         return disp
 
 
@@ -553,7 +555,6 @@
 
 class BoxSource(LineSource):
     r"""A render source for a box drawn with line segments.
-
     This render source will draw a box, with transparent faces, in data
     space coordinates.  This is useful for annotations.
 
@@ -677,8 +678,8 @@
     Examples
     --------
     >>> source = CoordinateVectorSource()
+    """
 
-    """
     def __init__(self, colors=None, alpha=1.0):
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -14,11 +14,11 @@
 
 import numpy as np
 from collections import OrderedDict
-from yt.funcs import mylog
+from yt.funcs import mylog, get_image_suffix
 from yt.extern.six import iteritems, itervalues
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
-    GridSource
+    GridSource, RenderSource
 from .zbuffer_array import ZBuffer
 
 
@@ -53,6 +53,8 @@
         super(Scene, self).__init__()
         self.sources = OrderedDict()
         self.camera = None
+        # An image array containing the last rendered image of the scene
+        self.last_render = None
 
     def get_source(self, source_num):
         """Returns the volume rendering source indexed by ``source_num``"""
@@ -98,22 +100,20 @@
 
         return self
 
-    def render(self, fname=None, sigma_clip=None, camera=None):
+    def render(self, sigma_clip=None, camera=None):
         r"""Render all sources in the Scene.
 
         Use the current state of the Scene object to render all sources
-        currently in the scene.
+        currently in the scene.  Returns the image array.  If you want to
+        save the output to a file, call the save() function.
 
         Parameters
         ----------
-        fname: string, optional
-            If specified, save the rendering as a bitmap to the file "fname".
-            Default: None
         sigma_clip: float, optional
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        camera: :class:`yt.visualization.volume_rendering.camera.Camera`, optional
+        camera: :class:`Camera`, optional
             If specified, use a different :class:`Camera` to render the scene.
 
         Returns
@@ -125,20 +125,80 @@
         --------
         >>> sc = Scene()
         >>> # Add sources/camera/etc
-        >>> im = sc.render('rendering.png')
+        >>> im = sc.render(sigma_clip=4)
+        >>> sc.save()
 
         """
+        mylog.info("Rendering scene (Can take a while).")
         if camera is None:
             camera = self.camera
         assert(camera is not None)
         self._validate()
         bmp = self.composite(camera=camera)
-        if fname is not None:
-            bmp.write_png(fname, sigma_clip=sigma_clip)
+        self.last_render = bmp
         return bmp
 
+    def save(self, fname=None):
+        r"""Saves the most recently rendered image of the Scene to disk.
+
+        Once you have created a scene and rendered that scene to an image 
+        array, this saves that image array to disk with an optional filename.
+        If an image has not yet been rendered for the current scene object,
+        it forces one and writes it out.
+
+        Parameters
+        ----------
+        fname: string, optional
+            If specified, save the rendering as a bitmap to the file "fname".
+            If unspecified, it creates a default based on the dataset filename.
+            Default: None
+
+        Returns
+        -------
+            Nothing
+
+        Examples
+        --------
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.render()
+        >>> sc.save('test.png')
+
+        # Or alternatively
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.save('test.png')
+
+        """
+        if fname is None:
+            sources = list(itervalues(self.sources))
+            rensources = [s for s in sources if isinstance(s, RenderSource)]
+            # if a volume source present, use its affiliated ds for fname
+            if len(rensources) > 0:
+                rs = rensources[0]
+                basename = rs.data_source.ds.basename
+                if isinstance(rs.field, basestring):
+                    field = rs.field
+                else:
+                    field = rs.field[-1]
+                fname = "%s_Render_%s.png" % (basename, field)
+            # if no volume source present, use a default filename
+            else:
+                fname = "Render_opaque.png"   
+        suffix = get_image_suffix(fname)
+        if suffix == '':
+            suffix = '.png'
+            fname = '%s%s' % (fname, suffix)
+
+        if self.last_render is None:
+            self.render()
+
+        mylog.info("Saving render %s", fname)
+        self.last_render.write_png(fname)
+ 
     def _validate(self):
         r"""Validate the current state of the scene."""
+
         for k, source in iteritems(self.sources):
             source._validate()
         return

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/tests/modify_transfer_function.py
--- a/yt/visualization/volume_rendering/tests/modify_transfer_function.py
+++ b/yt/visualization/volume_rendering/tests/modify_transfer_function.py
@@ -22,5 +22,5 @@
 tf.clear()
 tf.grey_opacity=True
 tf.add_layers(3, colormap='RdBu')
-sc.render("new_tf.png")
-
+sc.render()
+sc.save("new_tf.png")

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/tests/multiple_fields.py
--- a/yt/visualization/volume_rendering/tests/multiple_fields.py
+++ b/yt/visualization/volume_rendering/tests/multiple_fields.py
@@ -20,5 +20,6 @@
 volume_source = sc.get_source(0)
 volume_source.set_field(('gas','velocity_x'))
 volume_source.build_default_transfer_function()
-sc.render("render_x.png")
+sc.render()
+sc.save("render_x.png")
 

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
@@ -21,4 +21,5 @@
 frames = 10
 for i in range(frames):
     sc.camera.yaw(angle/frames)
-    sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+    sc.render(sigma_clip=6.0)
+    sc.save('test_rot_%04i.png' % i)

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -55,7 +55,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_perspective_%s.png' % self.field[1])
 
     def test_stereoperspective_lens(self):
         sc = Scene()
@@ -67,8 +68,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereoperspective_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereoperspective_%s.png' % self.field[1])
 
     def test_fisheye_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -85,8 +86,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_fisheye_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_fisheye_%s.png' % self.field[1])
 
     def test_plane_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -101,8 +102,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_plane_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_plane_%s.png' % self.field[1])
 
     def test_spherical_lens(self):
         sc = Scene()
@@ -114,8 +115,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_spherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_spherical_%s.png' % self.field[1])
 
     def test_stereospherical_lens(self):
         w = (self.ds.domain_width).in_units('code_length')
@@ -129,5 +130,5 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereospherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereospherical_%s.png' % self.field[1])

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -76,9 +76,11 @@
         mi_bound = ((ma-mi)*(0.10))+mi
         ma_bound = ((ma-mi)*(0.90))+mi
         tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-        sc.render('test_scene.png', sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_scene.png')
 
         nrot = 2 
         for i in range(nrot):
             sc.camera.pitch(2*np.pi/nrot)
-            sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+            sc.render(sigma_clip=6.0)
+            sc.save('test_rot_%04i.png' % i)

diff -r d0f52cf90877f87528854d9e30dd71dc570050f2 -r 6facab7ff3258b232346caaecd999be9f25ef139 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -113,5 +113,6 @@
     >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
     sc = create_scene(data_source, field=field)
-    im = sc.render(fname=fname, sigma_clip=sigma_clip)
+    im = sc.render(sigma_clip=sigma_clip)
+    sc.save(fname=fname)
     return im, sc

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list