[yt-svn] commit/yt: 16 new changesets

commits-noreply at bitbucket.org
Wed Oct 21 18:12:17 PDT 2015


16 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/d9010e05d646/
Changeset:   d9010e05d646
Branch:      yt
User:        atmyers
Date:        2015-10-16 00:20:46+00:00
Summary:     merging
Affected #:  5 files

diff -r 830bca068d4bfb63eccc0b34df6c6c1642d38828 -r d9010e05d64656a32c9a3ced5e5557994d1d8480 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -304,7 +304,10 @@
 :meth:`~yt.visualization.volume_rendering.camera.Camera.pitch`,
 :meth:`~yt.visualization.volume_rendering.camera.Camera.yaw`, and
 :meth:`~yt.visualization.volume_rendering.camera.Camera.roll` can rotate the
-camera in space.
+camera in space. The center around which the camera rotates can be specified
+with the optional parameter `rot_center` (particularly useful for perspective
+and spherical lenses). By default, `rot_center` is the camera's own location,
+i.e. the camera rotates about its current position.
 
 When examining a particular point in space, 
 :meth:`~yt.visualization.volume_rendering.camera.Camera.zoom` can be of
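
For reference, a short sketch of how the new `rot_center` keyword can be
used (this assumes a Camera built as in the docstrings below; `ds` is an
already-loaded dataset):

    import numpy as np
    from yt.visualization.volume_rendering.camera import Camera

    cam = Camera(ds)
    # Default: rotate about the camera's own position; only the
    # orientation changes.
    cam.yaw(np.pi / 8)
    # Orbit the domain center instead: the camera position moves along
    # a circle around rot_center.
    cam.yaw(np.pi / 8, rot_center=ds.domain_center)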

diff -r 830bca068d4bfb63eccc0b34df6c6c1642d38828 -r d9010e05d64656a32c9a3ced5e5557994d1d8480 yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -81,7 +81,7 @@
             # The north_vector calculated here will still be included in self.unit_vectors.
             north_vector = np.cross(normal_vector, east_vector).ravel()
         else:
-            if self.steady_north:
+            if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0):
                 north_vector = north_vector - np.dot(north_vector,normal_vector)*normal_vector
             east_vector = np.cross(north_vector, normal_vector).ravel()
         north_vector /= np.sqrt(np.dot(north_vector, north_vector))
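
The new branch above is a single Gram-Schmidt step: any component of
north_vector along normal_vector is projected out so the two vectors stay
orthogonal. A standalone numpy illustration of just that math (not yt code;
normal is assumed to be unit length, as in orientation.py):

    import numpy as np

    normal = np.array([0.0, 0.0, 1.0])
    north = np.array([0.1, 1.0, 0.3])   # not orthogonal to normal

    # Remove the component of north along normal, then renormalize,
    # mirroring the Orientation logic above.
    north = north - np.dot(north, normal) * normal
    north /= np.sqrt(np.dot(north, north))

    assert abs(np.dot(north, normal)) < 1e-12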

diff -r 830bca068d4bfb63eccc0b34df6c6c1642d38828 -r d9010e05d64656a32c9a3ced5e5557994d1d8480 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -316,7 +316,7 @@
                                 north_vector=north_vector)
         self._moved = True
 
-    def rotate(self, theta, rot_vector=None):
+    def rotate(self, theta, rot_vector=None, rot_center=None):
         r"""Rotate by a given angle
 
         Rotate the view.  If `rot_vector` is None, rotation will occur
@@ -330,6 +330,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around
             `north_vector`
+        rot_center : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change).
 
         Examples
         --------
@@ -340,12 +344,19 @@
         rotate_all = rot_vector is not None
         if rot_vector is None:
             rot_vector = self.north_vector
+        if rot_center is None:
+            rot_center = self._position
         rot_vector = ensure_numpy_array(rot_vector)
         rot_vector = rot_vector/np.linalg.norm(rot_vector)
 
+        new_position = self._position - rot_center
         R = get_rotation_matrix(theta, rot_vector)
+        new_position = np.dot(R, new_position) + rot_center
 
-        normal_vector = self.unit_vectors[2]
+        if (new_position == self._position).all():
+            normal_vector = self.unit_vectors[2]
+        else:
+            normal_vector = rot_center - new_position
         normal_vector = normal_vector/np.sqrt((normal_vector**2).sum())
 
         if rotate_all:
@@ -354,8 +365,9 @@
                 north_vector=np.dot(R, self.unit_vectors[1]))
         else:
             self.switch_view(normal_vector=np.dot(R, normal_vector))
+        if (new_position != self._position).any(): self.set_position(new_position)
 
-    def pitch(self, theta):
+    def pitch(self, theta, rot_center=None):
         r"""Rotate by a given angle about the horizontal axis
 
         Pitch the view.
@@ -364,6 +376,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to pitch the view.
+        rot_center : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -371,9 +385,9 @@
         >>> cam = Camera()
         >>> cam.pitch(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[0])
+        self.rotate(theta, rot_vector=self.unit_vectors[0], rot_center=rot_center)
 
-    def yaw(self, theta):
+    def yaw(self, theta, rot_center=None):
         r"""Rotate by a given angle about the vertical axis
 
         Yaw the view.
@@ -382,6 +396,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to yaw the view.
+        rot_center : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -389,9 +405,9 @@
         >>> cam = Camera()
         >>> cam.yaw(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[1])
+        self.rotate(theta, rot_vector=self.unit_vectors[1], rot_center=rot_center)
 
-    def roll(self, theta):
+    def roll(self, theta, rot_center=None):
         r"""Rotate by a given angle about the view normal axis
 
         Roll the view.
@@ -400,6 +416,8 @@
         ----------
         theta : float, in radians
              Angle (in radians) by which to roll the view.
+        rot_center : array_like, optional
+            Specify the center around which rotation will occur.
 
         Examples
         --------
@@ -407,9 +425,9 @@
         >>> cam = Camera()
         >>> cam.roll(np.pi/4)
         """
-        self.rotate(theta, rot_vector=self.unit_vectors[2])
+        self.rotate(theta, rot_vector=self.unit_vectors[2], rot_center=rot_center)
 
-    def iter_rotate(self, theta, n_steps, rot_vector=None):
+    def iter_rotate(self, theta, n_steps, rot_vector=None, rot_center=None):
         r"""Loop over rotate, creating a rotation
 
         This will rotate `n_steps` until the current view has been
@@ -425,6 +443,10 @@
             Specify the rotation vector around which rotation will
             occur.  Defaults to None, which sets rotation around the
             original `north_vector`
+        rot_center : array_like, optional
+            Specify the center around which rotation will occur. Defaults
+            to None, which sets rotation around the original camera position
+            (i.e. the camera position does not change).
 
         Examples
         --------
@@ -435,7 +457,7 @@
 
         dtheta = (1.0*theta)/n_steps
         for i in xrange(n_steps):
-            self.rotate(dtheta, rot_vector=rot_vector)
+            self.rotate(dtheta, rot_vector=rot_vector, rot_center=rot_center)
             yield i
 
     def iter_move(self, final, n_steps, exponential=False):
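
The heart of the rot_center change in Camera.rotate is a
translate-rotate-translate-back pattern. A self-contained numpy sketch of
the position update (the rotation helper below plays the role of
yt.utilities.math_utils.get_rotation_matrix and is reproduced only so the
example runs on its own):

    import numpy as np

    def rotation_matrix(theta, axis):
        # Rodrigues rotation formula for a unit rotation axis.
        c, s = np.cos(theta), np.sin(theta)
        ux, uy, uz = axis
        K = np.array([[0.0, -uz, uy],
                      [uz, 0.0, -ux],
                      [-uy, ux, 0.0]])
        return c * np.eye(3) + s * K + (1.0 - c) * np.outer(axis, axis)

    position = np.array([1.0, 0.0, 0.0])
    rot_center = np.zeros(3)
    axis = np.array([0.0, 0.0, 1.0])

    # Shift rot_center to the origin, rotate, shift back -- exactly the
    # new_position computation in Camera.rotate above.
    R = rotation_matrix(np.pi / 2, axis)
    new_position = np.dot(R, position - rot_center) + rot_center
    # The camera's new normal vector then points from new_position back
    # toward rot_center.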

diff -r 830bca068d4bfb63eccc0b34df6c6c1642d38828 -r d9010e05d64656a32c9a3ced5e5557994d1d8480 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -152,6 +152,7 @@
     camera.set_width(width)
     camera.switch_orientation(normal_vector=normal_vector,
                               north_vector=north_vector)
+    camera.position = center - width[2]*camera.normal_vector
     camera.focus = center
     sc.camera = camera
     sc.add_source(vol)
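
The added line places the camera a distance width[2] (the projection depth)
away from the focus along the view normal. Schematically, with made-up
values (a sketch, not the yt implementation):

    import numpy as np

    center = np.array([0.5, 0.5, 0.5])          # projection focus
    normal_vector = np.array([0.0, 0.0, 1.0])   # unit view normal
    depth = 1.0                                 # width[2]

    # Back the camera off from the focus so rays traverse the full depth.
    position = center - depth * normal_vector
    assert np.allclose(center - position, depth * normal_vector)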

diff -r 830bca068d4bfb63eccc0b34df6c6c1642d38828 -r d9010e05d64656a32c9a3ced5e5557994d1d8480 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -204,7 +204,7 @@
         """
         self.camera = camera
 
-    def get_camera(self, camera):
+    def get_camera(self):
         r"""
 
         Get the camera currently used by this scene.
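
With the spurious argument gone, the getter now takes no arguments; e.g.
(a sketch assuming an existing Scene instance sc):

    cam = sc.get_camera()   # previously demanded a meaningless argument
    cam.zoom(2.0)
    sc.set_camera(cam)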


https://bitbucket.org/yt_analysis/yt/commits/90e44e84fb2a/
Changeset:   90e44e84fb2a
Branch:      yt
User:        atmyers
Date:        2015-10-16 16:50:45+00:00
Summary:     merging
Affected #:  8 files

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -29,16 +29,15 @@
     latex_symbol_lut, unit_prefixes, \
     prefixable_units, cgs_base_units, \
     mks_base_units, latex_prefixes, yt_base_units
-from yt.units.unit_registry import UnitRegistry
+from yt.units.unit_registry import \
+    UnitRegistry, \
+    UnitParseError
 from yt.utilities.exceptions import YTUnitsNotReducible
 
 import copy
 import string
 import token
 
-class UnitParseError(Exception):
-    pass
-
 class InvalidUnitOperation(Exception):
     pass
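
With UnitParseError now living in yt.units.unit_registry, callers import it
from there; a sketch of the error path (assuming the import shown in the
diff; "florbs" is a deliberately bogus unit):

    from yt.units.unit_object import Unit
    from yt.units.unit_registry import UnitParseError

    try:
        Unit("florbs")          # not a parseable unit
    except UnitParseError:
        print("could not parse unit")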
 

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -305,7 +305,9 @@
     result_storage = None
     prefix = ""
     def __init__(self, ds_fn):
-        if isinstance(ds_fn, Dataset):
+        if ds_fn is None:
+            self.ds = None
+        elif isinstance(ds_fn, Dataset):
             self.ds = ds_fn
         else:
             self.ds = data_dir_load(ds_fn)
@@ -315,7 +317,8 @@
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
             if dd is None or self.description not in dd:
-                raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
+                raise YTNoOldAnswer(
+                    "%s : %s" % (self.storage_name, self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
@@ -660,6 +663,29 @@
         assert compare_images(fns[0], fns[1], 10**(-decimals)) == None
         for fn in fns: os.remove(fn)
 
+class VRImageComparisonTest(AnswerTestingTest):
+    _type_name = "VRImageComparison"
+    _attrs = ('desc',)
+
+    def __init__(self, scene, ds, desc, decimals):
+        super(VRImageComparisonTest, self).__init__(None)
+        self.obj_type = ('vr',)
+        self.ds = ds
+        self.scene = scene
+        self.desc = desc
+        self.decimals = decimals
+
+    def run(self):
+        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+        os.close(tmpfd)
+        self.scene.render(tmpname, sigma_clip=1.0)
+        image = mpimg.imread(tmpname)
+        os.remove(tmpname)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        compare_image_lists(new_result, old_result, self.decimals)
+        
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args',
@@ -774,6 +800,16 @@
     else:
         return ftrue
 
+def requires_answer_testing():
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        return func
+    if AnswerTestingTest.result_storage is not None:
+        return ftrue
+    else:
+        return ffalse
+    
 def requires_ds(ds_fn, big_data = False, file_check = False):
     def ffalse(func):
         return lambda: None
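
Together these two additions let volume rendering answer tests be written
and skipped cleanly when no answer store is configured. A sketch of how
they might be combined (hypothetical test; fake_random_ds is yt's standard
in-memory testing dataset factory):

    from yt.testing import fake_random_ds
    from yt.utilities.answer_testing.framework import \
        VRImageComparisonTest, requires_answer_testing
    from yt.visualization.volume_rendering.scene import Scene
    from yt.visualization.volume_rendering.camera import Camera
    from yt.visualization.volume_rendering.render_source import VolumeSource

    @requires_answer_testing()
    def test_vr_image():
        ds = fake_random_ds(32)
        sc = Scene()
        sc.add_source(VolumeSource(ds.all_data(), 'density'))
        sc.camera = Camera(ds)
        yield VRImageComparisonTest(sc, ds, "default_view", 12)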

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -14,6 +14,7 @@
 from yt.funcs import iterable, mylog, ensure_numpy_array
 from yt.utilities.orientation import Orientation
 from yt.units.yt_array import YTArray
+from yt.units.unit_registry import UnitParseError
 from yt.utilities.math_utils import get_rotation_matrix
 from .utils import data_source_or_all
 from .lens import lenses
@@ -22,16 +23,34 @@
 
 class Camera(Orientation):
 
-    r"""
+    r"""A representation of a point of view into a Scene.
 
-    The Camera class. A Camera represents of point of view into a
-    Scene. It is defined by a position (the location of the camera
+    It is defined by a position (the location of the camera
     in the simulation domain,), a focus (the point at which the
     camera is pointed), a width (the width of the snapshot that will
     be taken, a resolution (the number of pixels in the image), and
     a north_vector (the "up" direction in the resulting image). A
     camera can use a variety of different Lens objects.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    lens_type: string, optional
+        This specifies the type of lens to use for rendering. Current
+        options are 'plane-parallel', 'perspective', and 'fisheye'. See
+        :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+        Default: 'plane-parallel'
+    auto: boolean
+        If True, build smart defaults using the data source extent. This
+        can be time-consuming, as it iterates over the entire dataset to
+        find the positional bounds. Default: False
+
+    Examples
+    --------
+    >>> cam = Camera(ds)
+
     """
 
     _moved = True
@@ -42,29 +61,7 @@
 
     def __init__(self, data_source=None, lens_type='plane-parallel',
                  auto=False):
-        """
-        Initialize a Camera Instance
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        lens_type: string, optional
-            This specifies the type of lens to use for rendering. Current
-            options are 'plane-parallel', 'perspective', and 'fisheye'. See
-            :class:`yt.visualization.volume_rendering.lens.Lens` for details.
-            Default: 'plane-parallel'
-        auto: boolean
-            If True, build smart defaults using the data source extent. This
-            can be time-consuming to iterate over the entire dataset to find
-            the positional bounds. Default: False
-
-        Examples
-        --------
-        >>> cam = Camera(ds)
-
-        """
+        """Initialize a Camera Instance"""
         self.lens = None
         self.north_vector = None
         self.normal_vector = None
@@ -178,9 +175,7 @@
         return lens_params
 
     def set_lens(self, lens_type):
-        r'''
-
-        Set the lens to be used with this camera. 
+        r"""Set the lens to be used with this camera.
 
         Parameters
         ----------
@@ -194,7 +189,7 @@
             'spherical'
             'stereo-spherical'
 
-        '''
+        """
         if lens_type not in lenses:
             mylog.error("Lens type not available")
             raise RuntimeError()
@@ -202,6 +197,7 @@
         self.lens.camera = self
 
     def set_defaults_from_data_source(self, data_source):
+        """Resets the camera attributes to their default values"""
         self.position = data_source.pf.domain_right_edge
 
         width = 1.5 * data_source.pf.domain_width.max()
@@ -232,20 +228,22 @@
         self._moved = True
 
     def set_width(self, width):
-        r"""
-
-        Set the width of the image that will be produced by this camera.
-        This must be a YTQuantity.
+        r"""Set the width of the image that will be produced by this camera.
 
         Parameters
         ----------
 
-        width : :class:`yt.units.yt_array.YTQuantity`
-
+        width : YTQuantity or 3 element YTArray
+            The width of the volume rendering in the horizontal, vertical, and
+            depth directions. If a scalar, assumes that the width is the same in
+            all three directions.
         """
-        assert isinstance(width, YTArray), 'Width must be created with ds.arr'
-        if isinstance(width, YTArray):
+        try:
             width = width.in_units('code_length')
+        except (AttributeError, UnitParseError):
+            raise ValueError(
+                'Volume rendering width must be a YTArray that can be '
+                'converted to code units')
 
         if not iterable(width):
             width = YTArray([width.d]*3, width.units)  # Can't get code units.
@@ -253,9 +251,7 @@
         self.switch_orientation()
 
     def set_position(self, position, north_vector=None):
-        r"""
-
-        Set the position of the camera.
+        r"""Set the position of the camera.
 
         Parameters
         ----------
@@ -273,8 +269,7 @@
                                 north_vector=north_vector)
 
     def switch_orientation(self, normal_vector=None, north_vector=None):
-        r"""
-        Change the view direction based on any of the orientation parameters.
+        r"""Change the view direction based on any of the orientation parameters.
 
         This will recalculate all the necessary vectors and vector planes
         related to an orientable object.
@@ -507,11 +502,6 @@
         factor : float
             The factor by which to reduce the distance to the focal point.
 
-
-        Notes
-        -----
-
-        You will need to call snapshot() again to get a new image.
         """
         self.set_width(self.width / factor)
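
With the assert replaced by a try/except, set_width now raises a uniform
ValueError for anything that cannot be converted to code units; the call
patterns stay the same (a sketch; ds is an already-loaded dataset):

    cam.set_width(ds.quan(0.5, 'unitary'))         # same width in x, y, z
    cam.set_width(ds.arr([0.4, 0.4, 0.8], 'Mpc'))  # per-axis widths
    cam.set_width(0.5)    # plain float: raises ValueError (no units)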
 

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -26,15 +26,7 @@
 
 
 class Lens(ParallelAnalysisInterface):
-
-    """
-
-    A base class for setting up Lens objects. A Lens,
-    along with a Camera, is used to defined the set of
-    rays that will be used for rendering.
-
-    """
-
+    """A Lens is used to define the set of rays for rendering."""
     def __init__(self, ):
         super(Lens, self).__init__()
         self.viewpoint = None
@@ -48,9 +40,14 @@
         self.sampler = None
 
     def set_camera(self, camera):
+        """Set the properties of the lens based on the camera.
+
+        This is a proxy for setup_box_properties
+        """
         self.setup_box_properties(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0], camera.resolution[1],
                       4), dtype='float64', order='C'),
@@ -58,6 +55,7 @@
         return self.current_image
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         unit_vectors = camera.unit_vectors
         width = camera.width
         center = camera.focus
@@ -80,13 +78,12 @@
 
 
 class PlaneParallelLens(Lens):
+    r"""The lens for orthographic projections.
 
-    r'''
-
-    This lens type is the standard type used for orthographic projections. 
     All rays emerge parallel to each other, arranged along a plane.
 
-    '''
+    The initializer takes no parameters.
+    """
 
     def __init__(self, ):
         super(PlaneParallelLens, self).__init__()
@@ -111,6 +108,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
+        """Set the viewpoint based on the camera"""
         # This is a hack that should be replaced by an alternate plane-parallel
         # traversal. Put the camera really far away so that the effective
         # viewpoint is infinitely far away, making for parallel rays.
@@ -135,17 +133,14 @@
 
 
 class PerspectiveLens(Lens):
+    r"""A lens for viewing a scene with a set of rays within an opening angle.
 
-    r'''
-
-    This lens type adjusts for an opening view angle, so that the scene will 
-    have an element of perspective to it.
-
-    '''
+    The scene will have an element of perspective to it since the rays are not
+    parallel.
+    """
 
     def __init__(self):
         super(PerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
 
     def new_image(self, camera):
         self.current_image = ImageArray(
@@ -155,13 +150,6 @@
         return self.current_image
 
     def _get_sampler_params(self, camera, render_source):
-        # We should move away from pre-generation of vectors like this and into
-        # the usage of on-the-fly generation in the VolumeIntegrator module
-        # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if render_source.zbuffer is not None:
             image = render_source.zbuffer.rgba
         else:
@@ -174,24 +162,30 @@
         px = np.mat(np.linspace(-.5, .5, camera.resolution[0]))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((camera.resolution[0], camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(camera.resolution[0],1,3), \
+        sample_x = np.repeat(sample_x.reshape(camera.resolution[0], 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              camera.resolution[0], axis=0)
 
-        normal_vecs = np.tile(normal_vec, camera.resolution[0] * camera.resolution[1])\
-                             .reshape(camera.resolution[0], camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec, camera.resolution[0] * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, camera.resolution[0] * camera.resolution[1])
+        positions = positions.reshape(
+            camera.resolution[0], camera.resolution[1], 3)
 
         uv = np.ones(3, dtype='float64')
 
@@ -234,11 +228,12 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is behind the camera, we put it outside of
+                # the image. It cannot simply be removed because it may
+                # connect to another corner within the image, which would
+                # produce a visible domain boundary line.
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera.position + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
@@ -256,15 +251,14 @@
 
 
 class StereoPerspectiveLens(Lens):
-
-    """docstring for StereoPerspectiveLens"""
+    """A lens that includes two sources for perspective rays, for 3D viewing"""
 
     def __init__(self):
         super(StereoPerspectiveLens, self).__init__()
-        self.expand_factor = 1.5
         self.disparity = None
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]*camera.resolution[1], 1,
                       4), dtype='float64', order='C'),
@@ -275,10 +269,6 @@
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
         # We might have a different width and back_center
-        # dl = (self.back_center - self.front_center)
-        # self.front_center += self.expand_factor*dl
-        # self.back_center -= dl
-
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
@@ -287,8 +277,10 @@
         else:
             image = self.new_image(camera)
 
-        vectors_left, positions_left = self._get_positions_vectors(camera, -self.disparity)
-        vectors_right, positions_right = self._get_positions_vectors(camera, self.disparity)
+        vectors_left, positions_left = self._get_positions_vectors(
+            camera, -self.disparity)
+        vectors_right, positions_right = self._get_positions_vectors(
+            camera, self.disparity)
 
         uv = np.ones(3, dtype='float64')
 
@@ -330,28 +322,37 @@
         px = np.mat(np.linspace(-.5, .5, single_resolution_x))
         py = np.mat(np.linspace(-.5, .5, camera.resolution[1]))
 
-        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3,1) * px).transpose()
-        sample_y = camera.width[1] * np.array(north_vec.reshape(3,1) * py).transpose()
+        sample_x = camera.width[0] * np.array(east_vec_rot.reshape(3, 1) * px)
+        sample_x = sample_x.transpose()
+        sample_y = camera.width[1] * np.array(north_vec.reshape(3, 1) * py)
+        sample_y = sample_y.transpose()
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
 
-        sample_x = np.repeat(sample_x.reshape(single_resolution_x,1,3), \
+        sample_x = np.repeat(sample_x.reshape(single_resolution_x, 1, 3),
                              camera.resolution[1], axis=1)
-        sample_y = np.repeat(sample_y.reshape(1,camera.resolution[1],3), \
+        sample_y = np.repeat(sample_y.reshape(1, camera.resolution[1], 3),
                              single_resolution_x, axis=0)
 
-        normal_vecs = np.tile(normal_vec_rot, single_resolution_x * camera.resolution[1])\
-                             .reshape(single_resolution_x, camera.resolution[1], 3)
-        east_vecs = np.tile(east_vec_rot, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        normal_vecs = np.tile(
+            normal_vec_rot, single_resolution_x * camera.resolution[1])
+        normal_vecs = normal_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
+        east_vecs = np.tile(
+            east_vec_rot, single_resolution_x * camera.resolution[1])
+        east_vecs = east_vecs.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         vectors = sample_x + sample_y + normal_vecs * camera.width[2]
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
-        positions = positions + east_vecs * disparity # Here the east_vecs is non-rotated one
+        # Note that east_vecs here is the non-rotated vector
+        positions = positions + east_vecs * disparity
 
         mylog.debug(positions)
         mylog.debug(vectors)
@@ -365,8 +366,10 @@
         if self.disparity is None:
             self.disparity = camera.width[0] / 2.e3
 
-        px_left, py_left, dz_left = self._get_px_py_dz(camera, pos, res, -self.disparity)
-        px_right, py_right, dz_right = self._get_px_py_dz(camera, pos, res, self.disparity)
+        px_left, py_left, dz_left = self._get_px_py_dz(
+            camera, pos, res, -self.disparity)
+        px_right, py_right, dz_right = self._get_px_py_dz(
+            camera, pos, res, self.disparity)
 
         px = np.hstack([px_left, px_right])
         py = np.hstack([py_left, py_right])
@@ -402,16 +405,18 @@
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
                 sight_length = camera.width[2] / sight_angle_cos
             else:
-            # If the corner is on the backwards, then we put it outside of the image
-            # It can not be simply removed because it may connect to other corner
-            # within the image, which produces visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2) / \
-                               np.sqrt(1 - sight_angle_cos**2)
+                # If the corner is behind the camera, we put it outside of
+                # the image. It cannot simply be removed because it may
+                # connect to another corner within the image, which would
+                # produce a visible domain boundary line.
+                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera_position_shift + sight_length * sight_vector[i]
 
         dx = np.dot(pos1 - sight_center.d, east_vec_rot)
         dy = np.dot(pos1 - sight_center.d, north_vec)
         dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
+
         # Transpose into image coords.
         px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -431,14 +436,13 @@
 
 
 class FisheyeLens(Lens):
+    r"""A lens for dome-based renderings
 
-    r"""
-
-    This lens type accepts a field-of-view property, fov, that describes how wide 
-    an angle the fisheye can see. Fisheye images are typically used for dome-based 
-    presentations; the Hayden planetarium for instance has a field of view of 194.6. 
-    The images returned by this camera will be flat pixel images that can and should 
-    be reshaped to the resolution.    
+    This lens type accepts a field-of-view property, fov, that describes how
+    wide an angle the fisheye can see. Fisheye images are typically used for
+    dome-based presentations; the Hayden Planetarium, for instance, has a
+    field of view of 194.6 degrees. The images returned by this camera will
+    be flat pixel images that can and should be reshaped to the resolution.
 
     """
 
@@ -450,11 +454,13 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(FisheyeLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
 
     def new_image(self, camera):
+        """Initialize a new ImageArray to be used with this lens."""
         self.current_image = ImageArray(
             np.zeros((camera.resolution[0]**2, 1,
                       4), dtype='float64', order='C'),
@@ -489,9 +495,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a FisheyeLens, the viewpoint is the front center.
-        """
+        """For a FisheyeLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def __repr__(self):
@@ -530,12 +534,11 @@
 
 
 class SphericalLens(Lens):
+    r"""A lens for cylindrical-spherical projection.
 
-    r"""
+    Movies rendered in this way can be displayed in head-tracking devices or
+    in YouTube 360 view.
 
-    This is a cylindrical-spherical projection. Movies rendered in this way 
-    can be displayed in head-tracking devices or in YouTube 360 view.
-    
     """
 
     def __init__(self):
@@ -545,6 +548,7 @@
         self.rotation_matrix = np.eye(3)
 
     def setup_box_properties(self, camera):
+        """Set up the view and stage based on the properties of the camera."""
         self.radius = camera.width.max()
         super(SphericalLens, self).setup_box_properties(camera)
         self.set_viewpoint(camera)
@@ -562,11 +566,13 @@
         vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
-        positions = np.tile(camera.position, camera.resolution[0] * camera.resolution[1])\
-                           .reshape(camera.resolution[0], camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position,
+            camera.resolution[0] * camera.resolution[1]).reshape(
+                camera.resolution[0], camera.resolution[1], 3)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((camera.resolution[0]*camera.resolution[1], 3))
@@ -595,9 +601,7 @@
         return sampler_params
 
     def set_viewpoint(self, camera):
-        """
-        For a PerspectiveLens, the viewpoint is the front center.
-        """
+        """For a SphericalLens, the viewpoint is the camera's position"""
         self.viewpoint = camera.position
 
     def project_to_plane(self, camera, pos, res=None):
@@ -631,8 +635,11 @@
 
 
 class StereoSphericalLens(Lens):
+    r"""A lens for a stereo cylindrical-spherical projection.
 
-    """docstring for StereoSphericalLens"""
+    Movies rendered in this way can be displayed in VR devices or as stereo
+    YouTube 360 degree movies.
+    """
 
     def __init__(self):
         super(StereoSphericalLens, self).__init__()
@@ -651,31 +658,35 @@
             self.disparity = camera.width[0] / 1000.
 
         single_resolution_x = np.floor(camera.resolution[0])/2
-        px = np.linspace(-np.pi, np.pi, single_resolution_x, endpoint=True)[:,None]
-        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1], endpoint=True)[None,:]
+        px = np.linspace(-np.pi, np.pi, single_resolution_x,
+                         endpoint=True)[:, None]
+        py = np.linspace(-np.pi/2., np.pi/2., camera.resolution[1],
+                         endpoint=True)[None, :]
 
         vectors = np.zeros((single_resolution_x, camera.resolution[1], 3),
                            dtype='float64', order='C')
-        vectors[:,:,0] = np.cos(px) * np.cos(py)
-        vectors[:,:,1] = np.sin(px) * np.cos(py)
-        vectors[:,:,2] = np.sin(py)
+        vectors[:, :, 0] = np.cos(px) * np.cos(py)
+        vectors[:, :, 1] = np.sin(px) * np.cos(py)
+        vectors[:, :, 2] = np.sin(py)
         vectors = vectors * camera.width[0]
 
         vectors2 = np.zeros((single_resolution_x, camera.resolution[1], 3),
                             dtype='float64', order='C')
-        vectors2[:,:,0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,1] = np.cos(px) * np.ones((1, camera.resolution[1]))
-        vectors2[:,:,2] = 0
+        vectors2[:, :, 0] = -np.sin(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 1] = np.cos(px) * np.ones((1, camera.resolution[1]))
+        vectors2[:, :, 2] = 0
 
-        positions = np.tile(camera.position, single_resolution_x * camera.resolution[1])\
-                           .reshape(single_resolution_x, camera.resolution[1], 3)
+        positions = np.tile(
+            camera.position, single_resolution_x * camera.resolution[1])
+        positions = positions.reshape(
+            single_resolution_x, camera.resolution[1], 3)
 
         # The left and right are switched here since VR is in LHS.
         positions_left = positions + vectors2 * self.disparity
         positions_right = positions + vectors2 * (-self.disparity)
 
-        R1 = get_rotation_matrix(0.5*np.pi, [1,0,0])
-        R2 = get_rotation_matrix(0.5*np.pi, [0,0,1])
+        R1 = get_rotation_matrix(0.5*np.pi, [1, 0, 0])
+        R2 = get_rotation_matrix(0.5*np.pi, [0, 0, 1])
         uv = np.dot(R1, camera.unit_vectors)
         uv = np.dot(R2, uv)
         vectors.reshape((single_resolution_x*camera.resolution[1], 3))
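
The tile/reshape pattern cleaned up throughout this file builds one ray
origin and one direction vector per pixel. The same effect can be sketched
with plain numpy broadcasting (illustrative only, with made-up unit
vectors; not the yt implementation):

    import numpy as np

    nx, ny = 4, 3                         # camera.resolution
    east = np.array([1.0, 0.0, 0.0])
    north = np.array([0.0, 1.0, 0.0])
    normal = np.array([0.0, 0.0, 1.0])
    width = np.array([1.0, 1.0, 2.0])     # camera.width

    px = np.linspace(-0.5, 0.5, nx)[:, None, None]   # (nx, 1, 1)
    py = np.linspace(-0.5, 0.5, ny)[None, :, None]   # (1, ny, 1)

    # One direction vector per pixel, shape (nx, ny, 3); broadcasting
    # stands in for the explicit np.tile(...).reshape(...) calls above.
    vectors = (width[0] * px * east
               + width[1] * py * north
               + width[2] * normal)
    assert vectors.shape == (nx, ny, 3)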

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -39,10 +39,9 @@
 
 class RenderSource(ParallelAnalysisInterface):
 
-    """
+    """Base Class for Render Sources.
 
-    Base Class for Render Sources. Will be inherited for volumes,
-    streamlines, etc.
+    Will be inherited for volumes, streamlines, etc.
 
     """
 
@@ -59,10 +58,9 @@
 
 
 class OpaqueSource(RenderSource):
-    """
+    """A base class for opaque render sources.
 
-    A base class for opaque render sources. Will be inherited from
-    for LineSources, BoxSources, etc.
+    Will be inherited from for LineSources, BoxSources, etc.
 
     """
     def __init__(self):
@@ -72,50 +70,37 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
-    def render(self, camera, zbuffer=None):
-        # This is definitely wrong for now
-        if zbuffer is not None and self.zbuffer is not None:
-            zbuffer.rgba = self.zbuffer.rgba
-            zbuffer.z = self.zbuffer.z
-            self.zbuffer = zbuffer
-        return self.zbuffer
-
 
 class VolumeSource(RenderSource):
+    """A class for rendering data from a volumetric data source
 
-    """
+    Examples of such sources include a sphere, cylinder, or the
+    entire computational domain.
 
-    A VolumeSource is a class for rendering data from
-    an arbitrary volumetric data source, e.g. a sphere,
-    cylinder, or the entire computational domain.
+    A :class:`VolumeSource` provides the framework to decompose an arbitrary
+    yt data source into bricks that can be traversed and volume rendered.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    field : string
+        The name of the field to be rendered.
+    auto: bool, optional
+        If True, will build a default AMRKDTree and transfer function based
+        on the data.
+
+    Examples
+    --------
+    >>> source = VolumeSource(ds.all_data(), 'density')
 
     """
     _image = None
     data_source = None
 
     def __init__(self, data_source, field, auto=True):
-        r"""Initialize a new volumetric source for rendering.
-
-        A :class:`VolumeSource` provides the framework to decompose an arbitrary
-        yt data source into bricks that can be traversed and volume rendered.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field(s) to be rendered.
-        auto: bool, optional
-            If True, will build a default AMRKDTree and transfer function based
-            on the data.
-
-        Examples
-        --------
-        >>> source = RenderSource(ds, 'density')
-
-        """
+        r"""Initialize a new volumetric source for rendering."""
         super(VolumeSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -138,13 +123,12 @@
             self.build_defaults()
 
     def build_defaults(self):
+        """Sets a default volume and transfer function"""
         self.build_default_volume()
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):
-        """
-        Set transfer function for this source
-        """
+        """Set transfer function for this source"""
         if not isinstance(transfer_function,
                           (TransferFunction, ColorTransferFunction,
                            ProjectionTransferFunction)):
@@ -167,6 +151,7 @@
             raise RuntimeError("Transfer Function not Supplied")
 
     def build_default_transfer_function(self):
+        """Sets up a transfer function"""
         self.tfh = \
             TransferFunctionHelper(self.data_source.pf)
         self.tfh.set_field(self.field)
@@ -175,6 +160,7 @@
         self.transfer_function = self.tfh.tf
 
     def build_default_volume(self):
+        """Sets up an AMRKDTree based on the VolumeSource's field"""
         self.volume = AMRKDTree(self.data_source.pf,
                                 data_source=self.data_source)
         log_fields = [self.data_source.pf.field_info[self.field].take_log]
@@ -182,17 +168,23 @@
         self.volume.set_fields([self.field], log_fields, True)
 
     def set_volume(self, volume):
+        """Associates an AMRKDTree with the VolumeSource"""
         assert(isinstance(volume, AMRKDTree))
         del self.volume
         self.volume = volume
 
-    def set_field(self, field, no_ghost=True):
-        field = self.data_source._determine_fields(field)[0]
-        log_field = self.data_source.pf.field_info[field].take_log
-        self.volume.set_fields(field, [log_field], no_ghost)
-        self.field = field
+    def set_fields(self, fields, no_ghost=True):
+        """Set the source's fields to render
 
-    def set_fields(self, fields, no_ghost=True):
+        Parameters
+        ----------
+        fields: field name or list of field names
+            The field or fields to render
+        no_ghost: boolean
+            If False, the AMRKDTree estimates vertex centered data using ghost
+            zones, which can eliminate seams in the resulting volume rendering.
+            Defaults to True for performance reasons.
+        """
         fields = self.data_source._determine_fields(fields)
         log_fields = [self.data_source.ds.field_info[f].take_log
                       for f in fields]
@@ -200,7 +192,12 @@
         self.field = fields
 
     def set_sampler(self, camera):
-        """docstring for add_sampler"""
+        """Sets a volume render sampler
+
+        The type of sampler is determined based on the ``sampler_type``
+        attribute of the VolumeSource. Currently the ``volume-render`` and
+        ``projection`` sampler types are supported.
+        """
         if self.sampler_type == 'volume-render':
             sampler = new_volume_render_sampler(camera, self)
         elif self.sampler_type == 'projection':
@@ -211,6 +208,24 @@
         assert(self.sampler is not None)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         self.zbuffer = zbuffer
         self.set_sampler(camera)
         assert (self.sampler is not None)
@@ -238,11 +253,25 @@
         return self.current_image
 
     def finalize_image(self, camera, image, call_from_VR=False):
+        """Parallel reduce the image.
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            The camera used to produce the volume rendering image.
+        image: :class:`yt.data_objects.image_array.ImageArray` instance
+            A reference to an image to fill
+        call_from_VR: boolean, optional
+            Whether or not this is being called from a higher level in the VR
+            interface. Used to set the correct orientation.
+        """
         image = self.volume.reduce_tree_images(image,
                                                camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
-        # If the call is from VR, the image is rotated by 180 to get correct up dir
-        if call_from_VR: image = np.rot90(image, k=2)
+        # If the call is from VR, the image is rotated by 180 degrees to get
+        # the correct up direction
+        if call_from_VR is True:
+            image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
         return image
@@ -254,38 +283,33 @@
 
 
 class MeshSource(RenderSource):
+    """A source for unstructured mesh data
 
-    """
+    Rendering unstructured mesh data requires the embree ray-tracing
+    engine and the associated pyembree Python bindings to be
+    installed.
 
-    MeshSource is a class for volume rendering unstructured mesh
-    data. This functionality requires the embree ray-tracing
-    engine and the associated pyembree python bindings to be
-    installed in order to function.
+    A :class:`MeshSource` provides the framework to volume render
+    unstructured mesh data.
 
+    Parameters
+    ----------
+    data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+        This is the source to be rendered, which can be any arbitrary yt
+        data object or dataset.
+    field : string
+        The name of the field to be rendered.
+
+    Examples
+    --------
+    >>> source = MeshSource(ds, ('all', 'convected'))
     """
 
     _image = None
     data_source = None
 
     def __init__(self, data_source, field):
-        r"""Initialize a new unstructured source for rendering.
-
-        A :class:`MeshSource` provides the framework to volume render
-        unstructured mesh data.
-
-        Parameters
-        ----------
-        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
-            This is the source to be rendered, which can be any arbitrary yt
-            data object or dataset.
-        fields : string
-            The name of the field to be rendered.
-
-        Examples
-        --------
-        >>> source = MeshSource(ds, ('all', 'convected'))
-
-        """
+        r"""Initialize a new unstructured source for rendering."""
         super(MeshSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -323,7 +347,24 @@
                                                   field_data.d)
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
 
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
@@ -340,33 +381,33 @@
 
 
 class PointSource(OpaqueSource):
+    r"""A rendering source of opaque points in the scene.
+
+    This class provides a mechanism for adding points to a scene; these
+    points will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 3)
+        These positions, in data-space coordinates, are the points to be
+        added to the scene.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = PointSource(particle_positions)
+
+    """
 
     _image = None
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A rendering source of opaque points in the scene.
-
-        This class provides a mechanism for adding points to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 3)
-            These positions, in data-space coordinates, are the points to be
-            added to the scene.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = PointSource(particle_positions)
-
-        """
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -376,6 +417,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -401,39 +460,38 @@
 
 
 class LineSource(OpaqueSource):
+    r"""A render source for a sequence of opaque line segments.
+
+    This class provides a mechanism for adding lines to a scene; these
+    lines will be opaque, and can also be colored.
+
+    Parameters
+    ----------
+    positions: array, shape (N, 2, 3)
+        These positions, in data-space coordinates, are the starting and
+        stopping points for each line segment. For example,
+        positions[0][0] and positions[0][1] would give the (x, y, z)
+        coordinates of the beginning and end points of the first line,
+        respectively.
+    colors : array, shape (N, 4), optional
+        The colors of the points, including an alpha channel, in floating
+        point running from 0..1.  Note that they correspond to the line
+        segment succeeding each point; this means that strictly speaking
+        they need only be (N-1) in length.
+    color_stride : int, optional
+        The stride with which to access the colors when putting them on the
+        scene.
+
+    Examples
+    --------
+    >>> source = LineSource(np.random.random((10, 2, 3)))
+
+    """
 
     _image = None
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
-        r"""A render source for a sequence of opaque line segments.
-
-        This class provides a mechanism for adding lines to a scene; these
-        points will be opaque, and can also be colored.
-
-        Parameters
-        ----------
-        positions: array, shape (N, 2, 3)
-            These positions, in data-space coordinates, are the starting and
-            stopping points for each pair of lines. For example,
-            positions[0][0] and positions[0][1] would give the (x, y, z)
-            coordinates of the beginning and end points of the first line,
-            respectively.
-        colors : array, shape (N, 4), optional
-            The colors of the points, including an alpha channel, in floating
-            point running from 0..1.  Note that they correspond to the line
-            segment succeeding each point; this means that strictly speaking
-            they need only be (N-1) in length.
-        color_stride : int, optional
-            The stride with which to access the colors when putting them on the
-            scene.
-
-        Examples
-        --------
-        >>> source = LineSource(np.random.random((10, 3)))
-
-        """
-
         super(LineSource, self).__init__()
 
         assert(positions.shape[1] == 2)
@@ -451,6 +509,24 @@
         self.color_stride = color_stride
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         vertices = self.positions
         if zbuffer is None:
             empty = camera.lens.new_image(camera)
@@ -476,26 +552,26 @@
 
 
 class BoxSource(LineSource):
+    r"""A render source for a box drawn with line segments.
+
+    This render source will draw a box, with transparent faces, in data
+    space coordinates.  This is useful for annotations.
+
+    Parameters
+    ----------
+    left_edge: array-like, shape (3,), float
+        The left edge coordinates of the box.
+    right_edge : array-like, shape (3,), float
+        The right edge coordinates of the box.
+    color : array-like, shape (4,), float, optional
+        The colors (including alpha) to use for the lines.
+
+    Examples
+    --------
+    >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+    """
     def __init__(self, left_edge, right_edge, color=None):
-        r"""A render source for a box drawn with line segments.
-
-        This render source will draw a box, with transparent faces, in data
-        space coordinates.  This is useful for annotations.
-
-        Parameters
-        ----------
-        left_edge: array-like, shape (3,), float
-            The left edge coordinates of the box.
-        right_edge : array-like, shape (3,), float
-            The right edge coordinates of the box.
-        color : array-like, shape (4,), float, optional
-            The colors (including alpha) to use for the lines.
-
-        Examples
-        --------
-        >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
-
-        """
         if color is None:
             color = np.array([1.0, 1.0, 1.0, 1.0])
         color = ensure_numpy_array(color)
@@ -513,32 +589,32 @@
 
 
 class GridSource(LineSource):
+    r"""A render source for drawing grids in a scene.
+
+    This render source will draw blocks that are within a given data
+    source, by default coloring them by their level of resolution.
+
+    Parameters
+    ----------
+    data_source: :class:`~yt.data_objects.api.DataContainer`
+        The data container that will be used to identify grids to draw.
+    alpha : float
+        The opacity of the grids to draw.
+    cmap : color map name
+        The color map to use to map resolution levels to color.
+    min_level : int, optional
+        Minimum level to draw
+    max_level : int, optional
+        Maximum level to draw
+
+    Examples
+    --------
+    >>> dd = ds.sphere("c", (0.1, "unitary"))
+    >>> source = GridSource(dd, alpha=1.0)
+
+    """
     def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
-        r"""A render source for drawing grids in a scene.
-
-        This render source will draw blocks that are within a given data
-        source, by default coloring them by their level of resolution.
-
-        Parameters
-        ----------
-        data_source: :class:`~yt.data_objects.api.DataContainer`
-            The data container that will be used to identify grids to draw.
-        alpha : float
-            The opacity of the grids to draw.
-        cmap : color map name
-            The color map to use to map resolution levels to color.
-        min_level : int, optional
-            Minimum level to draw
-        max_level : int, optional
-            Maximum level to draw
-
-        Examples
-        --------
-        >>> dd = ds.sphere("c", (0.1, "unitary"))
-        >>> source = GridSource(dd, alpha=1.0)
-
-        """
         data_source = data_source_or_all(data_source)
         corners = []
         levels = []
@@ -586,24 +662,24 @@
 
 
 class CoordinateVectorSource(OpaqueSource):
+    r"""Draw coordinate vectors on the scene.
+
+    This will draw a set of coordinate vectors on the camera image.  They
+    will appear in the lower right of the image.
+
+    Parameters
+    ----------
+    colors: array-like, shape (3,4), optional
+        The x, y, z RGBA values to use to draw the vectors.
+    alpha : float, optional
+        The opacity of the vectors.
+
+    Examples
+    --------
+    >>> source = CoordinateVectorSource()
+
+    """
     def __init__(self, colors=None, alpha=1.0):
-        r"""Draw coordinate vectors on the scene.
-
-        This will draw a set of coordinate vectors on the camera image.  They
-        will appear in the lower right of the image.
-
-        Parameters
-        ----------
-        colors: array-like, shape (3,4), optional
-            The x, y, z RGBA values to use to draw the vectors.
-        alpha : float, optional
-            The opacity of the vectors.
-
-        Examples
-        --------
-        >>> source = CoordinateVectorSource()
-
-        """
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -616,6 +692,24 @@
         self.color_stride = 2
 
     def render(self, camera, zbuffer=None):
+        """Renders an image using the provided camera
+
+        Parameters
+        ----------
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance
+            A volume rendering camera. Can be any type of camera.
+        zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance
+            A zbuffer array. This is used for opaque sources to determine the
+            z position of the source relative to other sources. Only useful if
+            you are manually calling render on multiple sources. Scene.render
+            uses this internally.
+
+        Returns
+        -------
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the rendered image.
+
+        """
         camera.lens.setup_box_properties(camera)
         center = camera.focus
         # Get positions at the focus

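For reference, a minimal sketch of the manual multi-source flow these
render() docstrings describe (a sketch only, assuming a dataset ``ds`` is
already loaded; Scene.render does this z-buffer bookkeeping for you):

    import numpy as np
    from yt.visualization.volume_rendering.api import Camera, PointSource
    from yt.visualization.volume_rendering.zbuffer_array import ZBuffer

    cam = Camera(ds)
    empty = cam.lens.new_image(cam)
    # start with an empty buffer at infinite depth
    opaque = ZBuffer(empty, np.ones(empty.shape[:2]) * np.inf)

    # each opaque source composites itself against the shared z-buffer
    points = PointSource(np.random.random([100, 3]))
    opaque = points.render(cam, zbuffer=opaque)
    im = opaque.rgba
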
diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -24,39 +24,38 @@
 
 class Scene(object):
 
-    """The Scene Class
+    """A virtual landscape for a volume rendering.
 
     The Scene class is meant to be the primary container for the
     new volume rendering framework. A single scene may contain
     several Camera and RenderSource instances, and is the primary
     driver behind creating a volume rendering.
 
+    This sets up the basics needed to add sources and cameras.
+    This does very little setup, and requires additional input
+    to do anything useful.
+
+    Parameters
+    ----------
+    None
+
+    Examples
+    --------
+    >>> sc = Scene()
+
     """
 
     _current = None
     _camera = None
 
     def __init__(self):
-        r"""Create a new Scene instance.
-
-        This sets up the basics needed to add sources and cameras.
-        This does very little setup, and requires additional input
-        to do anything useful.
-
-        Parameters
-        ----------
-        None
-
-        Examples
-        --------
-        >>> sc = Scene()
-
-        """
+        r"""Create a new Scene instance"""
         super(Scene, self).__init__()
         self.sources = OrderedDict()
         self.camera = None
 
     def get_source(self, source_num):
+        """Returns the volume rendering source indexed by ``source_num``"""
         return list(itervalues(self.sources))[source_num]
 
     def _iter_opaque_sources(self):
@@ -79,9 +78,18 @@
                 yield k, source
 
     def add_source(self, render_source, keyname=None):
-        """
-        Add a render source to the scene.  This will autodetect the
-        type of source.
+        """Add a render source to the scene.
+
+        This will autodetect the type of source.
+
+        Parameters
+        ----------
+        render_source: an instance of :class:`yt.visualization.volume_rendering.render_source.RenderSource`
+            A source to contribute to the volume rendering scene.
+
+        keyname: string (optional)
+            The dictionary key used to reference the source in the sources
+            dictionary.
         """
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)
@@ -105,13 +113,13 @@
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        camera: :class:`Camera`, optional
+        camera: :class:`yt.visualization.volume_rendering.camera.Camera`, optional
             If specified, use a different :class:`Camera` to render the scene.
 
         Returns
         -------
-        bmp: :class:`ImageArray`
-            ImageArray instance of the current rendering image.
+        A :class:`yt.data_objects.image_array.ImageArray` instance containing
+        the current rendering image.
 
         Examples
         --------

diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -0,0 +1,151 @@
+"""
+Answer test to verify VR orientation and rotation is correct
+"""
+
+# -----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+# -----------------------------------------------------------------------------
+
+
+import numpy as np
+
+from yt import load_uniform_grid
+from yt.utilities.answer_testing.framework import \
+    requires_answer_testing, \
+    VRImageComparisonTest
+from yt.visualization.volume_rendering.api import \
+    Scene, \
+    Camera, \
+    VolumeSource, \
+    ColorTransferFunction
+
+
+def setup_ds():
+
+    N = 96
+
+    xmin = ymin = zmin = -1.0
+    xmax = ymax = zmax = 1.0
+
+    dcoord = (xmax - xmin)/N
+
+    arr = np.zeros((N, N, N), dtype=np.float64)
+    arr[:, :, :] = 1.e-4
+
+    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
+
+    # coordinates -- in the notation data[i, j, k]
+    x = (np.arange(N) + 0.5)*(xmax - xmin)/N + xmin
+    y = (np.arange(N) + 0.5)*(ymax - ymin)/N + ymin
+    z = (np.arange(N) + 0.5)*(zmax - zmin)/N + zmin
+
+    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
+
+    # sphere at the origin
+    c = np.array([0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax)])
+
+    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
+    arr[r < 0.05] = 1.0
+
+    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
+    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
+    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
+
+    # single cube on +x
+    xc = 0.75
+    dx = 0.05
+    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
+                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
+                                        np.logical_and(z3d > -dx, z3d < dx)))
+
+    arr[idx] = 1.0
+
+    # two cubes on +y
+    dy = 0.05
+    for yc in [0.65, 0.85]:
+
+        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
+                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
+                                            np.logical_and(z3d > -dy, z3d < dy)))
+
+        arr[idx] = 0.8
+
+    # three cubes on +z
+    dz = 0.05
+    for zc in [0.5, 0.7, 0.9]:
+
+        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
+                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
+                                            np.logical_and(y3d > -dz, y3d < dz)))
+
+        arr[idx] = 0.6
+
+    data = dict(Density=arr)
+    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
+
+    return ds
+
+
+@requires_answer_testing()
+def test_orientation():
+    ds = setup_ds()
+
+    sc = Scene()
+
+    vol = VolumeSource(ds, field=('gas', 'Density'))
+
+    tf = ColorTransferFunction((0.1, 1.0))
+    tf.sample_colormap(1.0, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.8, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.6, 0.01, colormap="coolwarm")
+    tf.sample_colormap(0.3, 0.01, colormap="coolwarm")
+    vol.set_transfer_function(tf)
+
+    n_frames = 5
+    theta = np.pi / n_frames
+    decimals = 3
+
+    for lens_type in ['plane-parallel', 'perspective']:
+        frame = 0
+
+        cam = Camera(ds, lens_type=lens_type)
+        cam.resolution = (1000, 1000)
+        cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
+        cam.switch_orientation(normal_vector=[1., 0., 0.],
+                               north_vector=[0., 0., 1.])
+        cam.set_width(ds.domain_width*2.)
+
+        sc.camera = cam
+        sc.add_source(vol)
+        yield VRImageComparisonTest(
+            sc, ds, '%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.yaw(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'yaw_%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.pitch(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'pitch_%s_%04d' % (lens_type, frame), decimals)
+
+        for i in range(n_frames):
+            frame += 1
+            center = ds.arr([0, 0, 0], 'code_length')
+            cam.roll(theta, rot_center=center)
+            sc.camera = cam
+            yield VRImageComparisonTest(
+                sc, ds, 'roll_%s_%04d' % (lens_type, frame), decimals)

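Outside the answer-testing harness, the same rot_center machinery gives an
orbit-style camera move.  A sketch (the sample dataset name is illustrative;
uses the render()/save() split adopted later in this series):

    import numpy as np
    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    im, sc = yt.volume_render(ds)

    # orbit the domain center rather than spinning the camera in place
    n_frames = 10
    for i in range(n_frames):
        sc.camera.yaw(np.pi / n_frames, rot_center=ds.domain_center)
        sc.render()
        sc.save('orbit_%04i.png' % i)
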
diff -r d9010e05d64656a32c9a3ced5e5557994d1d8480 -r 90e44e84fb2aa4c2b662206285547db372759294 yt/visualization/volume_rendering/zbuffer_array.py
--- a/yt/visualization/volume_rendering/zbuffer_array.py
+++ b/yt/visualization/volume_rendering/zbuffer_array.py
@@ -12,13 +12,40 @@
 #-----------------------------------------------------------------------------
 
 
-from yt.funcs import mylog
-from yt.data_objects.api import ImageArray
 import numpy as np
 
 
 class ZBuffer(object):
-    """docstring for ZBuffer"""
+    """A container object for z-buffer arrays
+
+    A zbuffer is a companion array for an image that allows the volume rendering
+    infrastructure to determine whether one opaque source is in front of another
+    opaque source.  The z buffer encodes the distance to the opaque source
+    relative to the camera position.
+
+    Parameters
+    ----------
+    rgba: MxNx4 image
+        The image the z buffer corresponds to
+    z: MxN image
+        The z depth of each pixel in the image. The shape of the image must be
+        the same as each RGBA channel in the original image.
+    
+    Examples
+    --------
+    >>> import numpy as np
+    >>> shape = (64, 64)
+    >>> b1 = ZBuffer(np.random.random(shape + (4,)), np.ones(shape))
+    >>> b2 = ZBuffer(np.random.random(shape + (4,)), np.zeros(shape))
+    >>> c = b1 + b2
+    >>> np.all(c.rgba == b2.rgba)
+    True
+    >>> np.all(c.z == b2.z)
+    True
+    >>> np.all(c == b2)
+    True
+
+    """
     def __init__(self, rgba, z):
         super(ZBuffer, self).__init__()
         assert(rgba.shape[:len(z.shape)] == z.shape)
@@ -31,8 +58,8 @@
         f = self.z < other.z
         if self.z.shape[1] == 1:
             # Non-rectangular
-            rgba = (self.rgba * f[:,None,:])
-            rgba += (other.rgba * (1.0 - f)[:,None,:])
+            rgba = (self.rgba * f[:, None, :])
+            rgba += (other.rgba * (1.0 - f)[:, None, :])
         else:
             b = self.z > other.z
             rgba = np.empty(self.rgba.shape)

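Per the new docstring, adding two buffers keeps, pixel by pixel, whichever
source is nearer the camera (smaller z).  A runnable variant of the doctest
above:

    import numpy as np
    from yt.visualization.volume_rendering.zbuffer_array import ZBuffer

    shape = (64, 64)
    # 'near' is closer to the camera everywhere (z of 0 versus 1)
    near = ZBuffer(np.random.random(shape + (4,)), np.zeros(shape))
    far = ZBuffer(np.random.random(shape + (4,)), np.ones(shape))

    combined = near + far
    assert np.all(combined.rgba == near.rgba)
    assert np.all(combined.z == near.z)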

https://bitbucket.org/yt_analysis/yt/commits/e1fc46f03f1e/
Changeset:   e1fc46f03f1e
Branch:      yt
User:        atmyers
Date:        2015-10-19 00:58:40+00:00
Summary:     adding a more realistic example for the VolumeSource docstrings
Affected #:  1 file

diff -r 90e44e84fb2aa4c2b662206285547db372759294 -r e1fc46f03f1ed75600268f50c7da972e67f64e68 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -93,7 +93,19 @@
 
     Examples
     --------
+
+    This example manually creates a VolumeSource, adds it to a scene, sets the
+    camera, and renders an image.
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> sc = Scene()
     >>> source = VolumeSource(ds.all_data(), 'density')
+    >>> sc.add_source(source)
+    >>> cam = Camera(ds)
+    >>> sc.camera = cam
+    >>> im = sc.render()
 
     """
     _image = None
@@ -309,7 +321,7 @@
     data_source = None
 
     def __init__(self, data_source, field):
-        r"""Initialize a new unstructured source for rendering."""
+        r"""Initialize a new unstructured mesh source for rendering."""
         super(MeshSource, self).__init__()
         self.data_source = data_source_or_all(data_source)
         field = self.data_source._determine_fields(field)[0]
@@ -334,7 +346,11 @@
             raise RuntimeError("Mesh not initialized")
 
     def build_mesh(self):
+        """
 
+        This constructs the mesh that will be ray-traced.
+
+        """
         field_data = self.data_source[self.field]
         vertices = self.data_source.ds.index.meshes[0].connectivity_coords
 


https://bitbucket.org/yt_analysis/yt/commits/7bb3d2f21ee1/
Changeset:   7bb3d2f21ee1
Branch:      yt
User:        atmyers
Date:        2015-10-19 00:59:44+00:00
Summary:     merging
Affected #:  22 files

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -39,7 +39,8 @@
 
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
-sc.render("v1.png")
+sc.render()
+sc.save("v1.png")
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
@@ -48,12 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 #
 ## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -61,11 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 #
 ## This looks pretty good, now let's go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.render("v5.png")
 
 # This looks great!

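All of the cookbook edits in this changeset apply the same mechanical
translation, since render() no longer accepts a filename; the pattern in
miniature:

    # old, single-call API (no longer valid)
    # im = sc.render('image.png', sigma_clip=6.0)

    # new, two-step API: render() returns the image, save() writes it out
    sc.render(sigma_clip=6.0)
    sc.save('image.png')
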
diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,15 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.render(sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,4 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(fname='custom.png', sigma_clip=4)
+sc.render(sigma_clip=4)
+sc.save('custom.png')

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/custom_transfer_function_volume_rendering.py
--- a/doc/source/cookbook/custom_transfer_function_volume_rendering.py
+++ b/doc/source/cookbook/custom_transfer_function_volume_rendering.py
@@ -21,4 +21,4 @@
     np.log10(ds.quan(1.0e-29, 'g/cm**3')),
     scale=30.0, colormap='RdBu_r')
 
-im = sc.render(fname='new_tf.png', sigma_clip=None)
+sc.save('new_tf.png')

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -45,6 +45,7 @@
    embedded_webm_animation
    gadget_notebook
    owls_notebook
+   ../visualizing/transfer_function_helper
    ../analyzing/analysis_modules/sunyaev_zeldovich
    fits_radio_cubes
    fits_xray_images

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,7 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-im = sc.render("v1.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v1.png")
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -22,27 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-im = sc.render("v2.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v2.png")
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured
 
 tf.grey_opacity = True
-im = sc.render("v3.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v3.png")
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v4.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v4.png")
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v5.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v5.png")
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -50,13 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-im = sc.render("v6.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v6.png")
 
 # That is very opaque!  Now let's go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-im = sc.render("v7.png", sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save("v7.png")
 
 # That looks pretty different, but the main thing is that you can see that the
 # inner contours are somewhat visible again.  

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -8,15 +8,15 @@
 sc.get_source(0).transfer_function.grey_opacity=True
 
 sc.annotate_domain(ds)
-im = sc.render()
-im.write_png("%s_vr_domain.png" % ds)
+sc.render()
+sc.save("%s_vr_domain.png" % ds)
 
 sc.annotate_grids(ds)
-im = sc.render()
-im.write_png("%s_vr_grids.png" % ds)
+sc.render()
+sc.save("%s_vr_grids.png" % ds)
 
 # Here we can draw the coordinate vectors on top of the image by processing
 # it through the camera. Then save it out.
 sc.annotate_axes()
-im = sc.render()
-im.write_png("%s_vr_coords.png" % ds)
+sc.render()
+sc.save("%s_vr_coords.png" % ds)

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,7 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_plane-parallel.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_plane-parallel.png')
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -50,7 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_perspective.png')
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -65,7 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-perspective.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-perspective.png')
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -79,7 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_fisheye.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_fisheye.png')
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -96,7 +100,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_spherical.png')
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -111,4 +116,5 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render('lens_stereo-spherical.png', sigma_clip=6.0)
+sc.render(sigma_clip=6.0)
+sc.save('lens_stereo-spherical.png')

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/visualizing/transfer_function_helper.rst
--- /dev/null
+++ b/doc/source/visualizing/transfer_function_helper.rst
@@ -0,0 +1,6 @@
+.. _transfer-function-helper-tutorial:
+
+Transfer Function Helper Tutorial
+=================================
+
+.. notebook:: TransferFunctionHelper_Tutorial.ipynb

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -31,11 +31,11 @@
    :align: center
    :alt: Diagram of a 3D Scene
 
-In versions of yt prior to 3.2, the only volume rendering interface accessible
+In versions of yt prior to 3.3, the only volume rendering interface accessible
 was through the "camera" object.  This presented a number of problems,
 principal among which was the inability to describe new scene elements or to
 develop complex visualizations that were independent of the specific elements
-being rendered.  The new "scene" based interface present in yt 3.2 and beyond
+being rendered.  The new "scene" based interface present in yt 3.3 and beyond
 enables both more complex visualizations to be constructed as well as a new,
 more intuitive interface for very simple 3D visualizations.
 
@@ -65,14 +65,15 @@
   # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   # volume render the 'density' field, and save the resulting image
-  im, sc = yt.volume_render(ds, 'density', fname='test_rendering.png')
+  im, sc = yt.volume_render(ds, 'density', fname='rendering.png')
 
-  # im is the image that was generated.
+  # im is the image array generated. It is also saved to 'rendering.png'.
   # sc is an instance of a Scene object, which allows you to further refine
-  # your renderings.
+  # your renderings, and later save them.
 
-When the :func:`~yt.visualization.volume_rendering.volume_render` function 
-is called, first an empty 
+When the 
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+function is called, first an empty 
 :class:`~yt.visualization.volume_rendering.scene.Scene` object is created. 
 Next, a :class:`~yt.visualization.volume_rendering.api.VolumeSource`
 object is created, which decomposes the volume elements
@@ -96,9 +97,10 @@
 lenses can be swapped in and out.  For example, this might include a fisheye
 lens, a spherical lens, or some other method of describing the direction and
 origin of rays for rendering. Once the camera is added to the scene object, we
-call the main method of the
+call the main methods of the
 :class:`~yt.visualization.volume_rendering.scene.Scene` class,
-:meth:`~yt.visualization.volume_rendering.scene.Scene.render`.  When called,
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` and 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.save`.  When called,
 the scene will loop through all of the
 :class:`~yt.visualization.volume_rendering.render_source.RenderSource` objects
 that have been added and integrate the radiative transfer equation through the
@@ -110,20 +112,17 @@
 Alternatively, if you don't want to immediately generate an image of your
 volume rendering, and you just want access to the default scene object, 
 you can skip this expensive operation by just running the
-:func:`~yt.visualization.volume_rendering.create_scene` function in lieu of the
-:func:`~yt.visualization.volume_rendering.volume_render` function. Example:
+:func:`~yt.visualization.volume_rendering.volume_rendering.create_scene` function in lieu of the
+:func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` function. Example:
 
 .. python-script::
 
   import yt
-  # load the data
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
-  # volume render the 'density' field 
   sc = yt.create_scene(ds, 'density')
 
-
-Modifying the Scene
--------------------
+Modifying and Saving the Scene
+------------------------------
 
 Once a basic scene has been created with default render sources and
 camera operations, deeper modifications are possible. These
@@ -133,6 +132,56 @@
 present in the scene.  Below, we describe a few of the aspects of tuning a
 scene to create a visualization that is communicative and pleasing.
 
+.. _rendering_scene:
+
+Rendering and Saving
+++++++++++++++++++++
+
+Whenever you want a rendering of your current scene configuration, use the
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` method to
+trigger the scene to actually do the ray-tracing step.  After that, you can
+use the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` method
+to save it to disk.  Alternatively, 
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render` will return an 
+:class:`~yt.data_objects.image_array.ImageArray` object if you want to further 
+process it in Python (potentially writing it out with 
+:meth:`~yt.data_objects.image_array.ImageArray.write_png`).  You can continue 
+modifying your :class:`~yt.visualization.volume_rendering.scene.Scene` object,
+and render it as you make changes to see how those changes affect the resulting
+image.  
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render() 
+  sc.save()
+  # ... make changes to the scene here ...
+  sc.render()
+  sc.save('changes.png')
+
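+A minimal sketch of handling the returned image directly;
+:meth:`~yt.data_objects.image_array.ImageArray.write_png` accepts the same
+``sigma_clip`` argument as
+:meth:`~yt.visualization.volume_rendering.scene.Scene.render`:
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  im = sc.render()
+  im.write_png('rendering.png', sigma_clip=4)
+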
+.. _sigma_clip:
+
+Improving Image Contrast with Sigma Clipping
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If your images appear to be too dark, you can try using the ``sigma_clip``
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
+Because the brightness range in an image is scaled to match the range of 
+emissivity values of the underlying rendering, if you have a few really 
+high-emissivity points, they will scale the rest of your image to be quite 
+dark.  ``sigma_clip = N`` can address this by removing values that are more
+than ``N`` standard deviations brighter than the mean of your image.  
+Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+
+.. python-script::
+
+  import yt
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  sc = yt.create_scene(ds, 'density')
+  sc.render(sigma_clip=4)
+  sc.save()
+
 .. _transfer_functions:
 
 Transfer Functions
@@ -210,7 +259,11 @@
 TransferFunctionHelper
 ----------------------
 
-.. notebook:: TransferFunctionHelper_Tutorial.ipynb
+Because good transfer functions can be difficult to generate, the 
+TransferFunctionHelper exists in order to help create and modify transfer
+functions with smart defaults for your datasets.  For a full example of
+how to use this interface, see the
+:ref:`transfer-function-helper-tutorial`.
 
 Adding New Sources
 ++++++++++++++++++
@@ -325,7 +378,8 @@
 .. python-script::
 
    for i in sc.camera.zoomin(100, 5):
-       sc.render("frame_%03i.png" % i)
+       sc.render()
+       sc.save("frame_%03i.png" % i)
 
 The variable ``i`` is the frame number in the particular loop being called.  In
 this case, this will zoom in by a factor of 100 over the course of 5 frames.

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -678,7 +678,8 @@
     def run(self):
         tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
         os.close(tmpfd)
-        self.scene.render(tmpname, sigma_clip=1.0)
+        self.scene.render(sigma_clip=1.0)
+        self.scene.save(tmpname)
         image = mpimg.imread(tmpname)
         os.remove(tmpname)
         return [zlib.compress(image.dumps())]

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -447,7 +447,8 @@
         --------
 
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ...     im = sc.render("rotation_%04i.png" % i)
+        ...     im = sc.render()
+        ...     sc.save('rotation_%04i.png' % i)
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -475,7 +476,8 @@
         --------
 
         >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
-        ...     sc.render("move_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("move_%04i.png" % i)
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -523,7 +525,8 @@
         --------
 
         >>> for i in cam.iter_zoom(100.0, 10):
-        ...     sc.render("zoom_%04i.png" % i)
+        ...     sc.render()
+        ...     sc.save("zoom_%04i.png" % i)
         """
         f = final**(1.0/n_steps)
         for i in xrange(n_steps):

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -238,7 +238,8 @@
 
         dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
         dy = np.dot(pos1 - sight_center.d, camera.unit_vectors[1])
-        dz = np.dot(pos1 - sight_center.d, camera.unit_vectors[2])
+        dz = np.dot(pos - camera.position.d, camera.unit_vectors[2])
+
         # Transpose into image coords.
         px = (res[0] * 0.5 + res[0] / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -415,8 +416,8 @@
 
         dx = np.dot(pos1 - sight_center.d, east_vec_rot)
         dy = np.dot(pos1 - sight_center.d, north_vec)
-        dz = np.dot(pos1 - sight_center.d, normal_vec_rot)
-
+        dz = np.dot(pos - camera_position_shift, normal_vec_rot)
+        
         # Transpose into image coords.
         px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
@@ -512,7 +513,7 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         # First, we transform lpos into *relative to the camera* coordinates.
-        lpos = camera.position - pos
+        lpos = camera.position.d - pos
         lpos = lpos.dot(self.rotation_matrix)
         # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
@@ -525,11 +526,13 @@
         px = r * np.cos(phi)
         py = r * np.sin(phi)
         u = camera.focus.uq
+        length_unit = u / u.d
         # dz is distance the ray would travel
         px = (px + 1.0) * res[0] / 2.0
         py = (py + 1.0) * res[1] / 2.0
-        px = (u * np.rint(px)).astype("int64")
-        py = (u * np.rint(py)).astype("int64")
+        # px and py should be dimensionless
+        px = (u * np.rint(px) / length_unit).astype("int64")
+        py = (u * np.rint(py) / length_unit).astype("int64")
         return px, py, dz
 
 
@@ -609,7 +612,7 @@
             res = camera.resolution
         # Much of our setup here is the same as in the fisheye, except for the
         # actual conversion back to the px, py values.
-        lpos = camera.position - pos
+        lpos = camera.position.d - pos
         # inv_mat = np.linalg.inv(self.rotation_matrix)
         # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
@@ -626,11 +629,13 @@
         py = np.arcsin(lpos[:, 2])
         dz = mag / self.radius
         u = camera.focus.uq
+        length_unit = u / u.d
         # dz is distance the ray would travel
         px = ((-px + np.pi) / (2.0*np.pi)) * res[0]
         py = ((-py + np.pi/2.0) / np.pi) * res[1]
-        px = (u * np.rint(px)).astype("int64")
-        py = (u * np.rint(py)).astype("int64")
+        # px and py should be dimensionless
+        px = (u * np.rint(px) / length_unit).astype("int64")
+        py = (u * np.rint(py) / length_unit).astype("int64")
         return px, py, dz
 
 

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -152,6 +152,8 @@
     camera.set_width(width)
     camera.switch_orientation(normal_vector=normal_vector,
                               north_vector=north_vector)
+    if not iterable(width):
+        width = data_source.ds.arr([width]*3)
     camera.position = center - width[2]*camera.normal_vector
     camera.focus = center
     sc.camera = camera
@@ -174,9 +176,6 @@
     east_vector = camera.unit_vectors[1]
     normal_vector = camera.unit_vectors[2]
     fields = vol.field
-    if not iterable(width):
-        width = data_source.ds.arr([width]*3)
-
     mi = ds.domain_right_edge.copy()
     ma = ds.domain_left_edge.copy()
     for off1 in [-1, 1]:

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -70,7 +70,6 @@
     def set_zbuffer(self, zbuffer):
         self.zbuffer = zbuffer
 
-
 class VolumeSource(RenderSource):
     """A class for rendering data from a volumetric data source
 
@@ -106,8 +105,8 @@
     >>> cam = Camera(ds)
     >>> sc.camera = cam
     >>> im = sc.render()
+    """
 
-    """
     _image = None
     data_source = None
 
@@ -136,7 +135,9 @@
 
     def build_defaults(self):
         """Sets a default volume and transfer function"""
+        mylog.info("Creating default volume")
         self.build_default_volume()
+        mylog.info("Creating default transfer function")
         self.build_default_transfer_function()
 
     def set_transfer_function(self, transfer_function):
@@ -277,12 +278,11 @@
             Whether or not this is being called from a higher level in the VR
             interface. Used to set the correct orientation.
         """
-        image = self.volume.reduce_tree_images(image,
-                                               camera.lens.viewpoint)
+        image = self.volume.reduce_tree_images(image, camera.lens.viewpoint)
         image.shape = camera.resolution[0], camera.resolution[1], 4
         # If the call is from VR, the image is rotated by 180 to get correct
-        # up dirirection
-        if call_from_VR is True:
+        # up direction
+        if call_from_VR is True: 
             image = np.rot90(image, k=2)
         if self.transfer_function.grey_opacity is False:
             image[:, :, 3] = 1.0
@@ -381,6 +381,7 @@
         the rendered image.
 
         """
+ 
         self.sampler = new_mesh_sampler(camera, self)
 
         mylog.debug("Casting rays")
@@ -420,6 +421,7 @@
 
     """
 
+
     _image = None
     data_source = None
 
@@ -471,7 +473,7 @@
         return zbuffer
 
     def __repr__(self):
-        disp = "<Points Source>"
+        disp = "<Point Source>"
         return disp
 
 
@@ -569,7 +571,6 @@
 
 class BoxSource(LineSource):
     r"""A render source for a box drawn with line segments.
-
     This render source will draw a box, with transparent faces, in data
     space coordinates.  This is useful for annotations.
 
@@ -693,8 +694,8 @@
     Examples
     --------
     >>> source = CoordinateVectorSource()
+    """
 
-    """
     def __init__(self, colors=None, alpha=1.0):
         super(CoordinateVectorSource, self).__init__()
         # If colors aren't individually set, make black with full opacity

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -14,11 +14,11 @@
 
 import numpy as np
 from collections import OrderedDict
-from yt.funcs import mylog
+from yt.funcs import mylog, get_image_suffix
 from yt.extern.six import iteritems, itervalues
 from .camera import Camera
 from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
-    GridSource
+    GridSource, RenderSource
 from .zbuffer_array import ZBuffer
 
 
@@ -53,6 +53,8 @@
         super(Scene, self).__init__()
         self.sources = OrderedDict()
         self.camera = None
+        # An image array containing the last rendered image of the scene
+        self.last_render = None
 
     def get_source(self, source_num):
         """Returns the volume rendering source indexed by ``source_num``"""
@@ -98,22 +100,20 @@
 
         return self
 
-    def render(self, fname=None, sigma_clip=None, camera=None):
+    def render(self, sigma_clip=None, camera=None):
         r"""Render all sources in the Scene.
 
         Use the current state of the Scene object to render all sources
-        currently in the scene.
+        currently in the scene.  Returns the image array.  If you want to
+        save the output to a file, call the save() function.
 
         Parameters
         ----------
-        fname: string, optional
-            If specified, save the rendering as a bitmap to the file "fname".
-            Default: None
         sigma_clip: float, optional
             Image will be clipped before saving to the standard deviation
             of the image multiplied by this value.  Useful for enhancing
             images. Default: None
-        camera: :class:`yt.visualization.volume_rendering.camera.Camera`, optional
+        camera: :class:`Camera`, optional
             If specified, use a different :class:`Camera` to render the scene.
 
         Returns
@@ -125,20 +125,80 @@
         --------
         >>> sc = Scene()
         >>> # Add sources/camera/etc
-        >>> im = sc.render('rendering.png')
+        >>> im = sc.render(sigma_clip=4)
+        >>> sc.save()
 
         """
+        mylog.info("Rendering scene (Can take a while).")
         if camera is None:
             camera = self.camera
         assert(camera is not None)
         self._validate()
         bmp = self.composite(camera=camera)
-        if fname is not None:
-            bmp.write_png(fname, sigma_clip=sigma_clip)
+        self.last_render = bmp
         return bmp
 
+    def save(self, fname=None):
+        r"""Saves the most recently rendered image of the Scene to disk.
+
+        Once you have created a scene and rendered that scene to an image 
+        array, this saves that image array to disk with an optional filename.
+        If an image has not yet been rendered for the current scene object,
+        this method renders one first and then writes it out.
+
+        Parameters
+        ----------
+        fname: string, optional
+            If specified, save the rendering as a bitmap to the file "fname".
+            If unspecified, it creates a default based on the dataset filename.
+            Default: None
+
+        Returns
+        -------
+            Nothing
+
+        Examples
+        --------
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.render()
+        >>> sc.save('test.png')
+
+        Or, alternatively:
+        >>> sc = yt.create_scene(ds)
+        >>> # Add sources/camera/etc
+        >>> sc.save('test.png')
+
+        """
+        if fname is None:
+            sources = list(itervalues(self.sources))
+            rensources = [s for s in sources if isinstance(s, RenderSource)]
+            # if a volume source is present, use its affiliated ds for fname
+            if len(rensources) > 0:
+                rs = rensources[0]
+                basename = rs.data_source.ds.basename
+                if isinstance(rs.field, basestring):
+                    field = rs.field
+                else:
+                    field = rs.field[-1]
+                fname = "%s_Render_%s.png" % (basename, field)
+            # if no volume source present, use a default filename
+            else:
+                fname = "Render_opaque.png"   
+        suffix = get_image_suffix(fname)
+        if suffix == '':
+            suffix = '.png'
+            fname = '%s%s' % (fname, suffix)
+
+        if self.last_render is None:
+            self.render()
+
+        mylog.info("Saving render %s", fname)
+        self.last_render.write_png(fname)
+ 
     def _validate(self):
         r"""Validate the current state of the scene."""
+
         for k, source in iteritems(self.sources):
             source._validate()
         return

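Because save() falls back to a dataset-derived filename, the minimal
workflow needs no filenames at all.  A short sketch (the derived name shown
is illustrative):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sc = yt.create_scene(ds, 'density')

    # save() renders first if needed, then derives a name of the form
    # '<dataset basename>_Render_<field>.png'
    sc.save()

    # a name without an image suffix gets '.png' appended
    sc.save('my_render')   # writes my_render.png
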
diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/tests/modify_transfer_function.py
--- a/yt/visualization/volume_rendering/tests/modify_transfer_function.py
+++ b/yt/visualization/volume_rendering/tests/modify_transfer_function.py
@@ -22,5 +22,5 @@
 tf.clear()
 tf.grey_opacity=True
 tf.add_layers(3, colormap='RdBu')
-sc.render("new_tf.png")
-
+sc.render()
+sc.save("new_tf.png")

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/tests/multiple_fields.py
--- a/yt/visualization/volume_rendering/tests/multiple_fields.py
+++ b/yt/visualization/volume_rendering/tests/multiple_fields.py
@@ -20,5 +20,6 @@
 volume_source = sc.get_source(0)
 volume_source.set_field(('gas','velocity_x'))
 volume_source.build_default_transfer_function()
-sc.render("render_x.png")
+sc.render()
+sc.save("render_x.png")
 

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
@@ -21,4 +21,5 @@
 frames = 10
 for i in range(frames):
     sc.camera.yaw(angle/frames)
-    sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+    sc.render(sigma_clip=6.0)
+    sc.save('test_rot_%04i.png' % i)

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -55,7 +55,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_perspective_%s.png' % self.field[1])
 
     def test_stereoperspective_lens(self):
         sc = Scene()
@@ -67,8 +68,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereoperspective_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereoperspective_%s.png' % self.field[1])
 
     def test_fisheye_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -85,8 +86,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_fisheye_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_fisheye_%s.png' % self.field[1])
 
     def test_plane_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -101,8 +102,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_plane_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_plane_%s.png' % self.field[1])
 
     def test_spherical_lens(self):
         sc = Scene()
@@ -114,8 +115,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_spherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_spherical_%s.png' % self.field[1])
 
     def test_stereospherical_lens(self):
         w = (self.ds.domain_width).in_units('code_length')
@@ -129,5 +130,5 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render('test_stereospherical_%s.png' % self.field[1],
-                  sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_stereospherical_%s.png' % self.field[1])

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -76,9 +76,11 @@
         mi_bound = ((ma-mi)*(0.10))+mi
         ma_bound = ((ma-mi)*(0.90))+mi
         tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-        sc.render('test_scene.png', sigma_clip=6.0)
+        sc.render(sigma_clip=6.0)
+        sc.save('test_scene.png')
 
         nrot = 2 
         for i in range(nrot):
             sc.camera.pitch(2*np.pi/nrot)
-            sc.render('test_rot_%04i.png' % i, sigma_clip=6.0)
+            sc.render(sigma_clip=6.0)
+            sc.save('test_rot_%04i.png' % i)

diff -r e1fc46f03f1ed75600268f50c7da972e67f64e68 -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -113,5 +113,6 @@
     >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
     sc = create_scene(data_source, field=field)
-    im = sc.render(fname=fname, sigma_clip=sigma_clip)
+    im = sc.render(sigma_clip=sigma_clip)
+    sc.save(fname=fname)
     return im, sc


https://bitbucket.org/yt_analysis/yt/commits/b0a86986436e/
Changeset:   b0a86986436e
Branch:      yt
User:        atmyers
Date:        2015-10-19 23:23:57+00:00
Summary:     this allows rendering scenes when only opaque sources are present
Affected #:  1 file

diff -r 7bb3d2f21ee1ca94fbbf1f211a7b57fef714466e -r b0a86986436eac55d35d89428748202c613a6430 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -234,7 +234,8 @@
         opaque = ZBuffer(empty, np.ones(empty.shape[:2]) * np.inf)
 
         for k, source in self._iter_opaque_sources():
-            source.render(camera, zbuffer=opaque)
+            opaque = source.render(camera, zbuffer=opaque)
+            im = opaque.rgba
 
         for k, source in self._iter_transparent_sources():
             im = source.render(camera, zbuffer=opaque)

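With this fix, a scene holding only opaque annotation sources renders
cleanly.  A minimal sketch (the dataset name is illustrative):

    import yt
    from yt.visualization.volume_rendering.api import Scene, Camera, BoxSource

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')

    # no VolumeSource anywhere -- just an opaque box annotation
    sc = Scene()
    sc.add_source(BoxSource(ds.domain_left_edge, ds.domain_right_edge))
    sc.camera = Camera(ds)
    sc.render()
    sc.save()   # falls back to 'Render_opaque.png'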

https://bitbucket.org/yt_analysis/yt/commits/bcfb5e76d8da/
Changeset:   bcfb5e76d8da
Branch:      yt
User:        atmyers
Date:        2015-10-19 23:24:09+00:00
Summary:     More robust examples for the render sources
Affected #:  1 file

diff -r b0a86986436eac55d35d89428748202c613a6430 -r bcfb5e76d8daadddc0fc97a51f3a4fd38ee1fc08 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -93,6 +93,16 @@
     Examples
     --------
 
+    The easiest way to make a VolumeSource is to use the volume_render
+    function, so that the VolumeSource gets created automatically. This 
+    example shows how to do this and then access the resulting source:
+
+    >>> import yt
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> im, sc = yt.volume_render(ds)
+    >>> volume_source = sc.get_source(0)
+
+    You can also create VolumeSource instances by hand and add them to Scenes.
     This example manually creates a VolumeSource, adds it to a scene, sets the
     camera, and renders an image.
 
@@ -105,6 +115,7 @@
     >>> cam = Camera(ds)
     >>> sc.camera = cam
     >>> im = sc.render()
+
     """
 
     _image = None
@@ -417,7 +428,26 @@
 
     Examples
     --------
-    >>> source = PointSource(particle_positions)
+
+    This example creates a volume rendering and adds 1000 random points to
+    the image:
+
+    >>> import yt
+    >>> import numpy as np
+    >>> from yt.visualization.volume_rendering.api import PointSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    
+    >>> im, sc = yt.volume_render(ds)
+    
+    >>> npoints = 1000
+    >>> vertices = np.random.random([npoints, 3])
+    >>> colors = np.random.random([npoints, 4])
+    >>> colors[:,3] = 1.0
+
+    >>> points = PointSource(vertices, colors=colors)
+    >>> sc.add_source(points)
+
+    >>> im = sc.render()
 
     """
 
@@ -502,8 +532,27 @@
 
     Examples
     --------
-    >>> source = LineSource(np.random.random((10, 3)))
 
+    This example creates a volume rendering and then adds some random lines
+    to the image:
+
+    >>> import yt
+    >>> import numpy as np
+    >>> from yt.visualization.volume_rendering.api import LineSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    
+    >>> im, sc = yt.volume_render(ds)
+    
+    >>> npoints = 100
+    >>> vertices = np.random.random([npoints, 2, 3])
+    >>> colors = np.random.random([npoints, 4])
+    >>> colors[:,3] = 1.0
+    
+    >>> lines = LineSource(vertices, colors)
+    >>> sc.add_source(lines)
+
+    >>> im = sc.render()
+    
     """
 
     _image = None
@@ -585,7 +634,22 @@
 
     Examples
     --------
-    >>> source = BoxSource(grid_obj.LeftEdge, grid_obj.RightEdge)
+
+    This example shows how to use BoxSource to add an outline of the 
+    domain boundaries to a volume rendering.
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import BoxSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>>
+    >>> im, sc = yt.volume_render(ds)
+    >>> 
+    >>> box_source = BoxSource(ds.domain_left_edge,
+    >>>                       ds.domain_right_edge,
+    >>>                       [1.0, 1.0, 1.0, 1.0])
+    >>> sc.add_source(box_source)
+    >>> 
+    >>> im = sc.render()
 
     """
     def __init__(self, left_edge, right_edge, color=None):
@@ -626,8 +690,38 @@
 
     Examples
     --------
+
+    This example makes a volume rendering and adds outlines of all the 
+    AMR grids in the simulation:
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import GridSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>>
+    >>> im, sc = yt.volume_render(ds)
+    >>>
+    >>> grid_source = GridSource(ds.all_data(), alpha=1.0)
+    >>>
+    >>> sc.add_source(grid_source)
+    >>>
+    >>> im = sc.render()
+
+    This example does the same thing, except it only draws the grids
+    that are inside a sphere of radius (0.1, "unitary") located at the
+    domain center:
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import GridSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> 
+    >>> im, sc = yt.volume_render(ds)
+    >>> 
     >>> dd = ds.sphere("c", (0.1, "unitary"))
-    >>> source = GridSource(dd, alpha=1.0)
+    >>> grid_source = GridSource(dd, alpha=1.0)
+    >>> 
+    >>> sc.add_source(grid_source)
+    >>>
+    >>> im = sc.render()
 
     """
     def __init__(self, data_source, alpha=0.3, cmap='algae',
@@ -693,7 +787,19 @@
 
     Examples
     --------
-    >>> source = CoordinateVectorSource()
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import CoordinateVectorSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>>
+    >>> im, sc = yt.volume_render(ds)
+    >>> 
+    >>> coord_source = CoordinateVectorSource()
+    >>> 
+    >>> sc.add_source(coord_source)
+    >>> 
+    >>> im = sc.render()
+
     """
 
     def __init__(self, colors=None, alpha=1.0):


https://bitbucket.org/yt_analysis/yt/commits/dddeb9913d04/
Changeset:   dddeb9913d04
Branch:      yt
User:        atmyers
Date:        2015-10-20 05:03:27+00:00
Summary:     add an assert to catch when positions has the wrong shape, to avoid a confusing error later on
Affected #:  1 file

diff -r bcfb5e76d8daadddc0fc97a51f3a4fd38ee1fc08 -r dddeb9913d0427b9a7a55a80a5634a4fb3327eed yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -456,6 +456,7 @@
     data_source = None
 
     def __init__(self, positions, colors=None, color_stride=1):
+        assert(positions.ndim == 2 and positions.shape[1] == 3)
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -645,8 +646,8 @@
     >>> im, sc = yt.volume_render(ds)
     >>> 
     >>> box_source = BoxSource(ds.domain_left_edge,
-    >>>                       ds.domain_right_edge,
-    >>>                       [1.0, 1.0, 1.0, 1.0])
+    ...                       ds.domain_right_edge,
+    ...                       [1.0, 1.0, 1.0, 1.0])
     >>> sc.add_source(box_source)
     >>> 
     >>> im = sc.render()
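
With this assert in place, a mis-shaped positions array fails loudly at
construction time instead of producing a confusing error deep inside the
renderer. A minimal sketch of the new behavior, assuming the same
PointSource import path used in the docstring examples above:

    import numpy as np
    from yt.visualization.volume_rendering.api import PointSource

    good = np.random.random([100, 3])   # one (x, y, z) row per point
    points = PointSource(good)          # accepted

    bad = np.random.random([100, 2])    # missing the z column
    try:
        PointSource(bad)
    except AssertionError:
        print("wrong shape caught at construction time")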


https://bitbucket.org/yt_analysis/yt/commits/3f4e2f52fcf9/
Changeset:   3f4e2f52fcf9
Branch:      yt
User:        atmyers
Date:        2015-10-20 05:03:53+00:00
Summary:     better Camera examples
Affected #:  1 file

diff -r dddeb9913d0427b9a7a55a80a5634a4fb3327eed -r 3f4e2f52fcf9322adf7e9ce881135c45b7ef91eb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -49,8 +49,31 @@
 
     Examples
     --------
+    
+    In this example, the camera is set up using defaults that are chosen
+    to be reasonable for the Dataset that is passed in.
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
     >>> cam = Camera(ds)
 
+    Here, we set the camera properties manually:
+
+    >>> import yt
+    >>> import numpy as np
+    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> cam = Camera()
+    >>> cam.position = np.array([0.5, 0.5, -1.0])
+    >>> cam.focus = np.array([0.5, 0.5, 0.0])
+    >>> cam.north_vector = np.array([1.0, 0.0, 0.0])
+
+    Finally, we create a camera with a non-default lens:
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> cam = Camera(ds, lens_type='perspective')
+
     """
 
     _moved = True


https://bitbucket.org/yt_analysis/yt/commits/2143f1d065e9/
Changeset:   2143f1d065e9
Branch:      yt
User:        atmyers
Date:        2015-10-20 05:04:12+00:00
Summary:     Better scene examples
Affected #:  1 file

diff -r 3f4e2f52fcf9322adf7e9ce881135c45b7ef91eb -r 2143f1d065e97e356169fff7c208dc8a4266fb79 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -41,7 +41,31 @@
 
     Examples
     --------
+
+    This example shows how to create an empty scene and add a VolumeSource
+    and a Camera.
+
+    >>> import yt
+    >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
     >>> sc = Scene()
+    >>> source = VolumeSource(ds.all_data(), 'density')
+    >>> sc.add_source(source)
+    >>> cam = Camera(ds)
+    >>> sc.camera = cam
+    >>> im = sc.render()
+
+    Alternatively, you can use the create_scene function to set up defaults 
+    and then modify the Scene later:
+
+    >>> import yt
+    >>> import numpy as np
+    >>> from yt.visualization.volume_rendering.api import PointSource
+    >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+    >>> 
+    >>> sc = yt.create_scene(ds)
+    >>> # Modify camera, sources, etc...
+    >>> im = sc.render()
 
     """
 
@@ -125,7 +149,7 @@
         --------
         >>> sc = Scene()
         >>> # Add sources/camera/etc
-        >>> im = sc.render(sigma_clip=4)
+        >>> im = sc.render(sigma_clip=4.0)
         >>> sc.save()
 
         """
@@ -225,7 +249,7 @@
         --------
         >>> sc = Scene()
         >>> # Add sources/camera/etc
-        >>> im = sc.composite(')
+        >>> im = sc.composite()
 
         """
         if camera is None:


https://bitbucket.org/yt_analysis/yt/commits/a063fbc52c2a/
Changeset:   a063fbc52c2a
Branch:      yt
User:        atmyers
Date:        2015-10-20 05:22:31+00:00
Summary:     adding some more error-checking
Affected #:  1 file

diff -r 2143f1d065e97e356169fff7c208dc8a4266fb79 -r a063fbc52c2a2d3c6d62fb8c0916db4bd04a16ae yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -446,7 +446,7 @@
 
     >>> points = PointSource(vertices, colors=colors)
     >>> sc.add_source(points)
-
+    
     >>> im = sc.render()
 
     """
@@ -457,6 +457,9 @@
 
     def __init__(self, positions, colors=None, color_stride=1):
         assert(positions.ndim == 2 and positions.shape[1] == 3)
+        if colors is not None:
+            assert(colors.ndim == 2 and colors.shape[1] == 4)
+            assert(colors.shape[0] == positions.shape[0]) 
         self.positions = positions
         # If colors aren't individually set, make black with full opacity
         if colors is None:
@@ -562,8 +565,12 @@
     def __init__(self, positions, colors=None, color_stride=1):
         super(LineSource, self).__init__()
 
+        assert(positions.ndim == 3)
         assert(positions.shape[1] == 2)
         assert(positions.shape[2] == 3)
+        if colors is not None:
+            assert(colors.ndim == 2)
+            assert(colors.shape[1] == 4)
 
         # convert the positions to the shape expected by zlines, below
         N = positions.shape[0]
@@ -654,8 +661,13 @@
 
     """
     def __init__(self, left_edge, right_edge, color=None):
+
+        assert(left_edge.shape == (3,))
+        assert(right_edge.shape == (3,))
+        
         if color is None:
             color = np.array([1.0, 1.0, 1.0, 1.0])
+
         color = ensure_numpy_array(color)
         color.shape = (1, 4)
         corners = get_corners(left_edge.copy(), right_edge.copy())
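
These checks extend the same fail-fast idea to the color arrays and box
edges. A short sketch, again assuming the api import path from the examples
above; for a PointSource, colors must be an (npoints, 4) RGBA array with one
row per vertex:

    import numpy as np
    from yt.visualization.volume_rendering.api import PointSource

    npoints = 100
    vertices = np.random.random([npoints, 3])
    colors = np.random.random([npoints, 4])        # RGBA, one row per vertex
    points = PointSource(vertices, colors=colors)  # accepted

    try:
        PointSource(vertices, colors=np.random.random([npoints, 3]))
    except AssertionError:
        print("colors must be an (npoints, 4) RGBA array")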


https://bitbucket.org/yt_analysis/yt/commits/f49ca6e334ef/
Changeset:   f49ca6e334ef
Branch:      yt
User:        atmyers
Date:        2015-10-20 06:13:18+00:00
Summary:     full working examples for the camera class
Affected #:  1 file

diff -r a063fbc52c2a2d3c6d62fb8c0916db4bd04a16ae -r f49ca6e334eff1b7b519fa476ee647e1bcb90068 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -356,7 +356,19 @@
         Examples
         --------
 
-        >>> cam.rotate(np.pi/4)
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import \
+        ...     Scene, \
+        ...     PointSource, \
+        ...     Camera
+        >>> cam = Camera()
+        >>> # rotate the camera by pi / 4 radians:
+        >>> cam.rotate(np.pi/4.0)  
+        >>> # rotate the camera about the y-axis instead of cam.north_vector:
+        >>> cam.rotate(np.pi/4.0, np.array([0.0, 1.0, 0.0]))  
+        >>> # rotate the camera about the origin instead of its own position:
+        >>> cam.rotate(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))  
 
         """
         rotate_all = rot_vector is not None
@@ -400,8 +412,18 @@
         Examples
         --------
 
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import \
+        ...     Scene, \
+        ...     PointSource, \
+        ...     Camera
         >>> cam = Camera()
-        >>> cam.pitch(np.pi/4)
+        >>> # pitch the camera by pi / 4 radians:
+        >>> cam.pitch(np.pi/4.0)  
+        >>> # pitch the camera about the origin instead of its own position:
+        >>> cam.pitch(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
+
         """
         self.rotate(theta, rot_vector=self.unit_vectors[0], rot_center=rot_center)
 
@@ -420,8 +442,18 @@
         Examples
         --------
 
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import \
+        ...     Scene, \
+        ...     PointSource, \
+        ...     Camera
         >>> cam = Camera()
-        >>> cam.yaw(np.pi/4)
+        >>> # yaw the camera by pi / 4 radians:
+        >>> cam.yaw(np.pi/4.0)  
+        >>> # yaw the camera about the origin instead of its own position:
+        >>> cam.yaw(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
+
         """
         self.rotate(theta, rot_vector=self.unit_vectors[1], rot_center=rot_center)
 
@@ -440,8 +472,18 @@
         Examples
         --------
 
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import \
+        ...     Scene, \
+        ...     PointSource, \
+        ...     Camera
         >>> cam = Camera()
-        >>> cam.roll(np.pi/4)
+        >>> # roll the camera by pi / 4 radians:
+        >>> cam.roll(np.pi/4.0)  
+        >>> # roll the camera about the origin instead of its own position:
+        >>> cam.roll(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
+
         """
         self.rotate(theta, rot_vector=self.unit_vectors[2], rot_center=rot_center)
 
@@ -469,9 +511,16 @@
         Examples
         --------
 
+        >>> import yt
+        >>> import numpy as np
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> 
+        >>> im, sc = yt.volume_render(ds)
+        >>> cam = sc.camera
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ...     im = sc.render()
-        ...     sc.save('rotation_%04i.png' % i)
+        ... im = sc.render()
+        ... sc.save('rotation_%04i.png' % i)
+
         """
 
         dtheta = (1.0*theta)/n_steps
@@ -493,14 +542,22 @@
             The number of snapshots to make.
         exponential : boolean
             Specifies whether the move/zoom transition follows an
-            exponential path toward the destination or linear
+            exponential or a linear path toward the destination.
+            Default is False.
 
         Examples
         --------
 
-        >>> for i in cam.iter_move([0.2,0.3,0.6], 10):
+        >>> import yt
+        >>> import numpy as np
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> final_position = ds.arr([0.2, 0.3, 0.6], 'unitary')
+        >>> im, sc = yt.volume_render(ds)
+        >>> cam = sc.camera
+        >>> for i in cam.iter_move(final_position, 10):
         ...     sc.render()
         ...     sc.save("move_%04i.png" % i)
+
         """
         assert isinstance(final, YTArray)
         if exponential:
@@ -527,7 +584,17 @@
         factor : float
             The factor by which to reduce the distance to the focal point.
 
+        Examples
+        --------
+
+        >>> import yt
+        >>> from yt.visualization.volume_rendering.api import Camera
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> cam = Camera(ds)
+        >>> cam.zoom(1.1)
+
         """
+
         self.set_width(self.width / factor)
 
     def iter_zoom(self, final, n_steps):
@@ -547,9 +614,15 @@
         Examples
         --------
 
+        >>> import yt
+        >>> from yt.visualization.volume_rendering.api import Camera
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> im, sc = yt.volume_render(ds)
+        >>> cam = sc.camera
         >>> for i in cam.iter_zoom(100.0, 10):
         ...     sc.render()
         ...     sc.save("zoom_%04i.png" % i)
+
         """
         f = final**(1.0/n_steps)
         for i in xrange(n_steps):
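
The iter_zoom hunk above shows the stepping rule these iterators use: each
step multiplies the width by a constant factor f = final**(1.0/n_steps), so
after n_steps the total zoom equals the requested final factor. A rough
sketch contrasting the two schedules mentioned in the iter_move docstring
(an illustration of the idea, not yt's exact implementation):

    import numpy as np

    start, final, n_steps = 1.0, 100.0, 10

    # linear: equal additive increments toward the destination
    linear = start + (final - start) * np.arange(1, n_steps + 1) / n_steps

    # exponential: equal multiplicative factors, as in iter_zoom
    f = (final / start) ** (1.0 / n_steps)
    exponential = start * f ** np.arange(1, n_steps + 1)

    # both schedules land exactly on the requested final value
    assert np.isclose(linear[-1], final) and np.isclose(exponential[-1], final)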


https://bitbucket.org/yt_analysis/yt/commits/f8ad7aa31b5f/
Changeset:   f8ad7aa31b5f
Branch:      yt
User:        atmyers
Date:        2015-10-20 06:24:06+00:00
Summary:     full working examples for Scene
Affected #:  1 file

diff -r f49ca6e334eff1b7b519fa476ee647e1bcb90068 -r f8ad7aa31b5f6eddf27c5092319cd1d0191fe1ed yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -147,8 +147,14 @@
 
         Examples
         --------
-        >>> sc = Scene()
-        >>> # Add sources/camera/etc
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
+        >>> sc = yt.create_scene(ds)
+        >>> # Modify camera, sources, etc...
         >>> im = sc.render(sigma_clip=4.0)
         >>> sc.save()
 
@@ -183,14 +189,26 @@
 
         Examples
         --------
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
         >>> sc = yt.create_scene(ds)
-        >>> # Add sources/camera/etc
+        >>> # Modify camera, sources, etc...
         >>> sc.render()
         >>> sc.save('test.png')
 
-        # Or alternatively
+        Or alternatively:
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
         >>> sc = yt.create_scene(ds)
-        >>> # Add sources/camera/etc
+        >>> # Modify camera, sources, etc...
         >>> sc.save('test.png')
 
         """
@@ -247,8 +265,14 @@
 
         Examples
         --------
-        >>> sc = Scene()
-        >>> # Add sources/camera/etc
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
+        >>> sc = yt.create_scene(ds)
+        >>> # Modify camera, sources, etc...
         >>> im = sc.composite()
 
         """
@@ -320,6 +344,18 @@
             simulation being rendered. Used to get the domain bounds.
 
 
+        Examples
+        --------
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
+        >>> sc = yt.create_scene(ds)
+        >>> sc.annotate_domain(ds)
+        >>> im = sc.render()
+
         """
         box_source = BoxSource(ds.domain_left_edge,
                                ds.domain_right_edge,
@@ -329,6 +365,40 @@
 
     def annotate_grids(self, data_source, alpha=0.3, cmap='algae',
                        min_level=None, max_level=None):
+        r"""
+
+        Modifies this scene by drawing the edges of the AMR grids.
+        This adds a new GridSource to the scene that outlines the grids 
+        and returns the resulting Scene object.
+
+        Parameters
+        ----------
+
+        data_source: :class:`~yt.data_objects.api.DataContainer`
+            The data container that will be used to identify grids to draw.
+        alpha : float
+            The opacity of the grids to draw.
+        cmap : color map name
+            The color map to use to map resolution levels to color.
+        min_level : int, optional
+            Minimum level to draw
+        max_level : int, optional
+            Maximum level to draw
+
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
+        >>> sc = yt.create_scene(ds)
+        >>> sc.annotate_grids(ds.all_data())
+        >>> im = sc.render()
+
+        """
         grids = GridSource(data_source, alpha=alpha, cmap=cmap,
                             min_level=min_level, max_level=max_level)
         self.add_source(grids)
@@ -348,6 +418,18 @@
         alpha : float, optional
             The opacity of the vectors.
 
+        Examples
+        --------
+
+        >>> import yt
+        >>> import numpy as np
+        >>> from yt.visualization.volume_rendering.api import PointSource
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>>
+        >>> sc = yt.create_scene(ds)
+        >>> sc.annotate_axes(alpha=0.5)
+        >>> im = sc.render()
+
         """
         coords = CoordinateVectorSource(colors, alpha)
         self.add_source(coords)


https://bitbucket.org/yt_analysis/yt/commits/0dd4bfd07da7/
Changeset:   0dd4bfd07da7
Branch:      yt
User:        atmyers
Date:        2015-10-20 16:46:42+00:00
Summary:     removing a couple of unused imports from the docstring examples
Affected #:  1 file

diff -r f8ad7aa31b5f6eddf27c5092319cd1d0191fe1ed -r 0dd4bfd07da7da970fa18dfe23522ce09b238345 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -358,10 +358,7 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import \
-        ...     Scene, \
-        ...     PointSource, \
-        ...     Camera
+        >>> from yt.visualization.volume_rendering.api import Camera
         >>> cam = Camera()
         >>> # rotate the camera by pi / 4 radians:
         >>> cam.rotate(np.pi/4.0)  
@@ -414,10 +411,7 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import \
-        ...     Scene, \
-        ...     PointSource, \
-        ...     Camera
+        >>> from yt.visualization.volume_rendering.api import Camera
         >>> cam = Camera()
         >>> # pitch the camera by pi / 4 radians:
         >>> cam.pitch(np.pi/4.0)  
@@ -444,10 +438,7 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import \
-        ...     Scene, \
-        ...     PointSource, \
-        ...     Camera
+        >>> from yt.visualization.volume_rendering.api import Camera
         >>> cam = Camera()
         >>> # yaw the camera by pi / 4 radians:
         >>> cam.yaw(np.pi/4.0)  
@@ -474,10 +465,7 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import \
-        ...     Scene, \
-        ...     PointSource, \
-        ...     Camera
+        >>> from yt.visualization.volume_rendering.api import Camera
         >>> cam = Camera()
         >>> # roll the camera by pi / 4 radians:
         >>> cam.roll(np.pi/4.0)  


https://bitbucket.org/yt_analysis/yt/commits/bc0a3aa1b608/
Changeset:   bc0a3aa1b608
Branch:      yt
User:        atmyers
Date:        2015-10-20 21:33:10+00:00
Summary:     fixing a problem I introduced in scene.composite()
Affected #:  1 file

diff -r 0dd4bfd07da7da970fa18dfe23522ce09b238345 -r bc0a3aa1b608b16eeff7874ecdc6d75fb6cdd610 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -282,8 +282,8 @@
         opaque = ZBuffer(empty, np.ones(empty.shape[:2]) * np.inf)
 
         for k, source in self._iter_opaque_sources():
-            opaque = source.render(camera, zbuffer=opaque)
-            im = opaque.rgba
+            source.render(camera, zbuffer=opaque)
+            im = source.zbuffer.rgba
 
         for k, source in self._iter_transparent_sources():
             im = source.render(camera, zbuffer=opaque)
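
The problem being fixed: the previous code reassigned the shared opaque
z-buffer to the return value of render() on every pass through the loop,
clobbering the buffer that all opaque sources are meant to composite into.
After the fix, each source renders into the shared buffer and the running
image is read back from the source's own zbuffer. A self-contained numpy
sketch of the underlying z-buffer idea (an illustration only, not yt's
ZBuffer API):

    import numpy as np

    h, w = 4, 4
    rgba = np.zeros((h, w, 4))
    depth = np.full((h, w), np.inf)   # nearest hit per pixel; starts empty

    def composite(rgba, depth, src_rgba, src_depth):
        # keep the source's pixels wherever it is closer than the current hit
        closer = src_depth < depth
        rgba[closer] = src_rgba[closer]
        depth[closer] = src_depth[closer]

    red = np.zeros((h, w, 4)); red[...] = [1.0, 0.0, 0.0, 1.0]
    blue = np.zeros((h, w, 4)); blue[...] = [0.0, 0.0, 1.0, 1.0]

    composite(rgba, depth, red, np.full((h, w), 2.0))
    composite(rgba, depth, blue, np.full((h, w), 1.0))  # closer, so blue wins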


https://bitbucket.org/yt_analysis/yt/commits/48577b8fe920/
Changeset:   48577b8fe920
Branch:      yt
User:        atmyers
Date:        2015-10-21 16:05:53+00:00
Summary:     fixing a docstring indentation issue
Affected #:  1 file

diff -r bc0a3aa1b608b16eeff7874ecdc6d75fb6cdd610 -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -506,8 +506,8 @@
         >>> im, sc = yt.volume_render(ds)
         >>> cam = sc.camera
         >>> for i in cam.iter_rotate(np.pi, 10):
-        ... im = sc.render()
-        ... sc.save('rotation_%04i.png' % i)
+        ...     im = sc.render()
+        ...     sc.save('rotation_%04i.png' % i)
 
         """
 


https://bitbucket.org/yt_analysis/yt/commits/e0ba8789d21a/
Changeset:   e0ba8789d21a
Branch:      yt
User:        ngoldbaum
Date:        2015-10-21 19:47:59+00:00
Summary:     merging, fixing conflicts
Affected #:  17 files

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -40,7 +40,7 @@
 render_source.set_volume(kd_low_res)
 render_source.set_fields('density')
 sc.render()
-sc.save("v1.png")
+sc.save("v1.png", sigma_clip=6.0)
 
 # This operation was substantially faster.  Now let's modify the low resolution
 # rendering until we find something we like.
@@ -49,14 +49,14 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v2.png")
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
 tf.grey_opacity = True
-sc.render(sigma_clip=6.0)
-sc.save("v3.png")
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 #
 ## That seemed to pick out some interesting structures.  Now let's bump up the
 ## opacity.
@@ -64,13 +64,13 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
               alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v4.png")
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 #
 ## This looks pretty good, now let's go back to the full resolution AMRKDTree
 #
 render_source.set_volume(kd)
-sc.render(sigma_clip=6.0)
-sc.render("v5.png")
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # This looks great!

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -14,18 +14,18 @@
 frame = 0
 # Move to the maximum density location over 5 frames
 for _ in cam.iter_move(max_c, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Zoom in by a factor of 10 over 5 frames
 for _ in cam.iter_zoom(10.0, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1
 
 # Do a rotation over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
-    sc.render(sigma_clip=8.0)
-    sc.save('camera_movement_%04i.png' % frame)
+    sc.render()
+    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
     frame += 1

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -218,6 +218,17 @@
 
 .. yt_cookbook:: custom_transfer_function_volume_rendering.py
 
+.. _cookbook-sigma_clip:
+
+Volume Rendering with Sigma Clipping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In this recipe we output several images with different values of sigma_clip
+set in order to change the contrast of the resulting image.  See 
+:ref:`sigma_clip` for more information.
+
+.. yt_cookbook:: sigma_clip.py
+
 Zooming into an Image
 ~~~~~~~~~~~~~~~~~~~~~
 

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/custom_camera_volume_rendering.py
--- a/doc/source/cookbook/custom_camera_volume_rendering.py
+++ b/doc/source/cookbook/custom_camera_volume_rendering.py
@@ -18,5 +18,5 @@
 
 # save to disk with a custom filename and apply sigma clipping to eliminate
 # very bright pixels, producing an image with better contrast.
-sc.render(sigma_clip=4)
-sc.save('custom.png')
+sc.render()
+sc.save('custom.png', sigma_clip=4)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -12,8 +12,8 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(-3,0,4), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v1.png")
+sc.render()
+sc.save("v1.png", sigma_clip=6.0)
 
 # In this case, the default alphas used (np.logspace(-3,0,Nbins)) do not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
@@ -23,31 +23,31 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v2.png")
+sc.render()
+sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
 # start to be obscured.
 
 tf.grey_opacity = True
-sc.render(sigma_clip=6.0)
-sc.save("v3.png")
+sc.render()
+sc.save("v3.png", sigma_clip=6.0)
 
 # That looks pretty good, but let's start bumping up the opacity.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v4.png")
+sc.render()
+sc.save("v4.png", sigma_clip=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v5.png")
+sc.render()
+sc.save("v5.png", sigma_clip=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
 # layer
@@ -55,15 +55,15 @@
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
         alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
-sc.render(sigma_clip=6.0)
-sc.save("v6.png")
+sc.render()
+sc.save("v6.png", sigma_clip=6.0)
 
 # That is very opaque!  Now let's go back and see what it would look like with
 # grey_opacity = False
 
 tf.grey_opacity=False
-sc.render(sigma_clip=6.0)
-sc.save("v7.png")
+sc.render()
+sc.save("v7.png", sigma_clip=6.0)
 
 # That looks pretty different, but the main point is that the inner
 # contours are somewhat visible again.

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/sigma_clip.py
--- /dev/null
+++ b/doc/source/cookbook/sigma_clip.py
@@ -0,0 +1,17 @@
+import yt
+
+# Load the dataset.
+ds = yt.load("enzo_tiny_cosmology/RD0009/RD0009")
+
+# Create a volume rendering, which will determine data bounds, use the first
+# acceptable field in the field_list, and set up a default transfer function.
+
+# Render and save output images with different levels of sigma clipping.
+# Sigma clipping caps the brightest pixel values in a volume rendering, 
+# which affects the overall contrast of the image.
+sc = yt.create_scene(ds, field=('gas', 'density'))
+sc.render()
+sc.save('clip_0.png')
+sc.save('clip_2.png', sigma_clip=2)
+sc.save('clip_4.png', sigma_clip=4)
+sc.save('clip_6.png', sigma_clip=6)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -6,5 +6,5 @@
 # Create a volume rendering, which will determine data bounds, use the first
 # acceptable field in the field_list, and set up a default transfer function.
 
-# This will save a file named 'data0043_density_volume_rendered.png' to disk.
+# This will save a file named 'data0043_Render_density.png' to disk.
 im, sc = yt.volume_render(ds, field=('gas', 'density'))

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -34,8 +34,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_plane-parallel.png')
+sc.render()
+sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
 cam = Camera(ds, lens_type='perspective')
@@ -51,8 +51,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_perspective.png')
+sc.render()
+sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
 cam = Camera(ds, lens_type='stereo-perspective')
@@ -67,8 +67,8 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_stereo-perspective.png')
+sc.render()
+sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
@@ -82,8 +82,8 @@
 cam.lens.fov = 360.0
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_fisheye.png')
+sc.render()
+sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
 cam = Camera(ds, lens_type='spherical')
@@ -100,8 +100,8 @@
 cam.set_width(ds.domain_width * 0.5)
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_spherical.png')
+sc.render()
+sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
 cam = Camera(ds, lens_type='stereo-spherical')
@@ -116,5 +116,5 @@
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
 sc.camera = cam
 sc.add_source(vol)
-sc.render(sigma_clip=6.0)
-sc.save('lens_stereo-spherical.png')
+sc.render()
+sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -167,20 +167,21 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 If your images appear to be too dark, you can try using the ``sigma_clip``
-keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.render` 
-or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` functions.  
-Because the brightness range in an image is scaled to match the range of 
-emissivity values of underlying rendering, if you have a few really 
+keyword in the :meth:`~yt.visualization.volume_rendering.scene.Scene.save` 
+or :func:`~yt.visualization.volume_rendering.volume_rendering.volume_render` 
+functions.  Because the brightness range in an image is scaled to match the 
+range of emissivity values of the underlying rendering, if you have a few really 
 high-emissivity points, they will scale the rest of your image to be quite 
 dark.  ``sigma_clip = N`` can address this by removing values that are more
 than ``N`` standard deviations brighter than the mean of your image.  
 Typically, a choice of 4 to 6 will help dramatically with your resulting image.
+See the cookbook recipe :ref:`cookbook-sigma_clip` for a demonstration.
 
 .. python-script::
 
   sc = yt.create_scene(ds, 'density')
-  sc.render(sigma_clip=4)
-  sc.save()
+  sc.render()
+  sc.save(sigma_clip=4)
 
 .. _transfer_functions:
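
In plain numpy terms, the clipping that sigma_clip applies amounts to
something like the following (a sketch of the operation described above,
not the exact write_png code path):

    import numpy as np

    def sigma_clip_image(im, sigma_clip):
        # cap values more than sigma_clip standard deviations above the mean
        ceiling = im.mean() + sigma_clip * im.std()
        return np.minimum(im, ceiling)

    im = np.random.lognormal(size=(256, 256))   # a few very bright outliers
    clipped = sigma_clip_image(im, 4.0)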
 

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -678,8 +678,8 @@
     def run(self):
         tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
         os.close(tmpfd)
-        self.scene.render(sigma_clip=1.0)
-        self.scene.save(tmpname)
+        self.scene.render()
+        self.scene.save(tmpname, sigma_clip=1.0)
         image = mpimg.imread(tmpname)
         os.remove(tmpname)
         return [zlib.compress(image.dumps())]

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -124,7 +124,7 @@
 
         return self
 
-    def render(self, sigma_clip=None, camera=None):
+    def render(self, camera=None):
         r"""Render all sources in the Scene.
 
         Use the current state of the Scene object to render all sources
@@ -133,10 +133,6 @@
 
         Parameters
         ----------
-        sigma_clip: float, optional
-            Image will be clipped before saving to the standard deviation
-            of the image multiplied by this value.  Useful for enhancing
-            images. Default: None
         camera: :class:`Camera`, optional
             If specified, use a different :class:`Camera` to render the scene.
 
@@ -155,8 +151,8 @@
         >>>
         >>> sc = yt.create_scene(ds)
         >>> # Modify camera, sources, etc...
-        >>> im = sc.render(sigma_clip=4.0)
-        >>> sc.save()
+        >>> im = sc.render()
+        >>> sc.save(sigma_clip=4.0)
 
         """
         mylog.info("Rendering scene (Can take a while).")
@@ -168,7 +164,7 @@
         self.last_render = bmp
         return bmp
 
-    def save(self, fname=None):
+    def save(self, fname=None, sigma_clip=None):
         r"""Saves the most recently rendered image of the Scene to disk.
 
         Once you have created a scene and rendered that scene to an image 
@@ -182,6 +178,13 @@
             If specified, save the rendering as a bitmap to the file "fname".
             If unspecified, it creates a default based on the dataset filename.
             Default: None
+        sigma_clip: float, optional
+            Image values greater than this number times the standard deviation
+            plus the mean of the image will be clipped before saving. Useful 
+            for enhancing images as it gets rid of rare high pixel values. 
+            Default: None
+
+            i.e. vals = min(vals, mean + sigma_clip * std_dev)
 
         Returns
         -------
@@ -198,7 +201,7 @@
         >>> sc = yt.create_scene(ds)
         >>> # Modify camera, sources, etc...
         >>> sc.render()
-        >>> sc.save('test.png')
+        >>> sc.save('test.png', sigma_clip=4)
 
         Or alternatively:
 
@@ -208,8 +211,10 @@
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
         >>>
         >>> sc = yt.create_scene(ds)
-        >>> # Modify camera, sources, etc...
-        >>> sc.save('test.png')
+        >>> # save with different sigma clipping values
+        >>> sc.save('raw.png')
+        >>> sc.save('clipped_2.png', sigma_clip=2)
+        >>> sc.save('clipped_4.png', sigma_clip=4)
 
         """
         if fname is None:
@@ -236,7 +241,7 @@
             self.render()
 
         mylog.info("Saving render %s", fname)
-        self.last_render.write_png(fname)
+        self.last_render.write_png(fname, sigma_clip=sigma_clip)
  
     def _validate(self):
         r"""Validate the current state of the scene."""

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
--- a/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
+++ b/yt/visualization/volume_rendering/tests/rotation_volume_rendering.py
@@ -21,5 +21,5 @@
 frames = 10
 for i in range(frames):
     sc.camera.yaw(angle/frames)
-    sc.render(sigma_clip=6.0)
-    sc.save('test_rot_%04i.png' % i)
+    sc.render()
+    sc.save('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -55,8 +55,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_perspective_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_stereoperspective_lens(self):
         sc = Scene()
@@ -68,8 +68,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_stereoperspective_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_stereoperspective_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_fisheye_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -86,8 +86,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_fisheye_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_fisheye_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_plane_lens(self):
         dd = self.ds.sphere(self.ds.domain_center,
@@ -102,8 +102,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_plane_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_plane_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_spherical_lens(self):
         sc = Scene()
@@ -115,8 +115,8 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_spherical_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_spherical_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_stereospherical_lens(self):
         w = (self.ds.domain_width).in_units('code_length')
@@ -130,5 +130,5 @@
         tf.grey_opacity = True
         sc.camera = cam
         sc.add_source(vol)
-        sc.render(sigma_clip=6.0)
-        sc.save('test_stereospherical_%s.png' % self.field[1])
+        sc.render()
+        sc.save('test_stereospherical_%s.png' % self.field[1], sigma_clip=6.0)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/tests/test_scene.py
--- a/yt/visualization/volume_rendering/tests/test_scene.py
+++ b/yt/visualization/volume_rendering/tests/test_scene.py
@@ -76,11 +76,11 @@
         mi_bound = ((ma-mi)*(0.10))+mi
         ma_bound = ((ma-mi)*(0.90))+mi
         tf.map_to_colormap(mi_bound, ma_bound,  scale=0.01, colormap='Reds_r')
-        sc.render(sigma_clip=6.0)
-        sc.save('test_scene.png')
+        sc.render()
+        sc.save('test_scene.png', sigma_clip=6.0)
 
         nrot = 2 
         for i in range(nrot):
             sc.camera.pitch(2*np.pi/nrot)
-            sc.render(sigma_clip=6.0)
-            sc.save('test_rot_%04i.png' % i)
+            sc.render()
+            sc.save('test_rot_%04i.png' % i, sigma_clip=6.0)

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/tests/test_sigma_clip.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_sigma_clip.py
@@ -0,0 +1,54 @@
+"""
+Test Simple Volume Rendering Scene
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import tempfile
+import shutil
+import yt
+from yt.testing import fake_random_ds
+from unittest import TestCase
+
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+class SigmaClipTest(TestCase):
+    def setUp(self):
+        if use_tmpdir:
+            self.curdir = os.getcwd()
+            # Perform I/O in a safe place instead of the yt main dir
+            self.tmpdir = tempfile.mkdtemp()
+            os.chdir(self.tmpdir)
+        else:
+            self.curdir, self.tmpdir = None, None
+
+    def tearDown(self):
+        if use_tmpdir:
+            os.chdir(self.curdir)
+            shutil.rmtree(self.tmpdir)
+
+    def test_sigma_clip(self):
+        ds = fake_random_ds(32)
+        sc = yt.create_scene(ds)
+        im = sc.render()
+        sc.save('raw.png')
+        sc.save('clip_2.png', sigma_clip=2)
+        sc.save('clip_4.png', sigma_clip=4.0)
+        print(sc)
+        return im, sc

diff -r 48577b8fe920eafe3eaa2e2a4ab69eef13387f26 -r e0ba8789d21ad41eb928ce34296ae4677e8d4a33 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -113,6 +113,6 @@
     >>> im, sc = yt.volume_render(ds, fname='test.png', sigma_clip=4.0)
     """
     sc = create_scene(data_source, field=field)
-    im = sc.render(sigma_clip=sigma_clip)
-    sc.save(fname=fname)
+    im = sc.render()
+    sc.save(fname=fname, sigma_clip=sigma_clip)
     return im, sc
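
The net effect of this merge on user code: sigma_clip moves from render()
to save(), so a single render can be written out at several contrast levels
without re-rendering. The pattern from the recipes above becomes:

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sc = yt.create_scene(ds)
    sc.render()                                # render once
    sc.save('raw.png')                         # unclipped
    sc.save('clipped_4.png', sigma_clip=4.0)   # clip at save time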

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


