[yt-svn] commit/yt: ngoldbaum: Merged in ngoldbaum/yt (pull request #2016)

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Mar 7 15:25:45 PST 2016


1 new commit in yt:

https://bitbucket.org/yt_analysis/yt/commits/bdb4a8a9139e/
Changeset:   bdb4a8a9139e
Branch:      yt
User:        ngoldbaum
Date:        2016-03-07 23:25:36+00:00
Summary:     Merged in ngoldbaum/yt (pull request #2016)

VR scene/camera unit overhaul. Closes #1132
Affected #:  24 files
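
The change replaces direct Camera construction with Scene.add_camera, so cameras are created already attached to a scene and carry its unit registry. A minimal sketch of the new workflow, assuming the IsolatedGalaxy sample dataset used in the docstrings below:

    import yt
    from yt.visualization.volume_rendering.api import Scene, VolumeSource

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sc = Scene()
    sc.add_source(VolumeSource(ds.all_data(), ('gas', 'density')))
    # add_camera replaces the old "cam = Camera(ds); sc.camera = cam" pattern
    cam = sc.add_camera(ds, lens_type='perspective')
    cam.set_width(ds.domain_width * 0.5)
    sc.render()
    sc.save('render.png', sigma_clip=6.0)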

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -1,5 +1,5 @@
 import yt
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 
 field = ("gas", "density")
@@ -19,7 +19,7 @@
 tf.grey_opacity = True
 
 # Plane-parallel lens
-cam = Camera(ds, lens_type='plane-parallel')
+cam = sc.add_camera(ds, lens_type='plane-parallel')
 # Set the resolution of the final projection.
 cam.resolution = [250, 250]
 # Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
@@ -32,13 +32,12 @@
 # Set the width of the camera, where width[0] and width[1] specify the length and
 # height of the final projection, while width[2] is not used by the plane-parallel lens.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
-cam = Camera(ds, lens_type='perspective')
+cam = sc.add_camera(ds, lens_type='perspective')
 cam.resolution = [250, 250]
 # Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
 # specified by camera width) along the positive x direction.
@@ -49,13 +48,12 @@
 # height of the final projection, while width[2] specifies the distance between the
 # camera and the final image.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
-cam = Camera(ds, lens_type='stereo-perspective')
+cam = sc.add_camera(ds, lens_type='stereo-perspective')
 # Set the size ratio of the final projection to be 2:1, since the stereo-perspective lens
 # will generate the final image with the left-eye and right-eye views joined together.
 cam.resolution = [500, 250]
@@ -65,14 +63,13 @@
 cam.set_width(ds.domain_width*0.5)
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-cam = Camera(dd, lens_type='fisheye')
+cam = sc.add_camera(dd, lens_type='fisheye')
 cam.resolution = [250, 250]
 v, c = ds.find_max(field)
 cam.set_position(c - 0.0005 * ds.domain_width)
@@ -80,13 +77,12 @@
                        north_vector=north_vector)
 cam.set_width(ds.domain_width)
 cam.lens.fov = 360.0
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
-cam = Camera(ds, lens_type='spherical')
+cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since the spherical lens
 # will generate the final image with a length of 2*pi and a height of pi.
 cam.resolution = [500, 250]
@@ -97,13 +93,12 @@
                        north_vector=north_vector)
 # For the (stereo)spherical camera, the camera width is not used since the entire volume
 # will be rendered
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
-cam = Camera(ds, lens_type='stereo-spherical')
+cam = sc.add_camera(ds, lens_type='stereo-spherical')
 # Set the size ratio of the final projection to be 4:1, since the stereo-spherical lens
 # will generate the final image with the left-eye and right-eye views joined together.
 cam.resolution = [1000, 250]
@@ -114,7 +109,6 @@
 # will be rendered
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -22,7 +22,6 @@
     "from IPython.core.display import Image\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
     "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
-    "from yt.visualization.volume_rendering.camera import Camera\n",
     "\n",
     "def showme(im):\n",
     "    # screen out NaNs\n",

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -18,7 +18,7 @@
     "import yt\n",
     "import numpy as np\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
-    "from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource\n",
+    "from yt.visualization.volume_rendering.api import Scene, VolumeSource\n",
     "\n",
     "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
     "sc = yt.create_scene(ds)"
@@ -199,7 +199,7 @@
    },
    "outputs": [],
    "source": [
-    "cam = Camera(ds, lens_type='perspective')\n",
+    "cam = sc.add_camera(ds, lens_type='perspective')\n",
     "\n",
     "# Standing at (x=0.05, y=0.5, z=0.5), we look at the area of x>0.05 (with some open angle\n",
     "# specified by camera width) along the positive x direction.\n",
@@ -213,7 +213,6 @@
     "# The width determines the opening angle\n",
     "cam.set_width(ds.domain_width * 0.5)\n",
     "\n",
-    "sc.camera = cam\n",
     "print (sc.camera)"
    ]
   },

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -292,7 +292,6 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import Camera
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -304,15 +303,12 @@
     ms.cmap = 'Eos A'
    
     # Create a perspective Camera
-    cam = Camera(ds, lens_type='perspective')
+    cam = sc.add_camera(ds, lens_type='perspective')
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
     north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
-    # tell our scene to use it
-    sc.camera = cam
-   
     # increase the default resolution
     cam.resolution = (800, 800)
    
@@ -329,7 +325,7 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    from yt.visualization.volume_rendering.api import MeshSource, Scene
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -337,16 +333,13 @@
     sc = Scene()
 
     # set up our Camera
-    cam = Camera(ds)
+    cam = sc.add_camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
                      ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
     cam.set_width = ds.arr([8.0, 8.0, 8.0], 'code_length')
     cam.resolution = (800, 800)
 
-    # tell the scene to use it
-    sc.camera = cam
-
     # create two distinct MeshSources from 'connect1' and 'connect2'
     ms1 = MeshSource(ds, ('connect1', 'diffused'))
     ms2 = MeshSource(ds, ('connect2', 'diffused'))
@@ -407,7 +400,7 @@
 .. code-block:: python
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    from yt.visualization.volume_rendering.api import MeshSource
     import pylab as plt
 
     NUM_STEPS = 127
@@ -432,7 +425,7 @@
 	# set up the camera here. these values were arrived at by
 	# calling pitch, yaw, and roll in the notebook until I
 	# got the angle I wanted.
-	cam = Camera(ds)
+	cam = sc.add_camera(ds)
 	camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
 	cam.focus = ds.domain_center
 	north_vector = ds.arr([-0.3032476, -0.71782557, 0.62671153], 'dimensionless')

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -15,10 +15,11 @@
     ThermalPhotonModel, PhotonList, EventList, \
     convert_old_file, merge_files
 from yt.config import ytcfg
-from yt.testing import requires_file
+from yt.testing import \
+    requires_file, \
+    assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-from numpy.testing import assert_array_equal
 from numpy.random import RandomState
 from yt.units.yt_array import uconcatenate
 import os
@@ -117,11 +118,11 @@
             arr1 = photons1[k]
             arr2 = photons2[k]
             arr3 = photons3[k]
-        yield assert_array_equal, arr1, arr2
-        yield assert_array_equal, arr1, arr3
+        assert_almost_equal(arr1, arr2)
+        assert_almost_equal(arr1, arr3)
     for k in events1.keys():
-        yield assert_array_equal, events1[k], events2[k]
-        yield assert_array_equal, events1[k], events3[k]
+        assert_almost_equal(events1[k], events2[k])
+        assert_almost_equal(events1[k], events3[k])
 
     nevents = 0
 

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -82,6 +82,7 @@
     def _set_units(self):
         self.unit_registry = UnitRegistry()
         self.unit_registry.add("code_time", 1.0, dimensions.time)
+        self.unit_registry.add("code_length", 1.0, dimensions.length)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
             self.cosmology = \
@@ -107,6 +108,7 @@
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
+        self.unit_registry.modify("code_length", self.length_unit)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,

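With code_length registered on the simulation's unit registry (alongside the existing code_time), length quantities can be constructed directly from an Enzo simulation object. A rough sketch, assuming the enzo_tiny_cosmology sample parameter file:

    import yt

    # parameter file name is an assumption; substitute your own simulation
    sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
    # 'code_length' now resolves and is rescaled by the simulation's length_unit
    width = sim.quan(1.0, 'code_length')
    print(width.in_units('Mpc'))
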
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -171,7 +171,7 @@
         units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
         particle_fields=None, particle_field_units=None,
         negative = False, nprocs = 1, particles = 0, length_unit=1.0,
-        unit_system="cgs"):
+        unit_system="cgs", bbox=None):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
@@ -208,7 +208,7 @@
             data['io', 'particle_mass'] = (np.random.random(particles), 'g')
         data['number_of_particles'] = particles
     ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs,
-                           unit_system=unit_system)
+                           unit_system=unit_system, bbox=bbox)
     return ug
 
 _geom_transforms = {

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -346,7 +346,8 @@
                 if registry is None:
                     pass
                 else:
-                    input_array.units.registry = registry
+                    units = Unit(str(input_array.units), registry=registry)
+                    input_array.units = units
             elif isinstance(input_units, Unit):
                 input_array.units = input_units
             else:
@@ -357,7 +358,7 @@
         elif iterable(input_array) and input_array:
             if isinstance(input_array[0], YTArray):
                 return YTArray(np.array(input_array, dtype=dtype),
-                               input_array[0].units)
+                               input_array[0].units, registry=registry)
 
         # Input array is an already formed ndarray instance
         # We first cast to be our class type
@@ -369,7 +370,10 @@
             # Nothing provided. Make dimensionless...
             units = Unit()
         elif isinstance(input_units, Unit):
-            units = input_units
+            if registry and registry is not input_units.registry:
+                units = Unit(str(input_units), registry=registry)
+            else:
+                units = input_units
         else:
             # units kwarg set, but it's not a Unit object.
             # don't handle all the cases here, let the Unit class handle if
@@ -1408,6 +1412,31 @@
     v = validate_numpy_wrapper_units(v, [arr1, arr2])
     return v
 
+def unorm(data):
+    """Matrix or vector norm that preserves units
+
+    This is a wrapper around np.linalg.norm that preserves units.
+    """
+    return YTArray(np.linalg.norm(data), data.units)
+
+def uvstack(arrs):
+    """Stack arrays in sequence vertically (row wise) while preserving units
+
+    This is a wrapper around np.vstack that preserves units.
+    """
+    v = np.vstack(arrs)
+    v = validate_numpy_wrapper_units(v, arrs)
+    return v
+
+def uhstack(arrs):
+    """Stack arrays in sequence horizontally (column wise) while preserving units
+
+    This is a wrapper around np.hstack that preserves units.
+    """
+    v = np.hstack(arrs)
+    v = validate_numpy_wrapper_units(v, arrs)
+    return v
+
 def array_like_field(data, x, field):
     field = data._determine_fields(field)[0]
     if isinstance(field, tuple):

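The new unit-preserving wrappers can stand in for np.linalg.norm, np.vstack, and np.hstack wherever stripping units would be a problem. A quick sketch:

    from yt.units.yt_array import YTArray, unorm, uvstack, uhstack

    a = YTArray([1.0, 2.0, 2.0], 'cm')
    b = YTArray([3.0, 4.0, 0.0], 'cm')

    print(unorm(a))          # 3.0 cm; np.linalg.norm with the units reattached
    print(uvstack([a, b]))   # 2x3 YTArray, still in cm
    print(uhstack([a, b]))   # length-6 YTArray, still in cm
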
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -826,7 +826,7 @@
                                           verbose=True)
         for k in new_result:
             if self.decimals is None:
-                assert_equal(new_result[k], old_result[k])
+                assert_almost_equal(new_result[k], old_result[k])
             else:
                 assert_allclose_units(new_result[k], old_result[k],
                                       10**(-self.decimals))

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- a/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -2,7 +2,7 @@
 import numpy as np
 from yt.utilities.lib.bounding_volume_hierarchy import BVH, \
     test_ray_trace
-from yt.visualization.volume_rendering.api import Camera
+from yt.visualization.volume_rendering.api import Camera, Scene
 from yt.testing import requires_file
 
 
@@ -12,13 +12,13 @@
     W = np.array([8.0, 8.0])
     N = np.array([800, 800])
     dx = W / N
-    
+
     x_points = np.linspace((-N[0]/2 + 0.5)*dx[0], (N[0]/2 - 0.5)*dx[0], N[0])
     y_points = np.linspace((-N[1]/2 + 0.5)*dx[1], (N[1]/2 - 0.5)*dx[1], N[1])
-    
+
     X, Y = np.meshgrid(x_points, y_points)
-    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()]) 
-    vec_origins = result.T + camera.position
+    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()])
+    vec_origins = camera.scene.arr(result.T, 'unitary') + camera.position
     return np.array(vec_origins), np.array(normal_vector)
 
 
@@ -36,7 +36,7 @@
 
     bvh = BVH(vertices, indices, field_data)
 
-    cam = Camera()
+    cam = Camera(Scene())
     cam.set_position(np.array([8.0, 8.0, 8.0]))
     cam.focus = np.array([0.0, 0.0, 0.0])
     origins, direction = get_rays(cam)

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -11,19 +11,49 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from yt.funcs import iterable, mylog, ensure_numpy_array
+from yt.funcs import iterable, ensure_numpy_array
 from yt.utilities.orientation import Orientation
-from yt.units.yt_array import YTArray
-from yt.units.unit_registry import UnitParseError
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.extern.six import string_types
 from .utils import data_source_or_all
-from .lens import lenses
+from .lens import \
+    lenses, \
+    Lens
 import numpy as np
+from numbers import Number as numeric_type
+
+def _sanitize_camera_property_units(value, scene):
+    if iterable(value):
+        if len(value) == 1:
+            return _sanitize_camera_property_units(value[0], scene)
+        elif isinstance(value, YTArray) and len(value) == 3:
+            return scene.arr(value).in_units('unitary')
+        elif (len(value) == 2 and isinstance(value[0], numeric_type)
+              and isinstance(value[1], string_types)):
+            return scene.arr([scene.arr(value[0], value[1]).in_units('unitary')]*3)
+        if len(value) == 3:
+            if all([iterable(v) for v in value]):
+                if all([isinstance(v[0], numeric_type) and
+                        isinstance(v[1], string_types) for v in value]):
+                    return scene.arr(
+                        [scene.arr(v[0], v[1]) for v in value])
+                else:
+                    raise RuntimeError(
+                        "Cannot set camera width to invalid value '%s'" % (value, ))
+            return scene.arr(value, 'unitary')
+    else:
+        if isinstance(value, (YTQuantity, YTArray)):
+            return scene.arr([value.d]*3, value.units).in_units('unitary')
+        elif isinstance(value, numeric_type):
+            return scene.arr([value]*3, 'unitary')
+    raise RuntimeError(
+        "Cannot set camera width to invalid value '%s'" % (value, ))
 
 
 class Camera(Orientation):
-
     r"""A representation of a point of view into a Scene.
 
     It is defined by a position (the location of the camera
@@ -35,6 +65,8 @@
 
     Parameters
     ----------
+    scene: A :class:`yt.visualization.volume_rendering.scene.Scene` object
+        A scene object that the camera will be attached to.
     data_source: :class:`AMR3DData` or :class:`Dataset`, optional
         This is the source to be rendered, which can be any arbitrary yt
         data object or dataset.
@@ -50,20 +82,22 @@
 
     Examples
     --------
-    
+
     In this example, the camera is set using defaults that are chosen
     to be reasonable for the argument Dataset.
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> from yt.visualization.volume_rendering.api import Scene
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> cam = Camera(ds)
+    >>> sc = Scene()
+    >>> cam = sc.add_camera(ds)
 
     Here, we set the camera properties manually:
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
-    >>> cam = Camera()
+    >>> from yt.visualization.volume_rendering.api import Scene
+    >>> sc = Scene()
+    >>> cam = sc.add_camera()
     >>> cam.position = np.array([0.5, 0.5, -1.0])
     >>> cam.focus = np.array([0.5, 0.5, 0.0])
     >>> cam.north_vector = np.array([1.0, 0.0, 0.0])
@@ -71,9 +105,10 @@
     Finally, we create a camera with a non-default lens:
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> from yt.visualization.volume_rendering.api import Scene
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> cam = Camera(ds, lens_type='perspective')
+    >>> sc = Scene()
+    >>> cam = sc.add_camera(ds, lens_type='perspective')
 
     """
 
@@ -83,24 +118,27 @@
     _position = None
     _resolution = None
 
-    def __init__(self, data_source=None, lens_type='plane-parallel',
+    def __init__(self, scene, data_source=None, lens_type='plane-parallel',
                  auto=False):
-        """Initialize a Camera Instance"""
+        self.scene = scene
         self.lens = None
         self.north_vector = None
         self.normal_vector = None
         self.light = None
+        self.data_source = data_source_or_all(data_source)
         self._resolution = (512, 512)
-        self._width = np.array([1.0, 1.0, 1.0])
-        self._focus = np.array([0.0]*3)
-        self._position = np.array([1.0]*3)
-        if data_source is not None:
-            data_source = data_source_or_all(data_source)
-            self._focus = data_source.ds.domain_center
-            self._position = data_source.ds.domain_right_edge
-            self._width = 1.5*data_source.ds.domain_width
-            self._domain_center = data_source.ds.domain_center
-            self._domain_width = data_source.ds.domain_width
+        if self.data_source is not None:
+            self.scene.set_new_unit_registry(self.data_source.ds.unit_registry)
+            self._focus = self.data_source.ds.domain_center
+            self._position = self.data_source.ds.domain_right_edge
+            self._width = 1.5*self.data_source.ds.domain_width
+            self._domain_center = self.data_source.ds.domain_center
+            self._domain_width = self.data_source.ds.domain_width
+        else:
+            self._focus = scene.arr([0.0, 0.0, 0.0], 'unitary')
+            self._width = scene.arr([1.0, 1.0, 1.0], 'unitary')
+            self._position = scene.arr([1.0, 1.0, 1.0], 'unitary')
+
         if auto:
             self.set_defaults_from_data_source(data_source)
 
@@ -110,20 +148,27 @@
         self.set_lens(lens_type)
 
     def position():
-        doc = '''The position is the location of the camera in
-               the coordinate system of the simulation. This needs
-               to be either a YTArray or a numpy array. If it is a 
-               numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units 
-               automatically. '''
+        doc = '''
+        The location of the camera. 
+
+        Parameters
+        ----------
+
+        position : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the position is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+        '''
 
         def fget(self):
             return self._position
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._position = value
+            position = _sanitize_camera_property_units(value, self.scene)
+            if np.array_equal(position, self.focus):
+                raise RuntimeError(
+                    'Cannot set the camera focus and position to the same value')
+            self._position = position
             self.switch_orientation()
 
         def fdel(self):
@@ -132,18 +177,24 @@
     position = property(**position())
 
     def width():
-        doc = '''The width of the region that will be seen in the image. 
-               This needs to be either a YTArray or a numpy array. If it 
-               is a numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units automatically. '''
+        doc = '''The width of the region that will be seen in the image.
+
+        Parameters
+        ----------
+
+        width : number, YTQuantity, iterable, or 3 element YTArray
+            The width of the volume rendering in the horizontal, vertical, and
+            depth directions. If a scalar, assumes that the width is the same in
+            all three directions. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+        '''
 
         def fget(self):
             return self._width
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._width = value
+            width = _sanitize_camera_property_units(value, self.scene)
+            self._width = width
             self.switch_orientation()
 
         def fdel(self):
@@ -153,19 +204,28 @@
     width = property(**width())
 
     def focus():
-        doc = '''The focus defines the point the Camera is pointed at. This needs
-               to be either a YTArray or a numpy array. If it is a 
-               numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units 
-               automatically. '''
+        doc = '''
+        The focus defines the point the Camera is pointed at.
+
+        Parameters
+        ----------
+
+        focus : number, YTQuantity, iterable, or 3 element YTArray
+            The point the Camera is pointed at. If a scalar, assumes that
+            the focus is the same in all three coordinates. If an
+            iterable, must contain only scalars or (length, unit)
+            tuples.
+        '''
 
         def fget(self):
             return self._focus
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._focus = value
+            focus = _sanitize_camera_property_units(value, self.scene)
+            if np.array_equal(focus, self.position):
+                raise RuntimeError(
+                    'Cannot set the camera focus and position to the same value')
+            self._focus = focus
             self.switch_orientation()
 
         def fdel(self):
@@ -175,14 +235,15 @@
 
     def resolution():
         doc = '''The resolution is the number of pixels in the image that
-               will be produced. '''
+               will be produced. Must be a 2-tuple of integers or an integer.'''
 
         def fget(self):
             return self._resolution
 
         def fset(self, value):
             if iterable(value):
-                assert (len(value) == 2)
+                if len(value) != 2:
+                    raise RuntimeError
             else:
                 value = (value, value)
             self._resolution = value
@@ -193,6 +254,19 @@
         return locals()
     resolution = property(**resolution())
 
+    def set_resolution(self, resolution):
+        """
+        The resolution is the number of pixels in the image that
+        will be produced. Must be a 2-tuple of integers or an integer.
+        """
+        self.resolution = resolution
+
+    def get_resolution(self):
+        """
+        Returns the resolution of the volume rendering
+        """
+        return self.resolution
+
     def _get_sampler_params(self, render_source):
         lens_params = self.lens._get_sampler_params(self, render_source)
         lens_params.update(width=self.width)
@@ -214,10 +288,14 @@
             'stereo-spherical'
 
         """
-        if lens_type not in lenses:
-            mylog.error("Lens type not available")
-            raise RuntimeError()
-        self.lens = lenses[lens_type]()
+        if isinstance(lens_type, Lens):
+            self.lens = lens_type
+        elif lens_type not in lenses:
+            raise RuntimeError(
+                "Lens type %s not in available list of available lens "
+                "types (%s)" % (lens_type, list(lenses.keys())))
+        else:
+            self.lens = lenses[lens_type]()
         self.lens.set_camera(self)
 
     def set_defaults_from_data_source(self, data_source):
@@ -264,41 +342,61 @@
         Parameters
         ----------
 
-        width : YTQuantity or 3 element YTArray
+        width : number, YTQuantity, iterable, or 3 element YTArray
             The width of the volume rendering in the horizontal, vertical, and
             depth directions. If a scalar, assumes that the width is the same in
-            all three directions.
+            all three directions. If an iterable, must contain only scalars or
+            (length, unit) tuples.
         """
-        try:
-            width = width.in_units('code_length')
-        except (AttributeError, UnitParseError):
-            raise ValueError(
-                'Volume rendering width must be a YTArray that can be '
-                'converted to code units')
-
-        if not iterable(width):
-            width = YTArray([width.d]*3, width.units)  # Can't get code units.
         self.width = width
         self.switch_orientation()
 
+    def get_width(self):
+        """Return the current camera width"""
+        return self.width
+
     def set_position(self, position, north_vector=None):
         r"""Set the position of the camera.
 
         Parameters
         ----------
 
-        position : array_like
-            The new position
+        position : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the position is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+
         north_vector : array_like, optional
             The 'up' direction for the plane of rays.  If not specified,
             calculated automatically.
 
         """
-
         self.position = position
         self.switch_orientation(normal_vector=self.focus - self.position,
                                 north_vector=north_vector)
 
+    def get_position(self):
+        """Return the current camera position"""
+        return self.position
+
+    def set_focus(self, new_focus):
+        """Sets the point the Camera is pointed at.
+
+        Parameters
+        ----------
+
+        focus : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the focus is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+
+        """
+        self.focus = new_focus
+
+    def get_focus(self):
+        """Returns the current camera focus"""
+        return self.focus
+
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.
 
@@ -366,8 +464,9 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # rotate the camera by pi / 4 radians:
         >>> cam.rotate(np.pi/4.0)  
         >>> # rotate the camera about the y-axis instead of cam.north_vector:
@@ -419,10 +518,11 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # pitch the camera by pi / 4 radians:
-        >>> cam.pitch(np.pi/4.0)  
+        >>> cam.pitch(np.pi/4.0)
         >>> # pitch the camera about the origin instead of its own position:
         >>> cam.pitch(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
 
@@ -446,10 +546,11 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # yaw the camera by pi / 4 radians:
-        >>> cam.yaw(np.pi/4.0)  
+        >>> cam.yaw(np.pi/4.0)
         >>> # yaw the camera about the origin instead of its own position:
         >>> cam.yaw(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
 
@@ -473,8 +574,9 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # roll the camera by pi / 4 radians:
         >>> cam.roll(np.pi/4.0)  
         >>> # roll the camera about the origin instead of its own position:
@@ -584,9 +686,10 @@
         --------
 
         >>> import yt
-        >>> from yt.visualization.volume_rendering.api import Camera
+        >>> from yt.visualization.volume_rendering.api import Scene
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> cam = Camera(ds)
+        >>> sc = Scene()
+        >>> cam = sc.add_camera(ds)
         >>> cam.zoom(1.1)
 
         """
@@ -611,7 +714,6 @@
         --------
 
         >>> import yt
-        >>> from yt.visualization.volume_rendering.api import Camera
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
         >>> im, sc = yt.volume_render(ds)
         >>> cam = sc.camera

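With the sanitized unit handling, the camera width, position, and focus accept scalars, (value, unit) tuples, YTQuantity/YTArray values, or sequences of those, all converted into the scene's unit registry. A short sketch of the accepted forms, assuming cam = sc.add_camera(ds) as in the sketch near the top of this message:

    cam.set_width(0.5)                          # scalar -> 0.5 'unitary' on each axis
    cam.set_width((500.0, 'kpc'))               # (value, unit) tuple
    cam.set_width(ds.quan(0.3, 'Mpc'))          # YTQuantity, broadcast to all three axes
    cam.set_width([(1, 'Mpc'), (0.5, 'Mpc'), (0.25, 'Mpc')])  # per-axis tuples
    cam.set_position([0.2, 0.4, 0.6])           # plain sequence, interpreted as 'unitary'
    cam.set_focus(ds.domain_center)             # YTArray converted to 'unitary'
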
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -17,8 +17,8 @@
 from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
-from yt.units.yt_array import YTArray
 from yt.data_objects.image_array import ImageArray
+from yt.units.yt_array import unorm, uvstack
 from yt.utilities.math_utils import get_rotation_matrix
 import numpy as np
 
@@ -60,13 +60,14 @@
         unit_vectors = camera.unit_vectors
         width = camera.width
         center = camera.focus
-        self.box_vectors = YTArray([unit_vectors[0] * width[0],
-                                    unit_vectors[1] * width[1],
-                                    unit_vectors[2] * width[2]])
-        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
+
+        self.box_vectors = camera.scene.arr(
+            [unit_vectors[0] * width[0],
+             unit_vectors[1] * width[1],
+             unit_vectors[2] * width[2]])
+        self.origin = center - 0.5 * width.dot(unit_vectors)
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
-
         self.set_viewpoint(camera)
 
     def set_viewpoint(self, camera):
@@ -99,9 +100,12 @@
         else:
             image = self.new_image(camera)
 
+        vp_pos = np.concatenate(
+            [camera.inv_mat.ravel('F').d,
+             self.back_center.ravel().in_units('code_length').d])
+
         sampler_params =\
-            dict(vp_pos=np.concatenate([camera.inv_mat.ravel('F'),
-                                        self.back_center.ravel()]),
+            dict(vp_pos=vp_pos,
                  vp_dir=self.box_vectors[2],  # All the same
                  center=self.back_center,
                  bounds=(-camera.width[0] / 2.0, camera.width[0] / 2.0,
@@ -190,8 +194,9 @@
             camera.resolution[0], camera.resolution[1], 3)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width))
+
         # Rescale the ray to be long enough to cover the entire domain
         vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
             (max_length / camera.width[2])
@@ -208,7 +213,7 @@
         sampler_params =\
             dict(vp_pos=positions,
                  vp_dir=vectors,
-                 center=self.back_center.d,
+                 center=self.back_center,
                  bounds=(0.0, 1.0, 0.0, 1.0),
                  x_vec=uv,
                  y_vec=uv,
@@ -303,8 +308,8 @@
         uv = np.ones(3, dtype='float64')
 
         image = self.new_image(camera)
-        vectors_comb = np.vstack([vectors_left, vectors_right])
-        positions_comb = np.vstack([positions_left, positions_right])
+        vectors_comb = uvstack([vectors_left, vectors_right])
+        positions_comb = uvstack([positions_left, positions_right])
 
         image.shape = (camera.resolution[0], camera.resolution[1], 4)
         vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
@@ -313,7 +318,7 @@
         sampler_params =\
             dict(vp_pos=positions_comb,
                  vp_dir=vectors_comb,
-                 center=self.back_center.d,
+                 center=self.back_center,
                  bounds=(0.0, 1.0, 0.0, 1.0),
                  x_vec=uv,
                  y_vec=uv,
@@ -331,7 +336,8 @@
         north_vec = camera.unit_vectors[1]
         normal_vec = camera.unit_vectors[2]
 
-        angle_disparity = - np.arctan2(disparity, camera.width[2])
+        angle_disparity = - np.arctan2(disparity.in_units(camera.width.units),
+                                       camera.width[2])
         R = get_rotation_matrix(angle_disparity, north_vec)
 
         east_vec_rot = np.dot(R, east_vec)
@@ -363,8 +369,9 @@
             single_resolution_x, camera.resolution[1], 3)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width) + np.abs(self.disparity.d)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width)
+                      + np.abs(self.disparity))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
             (max_length / camera.width[2])
@@ -394,9 +401,9 @@
         px_right, py_right, dz_right = self._get_px_py_dz(
             camera, pos, res, self.disparity)
 
-        px = np.vstack([px_left, px_right])
-        py = np.vstack([py_left, py_right])
-        dz = np.vstack([dz_left, dz_right])
+        px = uvstack([px_left, px_right])
+        py = uvstack([py_left, py_right])
+        dz = uvstack([dz_left, dz_right])
 
         return px, py, dz
 
@@ -594,8 +601,8 @@
         vectors[:, :, 2] = np.sin(py)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = vectors * max_length
 
@@ -625,7 +632,7 @@
         sampler_params = dict(
             vp_pos=positions,
             vp_dir=vectors,
-            center=self.back_center.d,
+            center=self.back_center,
             bounds=(0.0, 1.0, 0.0, 1.0),
             x_vec=dummy,
             y_vec=dummy,
@@ -706,8 +713,9 @@
         vectors[:, :, 2] = np.sin(py)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width) + np.abs(self.disparity.d)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width)
+                      + np.abs(self.disparity))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = vectors * max_length
 
@@ -741,8 +749,8 @@
 
         dummy = np.ones(3, dtype='float64')
 
-        vectors_comb = np.vstack([vectors, vectors])
-        positions_comb = np.vstack([positions_left, positions_right])
+        vectors_comb = uvstack([vectors, vectors])
+        positions_comb = uvstack([positions_left, positions_right])
 
         image.shape = (camera.resolution[0], camera.resolution[1], 4)
         vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
@@ -751,7 +759,7 @@
         sampler_params = dict(
             vp_pos=positions_comb,
             vp_dir=vectors_comb,
-            center=self.back_center.d,
+            center=self.back_center,
             bounds=(0.0, 1.0, 0.0, 1.0),
             x_vec=dummy,
             y_vec=dummy,

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -13,7 +13,6 @@
 
 
 from .scene import Scene
-from .camera import Camera
 from .render_source import VolumeSource
 from .transfer_functions import ProjectionTransferFunction
 from .utils import data_source_or_all
@@ -149,7 +148,7 @@
         data_source.ds.field_dependencies.update(deps)
         fields = [weightfield, weight]
         vol.set_fields(fields)
-    camera = Camera(data_source)
+    camera = sc.add_camera(data_source)
     camera.set_width(width)
     if not iterable(resolution):
         resolution = [resolution]*2
@@ -172,7 +171,6 @@
     camera.switch_orientation(normal_vector,
                               north_vector)
 
-    sc.camera = camera
     sc.add_source(vol)
 
     vol.set_sampler(camera)

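off_axis_projection now builds its camera through the scene as well, while the public call signature is unchanged. A brief usage sketch, assuming the IsolatedGalaxy sample dataset:

    import numpy as np
    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    # project ('gas', 'density') along an off-axis normal vector
    image = yt.off_axis_projection(ds, ds.domain_center, [1.0, 1.0, 0.0],
                                   0.3, 512, ('gas', 'density'))
    yt.write_image(np.log10(image), 'offaxis_density.png')
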
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -114,8 +114,7 @@
     >>> sc = Scene()
     >>> source = VolumeSource(ds.all_data(), 'density')
     >>> sc.add_source(source)
-    >>> cam = Camera(ds)
-    >>> sc.camera = cam
+    >>> sc.add_camera()
     >>> im = sc.render()
 
     """
@@ -946,10 +945,10 @@
 
     def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
-        data_source = data_source_or_all(data_source)
+        self.data_source = data_source_or_all(data_source)
         corners = []
         levels = []
-        for block, mask in data_source.blocks:
+        for block, mask in self.data_source.blocks:
             block_corners = np.array([
                 [block.LeftEdge[0], block.LeftEdge[1], block.LeftEdge[2]],
                 [block.RightEdge[0], block.LeftEdge[1], block.LeftEdge[2]],
@@ -976,7 +975,7 @@
 
         colors = apply_colormap(
             levels*1.0,
-            color_bounds=[0, data_source.ds.index.max_level],
+            color_bounds=[0, self.data_source.ds.index.max_level],
             cmap_name=cmap)[0, :, :]*alpha/255.
         colors[:, 3] = alpha
 

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -12,13 +12,27 @@
 # -----------------------------------------------------------------------------
 
 
+import functools
 import numpy as np
 from collections import OrderedDict
 from yt.funcs import mylog, get_image_suffix
 from yt.extern.six import iteritems, itervalues, string_types
+from yt.units.dimensions import \
+    length
+from yt.units.unit_registry import \
+    UnitRegistry
+from yt.units.yt_array import \
+    YTQuantity, \
+    YTArray
 from .camera import Camera
-from .render_source import OpaqueSource, BoxSource, CoordinateVectorSource, \
-    GridSource, RenderSource, MeshSource
+from .render_source import \
+    OpaqueSource, \
+    BoxSource, \
+    CoordinateVectorSource, \
+    GridSource, \
+    RenderSource, \
+    MeshSource, \
+    VolumeSource
 from .zbuffer_array import ZBuffer
 from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import YTNotInsideNotebook
@@ -52,8 +66,7 @@
     >>> sc = Scene()
     >>> source = VolumeSource(ds.all_data(), 'density')
     >>> sc.add_source(source)
-    >>> cam = Camera(ds)
-    >>> sc.camera = cam
+    >>> cam = sc.add_camera()
     >>> im = sc.render()
 
     Alternatively, you can use the create_scene function to set up defaults 
@@ -70,13 +83,12 @@
 
     _current = None
     _camera = None
+    _unit_registry = None
 
     def __init__(self):
         r"""Create a new Scene instance"""
         super(Scene, self).__init__()
         self.sources = OrderedDict()
-        self.camera = None
-        # An image array containing the last rendered image of the scene
         self.last_render = None
         # A non-public attribute used to get around the fact that we can't
         # pass kwargs into _repr_png_()
@@ -122,10 +134,34 @@
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)
 
+        if isinstance(render_source, (VolumeSource, MeshSource, GridSource)):
+            self.set_new_unit_registry(
+                render_source.data_source.ds.unit_registry)
+
         self.sources[keyname] = render_source
 
         return self
 
+    def set_new_unit_registry(self, input_registry):
+        self.unit_registry = UnitRegistry(
+            add_default_symbols=False,
+            lut=input_registry.lut)
+
+        # Validate that the new unit registry makes sense
+        current_scaling = self.unit_registry['unitary'][0]
+        if current_scaling != input_registry['unitary'][0]:
+            for source in self.sources.values():
+                data_source = getattr(source, 'data_source', None)
+                if data_source is None:
+                    continue
+                scaling = data_source.ds.unit_registry['unitary'][0]
+                if scaling != current_scaling:
+                    raise NotImplementedError(
+                        "Simultaneously rendering data from datasets with "
+                        "different units is not supported"
+                    )
+
+
     def render(self, camera=None):
         r"""Render all sources in the Scene.
 
@@ -289,6 +325,69 @@
 
         return im
 
+    def add_camera(self, data_source=None, lens_type='plane-parallel',
+                   auto=False):
+        r"""Add a new camera to the Scene.
+
+        The camera is defined by a position (the location of the camera
+        in the simulation domain), a focus (the point at which the
+        camera is pointed), a width (the width of the snapshot that will
+        be taken), a resolution (the number of pixels in the image), and
+        a north_vector (the "up" direction in the resulting image). A
+        camera can use a variety of different Lens objects.
+
+        If the scene already has a camera associated with it, this function
+        will create a new camera and discard the old one.
+
+        Parameters
+        ----------
+        data_source: :class:`AMR3DData` or :class:`Dataset`, optional
+            This is the source to be rendered, which can be any arbitrary yt
+            data object or dataset.
+        lens_type: string, optional
+            This specifies the type of lens to use for rendering. Current
+            options are 'plane-parallel', 'perspective', 'stereo-perspective',
+            'fisheye', 'spherical', and 'stereo-spherical'. See
+            :class:`yt.visualization.volume_rendering.lens.Lens` for details.
+            Default: 'plane-parallel'
+        auto: boolean
+            If True, build smart defaults using the data source extent. This
+            can be time-consuming, since it iterates over the entire dataset
+            to find the positional bounds. Default: False
+
+        Examples
+        --------
+
+        In this example, the camera is set using defaults that are chosen
+        to be reasonable for the argument Dataset.
+
+        >>> import yt
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> sc = Scene()
+        >>> sc.add_camera()
+
+        Here, we set the camera properties manually:
+
+        >>> import yt
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
+        >>> cam.position = np.array([0.5, 0.5, -1.0])
+        >>> cam.focus = np.array([0.5, 0.5, 0.0])
+        >>> cam.north_vector = np.array([1.0, 0.0, 0.0])
+
+        Finally, we create a camera with a non-default lens:
+
+        >>> import yt
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> sc = Scene()
+        >>> sc.add_camera(ds, lens_type='perspective')
+
+        """
+        self._camera = Camera(self, data_source, lens_type, auto)
+        return self.camera
+
     def camera():
         doc = r"""The camera property.
 
@@ -297,14 +396,12 @@
         """
 
         def fget(self):
-            cam = self._camera
-            if cam is None:
-                cam = Camera()
-            self._camera = cam
             return self._camera
 
         def fset(self, value):
-            # Should add better validation here
+            value.width = self.arr(value.width)
+            value.focus = self.arr(value.focus)
+            value.position = self.arr(value.position)
             self._camera = value
 
         def fdel(self):
@@ -313,6 +410,32 @@
         return locals()
     camera = property(**camera())
 
+    def unit_registry():
+        def fget(self):
+            ur = self._unit_registry
+            if ur is None:
+                ur = UnitRegistry()
+                # This will be updated when we add a volume source
+                ur.add("unitary", 1.0, length)
+            self._unit_registry = ur
+            return self._unit_registry
+
+        def fset(self, value):
+            self._unit_registry = value
+            if self.camera is not None:
+                self.camera.width = YTArray(
+                    self.camera.width.in_units('unitary'), registry=value)
+                self.camera.focus = YTArray(
+                    self.camera.focus.in_units('unitary'), registry=value)
+                self.camera.position = YTArray(
+                    self.camera.position.in_units('unitary'), registry=value)
+
+        def fdel(self):
+            del self._unit_registry
+            self._unit_registry = None
+        return locals()
+    unit_registry = property(**unit_registry())
+
     def set_camera(self, camera):
         r"""
 
@@ -479,6 +602,92 @@
         else:
             raise YTNotInsideNotebook
 
+    _arr = None
+    @property
+    def arr(self):
+        """Converts an array into a :class:`yt.units.yt_array.YTArray`
+
+        The returned YTArray will be dimensionless by default, but can be
+        cast to arbitrary units using the ``input_units`` keyword argument.
+
+        Parameters
+        ----------
+
+        input_array : iterable
+            A tuple, list, or array to attach units to
+        input_units : String unit specification, unit symbol object, or astropy
+                      units object
+            The units of the array. Powers must be specified using python syntax
+            (cm**3, not cm^3).
+        dtype : string or NumPy dtype object
+            The dtype of the returned array data
+
+        Examples
+        --------
+
+        >>> a = sc.arr([1, 2, 3], 'cm')
+        >>> b = sc.arr([4, 5, 6], 'm')
+        >>> a + b
+        YTArray([ 401.,  502.,  603.]) cm
+        >>> b + a
+        YTArray([ 4.01,  5.02,  6.03]) m
+
+        Arrays returned by this function know about the scene's unit system
+
+        >>> a = sc.arr(np.ones(5), 'unitary')
+        >>> a.in_units('Mpc')
+        YTArray([ 1.00010449,  1.00010449,  1.00010449,  1.00010449,
+                 1.00010449]) Mpc
+
+        """
+        if self._arr is not None:
+            return self._arr
+        self._arr = functools.partial(YTArray, registry=self.unit_registry)
+        return self._arr
+
+    _quan = None
+    @property
+    def quan(self):
+        """Converts a scalar into a :class:`yt.units.yt_array.YTQuantity`
+
+        The returned YTQuantity will be dimensionless by default, but can be
+        cast to arbitrary units using the ``input_units`` keyword argument.
+
+        Parameters
+        ----------
+
+        input_scalar : an integer or floating point scalar
+            The scalar to attach units to
+        input_units : String unit specification, unit symbol object, or astropy
+                      units
+            The units of the quantity. Powers must be specified using python
+            syntax (cm**3, not cm^3).
+        dtype : string or NumPy dtype object
+            The dtype of the array data.
+
+        Examples
+        --------
+
+        >>> a = sc.quan(1, 'cm')
+        >>> b = sc.quan(2, 'm')
+        >>> a + b
+        201.0 cm
+        >>> b + a
+        2.01 m
+
+        Quantities created this way automatically know about the unit system
+        of the scene
+
+        >>> a = sc.quan(5, 'unitary')
+        >>> a.in_cgs()
+        1.543e+25 cm
+
+        """
+        if self._quan is not None:
+            return self._quan
+        self._quan = functools.partial(YTQuantity, registry=self.unit_registry)
+        return self._quan
+
     def _repr_png_(self):
         if self.last_render is None:
             self.render()

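Since the scene now carries a unit registry of its own, arrays built with Scene.arr and Scene.quan understand the scene's 'unitary' unit, which is rescaled to a dataset's units once a camera or volume source is added. A short sketch, again using the IsolatedGalaxy sample dataset:

    import yt
    from yt.visualization.volume_rendering.api import Scene

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sc = Scene()
    sc.add_camera(ds)                       # the scene adopts ds.unit_registry
    half_width = sc.arr([0.25, 0.25, 0.25], 'unitary')
    print(half_width.in_units('code_length'))
    print(sc.quan(1.0, 'unitary').in_units('Mpc'))
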
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_camera_attributes.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_camera_attributes.py
@@ -0,0 +1,112 @@
+"""
+Tests for setting camera and scene attributes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+import yt.units as u
+
+from yt.testing import \
+    assert_equal, \
+    fake_random_ds
+from yt.visualization.volume_rendering.api import \
+    Scene
+
+
+valid_lens_types = [
+    'plane-parallel',
+    'perspective',
+    'stereo-perspective',
+    'fisheye',
+    'spherical',
+    'stereo-spherical'
+]
+
+
+def test_scene_and_camera_attributes():
+    ds = fake_random_ds(64, length_unit=2, bbox=np.array([[-1, 1], [-1, 1], [-1, 1]]))
+    sc = Scene()
+    cam = sc.add_camera(ds)
+
+    # test that initial values are correct in code units
+    assert_equal(cam.width, ds.arr([3, 3, 3], 'code_length'))
+    assert_equal(cam.position, ds.arr([1, 1, 1], 'code_length'))
+    assert_equal(cam.focus, ds.arr([0, 0, 0], 'code_length'))
+
+    # test setting the attributes in various ways
+
+    attribute_values = [
+        (1, ds.arr([2, 2, 2], 'code_length'), ),
+        ([1], ds.arr([2, 2, 2], 'code_length'), ),
+        ([1, 2], RuntimeError, ),
+        ([1, 1, 1], ds.arr([2, 2, 2], 'code_length'), ),
+        ((1, 'code_length'), ds.arr([1, 1, 1], 'code_length'), ),
+        (((1, 'code_length'), (1, 'code_length')), RuntimeError, ),
+        (((1, 'cm'), (2, 'cm'), (3, 'cm')),
+         ds.arr([0.5, 1, 1.5], 'code_length'), ),
+        (2*u.cm, ds.arr([1, 1, 1], 'code_length'), ),
+        (ds.arr(2, 'cm'), ds.arr([1, 1, 1], 'code_length'), ),
+        ([2*u.cm], ds.arr([1, 1, 1], 'code_length'), ),
+        ([1, 2, 3]*u.cm, ds.arr([0.5, 1, 1.5], 'code_length'), ),
+        ([1, 2]*u.cm, RuntimeError, ),
+        ([u.cm*w for w in [1, 2, 3]], ds.arr([0.5, 1, 1.5], 'code_length'), ),
+    ]
+
+    # define default values to avoid accidentally setting focus = position
+    default_values = {
+        'focus': [0, 0, 0],
+        'position': [4, 4, 4],
+        'width': [1, 1, 1],
+    }
+    attribute_list = list(default_values.keys())
+
+    for attribute in attribute_list:
+        for other_attribute in [a for a in attribute_list if a != attribute]:
+            setattr(cam, other_attribute, default_values[other_attribute])
+        for attribute_value, expected_result in attribute_values:
+            try:
+                # test properties
+                setattr(cam, attribute, attribute_value)
+                assert_equal(getattr(cam, attribute), expected_result)
+            except RuntimeError:
+                assert expected_result is RuntimeError
+
+            try:
+                # test setters/getters
+                getattr(cam, 'set_%s' % attribute)(attribute_value)
+                assert_equal(getattr(cam, 'get_%s' % attribute)(),
+                             expected_result)
+            except RuntimeError:
+                assert expected_result is RuntimeError
+
+    resolution_values = (
+        (512, (512, 512), ),
+        ((512, 512), (512, 512), ),
+        ((256, 512), (256, 512), ),
+        ((256, 256, 256), RuntimeError),
+    )
+
+    for resolution_value, expected_result in resolution_values:
+        try:
+            # test properties
+            cam.resolution = resolution_value
+            assert_equal(cam.resolution, expected_result)
+        except RuntimeError:
+            assert expected_result is RuntimeError
+
+        try:
+            # test setters/getters
+            cam.set_resolution(resolution_value)
+            assert_equal(cam.get_resolution(), expected_result)
+        except RuntimeError:
+            assert expected_result is RuntimeError
+
+    for lens_type in valid_lens_types:
+        cam.set_lens(lens_type)

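As the new test above exercises, camera attributes such as ``width``, ``position``, and ``focus`` now accept bare scalars, (value, unit) tuples, and unit-aware arrays, and the stored values are reported in ``code_length``. A minimal usage sketch, assuming the same fake-dataset helper as in the test (the specific values are illustrative):

    import yt.units as u
    from yt.testing import fake_random_ds
    from yt.visualization.volume_rendering.api import Scene

    ds = fake_random_ds(64, length_unit=2)
    sc = Scene()
    cam = sc.add_camera(ds)

    cam.width = 1                      # bare scalar, broadcast to three elements
    cam.set_width((1, 'code_length'))  # (value, unit) tuple
    cam.position = [1, 2, 3] * u.cm    # unit-aware array
    print(cam.width, cam.position)     # both reported in code_length
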
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_composite.py
--- a/yt/visualization/volume_rendering/tests/test_composite.py
+++ b/yt/visualization/volume_rendering/tests/test_composite.py
@@ -14,8 +14,11 @@
 import tempfile
 import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, \
-    VolumeSource, LineSource, BoxSource
+from yt.visualization.volume_rendering.api import \
+    Scene, \
+    VolumeSource, \
+    LineSource, \
+    BoxSource
 from yt.data_objects.api import ImageArray
 import numpy as np
 from unittest import TestCase
@@ -52,9 +55,8 @@
         ds.field_info[ds.field_list[0]].take_log=False
 
         sc = Scene()
-        cam = Camera(ds)
+        cam = sc.add_camera(ds)
         cam.resolution = (512, 512)
-        sc.camera = cam
         vr = VolumeSource(dd, field=ds.field_list[0])
         vr.transfer_function.clear()
         vr.transfer_function.grey_opacity=True

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_lenses.py
--- a/yt/visualization/volume_rendering/tests/test_lenses.py
+++ b/yt/visualization/volume_rendering/tests/test_lenses.py
@@ -14,7 +14,7 @@
 import tempfile
 import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 from unittest import TestCase
 
@@ -39,6 +39,7 @@
 
         self.field = ("gas", "density")
         self.ds = fake_random_ds(32, fields=self.field)
+        self.ds.index
 
     def tearDown(self):
         if self.use_tmpdir:
@@ -47,25 +48,23 @@
 
     def test_perspective_lens(self):
         sc = Scene()
-        cam = Camera(self.ds, lens_type='perspective')
+        cam = sc.add_camera(self.ds, lens_type='perspective')
         cam.position = self.ds.arr(np.array([1.0, 1.0, 1.0]), 'code_length')
         vol = VolumeSource(self.ds, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_perspective_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_stereoperspective_lens(self):
         sc = Scene()
-        cam = Camera(self.ds, lens_type='stereo-perspective')
+        cam = sc.add_camera(self.ds, lens_type='stereo-perspective')
         cam.resolution = [1024, 512]
         cam.position = self.ds.arr(np.array([0.7, 0.7, 0.7]), 'code_length')
         vol = VolumeSource(self.ds, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_stereoperspective_%s.png' % self.field[1], sigma_clip=6.0)
@@ -74,7 +73,7 @@
         dd = self.ds.sphere(self.ds.domain_center,
                             self.ds.domain_width[0] / 10)
         sc = Scene()
-        cam = Camera(dd, lens_type='fisheye')
+        cam = sc.add_camera(dd, lens_type='fisheye')
         cam.lens.fov = 360.0
         cam.set_width(self.ds.domain_width)
         v, c = self.ds.find_max('density')
@@ -82,7 +81,6 @@
         vol = VolumeSource(dd, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_fisheye_%s.png' % self.field[1], sigma_clip=6.0)
@@ -91,26 +89,24 @@
         dd = self.ds.sphere(self.ds.domain_center,
                             self.ds.domain_width[0] / 10)
         sc = Scene()
-        cam = Camera(dd, lens_type='plane-parallel')
+        cam = sc.add_camera(dd, lens_type='plane-parallel')
         cam.set_width(self.ds.domain_width*1e-2)
         v, c = self.ds.find_max('density')
         vol = VolumeSource(dd, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_plane_%s.png' % self.field[1], sigma_clip=6.0)
 
     def test_spherical_lens(self):
         sc = Scene()
-        cam = Camera(self.ds, lens_type='spherical')
+        cam = sc.add_camera(self.ds, lens_type='spherical')
         cam.resolution = [512, 256]
         cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
         vol = VolumeSource(self.ds, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_spherical_%s.png' % self.field[1], sigma_clip=6.0)
@@ -119,13 +115,12 @@
         w = (self.ds.domain_width).in_units('code_length')
         w = self.ds.arr(w, 'code_length')
         sc = Scene()
-        cam = Camera(self.ds, lens_type='stereo-spherical')
+        cam = sc.add_camera(self.ds, lens_type='stereo-spherical')
         cam.resolution = [1024, 256]
         cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), 'code_length')
         vol = VolumeSource(self.ds, field=self.field)
         tf = vol.transfer_function
         tf.grey_opacity = True
-        sc.camera = cam
         sc.add_source(vol)
         sc.render()
         sc.save('test_stereospherical_%s.png' % self.field[1], sigma_clip=6.0)

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -15,7 +15,7 @@
 from yt.testing import fake_hexahedral_ds
 from yt.testing import requires_module
 from yt.visualization.volume_rendering.render_source import MeshSource
-from yt.visualization.volume_rendering.camera import Camera
+from yt.visualization.volume_rendering.scene import Scene
 
 
 @requires_module("pyembree")
@@ -24,17 +24,18 @@
     images = []
 
     ds = fake_tetrahedral_ds()
+    sc = Scene()
     for field in ds.field_list:
-        ms = MeshSource(ds, field)
-        cam = Camera(ds)
-        im = ms.render(cam)
+        sc.add_source(MeshSource(ds, field))
+        sc.add_camera()
+        im = sc.render()
         images.append(im)
 
     ds = fake_hexahedral_ds()
     for field in ds.field_list:
-        ms = MeshSource(ds, field)
-        cam = Camera(ds)
-        im = ms.render(cam)
+        sc.add_source(MeshSource(ds, field))
+        sc.add_camera()
+        im = sc.render()
         images.append(im)
 
     return images

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_points.py
--- a/yt/visualization/volume_rendering/tests/test_points.py
+++ b/yt/visualization/volume_rendering/tests/test_points.py
@@ -14,8 +14,10 @@
 import tempfile
 import shutil
 from yt.testing import fake_random_ds
-from yt.visualization.volume_rendering.api import Scene, Camera, \
-    VolumeSource, PointSource
+from yt.visualization.volume_rendering.api import \
+    Scene, \
+    VolumeSource, \
+    PointSource
 import numpy as np
 from unittest import TestCase
 
@@ -51,9 +53,8 @@
         ds.field_info[ds.field_list[0]].take_log=False
 
         sc = Scene()
-        cam = Camera(ds)
+        cam = sc.add_camera(ds)
         cam.resolution = (512,512)
-        sc.camera = cam
         vr = VolumeSource(dd, field=ds.field_list[0])
         vr.transfer_function.clear()
         vr.transfer_function.grey_opacity=False

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -20,7 +20,6 @@
     GenericImageTest
 from yt.visualization.volume_rendering.api import \
     Scene, \
-    Camera, \
     VolumeSource, \
     ColorTransferFunction, \
     off_axis_projection
@@ -114,14 +113,13 @@
     for lens_type in ['plane-parallel', 'perspective']:
         frame = 0
 
-        cam = Camera(ds, lens_type='plane-parallel')
+        cam = sc.add_camera(ds, lens_type='plane-parallel')
         cam.resolution = (1000, 1000)
         cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
         cam.switch_orientation(normal_vector=[1., 0., 0.],
                                north_vector=[0., 0., 1.])
         cam.set_width(ds.domain_width*2.)
 
-        sc.camera = cam
         sc.add_source(vol)
         yield VRImageComparisonTest(
             sc, ds, '%s_%04d' % (lens_type, frame), decimals)
@@ -130,7 +128,6 @@
             frame += 1
             center = ds.arr([0, 0, 0], 'code_length')
             cam.yaw(theta, rot_center=center)
-            sc.camera = cam
             yield VRImageComparisonTest(
                 sc, ds, 'yaw_%s_%04d' % (lens_type, frame), decimals)
 
@@ -139,7 +136,6 @@
             theta = np.pi / n_frames
             center = ds.arr([0, 0, 0], 'code_length')
             cam.pitch(theta, rot_center=center)
-            sc.camera = cam
             yield VRImageComparisonTest(
                 sc, ds, 'pitch_%s_%04d' % (lens_type, frame), decimals)
 
@@ -148,7 +144,6 @@
             theta = np.pi / n_frames
             center = ds.arr([0, 0, 0], 'code_length')
             cam.roll(theta, rot_center=center)
-            sc.camera = cam
             yield VRImageComparisonTest(
                 sc, ds, 'roll_%s_%04d' % (lens_type, frame), decimals)
 

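The orientation tests above also drop the repeated ``sc.camera = cam`` reassignment: since ``add_camera`` attaches the camera to the scene, in-place rotations are picked up by subsequent renders. A minimal sketch of rotating about the domain center, assuming a dataset ``ds`` and a scene ``sc`` that already has a volume source attached (filenames are illustrative):

    import numpy as np

    cam = sc.add_camera(ds, lens_type='plane-parallel')
    center = ds.arr([0, 0, 0], 'code_length')
    n_frames = 5
    for frame in range(n_frames):
        cam.yaw(np.pi / n_frames, rot_center=center)  # rotate the camera in place
        sc.render()
        sc.save('yaw_%04d.png' % frame, sigma_clip=6.0)
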
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/tests/test_zbuff.py
--- a/yt/visualization/volume_rendering/tests/test_zbuff.py
+++ b/yt/visualization/volume_rendering/tests/test_zbuff.py
@@ -15,8 +15,10 @@
 import shutil
 from yt.testing import fake_random_ds
 from yt.visualization.volume_rendering.api import \
-    Scene, Camera, ZBuffer, \
-    VolumeSource, OpaqueSource
+    Scene, \
+    ZBuffer, \
+    VolumeSource, \
+    OpaqueSource
 from yt.testing import assert_almost_equal
 import numpy as np
 from unittest import TestCase
@@ -54,9 +56,8 @@
         ds.field_info[ds.field_list[0]].take_log=False
 
         sc = Scene()
-        cam = Camera(ds)
+        cam = sc.add_camera(ds)
         cam.resolution = (512,512)
-        sc.camera = cam
         vr = VolumeSource(dd, field=ds.field_list[0])
         vr.transfer_function.clear()
         vr.transfer_function.grey_opacity=True

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -15,7 +15,7 @@
 
 
 def new_mesh_sampler(camera, render_source):
-    params = camera._get_sampler_params(render_source)
+    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     args = (
         np.atleast_3d(params['vp_pos']),
         np.atleast_3d(params['vp_dir']),
@@ -32,7 +32,7 @@
 
 
 def new_volume_render_sampler(camera, render_source):
-    params = camera._get_sampler_params(render_source)
+    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     params.update(transfer_function=render_source.transfer_function)
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
@@ -61,7 +61,7 @@
 
 
 def new_interpolated_projection_sampler(camera, render_source):
-    params = camera._get_sampler_params(render_source)
+    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
@@ -85,7 +85,7 @@
 
 
 def new_projection_sampler(camera, render_source):
-    params = camera._get_sampler_params(render_source)
+    params = ensure_code_unit_params(camera._get_sampler_params(render_source))
     params.update(transfer_function=render_source.transfer_function)
     params.update(num_samples=render_source.num_samples)
     args = (
@@ -107,6 +107,16 @@
     sampler = ProjectionSampler(*args, **kwargs)
     return sampler
 
+def ensure_code_unit_params(params):
+    for param_name in ['center', 'vp_pos', 'vp_dir', 'width']:
+        param = params[param_name]
+        if hasattr(param, 'in_units'):
+            params[param_name] = param.in_units('code_length')
+    bounds = params['bounds']
+    if hasattr(bounds[0], 'units'):
+        params['bounds'] = tuple(b.in_units('code_length').d for b in bounds)
+
+    return params
 
 def get_corners(le, re):
     return np.array([

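The new ``ensure_code_unit_params`` helper above normalizes the unit-aware sampler parameters to ``code_length`` (and strips units from the bounds) before they are handed to the low-level samplers. A rough illustration of the conversion it performs; the parameter dict here is hand-built for demonstration and is not what ``_get_sampler_params`` actually returns:

    from yt.testing import fake_random_ds
    from yt.visualization.volume_rendering.utils import ensure_code_unit_params

    ds = fake_random_ds(16, length_unit=2)   # 1 code_length == 2 cm
    params = {
        'center': ds.arr([1, 1, 1], 'cm'),
        'vp_pos': ds.arr([2, 2, 2], 'cm'),
        'vp_dir': ds.arr([1, 0, 0], 'cm'),
        'width':  ds.arr([4, 4, 4], 'cm'),
        'bounds': (ds.quan(0, 'cm'), ds.quan(4, 'cm')),
    }
    params = ensure_code_unit_params(params)
    print(params['width'])   # YTArray in code_length: [2. 2. 2.]
    print(params['bounds'])  # plain floats in code_length: (0.0, 2.0)
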
diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r bdb4a8a9139ed14202d0e55c30eae8a9dc9c0977 yt/visualization/volume_rendering/volume_rendering.py
--- a/yt/visualization/volume_rendering/volume_rendering.py
+++ b/yt/visualization/volume_rendering/volume_rendering.py
@@ -13,7 +13,6 @@
 
 
 from .scene import Scene
-from .camera import Camera
 from .render_source import VolumeSource, \
     MeshSource
 from .utils import data_source_or_all
@@ -77,7 +76,7 @@
         source = VolumeSource(data_source, field=field)
 
     sc.add_source(source)
-    sc.camera = Camera(data_source=data_source, lens_type=lens_type)
+    sc.add_camera(data_source=data_source, lens_type=lens_type)
     return sc

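Taken together, the ``volume_rendering.py`` change means the public workflow no longer constructs a ``Camera`` directly; the camera is obtained from the scene. A minimal end-to-end sketch using a fake dataset (field name, resolution, and output filename are illustrative):

    from yt.testing import fake_random_ds
    from yt.visualization.volume_rendering.api import Scene, VolumeSource

    ds = fake_random_ds(32, fields=("gas", "density"))
    sc = Scene()
    cam = sc.add_camera(ds, lens_type='perspective')  # replaces Camera(ds, ...)
    cam.resolution = (512, 512)
    sc.add_source(VolumeSource(ds, field=("gas", "density")))
    sc.render()
    sc.save('vr_demo.png', sigma_clip=6.0)
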
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

