[yt-svn] commit/yt: 14 new changesets

commits-noreply at bitbucket.org
Tue Mar 29 08:21:24 PDT 2016


14 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3f1b73fedeae/
Changeset:   3f1b73fedeae
Branch:      yt
User:        mzingale
Date:        2016-03-03 17:26:14+00:00
Summary:     add a new function, fake_vr_orientation_test_ds(), that creates a
simple dataset that has proved useful in testing VR orientation
Affected #:  1 file

diff -r 75b45347c44d52c1d4a32ee1b56356879848d92d -r 3f1b73fedeae6dbe5428248192d821a890f4f93b yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -315,7 +315,80 @@
     return ds
 
 
+def fake_vr_orientation_test_ds(N = 96):
+    """
+    create a toy dataset that puts a sphere at (0,0,0), a single cube
+    on +x, two cubes on +y, and three cubes on +z in a domain spanning
+    [-1,1]**3.  The lower planes (x = -1, y = -1, z = -1) are also
+    given non-zero values.
+
+    This dataset allows you to easily explore orientations and
+    handedness in VR and other renderings.
+
+    """
+    from yt.frontends.stream.api import load_uniform_grid
+
+    xmin = ymin = zmin = -1.0
+    xmax = ymax = zmax = 1.0
+
+    dcoord = (xmax - xmin)/N
+
+    arr = np.zeros((N,N,N), dtype=np.float64)
+    arr[:,:,:] = 1.e-4
+
+    bbox = np.array([ [xmin, xmax], [ymin, ymax], [zmin, zmax] ])
+
+    # coordinates -- in the notation data[i, j, k]
+    x = (np.arange(N) + 0.5)*dcoord + xmin
+    y = (np.arange(N) + 0.5)*dcoord + ymin
+    z = (np.arange(N) + 0.5)*dcoord + zmin
+
+    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
+
+    # sphere at the origin
+    c = np.array( [0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax) ] )
+    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
+    arr[r < 0.05] = 1.0
+
+    # coordinate planes
+    ixy = np.logical_and(x3d > 0.0, y3d > 0.0)
+    iyz = np.logical_and(y3d > 0.0, z3d > 0.0)
+    ixz = np.logical_and(x3d > 0.0, z3d > 0.0)
+
+    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
+    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
+    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
+
+    # single cube on +x
+    xc = 0.75
+    dx = 0.05
+    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
+                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
+                                        np.logical_and(z3d > -dx, z3d < dx)) )
+    arr[idx] = 1.0
+
+    # two cubes on +y
+    dy = 0.05
+    for yc in [0.65, 0.85]:
+        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
+                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
+                                            np.logical_and(z3d > -dy, z3d < dy)) )
+    arr[idx] = 0.8
+
+    # three cubes on +z
+    dz = 0.05
+    for zc in [0.5, 0.7, 0.9]:
+        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
+                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
+                                            np.logical_and(y3d > -dz, y3d < dz)) )
+        arr[idx] = 0.6
+
+    data = dict(Density = arr)
+    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
+    return ds
+
 def expand_keywords(keywords, full=False):
+
     """
     expand_keywords is a means for testing all possible keyword
     arguments in the nosetests.  Simply pass it a dictionary of all the

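For reference, a minimal usage sketch (not part of the commit; it assumes
yt's standard SlicePlot API and the "Density" field as defined above, which
a later changeset in this series renames to "density"):

    import yt
    from yt.testing import fake_vr_orientation_test_ds

    ds = fake_vr_orientation_test_ds()
    # A slice through the domain shows the central sphere and the cubes;
    # the 1/2/3 cube counts along +x/+y/+z make flipped or mirrored
    # orientations easy to spot in a rendering.
    slc = yt.SlicePlot(ds, "z", "Density")
    slc.save("orientation_slice.png")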

https://bitbucket.org/yt_analysis/yt/commits/bf284f33aece/
Changeset:   bf284f33aece
Branch:      yt
User:        mzingale
Date:        2016-03-03 17:46:48+00:00
Summary:     update this script to use yt.testing.fake_vr_orientation_test_ds()
Affected #:  1 file

diff -r 3f1b73fedeae6dbe5428248192d821a890f4f93b -r bf284f33aece2d0b9a88754fd4d07c79593c60f0 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -26,75 +26,9 @@
     off_axis_projection
 
 
-def setup_ds():
-
-    N = 96
-
-    xmin = ymin = zmin = -1.0
-    xmax = ymax = zmax = 1.0
-
-    dcoord = (xmax - xmin)/N
-
-    arr = np.zeros((N, N, N), dtype=np.float64)
-    arr[:, :, :] = 1.e-4
-
-    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
-
-    # coordinates -- in the notation data[i, j, k]
-    x = (np.arange(N) + 0.5)*(xmax - xmin)/N + xmin
-    y = (np.arange(N) + 0.5)*(ymax - ymin)/N + ymin
-    z = (np.arange(N) + 0.5)*(zmax - zmin)/N + zmin
-
-    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
-
-    # sphere at the origin
-    c = np.array([0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax)])
-
-    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
-    arr[r < 0.05] = 1.0
-
-    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
-    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
-    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
-
-    # single cube on +x
-    xc = 0.75
-    dx = 0.05
-    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
-                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
-                                        np.logical_and(z3d > -dx, z3d < dx)))
-
-    arr[idx] = 1.0
-
-    # two cubes on +y
-    dy = 0.05
-    for yc in [0.65, 0.85]:
-
-        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
-                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
-                                            np.logical_and(z3d > -dy, z3d < dy)))
-
-        arr[idx] = 0.8
-
-    # three cubes on +z
-    dz = 0.05
-    for zc in [0.5, 0.7, 0.9]:
-
-        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
-                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
-                                            np.logical_and(y3d > -dz, y3d < dz)))
-
-        arr[idx] = 0.6
-
-    data = dict(density=(arr, "g/cm**3"))
-    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
-
-    return ds
-
-
 @requires_answer_testing()
 def test_orientation():
-    ds = setup_ds()
+    ds = yt.testing.fake_vr_orientation_test_ds()
 
     sc = Scene()
 


https://bitbucket.org/yt_analysis/yt/commits/801303d84052/
Changeset:   801303d84052
Branch:      yt
User:        mzingale
Date:        2016-03-03 17:59:54+00:00
Summary:     fix the naming of the field
Affected #:  1 file

diff -r bf284f33aece2d0b9a88754fd4d07c79593c60f0 -r 801303d84052f40f7a55372536b373da79afb2b4 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -383,7 +383,7 @@
                                             np.logical_and(y3d > -dz, y3d < dz)) )
         arr[idx] = 0.6
 
-    data = dict(Density = arr)
+    data = dict(density = (arr, "g/cm**3"))
     ds = load_uniform_grid(data, arr.shape, bbox=bbox)
     return ds
 

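The tuple form matters for stream datasets: a lowercase field name with
explicit units lets yt alias the on-disk field into the "gas" namespace.
A small sketch of the convention (illustrative, not taken from the commit):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    arr = np.random.random((16, 16, 16))
    # An (array, unit_string) tuple attaches units to the field; a bare
    # array would carry no unit information.
    ds = load_uniform_grid(dict(density=(arr, "g/cm**3")), arr.shape)
    ad = ds.all_data()
    print(ad["gas", "density"].units)   # g/cm**3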

https://bitbucket.org/yt_analysis/yt/commits/08cc6198097c/
Changeset:   08cc6198097c
Branch:      yt
User:        mzingale
Date:        2016-03-03 18:13:32+00:00
Summary:     the script was supposed to test both the plane-parallel and perspective cameras,
and had a loop to do so, but we were not actually using lens_type in setting
up the camera
Affected #:  1 file

diff -r 801303d84052f40f7a55372536b373da79afb2b4 -r 08cc6198097c4972d76662e6c506bfa7d0358e27 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -48,7 +48,7 @@
     for lens_type in ['plane-parallel', 'perspective']:
         frame = 0
 
-        cam = Camera(ds, lens_type='plane-parallel')
+        cam = Camera(ds, lens_type=lens_type)
         cam.resolution = (1000, 1000)
         cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
         cam.switch_orientation(normal_vector=[1., 0., 0.],


https://bitbucket.org/yt_analysis/yt/commits/0f24dc78867b/
Changeset:   0f24dc78867b
Branch:      yt
User:        mzingale
Date:        2016-03-03 19:58:13+00:00
Summary:     fix test suite errors
Affected #:  1 file

diff -r 08cc6198097c4972d76662e6c506bfa7d0358e27 -r 0f24dc78867bcc17a8bfa0c91e63a87cde6f8600 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -12,8 +12,7 @@
 
 
 import numpy as np
-
-from yt import load_uniform_grid
+from yt import testing
 from yt.utilities.answer_testing.framework import \
     requires_answer_testing, \
     VRImageComparisonTest, \
@@ -28,7 +27,7 @@
 
 @requires_answer_testing()
 def test_orientation():
-    ds = yt.testing.fake_vr_orientation_test_ds()
+    ds = testing.fake_vr_orientation_test_ds()
 
     sc = Scene()
 


https://bitbucket.org/yt_analysis/yt/commits/55246a4b843b/
Changeset:   55246a4b843b
Branch:      yt
User:        mzingale
Date:        2016-03-03 20:23:54+00:00
Summary:     fix an indentation error that caused only one cube to appear on +y in the fake VR data
Affected #:  1 file

diff -r 0f24dc78867bcc17a8bfa0c91e63a87cde6f8600 -r 55246a4b843b5ab784ae208ee6f17dc57aff31fa yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -373,7 +373,7 @@
         idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
                              np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
                                             np.logical_and(z3d > -dy, z3d < dy)) )
-    arr[idx] = 0.8
+        arr[idx] = 0.8
 
     # three cubes on +z
     dz = 0.05

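The bug pattern in miniature (a hypothetical sketch; compute_mask stands in
for the chain of np.logical_and calls above): with the assignment outside
the loop, only the mask from the final iteration is ever applied.

    # buggy: runs once, with idx left over from yc == 0.85
    for yc in [0.65, 0.85]:
        idx = compute_mask(yc)
    arr[idx] = 0.8

    # fixed: each cube's mask is applied as it is computed
    for yc in [0.65, 0.85]:
        idx = compute_mask(yc)
        arr[idx] = 0.8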

https://bitbucket.org/yt_analysis/yt/commits/27611b3a740e/
Changeset:   27611b3a740e
Branch:      yt
User:        mzingale
Date:        2016-03-03 23:43:58+00:00
Summary:     remove unused arrays
Affected #:  1 file

diff -r 55246a4b843b5ab784ae208ee6f17dc57aff31fa -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -350,11 +350,6 @@
     r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
     arr[r < 0.05] = 1.0
 
-    # coordinate planes
-    ixy = np.logical_and(x3d > 0.0, y3d > 0.0)
-    iyz = np.logical_and(y3d > 0.0, z3d > 0.0)
-    ixz = np.logical_and(x3d > 0.0, z3d > 0.0)
-
     arr[abs(x3d - xmin) < 2*dcoord] = 0.3
     arr[abs(y3d - ymin) < 2*dcoord] = 0.3
     arr[abs(z3d - zmin) < 2*dcoord] = 0.3


https://bitbucket.org/yt_analysis/yt/commits/17cca52083e9/
Changeset:   17cca52083e9
Branch:      yt
User:        mzingale
Date:        2016-03-04 17:46:16+00:00
Summary:     merge
Affected #:  87 files

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -94,16 +94,16 @@
 
 There is a third, borderline class of field in yt, as well.  This is the
 "alias" type, where a field on disk (for example, (frontend, ``Density``)) is 
-aliased into an internal yt-name (for example, (``gas``, ``density``)).  The 
+aliased into an internal yt-name (for example, (``gas``, ``density``)). The 
 aliasing process allows universally-defined derived fields to take advantage of 
 internal names, and it also provides an easy way to address what units something 
 should be returned in.  If an aliased field is requested (and aliased fields 
 will always be lowercase, with underscores separating words) it will be returned 
-in CGS units (future versions will enable global defaults to be set for MKS and 
-other unit systems), whereas if the frontend-specific field is requested, it 
-will not undergo any unit conversions from its natural units.  (This rule is 
-occasionally violated for fields which are mesh-dependent, specifically particle 
-masses in some cosmology codes.)
+in the units specified by the unit system of the dataset (see :ref:`unit_systems`
+for a guide to using the different unit systems in yt), whereas if the 
+frontend-specific field is requested, it will not undergo any unit conversions 
+from its natural units.  (This rule is occasionally violated for fields which 
+are mesh-dependent, specifically particle masses in some cosmology codes.)
 
 .. _known-field-types:
 
@@ -125,7 +125,8 @@
 * ``gas`` -- This is the usual default for simulation frontends for fluid
   types.  These fields are typically aliased to the frontend-specific mesh
   fields for grid-based codes or to the deposit fields for particle-based
-  codes.  Default units are in CGS.
+  codes.  Default units are in the unit system of the dataset (see 
+  :ref:`unit_systems` for more information).
 * particle type -- These are particle fields that exist on-disk as written 
   by individual frontends.  If the frontend designates names for these particles
   (i.e. particle type) those names are the field types. 
@@ -240,6 +241,37 @@
    print(ds.field_info["gas", "pressure"].get_units())
    print(ds.field_info["gas", "pressure"].get_source())
 
+.. _bfields:
+
+Magnetic Fields
+---------------
+
+Magnetic fields require special handling, because their dimensions are different in
+different systems of units, in particular between the CGS and MKS (SI) systems of units.
+Superficially, it would appear that they have the same dimensions, since the units 
+of the magnetic field in the CGS and MKS system are gauss (:math:`\rm{G}`) and tesla 
+(:math:`\rm{T}`), respectively, and numerically :math:`1~\rm{G} = 10^{-4}~\rm{T}`. However, 
+if we examine the base units, we find that they do indeed have different dimensions:
+
+.. math::
+
+    \rm{1~G = 1~\frac{\sqrt{g}}{\sqrt{cm}\cdot{s}}} \\
+    \rm{1~T = 1~\frac{kg}{A\cdot{s^2}}}
+
+It is easier to see the difference between the dimensionality of the magnetic field in the two
+systems in terms of the definition of the magnetic pressure:
+
+.. math::
+
+    p_B = \frac{B^2}{8\pi}~\rm{(cgs)} \\
+    p_B = \frac{B^2}{2\mu_0}~\rm{(MKS)}
+
+where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically
+detects on a per-frontend basis what units the magnetic fields should be in, and allows conversion between 
+different magnetic field units in the different :ref:`unit systems <unit_systems>` as well. To 
+determine how to set up special magnetic field handling when designing a new frontend, check out 
+:ref:`bfields-frontend`.
+
 Particle Fields
 ---------------
 

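A worked check of the two pressure formulas above (a sketch using yt's unit
machinery; it assumes "Pa", "N", and "A" are available in the unit registry,
and writes out mu_0 by hand rather than importing it):

    import numpy as np
    import yt

    # CGS: a 1 gauss field
    B_cgs = yt.YTQuantity(1.0, "gauss")
    p_cgs = (B_cgs**2 / (8.0 * np.pi)).in_units("erg/cm**3")

    # MKS: the numerically equivalent 1.0e-4 tesla field
    B_mks = yt.YTQuantity(1.0e-4, "T")
    mu_0 = yt.YTQuantity(4.0e-7 * np.pi, "N/A**2")  # vacuum permeability
    p_mks = (B_mks**2 / (2.0 * mu_0)).in_units("Pa")

    # The two agree once converted: 1 erg/cm**3 == 0.1 Pa
    print(p_cgs, p_mks)
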
diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -24,9 +24,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -41,9 +39,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (maxval)"
@@ -52,9 +48,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dens)"
@@ -63,9 +57,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "mass = dd['cell_mass']\n",
@@ -79,9 +71,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "dx = dd['dx']\n",
@@ -107,9 +97,11 @@
     "* `in_units`\n",
     "* `in_cgs`\n",
     "* `in_mks`\n",
+    "* `in_base`\n",
     "* `convert_to_units`\n",
     "* `convert_to_cgs`\n",
-    "* `convert_to_mks`"
+    "* `convert_to_mks`\n",
+    "* `convert_to_base`"
    ]
   },
   {
@@ -122,9 +114,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'].in_units('Msun/pc**3'))"
@@ -134,35 +124,73 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
+    "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['pressure'])\n",
-    "print ((dd['pressure']).in_cgs())\n",
-    "print ((dd['pressure']).in_mks())"
+    "print (dd['pressure'].in_cgs())\n",
+    "print (dd['pressure'].in_mks())"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The next two methods do in-place conversions:"
+    "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n",
+    "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n",
+    "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n",
+    "print (dd['pressure'].in_base()) # defaults to cgs if no argument given"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The rest of the methods do in-place conversions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
    "outputs": [],
    "source": [
     "dens = dd['density']\n",
@@ -182,9 +210,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'])\n",
@@ -206,9 +232,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'])\n",
@@ -234,9 +258,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
@@ -249,9 +271,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
@@ -285,9 +305,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import numpy as np\n",
@@ -317,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'].ndarray_view())\n",
@@ -338,9 +354,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "density_values = dd['density'].d\n",
@@ -374,23 +388,19 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from astropy import units as u\n",
     "\n",
     "x = 42.0 * u.meter\n",
-    "y = yt.YTQuantity.from_astropy(x) "
+    "y = yt.YTQuantity.from_astropy(x)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (x, type(x))\n",
@@ -400,9 +410,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "a = np.random.random(size=10) * u.km/u.s\n",
@@ -412,9 +420,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (a, type(a))\n",
@@ -431,9 +437,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "temp = dd[\"temperature\"]\n",
@@ -443,9 +447,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -462,9 +464,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from yt.utilities.physical_constants import kboltz\n",
@@ -474,9 +474,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (kboltz, type(kboltz))\n",
@@ -493,9 +491,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "k1 = kboltz.to_astropy()\n",
@@ -506,9 +502,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "c = yt.YTArray.from_astropy(a)\n",
@@ -526,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from pint import UnitRegistry\n",
@@ -540,9 +532,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (v, type(v))\n",
@@ -552,9 +542,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ptemp = temp.to_pint()"
@@ -563,9 +551,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -582,7 +568,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -594,4 +580,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

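The new in_base method can be exercised without loading a dataset (a minimal
sketch; the first printed value follows from 1 erg = 1.0e-7 J):

    import yt

    q = yt.YTQuantity(1.0, "erg")
    print(q.in_base("mks"))       # 1 erg expressed in the MKS system
    print(q.in_base("galactic"))  # the same energy in kpc/Msun/Myr units
    print(q.in_base())            # no argument defaults to cgs
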
diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional arrays instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
+    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional array instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
     "\n",
     "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation."
    ]
@@ -12,9 +12,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A high redshift output from z ~ 8\n",
@@ -29,9 +27,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A low redshift output from z ~ 0\n",
@@ -51,9 +47,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit.in_cgs()/ds1.length_unit.in_cgs() == (1+ds1.current_redshift)/(1+ds2.current_redshift))"
@@ -69,9 +63,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit/ds1.length_unit)"
@@ -89,9 +81,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -120,7 +110,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -132,4 +122,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -0,0 +1,491 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n",
+    "\n",
+    "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Unit Systems Available in yt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Several unit systems are already supplied for use within yt. They are:\n",
+    "\n",
+    "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n",
+    "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n",
+    "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n",
+    "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n",
+    "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n",
+    "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n",
+    "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n",
+    "\n",
+    "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import yt\n",
+    "yt.unit_system_registry[\"cgs\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n",
+    "\n",
+    "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"mks\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also look at the imperial system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"imperial\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and the \"galactic\" system as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"galactic\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Converting `YTArrays` to the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Choosing a Unit System When Loading a Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. The default is `\"cgs\"`.\n",
+    "\n",
+    "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n",
+    "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n",
+    "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n",
+    "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n",
+    "print (sp[\"angular_momentum_x\"]) # This is a derived field\n",
+    "print (sp[\"kinetic_energy\"]) # This is also a derived field"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n",
+    "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can take an `Enzo` dataset and express it in `\"galactic\"` units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n",
+    "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n",
+    "print (sp[\"density\"])\n",
+    "print (sp[\"pressure\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n",
+    "dd = ds_chombo.all_data()\n",
+    "print (dd[\"density\"])\n",
+    "print (dd[\"kinetic_energy\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Fields So That They Can Use the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n",
+    "def _momentum_x(field, data):\n",
+    "    return data[\"density\"]*data[\"velocity_x\"]\n",
+    "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, the field will automatically be expressed in whatever units the dataset was called with. In this case, it was MKS:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.units import clight\n",
+    "\n",
+    "def _rest_energy(field, data):\n",
+    "    return data[\"cell_mass\"]*clight*clight\n",
+    "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n",
+    "\n",
+    "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n",
+    "\n",
+    "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n",
+    "sp[\"rest_energy\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Obtaining Physical Constants in a Specific Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n",
+    "    unit_system = yt.unit_system_registry[name]\n",
+    "    print (name, unit_system.constants.G)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.utilities.physical_constants import G\n",
+    "print (G.in_base(\"mks\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Your Own Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"small\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n",
+    "wacky_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n",
+    "mksish_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. To do this, set these to be the units of the `\"pressure\"` dimension explicitly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"pressure\"] = \"udyne/mm**2\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can do the same for a few other dimensionalities:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n",
+    "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n",
+    "small_unit_system[\"velocity\"] = \"cm/s\"\n",
+    "small_unit_system"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}

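Pulled out of the notebook cells above, the end-to-end pattern for a custom
unit system is short (a sketch restating the notebook, nothing beyond it):

    import yt

    # Define a system by its base length, mass, and time units...
    small = yt.UnitSystem("small", "mm", "cg", "us")
    # ...then pin compound units for particular dimensions.
    small["pressure"] = "udyne/mm**2"
    small["velocity"] = "cm/s"
    print(yt.unit_system_registry["small"])
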
diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -34,6 +34,7 @@
    comparing_units_from_different_datasets
    units_and_plotting
    unit_equivalencies
+   unit_systems
 
 .. note::
 

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/analyzing/units/unit_systems.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_systems.rst
@@ -0,0 +1,7 @@
+.. _unit_systems:
+
+Unit Systems
+============
+
+.. notebook:: 7)_Unit_Systems.ipynb
+   :skip_exceptions:

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -33,7 +33,10 @@
 In this example, the ``density`` field will return data with units of
 ``g/cm**3`` and the ``thermal_energy`` field will return data units of
 ``erg/g``, so the result will automatically have units of pressure,
-``erg/cm**3``.
+``erg/cm**3``. This assumes the unit system is set to the default, which is
+CGS: if a different unit system is selected, the result will still have
+dimensions of pressure, but in different units. See :ref:`unit_systems` for more
+information.
 
 Once we've defined our function, we need to notify yt that the field is
 available.  The :func:`add_field` function is the means of doing this; it has a
@@ -47,7 +50,7 @@
 
 .. code-block:: python
 
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   yt.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
 We feed it the name of the field, the name of the function, and the
 units.  Note that the units parameter is a "raw" string, in the format that yt 
@@ -59,7 +62,7 @@
 as in the ``_pressure`` example above.
 
 Field definitions return array data with units. If the field function returns
-data in a dimensionally equivalent unit (e.g. a ``dyne`` versus a ``N``), the
+data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
 with dimensions that are incompatible with units specified in ``add_field``,
@@ -67,7 +70,7 @@
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
 
-If your field definition influcdes physical constants rather than defining a
+If your field definition includes physical constants rather than defining a
 constant as a float, you can import it from ``yt.utilities.physical_constants``
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
@@ -82,7 +85,29 @@
 Lastly, if you do not know the units of your field ahead of time, you can
 specify ``units='auto'`` in the call to ``add_field`` for your field.  This will
 automatically determine the appropriate units based on the units of the data
-returned by the field function.
+returned by the field function. This is also a good way to let your derived fields
+be automatically converted to the units of the :ref:`unit system <unit_systems>` in 
+your dataset. 
+
+If ``units='auto'`` is set, it is also required to set the ``dimensions`` keyword
+argument so that error-checking can be done on the derived field to make sure that
+the dimensionality of the returned array and the field are the same:
+
+.. code-block:: python
+
+    import yt
+    from yt.units import dimensions
+    
+    def _pressure(field, data):
+        return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+              
+    yt.add_field(("gas","pressure"), function=_pressure, units="auto",
+                 dimensions=dimensions.pressure)
+
+If ``dimensions`` is not set, an error will be thrown. The ``dimensions`` keyword
+can be a SymPy ``symbol`` object imported from ``yt.units.dimensions``, a compound
+dimension of these, or a string corresponding to one of these objects. 
 
 :func:`add_field` can be invoked in two other ways. The first is by the 
 function decorator :func:`derived_field`. The following code is equivalent to 
@@ -111,10 +136,27 @@
 .. code-block:: python
 
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   ds.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
+If you specify fields in this way, you can take advantage of the dataset's 
+:ref:`unit system <unit_systems>` to define the units for you, so that
+the field will be returned in the units of that system:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "pressure"), function=_pressure, units=ds.unit_system["pressure"])
+
+Since the :class:`yt.units.unit_systems.UnitSystem` object returns a :class:`yt.units.unit_object.Unit` object when
+queried, you're not limited to specifying units in terms of those already available. You can specify units for fields
+using basic arithmetic if necessary:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "my_acceleration"), function=_my_acceleration,
+                 units=ds.unit_system["length"]/ds.unit_system["time"]**2)
+
+If you find yourself using the same custom-defined fields over and over, you should put them in your plugins file as
+described in :ref:`plugin-file`.
 
 A More Complicated Example
 --------------------------
@@ -148,7 +190,7 @@
        y_hat /= r
        z_hat /= r
        return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
+   yt.add_field(("gas","my_radial_velocity"),
                 function=_my_radial_velocity,
                 units="cm/s",
                 take_log=False,
@@ -195,8 +237,11 @@
 ``function``
      This is a function handle that defines the field
 ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
+     This is a string that describes the units, or a query to a :ref:`UnitSystem <unit_systems>` 
+     object, e.g. ``ds.unit_system["energy"]``. Powers must be in Python syntax (``**`` 
+     instead of ``^``). Alternatively, it may be set to ``"auto"`` to have the units 
+     determined automatically. In this case, the ``dimensions`` keyword must be set to the
+     correct dimensions of the field. 
 ``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
@@ -219,6 +264,9 @@
 ``force_override``
      (*Advanced*) Overrides the definition of an old field if a field with the
      same name has already been defined.
+``dimensions``
+     Set this if ``units="auto"``. Can be either a string or a dimension object from
+     ``yt.units.dimensions``.
 
 Debugging a Derived Field
 -------------------------
@@ -236,7 +284,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        return data["sillythings"] + data["humorousthings"]**2.0
 
@@ -244,7 +292,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        data._debug()
        return data["sillythings"] + data["humorousthings"]**2.0

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,6 +104,43 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
+.. _bfields-frontend:
+
+Creating Aliases for Magnetic Fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Setting up access to the magnetic fields in your dataset requires special
+handling, because in different unit systems magnetic fields have different
+dimensions (see :ref:`bfields` for an explanation). If your dataset includes 
+magnetic fields, you should include them in ``known_other_fields``, but do
+not set up aliases for them--instead use the special handling function 
+:meth:`~yt.fields.magnetic_field.setup_magnetic_field_aliases`. It takes
+as arguments the ``FieldInfoContainer`` instance, the field type of the 
+frontend, and the list of magnetic fields from the frontend. Here is an
+example of how this is implemented in the FLASH frontend:
+
+.. code-block:: python
+
+    class FLASHFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ...
+            ("magx", (b_units, [], "B_x")), # Note there is no alias here
+            ("magy", (b_units, [], "B_y")),
+            ("magz", (b_units, [], "B_z")),
+            ...
+        )
+
+        def setup_fluid_fields(self):
+            from yt.fields.magnetic_field import \
+                setup_magnetic_field_aliases
+            ...
+            setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"])    
+
+This function should always be imported and called from within the 
+``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this 
+function is used, converting between magnetic fields in different 
+:ref:`unit systems <unit_systems>` will be handled automatically. 
+
 Data Localization Structures
 ----------------------------
 

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,9 +41,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -60,9 +58,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -78,9 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -124,9 +118,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -148,9 +140,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -177,9 +167,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -205,9 +193,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -227,9 +213,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -245,9 +229,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -264,9 +246,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -276,9 +256,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -295,9 +273,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -323,9 +299,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -345,9 +319,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
@@ -363,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -375,7 +345,7 @@
     "# Define the width of the image\n",
     "W = 1.5*ds.domain_width[0]\n",
     "# Define the number of pixels to render\n",
-    "Npixels = 512 "
+    "Npixels = 512"
    ]
   },
   {
@@ -388,9 +358,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
@@ -404,9 +372,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam.show()"
@@ -429,9 +395,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -448,9 +412,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -467,9 +429,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -489,9 +449,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -509,9 +467,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -539,9 +495,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -566,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -586,9 +538,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -611,9 +561,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -629,9 +577,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -650,7 +596,6 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "* Units will be incorrect unless the data has already been converted to cgs.\n",
     "* Particles may be difficult to integrate.\n",
     "* Data must already reside in memory before loading it in to yt, whether it is generated at runtime or loaded from disk. \n",
     "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",
@@ -668,7 +613,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -680,4 +625,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -803,7 +803,8 @@
 
 .. rubric:: Caveats
 
-* Please be careful that the units are correctly utilized; yt assumes cgs.
+* Please be careful that the units are correctly utilized; yt assumes cgs by default, but conversion to
+  other :ref:`unit systems <unit_systems>` is also possible. 
 
 .. _loading-gadget-data:
 
@@ -1065,7 +1066,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Some functions may behave oddly, and parallelism will be disappointing or
   non-existent in most cases.
 * No consistency checks are performed on the index
@@ -1123,7 +1123,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
@@ -1176,7 +1175,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
@@ -1230,7 +1228,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
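
The revised caveat above ties into the unit_system machinery introduced later in this
changeset: Dataset.__init__ grows a unit_system keyword (see the static_output.py hunk
below). A minimal sketch of what that enables, assuming the keyword is passed through
yt.load and using a sample dataset path:

    import yt

    # Ask for fields in SI units instead of the cgs default; unit_system="code"
    # would instead keep everything in code units.
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100", unit_system="mks")
    ad = ds.all_data()
    print(ad["density"].units)   # kg/m**3 rather than g/cm**3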

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 doc/source/reference/field_list.rst
--- a/doc/source/reference/field_list.rst
+++ b/doc/source/reference/field_list.rst
@@ -3084,10 +3084,10 @@
       def _vorticity_x(field, data):
           f  = (data[ftype, "velocity_z"][sl_center,sl_right,sl_center] -
                 data[ftype, "velocity_z"][sl_center,sl_left,sl_center]) \
-                / (div_fac*just_one(data["index", "dy"]).in_cgs())
+                / (div_fac*just_one(data["index", "dy"]))
           f -= (data[ftype, "velocity_y"][sl_center,sl_center,sl_right] -
                 data[ftype, "velocity_y"][sl_center,sl_center,sl_left]) \
-                / (div_fac*just_one(data["index", "dz"].in_cgs()))
+                / (div_fac*just_one(data["index", "dz"]))
           new_field = data.ds.arr(np.zeros_like(data[ftype, "velocity_z"],
                                                 dtype=np.float64),
                                   f.units)
@@ -3220,7 +3220,7 @@
       def _cylindrical_r(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_r(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'cylindrical_theta')
@@ -3251,7 +3251,7 @@
       def _cylindrical_z(field, data):
           normal = data.get_field_parameter("normal")
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_cgs()
+          return data.ds.arr(get_cyl_z(coords, normal), "code_length").in_base(unit_system.name)
   
 
 ('index', 'disk_angle')
@@ -3424,7 +3424,7 @@
 
       def _spherical_r(field, data):
           coords = get_periodic_rvec(data)
-          return data.ds.arr(get_sph_r(coords), "code_length").in_cgs()
+          return data.ds.arr(get_sph_r(coords), "code_length").in_base(unit_system.name)
   
 
 ('index', 'spherical_theta')
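
The pattern in the hunks above is uniform: hard-coded .in_cgs() calls become
.in_base(unit_system.name), so derived coordinate fields come out in whatever unit
system the dataset was loaded with. The difference, sketched with standalone YTArrays:

    from yt.units.yt_array import YTArray

    r = YTArray([1.0, 2.0], "km")
    print(r.in_cgs())         # [100000. 200000.] cm -- always cgs
    print(r.in_base("mks"))   # [1000. 2000.] m -- follows the named system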

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -176,5 +176,8 @@
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position
 
+from yt.units.unit_systems import UnitSystem
+from yt.units.unit_object import unit_system_registry
+
 from yt.analysis_modules.list_modules import \
     amods
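
Exposing UnitSystem and unit_system_registry at the top level lets users define their
own systems. A hedged sketch, assuming the constructor takes (name, length, mass, time)
positionally and registers the new system under its name (the "mysystem" name and its
units are made up for illustration):

    import yt

    # after this, datasets could be loaded with unit_system="mysystem"
    mysystem = yt.UnitSystem("mysystem", "Mpc", "Msun", "Gyr")
    print(mysystem["length"])    # Mpc
    print(mysystem["velocity"])  # Mpc/Gyr, derived from the base units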

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -133,7 +133,7 @@
             if num_cells == 0:
                 continue
             vol = chunk["cell_volume"].in_cgs().v
-            EM = (chunk["density"]/mp).v**2
+            EM = (chunk["density"]/mp).in_cgs().v**2
             EM *= 0.5*(1.+self.X_H)*self.X_H*vol
 
             if isinstance(self.Zmet, string_types):
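
The small change above matters because chunk["density"] carries whatever units the
frontend supplies; stripping the value with .v before converting bakes those units
into the emission measure. The failure mode, sketched with standalone quantities:

    from yt.units.yt_array import YTQuantity

    mp = YTQuantity(1.6726e-24, "g")        # proton mass
    rho = YTQuantity(1.0, "Msun/kpc**3")    # a density in non-cgs units

    wrong = (rho / mp).v**2                 # raw value in (Msun/kpc**3/g)**2
    right = (rho / mp).in_cgs().v**2        # number density squared, in cm**-6
    print(wrong, right)                     # differ by tens of orders of magnitude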

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -426,7 +426,7 @@
             # will not be necessary at all, as the final conversion will occur
             # at the display layer.
             if not dl.units.is_dimensionless:
-                dl.convert_to_units("cm")
+                dl.convert_to_units(self.ds.unit_system["length"])
         v = np.empty((chunk.ires.size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
             d = chunk[field] * dl

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+import yt.units.dimensions as ytdims
 from yt.utilities.exceptions import \
     YTUnitConversionError, \
     YTFieldUnitError, \
@@ -45,7 +46,8 @@
     YTFieldNotParseable, \
     YTFieldNotFound, \
     YTFieldTypeNotFound, \
-    YTDataSelectorNotImplemented
+    YTDataSelectorNotImplemented, \
+    YTDimensionalityError
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -186,11 +188,11 @@
             self.center = None
             return
         elif isinstance(center, YTArray):
-            self.center = self.ds.arr(center.in_cgs())
+            self.center = self.ds.arr(center.copy())
             self.center.convert_to_units('code_length')
         elif isinstance(center, (list, tuple, np.ndarray)):
             if isinstance(center[0], YTQuantity):
-                self.center = self.ds.arr([c.in_cgs() for c in center])
+                self.center = self.ds.arr([c.copy() for c in center])
                 self.center.convert_to_units('code_length')
             else:
                 self.center = self.ds.arr(center, 'code_length')
@@ -935,7 +937,7 @@
         s = "%s (%s): " % (self.__class__.__name__, self.ds)
         for i in self._con_args:
             try:
-                s += ", %s=%s" % (i, getattr(self, i).in_cgs())
+                s += ", %s=%s" % (i, getattr(self, i).in_base(unit_system=self.ds.unit_system))
             except AttributeError:
                 s += ", %s=%s" % (i, getattr(self, i))
         return s
@@ -1207,13 +1209,19 @@
                         # infer the units from the units of the data we get back
                         # from the field function and use these units for future
                         # field accesses
-                        units = str(getattr(fd, 'units', ''))
+                        units = getattr(fd, 'units', '')
+                        if units == '':
+                            dimensions = ytdims.dimensionless
+                        else:
+                            dimensions = units.dimensions
+                            units = str(units.get_base_equivalent(self.ds.unit_system.name))
+                        if fi.dimensions != dimensions:
+                            raise YTDimensionalityError(fi.dimensions, dimensions)
                         fi.units = units
                         self.field_data[field] = self.ds.arr(fd, units)
                         msg = ("Field %s was added without specifying units, "
                                "assuming units are %s")
                         mylog.warn(msg % (fi.name, units))
-                        continue
                     try:
                         fd.convert_to_units(fi.units)
                     except AttributeError:
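
The new branch still infers units from whatever the field function returns, but now
also compares the inferred dimensions against the field's declared dimensions before
accepting them. The guard logic in isolation (the declared dimensions here are a
made-up example):

    import yt.units.dimensions as ytdims
    from yt.units.yt_array import YTArray
    from yt.utilities.exceptions import YTDimensionalityError

    fd = YTArray([1.0, 2.0], "km/s")   # what a field function returned
    declared = ytdims.pressure         # what the field was registered as

    units = getattr(fd, "units", "")
    if units == "":
        dimensions = ytdims.dimensionless
    else:
        dimensions = units.dimensions  # (length)/(time) here
    if declared != dimensions:
        raise YTDimensionalityError(declared, dimensions)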

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -40,7 +40,7 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
-from yt.units.unit_object import Unit
+from yt.units.unit_object import Unit, unit_system_registry
 from yt.units.unit_registry import UnitRegistry
 from yt.fields.derived_field import \
     ValidateSpatial
@@ -59,6 +59,7 @@
 from yt.units.yt_array import \
     YTArray, \
     YTQuantity
+from yt.units.unit_systems import create_code_unit_system
 from yt.data_objects.region_expression import \
     RegionExpression
 
@@ -192,7 +193,8 @@
             obj = _cached_datasets[apath]
         return obj
 
-    def __init__(self, filename, dataset_type=None, file_style=None, units_override=None):
+    def __init__(self, filename, dataset_type=None, file_style=None, 
+                 units_override=None, unit_system="cgs"):
         """
         Base class for generating new output types.  Principally consists of
         a *filename* and a *dataset_type* which will be passed on to children.
@@ -235,6 +237,11 @@
         self.set_units()
         self._setup_coordinate_handler()
 
+        create_code_unit_system(self)
+        if unit_system == "code":
+            unit_system = str(self)
+        self.unit_system = unit_system_registry[unit_system]
+
         # Because we need an instantiated class to check the ds's existence in
         # the cache, we move that check to here from __new__.  This avoids
         # double-instantiation.
@@ -870,7 +877,7 @@
         """Converts an array into a :class:`yt.units.yt_array.YTArray`
 
         The returned YTArray will be dimensionless by default, but can be
-        cast to arbitray units using the ``input_units`` keyword argument.
+        cast to arbitrary units using the ``input_units`` keyword argument.
 
         Parameters
         ----------
@@ -916,7 +923,7 @@
         """Converts an scalar into a :class:`yt.units.yt_array.YTQuantity`
 
         The returned YTQuantity will be dimensionless by default, but can be
-        cast to arbitray units using the ``input_units`` keyword argument.
+        cast to arbitrary units using the ``input_units`` keyword argument.
 
         Parameters
         ----------

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -553,7 +553,7 @@
                 values = self.arr(*values)
             else:
                 values = self.arr(values)
-        values = values.in_cgs()
+        values = values.in_base()
 
         if outputs is None:
             outputs = self.all_outputs

diff -r 27611b3a740e1c2768ecf7ae5f5c3ec1e9472fa8 -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 yt/fields/angular_momentum.py
--- a/yt/fields/angular_momentum.py
+++ b/yt/fields/angular_momentum.py
@@ -37,6 +37,7 @@
 
 @register_field_plugin
 def setup_angular_momentum(registry, ftype = "gas", slice_info = None):
+    unit_system = registry.ds.unit_system
     def _specific_angular_momentum_x(field, data):
         xv, yv, zv = obtain_velocities(data, ftype)
         rv = obtain_rvec(data)
@@ -60,26 +61,26 @@
 
     registry.add_field((ftype, "specific_angular_momentum_x"),
                         function=_specific_angular_momentum_x,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_y"),
                         function=_specific_angular_momentum_y,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
     registry.add_field((ftype, "specific_angular_momentum_z"),
                         function=_specific_angular_momentum_z,
-                        units="cm**2/s",
+                        units=unit_system["specific_angular_momentum"],
                         validators=[ValidateParameter("center")])
 
     create_magnitude_field(registry, "specific_angular_momentum",
-                           "cm**2 / s", ftype=ftype)
+                           unit_system["specific_angular_momentum"], ftype=ftype)
 
     def _angular_momentum_x(field, data):
         return data[ftype, "cell_mass"] \
              * data[ftype, "specific_angular_momentum_x"]
     registry.add_field((ftype, "angular_momentum_x"),
                        function=_angular_momentum_x,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_y(field, data):
@@ -87,7 +88,7 @@
              * data[ftype, "specific_angular_momentum_y"]
     registry.add_field((ftype, "angular_momentum_y"),
                        function=_angular_momentum_y,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     def _angular_momentum_z(field, data):
@@ -95,8 +96,8 @@
              * data[ftype, "specific_angular_momentum_z"]
     registry.add_field((ftype, "angular_momentum_z"),
                        function=_angular_momentum_z,
-                       units="g * cm**2 / s",
+                       units=unit_system["angular_momentum"],
                        validators=[ValidateParameter('center')])
 
     create_magnitude_field(registry, "angular_momentum",
-                           "g * cm**2 / s", ftype=ftype)
+                           unit_system["angular_momentum"], ftype=ftype)
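
Each dimension name is looked up in the dataset's unit system, so the same field
definition yields cgs, SI, or code units as appropriate. With the registry importable
(see the yt/__init__.py hunk above), the lookup can be tried directly, assuming the
default "mks" system is registered:

    from yt.units.unit_object import unit_system_registry

    us = unit_system_registry["mks"]
    print(us["specific_angular_momentum"])  # m**2/s
    print(us["angular_momentum"])           # kg*m**2/s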

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a28311113b6a/
Changeset:   a28311113b6a
Branch:      yt
User:        mzingale
Date:        2016-03-05 13:44:21+00:00
Summary:     merge
Affected #:  5 files

diff -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 -r a28311113b6a3d2e92a51b13e68e84117fa6b41d yt/utilities/lib/bounding_volume_hierarchy.pxd
--- a/yt/utilities/lib/bounding_volume_hierarchy.pxd
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pxd
@@ -38,8 +38,6 @@
 cdef class BVH:
     cdef BVHNode* root
     cdef Triangle* triangles
-    cdef np.int64_t leaf_size
-    cdef np.float64_t[:, ::1] vertices
     cdef np.int64_t _partition(self, np.int64_t begin, np.int64_t end,
                                np.int64_t ax, np.float64_t split) nogil
     cdef void intersect(self, Ray* ray) nogil

diff -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 -r a28311113b6a3d2e92a51b13e68e84117fa6b41d yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -10,6 +10,10 @@
         MAX_NUM_TRI
     int triangulate_hex[MAX_NUM_TRI][3]
 
+# define some constants
+cdef np.float64_t DETERMINANT_EPS = 1.0e-10
+cdef np.float64_t INF = np.inf
+cdef np.int64_t   LEAF_SIZE = 16
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -58,7 +62,7 @@
 
     cdef np.float64_t det, inv_det
     det = dot(e1, P)
-    if(det > -1.0e-10 and det < 1.0e-10): 
+    if(det > -DETERMINANT_EPS and det < DETERMINANT_EPS): 
         return False
     inv_det = 1.0 / det
 
@@ -78,7 +82,7 @@
 
     cdef np.float64_t t = dot(e2, Q) * inv_det
 
-    if(t > 1.0e-10 and t < ray.t_far):
+    if(t > DETERMINANT_EPS and t < ray.t_far):
         ray.t_far = t
         ray.data_val = (1.0 - u - v)*tri.d0 + u*tri.d1 + v*tri.d2
         ray.elem_id = tri.elem_id
@@ -93,8 +97,8 @@
 cdef np.int64_t ray_bbox_intersect(Ray* ray, const BBox bbox) nogil:
 # https://tavianator.com/fast-branchless-raybounding-box-intersections/
 
-    cdef np.float64_t tmin = -1.0e300
-    cdef np.float64_t tmax =  1.0e300
+    cdef np.float64_t tmin = -INF
+    cdef np.float64_t tmax =  INF
  
     cdef np.int64_t i
     cdef np.float64_t t1, t2
@@ -127,9 +131,7 @@
                   np.float64_t[:, ::1] vertices,
                   np.int64_t[:, ::1] indices,
                   np.float64_t[:, ::1] field_data):
-        
-        self.leaf_size = 16
-        self.vertices = vertices
+
         cdef np.int64_t num_elem = indices.shape[0]
         cdef np.int64_t num_tri = 12*num_elem
 
@@ -162,7 +164,7 @@
         self.root = self._recursive_build(0, num_tri)
 
     cdef void _recursive_free(self, BVHNode* node) nogil:
-        if node.end - node.begin > self.leaf_size:
+        if node.end - node.begin > LEAF_SIZE:
             self._recursive_free(node.left)
             self._recursive_free(node.right)
         free(node)
@@ -224,7 +226,7 @@
         # check for leaf
         cdef np.int64_t i, hit
         cdef Triangle* tri
-        if (node.end - node.begin) <= self.leaf_size:
+        if (node.end - node.begin) <= LEAF_SIZE:
             for i in range(node.begin, node.end):
                 tri = &(self.triangles[i])
                 hit = ray_triangle_intersect(ray, tri)
@@ -245,7 +247,7 @@
         self._get_node_bbox(node, begin, end)
         
         # check for leaf
-        if (end - begin) <= self.leaf_size:
+        if (end - begin) <= LEAF_SIZE:
             return node
         
         # we use the "split in the middle of the longest axis approach"
@@ -300,7 +302,7 @@
         for i in prange(N):
             for j in range(3):
                 ray.origin[j] = origins[N*j + i]
-            ray.t_far = 1e30
+            ray.t_far = INF
             ray.t_near = 0.0
             ray.data_val = 0
             bvh.intersect(ray)
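
The constants hunk swaps the old +/-1.0e300 sentinels for true infinities, which the
branchless slab test handles naturally. A NumPy sketch of that algorithm (the helper
name is hypothetical, not yt API):

    import numpy as np

    def ray_bbox_hit(origin, direction, lo, hi):
        # Branchless slab test: intersect the ray's t-interval with each
        # axis-aligned slab; the ray hits the box iff the intervals overlap.
        with np.errstate(divide="ignore"):
            inv = 1.0 / direction            # +/-inf for axis-parallel rays
        t1 = (lo - origin) * inv
        t2 = (hi - origin) * inv
        tmin = np.minimum(t1, t2).max()      # latest slab entry
        tmax = np.maximum(t1, t2).min()      # earliest slab exit
        return tmax >= max(tmin, 0.0)

    print(ray_bbox_hit(np.array([0., 0., -5.]), np.array([0., 0., 1.]),
                       np.array([-1., -1., -1.]), np.array([1., 1., 1.])))  # True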

diff -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 -r a28311113b6a3d2e92a51b13e68e84117fa6b41d yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -844,6 +844,15 @@
     else:
         res_vector = np.resize(vector,(3,1))
     return res_vector
+    
+def normalize_vector(vector):
+    # rescale the input vector to unit length; a zero-length vector
+    # is returned unchanged instead of triggering a divide-by-zero
+    
+    L2 = np.atleast_1d(np.linalg.norm(vector))
+    L2[L2==0] = 1.0
+    vector = vector / L2
+    return vector
 
 def get_sph_theta(coords, normal):
     # The angle (theta) with respect to the normal (J), is the arccos
@@ -852,6 +861,10 @@
     
     res_normal = resize_vector(normal, coords)
 
+    # ensure the normal vector is normalized,
+    # since arccos requires a unit vector
+    res_normal = normalize_vector(res_normal)
+
     tile_shape = [1] + list(coords.shape)[1:]
     
     J = np.tile(res_normal,tile_shape)
@@ -871,6 +884,7 @@
     # yprime-component and the xprime-component of the coordinate 
     # vector.
 
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, coords)
@@ -890,6 +904,7 @@
     # gives a vector of magnitude equal to the cylindrical radius.
 
     res_normal = resize_vector(normal, coords)
+    res_normal = normalize_vector(res_normal)
 
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
@@ -902,6 +917,7 @@
     # gives the cylindrical height.
 
     res_normal = resize_vector(normal, coords)
+    res_normal = normalize_vector(res_normal)
     
     tile_shape = [1] + list(coords.shape)[1:]
     J = np.tile(res_normal, tile_shape)
@@ -917,6 +933,7 @@
 def get_cyl_r_component(vectors, theta, normal):
     # The r of a vector is the vector dotted with rhat
 
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -933,6 +950,7 @@
 def get_cyl_theta_component(vectors, theta, normal):
     # The theta component of a vector is the vector dotted with thetahat
     
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -948,6 +966,7 @@
 
 def get_cyl_z_component(vectors, normal):
     # The z component of a vector is the vector dotted with zhat
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_zprime = resize_vector(zprime, vectors)
@@ -959,7 +978,7 @@
 
 def get_sph_r_component(vectors, theta, phi, normal):
     # The r component of a vector is the vector dotted with rhat
-    
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -980,7 +999,7 @@
 
 def get_sph_phi_component(vectors, phi, normal):
     # The phi component of a vector is the vector dotted with phihat
-
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
@@ -996,7 +1015,7 @@
 
 def get_sph_theta_component(vectors, theta, phi, normal):
     # The theta component of a vector is the vector dotted with thetahat
-    
+    normal = normalize_vector(normal)
     (xprime, yprime, zprime) = get_ortho_basis(normal)
 
     res_xprime = resize_vector(xprime, vectors)
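
Every coordinate helper above now normalizes its normal vector first, since the arccos
in get_sph_theta is only meaningful for unit vectors. The new helper is also safe on
degenerate input:

    import numpy as np
    from yt.utilities.math_utils import normalize_vector

    print(normalize_vector(np.array([0.0, 3.0, 4.0])))  # [0.  0.6 0.8]
    print(normalize_vector(np.zeros(3)))                # zero vector returned as-is,
                                                        # no divide-by-zero warning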

diff -r 17cca52083e98321d9b71c6a3fb0f9fceca30407 -r a28311113b6a3d2e92a51b13e68e84117fa6b41d yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -51,7 +51,8 @@
     kwargs = {'lens_type': params['lens_type']}
     if render_source.zbuffer is not None:
         kwargs['zbuffer'] = render_source.zbuffer.z
-        args[4][:] = render_source.zbuffer.rgba[:]
+        args[4][:] = np.reshape(render_source.zbuffer.rgba[:], \
+            (camera.resolution[0], camera.resolution[1], 4))
     else:
         kwargs['zbuffer'] = np.ones(params['image'].shape[:2], "float64")
 


https://bitbucket.org/yt_analysis/yt/commits/1e60fd4794dc/
Changeset:   1e60fd4794dc
Branch:      yt
User:        mzingale
Date:        2016-03-09 17:28:49+00:00
Summary:     update camera to scene
Affected #:  1 file

diff -r a28311113b6a3d2e92a51b13e68e84117fa6b41d -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -47,7 +47,7 @@
     for lens_type in ['plane-parallel', 'perspective']:
         frame = 0
 
-        cam = Camera(ds, lens_type=lens_type)
+        cam = sc.add_camera(ds, lens_type=lens_type)
         cam.resolution = (1000, 1000)
         cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
         cam.switch_orientation(normal_vector=[1., 0., 0.],


https://bitbucket.org/yt_analysis/yt/commits/b7611c34a4ca/
Changeset:   b7611c34a4ca
Branch:      yt
User:        mzingale
Date:        2016-03-09 17:51:26+00:00
Summary:     merge
Affected #:  38 files

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,6 +246,8 @@
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
+.. _region-reference:
+
 3D Objects
 """"""""""
 
@@ -256,8 +258,6 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
-.. _region-reference:
-
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -1,5 +1,5 @@
 import yt
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 
 field = ("gas", "density")
@@ -19,7 +19,7 @@
 tf.grey_opacity = True
 
 # Plane-parallel lens
-cam = Camera(ds, lens_type='plane-parallel')
+cam = sc.add_camera(ds, lens_type='plane-parallel')
 # Set the resolution of the final projection.
 cam.resolution = [250, 250]
 # Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
@@ -32,13 +32,12 @@
 # Set the width of the camera, where width[0] and width[1] specify the length and
 # height of the final projection, while width[2] in the plane-parallel lens is not used.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
-cam = Camera(ds, lens_type='perspective')
+cam = sc.add_camera(ds, lens_type='perspective')
 cam.resolution = [250, 250]
 # Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
 # specified by camera width) along the positive x direction.
@@ -49,13 +48,12 @@
 # height of the final projection, while width[2] specifies the distance between the
 # camera and the final image.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
-cam = Camera(ds, lens_type='stereo-perspective')
+cam = sc.add_camera(ds, lens_type='stereo-perspective')
 # Set the size ratio of the final projection to be 2:1, since the stereo-perspective lens
 # will generate the final image with the left-eye and right-eye views joined together.
 cam.resolution = [500, 250]
@@ -65,14 +63,13 @@
 cam.set_width(ds.domain_width*0.5)
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-cam = Camera(dd, lens_type='fisheye')
+cam = sc.add_camera(dd, lens_type='fisheye')
 cam.resolution = [250, 250]
 v, c = ds.find_max(field)
 cam.set_position(c - 0.0005 * ds.domain_width)
@@ -80,13 +77,12 @@
                        north_vector=north_vector)
 cam.set_width(ds.domain_width)
 cam.lens.fov = 360.0
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
-cam = Camera(ds, lens_type='spherical')
+cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since the spherical lens
 # will generate the final image with a length of 2*pi and a height of pi.
 cam.resolution = [500, 250]
@@ -97,13 +93,12 @@
                        north_vector=north_vector)
 # In a (stereo)spherical camera, the camera width is not used since the entire volume
 # will be rendered
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
-cam = Camera(ds, lens_type='stereo-spherical')
+cam = sc.add_camera(ds, lens_type='stereo-spherical')
 # Set the size ratio of the final projection to be 4:1, since the stereo-spherical lens
 # will generate the final image with the left-eye and right-eye views joined together.
 cam.resolution = [1000, 250]
@@ -114,7 +109,6 @@
 # will be rendered
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-spherical.png', sigma_clip=6.0)
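
The recurring edit in this script: cameras are no longer built free-standing and
attached via sc.camera; sc.add_camera both creates and registers them. The minimal
workflow after this change, using the sample dataset from the tutorial notebook below:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)                  # scene plus a default volume source
    cam = sc.add_camera(ds, lens_type="perspective")
    cam.resolution = (512, 512)
    sc.render()
    sc.save("perspective.png", sigma_clip=6.0)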

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -175,6 +175,7 @@
 .. toctree::
    :hidden:
 
+   intro/index
    installing
    yt Quickstart <quickstart/index>
    yt3differences

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -19,7 +19,7 @@
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
   will probably want to use the bash all-in-one installation script.  This builds 
-  python, numpy, matplotlib, and yt from source to set up an isolated scientific 
+  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific 
   python environment inside of a single folder in your home directory. See
   :ref:`install-script` for more details.
 
@@ -35,9 +35,9 @@
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via linux package managers so long as you
+  for python environments set up via Linux package managers so long as you
   have the necessary compilers installed (e.g. the ``build-essentials``
-  package on debian and ubuntu).
+  package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -199,13 +199,12 @@
 
 If you do not want to install the full anaconda python distribution, you can
 install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
+visit http://repo.continuum.io/miniconda/ and download the ``Miniconda-latest-...``
+script for your platform and system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  bash Miniconda-3.3.0-Linux-x86_64.sh
+  bash Miniconda-latest-Linux-x86_64.sh
 
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
@@ -214,7 +213,28 @@
 
   conda install yt
 
-which will install yt along with all of its dependencies.
+which will install the stable branch of yt along with all of its dependencies.
+
+If you would like to install the latest development version of yt, you can download
+it from our custom anaconda channel:
+
+.. code-block:: bash
+
+  conda install -c http://use.yt/with_conda/ yt
+
+New packages for the development branch are built after every pull request is
+merged. To make sure you are running the latest version, it's recommended to
+update frequently:
+
+.. code-block:: bash
+
+  conda update -c http://use.yt/with_conda/ yt
+
+The location of our channel can be added to ``.condarc`` to avoid retyping it
+during each *conda* invocation. Please refer to the `Conda Manual
+<http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
+detailed instructions.
+
 
 Obtaining Source Code
 ^^^^^^^^^^^^^^^^^^^^^
@@ -252,7 +272,7 @@
 
   git clone https://github.com/conda/conda-recipes
 
-Then navigate to the repository root and invoke `conda build`:
+Then navigate to the repository root and invoke ``conda build``:
 
 .. code-block:: bash
 
@@ -290,7 +310,7 @@
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython cython h5py nose sympy
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
@@ -366,7 +386,7 @@
   yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
+any changes from Bitbucket, and then recompile yt if necessary.
 
 .. _testing-installation:
 

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -49,7 +49,7 @@
 the :ref:`units system <units>` works to tag every individual field and 
 quantity with a physical unit (e.g. cm, AU, kpc, Mpc, etc.), and it describes 
 ways of analyzing multiple chronological data outputs from the same underlying 
-dataset known as :ref:`time series <time-series-analysis`.  Lastly, it includes 
+dataset known as :ref:`time series <time-series-analysis>`.  Lastly, it includes 
 information on how to enable yt to operate :ref:`in parallel over multiple 
 processors simultaneously <parallel-computation>`.
 

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/reference/index.rst
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -14,5 +14,6 @@
    command-line
    api/api
    configuration
+   python_introduction
    field_list
    changelog

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -22,7 +22,6 @@
     "from IPython.core.display import Image\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
     "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
-    "from yt.visualization.volume_rendering.camera import Camera\n",
     "\n",
     "def showme(im):\n",
     "    # screen out NaNs\n",

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -18,7 +18,7 @@
     "import yt\n",
     "import numpy as np\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
-    "from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource\n",
+    "from yt.visualization.volume_rendering.api import Scene, VolumeSource\n",
     "\n",
     "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
     "sc = yt.create_scene(ds)"
@@ -199,7 +199,7 @@
    },
    "outputs": [],
    "source": [
-    "cam = Camera(ds, lens_type='perspective')\n",
+    "cam = sc.add_camera(ds, lens_type='perspective')\n",
     "\n",
     "# Standing at (x=0.05, y=0.5, z=0.5), we look at the area of x>0.05 (with some open angle\n",
     "# specified by camera width) along the positive x direction.\n",
@@ -213,7 +213,6 @@
     "# The width determines the opening angle\n",
     "cam.set_width(ds.domain_width * 0.5)\n",
     "\n",
-    "sc.camera = cam\n",
     "print (sc.camera)"
    ]
   },

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,7 +16,6 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -292,7 +292,6 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import Camera
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -304,15 +303,12 @@
     ms.cmap = 'Eos A'
    
     # Create a perspective Camera
-    cam = Camera(ds, lens_type='perspective')
+    cam = sc.add_camera(ds, lens_type='perspective')
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
     north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
-    # tell our scene to use it
-    sc.camera = cam
-   
     # increase the default resolution
     cam.resolution = (800, 800)
    
@@ -329,7 +325,7 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    from yt.visualization.volume_rendering.api import MeshSource, Scene
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -337,16 +333,13 @@
     sc = Scene()
 
     # set up our Camera
-    cam = Camera(ds)
+    cam = sc.add_camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
                      ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
     cam.set_width = ds.arr([8.0, 8.0, 8.0], 'code_length')
     cam.resolution = (800, 800)
 
-    # tell the scene to use it
-    sc.camera = cam
-
     # create two distinct MeshSources from 'connect1' and 'connect2'
     ms1 = MeshSource(ds, ('connect1', 'diffused'))
     ms2 = MeshSource(ds, ('connect2', 'diffused'))
@@ -407,7 +400,7 @@
 .. code-block:: python
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    from yt.visualization.volume_rendering.api import MeshSource
     import pylab as plt
 
     NUM_STEPS = 127
@@ -432,7 +425,7 @@
 	# set up the camera here. these values were arrived by
 	# calling pitch, yaw, and roll in the notebook until I
 	# got the angle I wanted.
-	cam = Camera(ds)
+	cam = sc.add_camera(ds)
 	camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
 	cam.focus = ds.domain_center
 	north_vector = ds.arr([-0.3032476, -0.71782557, 0.62671153], 'dimensionless')

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -236,12 +236,13 @@
 The :class:`~yt.visualization.volume_rendering.camera.Camera` object
 is what it sounds like, a camera within the Scene.  It possesses the 
 quantities:
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
- * ``north_vector`` - a vector defining the "up" direction in an image
- * :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
+ 
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
+* ``north_vector`` - a vector defining the "up" direction in an image
+* :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
 
 .. _camera_movement:
 
@@ -482,7 +483,7 @@
 their combination, are described below.
 
 MPI Parallelization
-+++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^
 
 Currently the volume renderer is parallelized using MPI to decompose the volume
 by attempting to split up the
@@ -516,7 +517,7 @@
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
 OpenMP Parallelization
-++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^
 
 The volume rendering is also parallelized using the OpenMP interface in Cython.
 While the MPI parallelization is done using domain decomposition, the OpenMP
@@ -532,7 +533,7 @@
 by default by modifying the environment variable OMP_NUM_THREADS. 
 
 Running in Hybrid MPI + OpenMP
-++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The two methods for volume rendering parallelization can be used together to
 leverage large supercomputing resources.  When choosing how to balance the
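
For the hybrid mode described above, a typical launch line (the script name is
hypothetical, and this assumes MPI plus mpi4py are available; OMP_NUM_THREADS is the
variable mentioned in the OpenMP section):

    OMP_NUM_THREADS=8 mpirun -np 4 python render_script.py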

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -1,54 +1,100 @@
 import sys
 import os
 import yaml
-import multiprocessing as mp
+import multiprocessing
 import nose
-import glob
-from contextlib import closing
+from cStringIO import StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
 
-def run_job(argv):
-    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
-        cur_stderr = sys.stderr
-        sys.stderr = fstderr
-        answer = argv[0]
+class NoseWorker(multiprocessing.Process):
+
+    def __init__(self, task_queue, result_queue):
+        multiprocessing.Process.__init__(self)
+        self.task_queue = task_queue
+        self.result_queue = result_queue
+
+    def run(self):
+        proc_name = self.name
+        while True:
+            next_task = self.task_queue.get()
+            if next_task is None:
+                print("%s: Exiting" % proc_name)
+                self.task_queue.task_done()
+                break
+            print('%s: %s' % (proc_name, next_task))
+            result = next_task()
+            self.task_queue.task_done()
+            self.result_queue.put(result)
+        return
+
+class NoseTask(object):
+    def __init__(self, argv):
+        self.argv = argv
+        self.name = argv[0]
+
+    def __call__(self):
+        old_stderr = sys.stderr
+        sys.stderr = mystderr = StringIO()
         test_dir = ytcfg.get("yt", "test_data_dir")
         answers_dir = os.path.join(test_dir, "answers")
-        if not os.path.isdir(os.path.join(answers_dir, answer)):
-            nose.run(argv=argv + ['--answer-store'],
+        if '--with-answer-testing' in self.argv and \
+                not os.path.isdir(os.path.join(answers_dir, self.name)):
+            nose.run(argv=self.argv + ['--answer-store'],
                      addplugins=[AnswerTesting()], exit=False)
-        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
-    sys.stderr = cur_stderr
+        nose.run(argv=self.argv, addplugins=[AnswerTesting()], exit=False)
+        sys.stderr = old_stderr
+        return mystderr.getvalue()
 
-if __name__ == "__main__":
+    def __str__(self):
+        return 'WILL DO self.name = %s' % self.name
+
+
+def generate_tasks_input():
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
     with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
         tests = yaml.load(obj)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+    base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
-    args = [['unittests', '-v', '-s', '--nologcapture']]
-    for answer in list(tests.keys()):
+    args = []
+
+    for test in list(tests["other_tests"].keys()):
+        args.append([test] + tests["other_tests"][test])
+    for answer in list(tests["answer_tests"].keys()):
         argv = [answer]
         argv += base_argv
-        argv.append('--xunit-file=%s.xml' % answer)
         argv.append('--answer-name=%s' % answer)
-        argv += tests[answer]
+        argv += tests["answer_tests"][answer]
         args.append(argv)
-    
-    processes = [mp.Process(target=run_job, args=(args[i],))
-                 for i in range(len(args))]
-    for p in processes:
-        p.start()
-    for p in processes:
-        p.join(timeout=7200)
-        if p.is_alive():
-            p.terminate()
-            p.join(timeout=30)
-    for fname in glob.glob("*.out"):
-        with open(fname, 'r') as fin:
-            print(fin.read())
-        os.remove(fname)
+
+    args = [item + ['-s', '--nologcapture', '--xunit-file=%s.xml' % item[0]]
+            for item in args]
+    return args
+
+if __name__ == "__main__":
+    # multiprocessing.log_to_stderr(logging.DEBUG)
+    tasks = multiprocessing.JoinableQueue()
+    results = multiprocessing.Queue()
+
+    num_consumers = 6  # TODO 
+    consumers = [NoseWorker(tasks, results) for i in range(num_consumers)]
+    for w in consumers:
+        w.start()
+
+    num_jobs = 0
+    for job in generate_tasks_input():
+        tasks.put(NoseTask(job))
+        num_jobs += 1
+
+    for i in range(num_consumers):
+        tasks.put(None)
+
+    tasks.join()
+
+    while num_jobs:
+        result = results.get()
+        print(result)
+        num_jobs -= 1
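
The rewrite replaces one process per test job with a fixed pool of workers draining a
JoinableQueue, using None as a poison pill. The pattern in miniature:

    import multiprocessing

    class Worker(multiprocessing.Process):
        def __init__(self, tasks, results):
            multiprocessing.Process.__init__(self)
            self.tasks, self.results = tasks, results

        def run(self):
            while True:
                task = self.tasks.get()
                if task is None:           # poison pill: shut down cleanly
                    self.tasks.task_done()
                    break
                self.results.put(task * task)
                self.tasks.task_done()

    if __name__ == "__main__":
        tasks = multiprocessing.JoinableQueue()
        results = multiprocessing.Queue()
        workers = [Worker(tasks, results) for _ in range(2)]
        for w in workers:
            w.start()
        for n in range(5):
            tasks.put(n)
        for _ in workers:                  # one pill per worker
            tasks.put(None)
        tasks.join()                       # blocks until every task_done()
        for _ in range(5):
            print(results.get())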

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -1,51 +1,63 @@
-local_artio_270:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_270:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_270:
-  - yt/frontends/athena
+  local_athena_270:
+    - yt/frontends/athena
 
-local_chombo_270:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_270:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_270:
-  - yt/frontends/enzo
+  local_enzo_270:
+    - yt/frontends/enzo
 
-local_fits_270:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_270:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_270:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_270:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_270:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_270:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_270:
-  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_270:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+  
+  local_owls_270:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_270:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_270:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_270:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_owls_270:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_orion_270:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_270:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_270:
+    - yt/frontends/ytdata
 
-local_pw_270:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
-
-local_tipsy_270:
-  - yt/frontends/tipsy/tests/test_outputs.py
-
-local_varia_270:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-
-local_orion_270:
-  - yt/frontends/boxlib/tests/test_orion.py
-
-local_ramses_270:
-  - yt/frontends/ramses/tests/test_outputs.py
-
-local_ytdata_270:
-  - yt/frontends/ytdata
\ No newline at end of file
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'
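
The flat job list becomes a two-level mapping, which is what the new
generate_tasks_input() in nose_runner.py expects. Reading it back is straightforward:

    import yaml

    with open("tests/tests_2.7.yaml") as fh:
        tests = yaml.load(fh)

    # answer tests and everything else now live under separate top-level keys
    for name, paths in tests["answer_tests"].items():
        print(name, paths)
    print(tests["other_tests"]["unittests"])   # ['-v']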

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa tests/tests_3.4.yaml
--- a/tests/tests_3.4.yaml
+++ b/tests/tests_3.4.yaml
@@ -1,49 +1,57 @@
-local_artio_340:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_340:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_340:
-  - yt/frontends/athena
+  local_athena_340:
+    - yt/frontends/athena
 
-local_chombo_340:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_340:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_340:
-  - yt/frontends/enzo
+  local_enzo_340:
+    - yt/frontends/enzo
 
-local_fits_340:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_340:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_340:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_340:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_340:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_340:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_340:
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_340:
+    - yt/frontends/owls_subfind/tests/test_outputs.py
 
-local_owls_340:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_owls_340:
+    - yt/frontends/owls/tests/test_outputs.py
 
-local_pw_340:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_pw_340:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
 
-local_tipsy_340:
-  - yt/frontends/tipsy/tests/test_outputs.py
+  local_tipsy_340:
+    - yt/frontends/tipsy/tests/test_outputs.py
 
-local_varia_340:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+  local_varia_340:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_orion_340:
-  - yt/frontends/boxlib/tests/test_orion.py
+  local_orion_340:
+    - yt/frontends/boxlib/tests/test_orion.py
 
-local_ramses_340:
-  - yt/frontends/ramses/tests/test_outputs.py
+  local_ramses_340:
+    - yt/frontends/ramses/tests/test_outputs.py
 
-local_ytdata_340:
-  - yt/frontends/ytdata
\ No newline at end of file
+  local_ytdata_340:
+    - yt/frontends/ytdata
+
+other_tests:
+  unittests:
+    - '-v'
+  cookbook:
+    - 'doc/source/cookbook/tests/test_cookbook.py'
+    - '-P'

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -15,10 +15,11 @@
     ThermalPhotonModel, PhotonList, EventList, \
     convert_old_file, merge_files
 from yt.config import ytcfg
-from yt.testing import requires_file
+from yt.testing import \
+    requires_file, \
+    assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-from numpy.testing import assert_array_equal
 from numpy.random import RandomState
 from yt.units.yt_array import uconcatenate
 import os
@@ -117,11 +118,11 @@
             arr1 = photons1[k]
             arr2 = photons2[k]
             arr3 = photons3[k]
-        yield assert_array_equal, arr1, arr2
-        yield assert_array_equal, arr1, arr3
+        assert_almost_equal(arr1, arr2)
+        assert_almost_equal(arr1, arr3)
     for k in events1.keys():
-        yield assert_array_equal, events1[k], events2[k]
-        yield assert_array_equal, events1[k], events3[k]
+        assert_almost_equal(events1[k], events2[k])
+        assert_almost_equal(events1[k], events3[k])
 
     nevents = 0
 

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -43,6 +43,15 @@
     PlutoFieldInfo
 
 
+def is_chombo_hdf5(fn):
+    try:
+        with h5py.File(fn, 'r') as fileh:
+            valid = "Chombo_global" in fileh["/"]
+    except (KeyError, IOError, ImportError):
+        return False
+    return valid
+
+
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
     __slots__ = ["_level_id", "stop_index"]
@@ -351,6 +360,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
@@ -507,6 +519,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
 
         if isinstance(args[0], six.string_types):
@@ -649,6 +664,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 
@@ -703,6 +721,9 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
 
+        if not is_chombo_hdf5(args[0]):
+            return False
+
         pluto_ini_file_exists = False
         orion2_ini_file_exists = False
 

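The new is_chombo_hdf5 guard lets each of the Chombo-family _is_valid methods
reject non-HDF5 inputs cheaply, before any of the ini-file heuristics run. A
sketch of calling it directly (the file names here are hypothetical):

.. code-block:: python

   from yt.frontends.chombo.data_structures import is_chombo_hdf5

   # Only a readable HDF5 file containing the "Chombo_global" group returns
   # True; missing or non-HDF5 files return False instead of raising.
   for fn in ["plt0010.3d.hdf5", "pluto.ini"]:
       print(fn, is_chombo_hdf5(fn))
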
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -82,6 +82,7 @@
     def _set_units(self):
         self.unit_registry = UnitRegistry()
         self.unit_registry.add("code_time", 1.0, dimensions.time)
+        self.unit_registry.add("code_length", 1.0, dimensions.length)
         if self.cosmological_simulation:
             # Instantiate EnzoCosmology object for units and time conversions.
             self.cosmology = \
@@ -107,6 +108,7 @@
         else:
             self.time_unit = self.quan(self.parameters["TimeUnits"], "s")
         self.unit_registry.modify("code_time", self.time_unit)
+        self.unit_registry.modify("code_length", self.length_unit)
 
     def get_time_series(self, time_data=True, redshift_data=True,
                         initial_time=None, final_time=None,

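With "code_length" registered and kept in sync with length_unit, quantities
created through the simulation object's unit registry can now convert to code
units. A sketch, assuming the enzo_tiny_cosmology sample data is available
locally:

.. code-block:: python

   import yt

   # Load the whole simulation rather than a single output.
   es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")
   # "code_length" now lives in es.unit_registry alongside "code_time".
   print(es.quan(1.0, "code_length").in_units("Mpc"))
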
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -860,3 +860,55 @@
         return 'unitary'
     else:
         return u
+
+def get_hash(infile, algorithm='md5', BLOCKSIZE=65536):
+    """Generate file hash without reading in the entire file at once.
+
+    Original code licensed under MIT.  Source:
+    http://pythoncentral.io/hashing-files-with-python/
+
+    Parameters
+    ----------
+    infile : str
+        File of interest (including the path).
+    algorithm : str (optional)
+        Hash algorithm of choice. Defaults to 'md5'.
+    BLOCKSIZE : int (optional)
+        How much data in bytes to read in at once.
+
+    Returns
+    -------
+    hash : str
+        The hash of the file.
+
+    Examples
+    --------
+    >>> import yt.funcs as funcs
+    >>> funcs.get_hash('/path/to/test.png')
+    'd38da04859093d430fa4084fd605de60'
+
+    """
+    import hashlib
+
+    try:
+        hasher = getattr(hashlib, algorithm)()
+    except AttributeError:
+        raise NotImplementedError("'%s' not available!  Available algorithms: %s" %
+                                  (algorithm, hashlib.algorithms))
+
+    filesize   = os.path.getsize(infile)
+    iterations = int(float(filesize)/float(BLOCKSIZE))
+
+    pbar = get_pbar('Generating %s hash' % algorithm, iterations)
+
+    iter = 0
+    with open(infile,'rb') as f:
+        buf = f.read(BLOCKSIZE)
+        while len(buf) > 0:
+            hasher.update(buf)
+            buf = f.read(BLOCKSIZE)
+            iter += 1
+            pbar.update(iter)
+        pbar.finish()
+
+    return hasher.hexdigest()

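A short usage sketch; the path is the hypothetical one from the docstring, and
any algorithm exposed by hashlib can be requested:

.. code-block:: python

   from yt.funcs import get_hash

   # md5 is the default; the file is read in 64 kB blocks behind a progress
   # bar, so large files never need to fit in memory.
   print(get_hash("/path/to/test.png"))
   print(get_hash("/path/to/test.png", algorithm="sha256"))
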
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -171,7 +171,7 @@
         units = ('g/cm**3', 'cm/s', 'cm/s', 'cm/s'),
         particle_fields=None, particle_field_units=None,
         negative = False, nprocs = 1, particles = 0, length_unit=1.0,
-        unit_system="cgs"):
+        unit_system="cgs", bbox=None):
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
         ndims = [ndims, ndims, ndims]
@@ -208,7 +208,7 @@
             data['io', 'particle_mass'] = (np.random.random(particles), 'g')
         data['number_of_particles'] = particles
     ug = load_uniform_grid(data, ndims, length_unit=length_unit, nprocs=nprocs,
-                           unit_system=unit_system)
+                           unit_system=unit_system, bbox=bbox)
     return ug
 
 _geom_transforms = {

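With the new keyword, fake test datasets no longer have to live on the unit
cube. A minimal sketch:

.. code-block:: python

   import numpy as np
   from yt.testing import fake_random_ds

   # Place the fake dataset on [-1, 1]**3 instead of the default [0, 1]**3.
   bbox = np.array([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]])
   ds = fake_random_ds(16, bbox=bbox)
   print(ds.domain_left_edge, ds.domain_right_edge)
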
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -346,7 +346,8 @@
                 if registry is None:
                     pass
                 else:
-                    input_array.units.registry = registry
+                    units = Unit(str(input_array.units), registry=registry)
+                    input_array.units = units
             elif isinstance(input_units, Unit):
                 input_array.units = input_units
             else:
@@ -357,7 +358,7 @@
         elif iterable(input_array) and input_array:
             if isinstance(input_array[0], YTArray):
                 return YTArray(np.array(input_array, dtype=dtype),
-                               input_array[0].units)
+                               input_array[0].units, registry=registry)
 
         # Input array is an already formed ndarray instance
         # We first cast to be our class type
@@ -369,7 +370,10 @@
             # Nothing provided. Make dimensionless...
             units = Unit()
         elif isinstance(input_units, Unit):
-            units = input_units
+            if registry and registry is not input_units.registry:
+                units = Unit(str(input_units), registry=registry)
+            else:
+                units = input_units
         else:
             # units kwarg set, but it's not a Unit object.
             # don't handle all the cases here, let the Unit class handle if
@@ -1408,6 +1412,31 @@
     v = validate_numpy_wrapper_units(v, [arr1, arr2])
     return v
 
+def unorm(data):
+    """Matrix or vector norm that preserves units
+
+    This is a wrapper around np.linalg.norm that preserves units.
+    """
+    return YTArray(np.linalg.norm(data), data.units)
+
+def uvstack(arrs):
+    """Stack arrays in sequence vertically (row wise) while preserving units
+
+    This is a wrapper around np.vstack that preserves units.
+    """
+    v = np.vstack(arrs)
+    v = validate_numpy_wrapper_units(v, arrs)
+    return v
+
+def uhstack(arrs):
+    """Stack arrays in sequence horizontally (column wise) while preserving units
+
+    This is a wrapper around np.hstack that preserves units.
+    """
+    v = np.hstack(arrs)
+    v = validate_numpy_wrapper_units(v, arrs)
+    return v
+
 def array_like_field(data, x, field):
     field = data._determine_fields(field)[0]
     if isinstance(field, tuple):

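The new wrappers mirror their numpy counterparts but return YTArrays that
carry the input units. A sketch:

.. code-block:: python

   import numpy as np
   from yt.units.yt_array import YTArray, unorm, uvstack

   a = YTArray(np.array([3.0, 4.0]), "km")
   b = YTArray(np.array([1.0, 2.0]), "km")

   print(unorm(a))               # 5.0 km; np.linalg.norm alone drops the units
   print(uvstack([a, b]).units)  # km, stacked to shape (2, 2)
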
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -826,7 +826,7 @@
                                           verbose=True)
         for k in new_result:
             if self.decimals is None:
-                assert_equal(new_result[k], old_result[k])
+                assert_almost_equal(new_result[k], old_result[k])
             else:
                 assert_allclose_units(new_result[k], old_result[k],
                                       10**(-self.decimals))

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
--- a/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
+++ b/yt/utilities/lib/tests/test_bounding_volume_hierarchy.py
@@ -2,7 +2,7 @@
 import numpy as np
 from yt.utilities.lib.bounding_volume_hierarchy import BVH, \
     test_ray_trace
-from yt.visualization.volume_rendering.api import Camera
+from yt.visualization.volume_rendering.api import Camera, Scene
 from yt.testing import requires_file
 
 
@@ -12,13 +12,13 @@
     W = np.array([8.0, 8.0])
     N = np.array([800, 800])
     dx = W / N
-    
+
     x_points = np.linspace((-N[0]/2 + 0.5)*dx[0], (N[0]/2 - 0.5)*dx[0], N[0])
     y_points = np.linspace((-N[1]/2 + 0.5)*dx[1], (N[1]/2 - 0.5)*dx[0], N[1])
-    
+
     X, Y = np.meshgrid(x_points, y_points)
-    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()]) 
-    vec_origins = result.T + camera.position
+    result = np.dot(camera.unit_vectors[0:2].T, [X.ravel(), Y.ravel()])
+    vec_origins = camera.scene.arr(result.T, 'unitary') + camera.position
     return np.array(vec_origins), np.array(normal_vector)
 
 
@@ -36,7 +36,7 @@
 
     bvh = BVH(vertices, indices, field_data)
 
-    cam = Camera()
+    cam = Camera(Scene())
     cam.set_position(np.array([8.0, 8.0, 8.0]))
     cam.focus = np.array([0.0, 0.0, 0.0])
     origins, direction = get_rays(cam)

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -11,19 +11,49 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 # ----------------------------------------------------------------------------
 
-from yt.funcs import iterable, mylog, ensure_numpy_array
+from yt.funcs import iterable, ensure_numpy_array
 from yt.utilities.orientation import Orientation
-from yt.units.yt_array import YTArray
-from yt.units.unit_registry import UnitParseError
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.math_utils import get_rotation_matrix
 from yt.extern.six import string_types
 from .utils import data_source_or_all
-from .lens import lenses
+from .lens import \
+    lenses, \
+    Lens
 import numpy as np
+from numbers import Number as numeric_type
+
+def _sanitize_camera_property_units(value, scene):
+    if iterable(value):
+        if len(value) == 1:
+            return _sanitize_camera_property_units(value[0], scene)
+        elif isinstance(value, YTArray) and len(value) == 3:
+            return scene.arr(value).in_units('unitary')
+        elif (len(value) == 2 and isinstance(value[0], numeric_type)
+              and isinstance(value[1], string_types)):
+            return scene.arr([scene.arr(value[0], value[1]).in_units('unitary')]*3)
+        if len(value) == 3:
+            if all([iterable(v) for v in value]):
+                if all([isinstance(v[0], numeric_type) and
+                        isinstance(v[1], string_types) for v in value]):
+                    return scene.arr(
+                        [scene.arr(v[0], v[1]) for v in value])
+                else:
+                    raise RuntimeError(
+                        "Cannot set camera width to invalid value '%s'" % (value, ))
+            return scene.arr(value, 'unitary')
+    else:
+        if isinstance(value, (YTQuantity, YTArray)):
+            return scene.arr([value.d]*3, value.units).in_units('unitary')
+        elif isinstance(value, numeric_type):
+            return scene.arr([value]*3, 'unitary')
+    raise RuntimeError(
+        "Cannot set camera width to invalid value '%s'" % (value, ))
 
 
 class Camera(Orientation):
-
     r"""A representation of a point of view into a Scene.
 
     It is defined by a position (the location of the camera
@@ -35,6 +65,8 @@
 
     Parameters
     ----------
+    scene: A :class:`yt.visualization.volume_rendering.scene.Scene` object
+        A scene object that the camera will be attached to.
     data_source: :class:`AMR3DData` or :class:`Dataset`, optional
         This is the source to be rendered, which can be any arbitrary yt
         data object or dataset.
@@ -50,20 +82,22 @@
 
     Examples
     --------
-    
+
     In this example, the camera is set using defaults that are chosen
     to be reasonable for the argument Dataset.
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> from yt.visualization.volume_rendering.api import Scene
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> cam = Camera(ds)
+    >>> sc = Scene()
+    >>> cam = sc.add_camera(ds)
 
     Here, we set the camera properties manually:
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
-    >>> cam = Camera()
+    >>> from yt.visualization.volume_rendering.api import Scene
+    >>> sc = Scene()
+    >>> cam = sc.add_camera()
     >>> cam.position = np.array([0.5, 0.5, -1.0])
     >>> cam.focus = np.array([0.5, 0.5, 0.0])
     >>> cam.north_vector = np.array([1.0, 0.0, 0.0])
@@ -71,9 +105,10 @@
     Finally, we create a camera with a non-default lens:
 
     >>> import yt
-    >>> from yt.visualization.volume_rendering.api import Camera
+    >>> from yt.visualization.volume_rendering.api import Scene
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> cam = Camera(ds, lens_type='perspective')
+    >>> sc = Scene()
+    >>> cam = sc.add_camera(ds, lens_type='perspective')
 
     """
 
@@ -83,24 +118,33 @@
     _position = None
     _resolution = None
 
-    def __init__(self, data_source=None, lens_type='plane-parallel',
+    def __init__(self, scene, data_source=None, lens_type='plane-parallel',
                  auto=False):
-        """Initialize a Camera Instance"""
+        # import this here to avoid an import cycle
+        from .scene import Scene
+        if not isinstance(scene, Scene):
+            raise RuntimeError(
+                'The first argument passed to the Camera initializer is a '
+                '%s object, expected a %s object' % (type(scene), Scene))
+        self.scene = scene
         self.lens = None
         self.north_vector = None
         self.normal_vector = None
         self.light = None
+        self.data_source = data_source_or_all(data_source)
         self._resolution = (512, 512)
-        self._width = np.array([1.0, 1.0, 1.0])
-        self._focus = np.array([0.0]*3)
-        self._position = np.array([1.0]*3)
-        if data_source is not None:
-            data_source = data_source_or_all(data_source)
-            self._focus = data_source.ds.domain_center
-            self._position = data_source.ds.domain_right_edge
-            self._width = 1.5*data_source.ds.domain_width
-            self._domain_center = data_source.ds.domain_center
-            self._domain_width = data_source.ds.domain_width
+        if self.data_source is not None:
+            self.scene.set_new_unit_registry(self.data_source.ds.unit_registry)
+            self._focus = self.data_source.ds.domain_center
+            self._position = self.data_source.ds.domain_right_edge
+            self._width = 1.5*self.data_source.ds.domain_width
+            self._domain_center = self.data_source.ds.domain_center
+            self._domain_width = self.data_source.ds.domain_width
+        else:
+            self._focus = scene.arr([0.0, 0.0, 0.0], 'unitary')
+            self._width = scene.arr([1.0, 1.0, 1.0], 'unitary')
+            self._position = scene.arr([1.0, 1.0, 1.0], 'unitary')
+
         if auto:
             self.set_defaults_from_data_source(data_source)
 
@@ -110,20 +154,27 @@
         self.set_lens(lens_type)
 
     def position():
-        doc = '''The position is the location of the camera in
-               the coordinate system of the simulation. This needs
-               to be either a YTArray or a numpy array. If it is a 
-               numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units 
-               automatically. '''
+        doc = '''
+        The location of the camera. 
+
+        Parameters
+        ----------
+
+        position : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the position is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+        '''
 
         def fget(self):
             return self._position
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._position = value
+            position = _sanitize_camera_property_units(value, self.scene)
+            if np.array_equal(position, self.focus):
+                raise RuntimeError(
+                    'Cannot set the camera focus and position to the same value')
+            self._position = position
             self.switch_orientation()
 
         def fdel(self):
@@ -132,18 +183,24 @@
     position = property(**position())
 
     def width():
-        doc = '''The width of the region that will be seen in the image. 
-               This needs to be either a YTArray or a numpy array. If it 
-               is a numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units automatically. '''
+        doc = '''The width of the region that will be seen in the image.
+
+        Parameters
+        ----------
+
+        width : number, YTQuantity, iterable, or 3 element YTArray
+            The width of the volume rendering in the horizontal, vertical, and
+            depth directions. If a scalar, assumes that the width is the same in
+            all three directions. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+        '''
 
         def fget(self):
             return self._width
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._width = value
+            width = _sanitize_camera_property_units(value, self.scene)
+            self._width = width
             self.switch_orientation()
 
         def fdel(self):
@@ -153,19 +210,28 @@
     width = property(**width())
 
     def focus():
-        doc = '''The focus defines the point the Camera is pointed at. This needs
-               to be either a YTArray or a numpy array. If it is a 
-               numpy array, it is assumed to be in code units. If it
-               is a YTArray, it will be converted to code units 
-               automatically. '''
+        doc = '''
+        The focus defines the point the Camera is pointed at.
+
+        Parameters
+        ----------
+
+        focus : number, YTQuantity, iterable, or 3 element YTArray
+            The point the Camera is pointed at. If a scalar, assumes that the
+            focus is the same in all three coordinates. If an iterable, must
+            contain only scalars or
+            (length, unit) tuples.
+        '''
 
         def fget(self):
             return self._focus
 
         def fset(self, value):
-            if isinstance(value, YTArray):
-                value = value.in_units("code_length")
-            self._focus = value
+            focus = _sanitize_camera_property_units(value, self.scene)
+            if np.array_equal(focus, self.position):
+                raise RuntimeError(
+                    'Cannot set the camera focus and position to the same value')
+            self._focus = focus
             self.switch_orientation()
 
         def fdel(self):
@@ -175,14 +241,15 @@
 
     def resolution():
         doc = '''The resolution is the number of pixels in the image that
-               will be produced. '''
+               will be produced. Must be a 2-tuple of integers or an integer.'''
 
         def fget(self):
             return self._resolution
 
         def fset(self, value):
             if iterable(value):
-                assert (len(value) == 2)
+                if len(value) != 2:
+                    raise RuntimeError(
+                        "Resolution must be a 2-tuple of integers or an integer")
             else:
                 value = (value, value)
             self._resolution = value
@@ -193,6 +260,19 @@
         return locals()
     resolution = property(**resolution())
 
+    def set_resolution(self, resolution):
+        """
+        The resolution is the number of pixels in the image that
+        will be produced. Must be a 2-tuple of integers or an integer.
+        """
+        self.resolution = resolution
+
+    def get_resolution(self):
+        """
+        Returns the resolution of the volume rendering
+        """
+        return self.resolution
+
     def _get_sampler_params(self, render_source):
         lens_params = self.lens._get_sampler_params(self, render_source)
         lens_params.update(width=self.width)
@@ -214,10 +294,14 @@
             'stereo-spherical'
 
         """
-        if lens_type not in lenses:
-            mylog.error("Lens type not available")
-            raise RuntimeError()
-        self.lens = lenses[lens_type]()
+        if isinstance(lens_type, Lens):
+            self.lens = lens_type
+        elif lens_type not in lenses:
+            raise RuntimeError(
+                "Lens type %s not in available list of available lens "
+                "types (%s)" % (lens_type, list(lenses.keys())))
+        else:
+            self.lens = lenses[lens_type]()
         self.lens.set_camera(self)
 
     def set_defaults_from_data_source(self, data_source):
@@ -264,41 +348,61 @@
         Parameters
         ----------
 
-        width : YTQuantity or 3 element YTArray
+        width : number, YTQuantity, iterable, or 3 element YTArray
             The width of the volume rendering in the horizontal, vertical, and
             depth directions. If a scalar, assumes that the width is the same in
-            all three directions.
+            all three directions. If an iterable, must contain only scalars or
+            (length, unit) tuples.
         """
-        try:
-            width = width.in_units('code_length')
-        except (AttributeError, UnitParseError):
-            raise ValueError(
-                'Volume rendering width must be a YTArray that can be '
-                'converted to code units')
-
-        if not iterable(width):
-            width = YTArray([width.d]*3, width.units)  # Can't get code units.
         self.width = width
         self.switch_orientation()
 
+    def get_width(self):
+        """Return the current camera width"""
+        return self.width
+
     def set_position(self, position, north_vector=None):
         r"""Set the position of the camera.
 
         Parameters
         ----------
 
-        position : array_like
-            The new position
+        position : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the position is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+
         north_vector : array_like, optional
             The 'up' direction for the plane of rays.  If not specific,
             calculated automatically.
 
         """
-
         self.position = position
         self.switch_orientation(normal_vector=self.focus - self.position,
                                 north_vector=north_vector)
 
+    def get_position(self):
+        """Return the current camera position"""
+        return self.position
+
+    def set_focus(self, new_focus):
+        """Sets the point the Camera is pointed at.
+
+        Parameters
+        ----------
+
+        focus : number, YTQuantity, iterable, or 3 element YTArray
+            If a scalar, assumes that the focus is the same in all three
+            coordinates. If an iterable, must contain only scalars or
+            (length, unit) tuples.
+
+        """
+        self.focus = new_focus
+
+    def get_focus(self):
+        """Returns the current camera focus"""
+        return self.focus
+
     def switch_orientation(self, normal_vector=None, north_vector=None):
         r"""Change the view direction based on any of the orientation parameters.
 
@@ -366,8 +470,9 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # rotate the camera by pi / 4 radians:
         >>> cam.rotate(np.pi/4.0)  
         >>> # rotate the camera about the y-axis instead of cam.north_vector:
@@ -419,10 +524,11 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> sc.add_camera()
         >>> # pitch the camera by pi / 4 radians:
-        >>> cam.pitch(np.pi/4.0)  
+        >>> cam.pitch(np.pi/4.0)
         >>> # pitch the camera about the origin instead of its own position:
         >>> cam.pitch(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
 
@@ -446,10 +552,11 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera()
         >>> # yaw the camera by pi / 4 radians:
-        >>> cam.yaw(np.pi/4.0)  
+        >>> cam.yaw(np.pi/4.0)
         >>> # yaw the camera about the origin instead of its own position:
         >>> cam.yaw(np.pi/4.0, rot_center=np.array([0.0, 0.0, 0.0]))
 
@@ -473,8 +580,9 @@
 
         >>> import yt
         >>> import numpy as np
-        >>> from yt.visualization.volume_rendering.api import Camera
-        >>> cam = Camera()
+        >>> from yt.visualization.volume_rendering.api import Scene
+        >>> sc = Scene()
+        >>> cam = sc.add_camera(ds)
         >>> # roll the camera by pi / 4 radians:
         >>> cam.roll(np.pi/4.0)  
         >>> # roll the camera about the origin instead of its own position:
@@ -584,9 +692,10 @@
         --------
 
         >>> import yt
-        >>> from yt.visualization.volume_rendering.api import Camera
+        >>> from yt.visualization.volume_rendering.api import Scene
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> cam = Camera(ds)
+        >>> sc = Scene()
+        >>> cam = sc.add_camera(ds)
         >>> cam.zoom(1.1)
 
         """
@@ -611,7 +720,6 @@
         --------
 
         >>> import yt
-        >>> from yt.visualization.volume_rendering.api import Camera
         >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
         >>> im, sc = yt.volume_render(ds)
         >>> cam = sc.camera

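Taken together, the sanitizer means the camera properties accept plain
scalars, (value, unit) tuples, and YTArrays interchangeably. A sketch using
the sample dataset from the docstrings above:

.. code-block:: python

   import yt
   from yt.visualization.volume_rendering.api import Scene

   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
   sc = Scene()
   cam = sc.add_camera(ds)

   cam.set_width((0.1, 'Mpc'))   # one (length, unit) tuple, applied to all axes
   cam.position = [(0.7, 'Mpc'), (0.7, 'Mpc'), (1.0, 'Mpc')]
   print(cam.get_width())        # converted to the scene's 'unitary' units
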
diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -17,8 +17,8 @@
 from yt.funcs import mylog
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
-from yt.units.yt_array import YTArray
 from yt.data_objects.image_array import ImageArray
+from yt.units.yt_array import unorm, uvstack
 from yt.utilities.math_utils import get_rotation_matrix
 import numpy as np
 
@@ -60,13 +60,14 @@
         unit_vectors = camera.unit_vectors
         width = camera.width
         center = camera.focus
-        self.box_vectors = YTArray([unit_vectors[0] * width[0],
-                                    unit_vectors[1] * width[1],
-                                    unit_vectors[2] * width[2]])
-        self.origin = center - 0.5 * width.dot(YTArray(unit_vectors, ""))
+
+        self.box_vectors = camera.scene.arr(
+            [unit_vectors[0] * width[0],
+             unit_vectors[1] * width[1],
+             unit_vectors[2] * width[2]])
+        self.origin = center - 0.5 * width.dot(unit_vectors)
         self.back_center = center - 0.5 * width[2] * unit_vectors[2]
         self.front_center = center + 0.5 * width[2] * unit_vectors[2]
-
         self.set_viewpoint(camera)
 
     def set_viewpoint(self, camera):
@@ -99,9 +100,12 @@
         else:
             image = self.new_image(camera)
 
+        vp_pos = np.concatenate(
+            [camera.inv_mat.ravel('F').d,
+             self.back_center.ravel().in_units('code_length').d])
+
         sampler_params =\
-            dict(vp_pos=np.concatenate([camera.inv_mat.ravel('F'),
-                                        self.back_center.ravel()]),
+            dict(vp_pos=vp_pos,
                  vp_dir=self.box_vectors[2],  # All the same
                  center=self.back_center,
                  bounds=(-camera.width[0] / 2.0, camera.width[0] / 2.0,
@@ -190,8 +194,9 @@
             camera.resolution[0], camera.resolution[1], 3)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width))
+
         # Rescale the ray to be long enough to cover the entire domain
         vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
             (max_length / camera.width[2])
@@ -208,7 +213,7 @@
         sampler_params =\
             dict(vp_pos=positions,
                  vp_dir=vectors,
-                 center=self.back_center.d,
+                 center=self.back_center,
                  bounds=(0.0, 1.0, 0.0, 1.0),
                  x_vec=uv,
                  y_vec=uv,
@@ -303,8 +308,8 @@
         uv = np.ones(3, dtype='float64')
 
         image = self.new_image(camera)
-        vectors_comb = np.vstack([vectors_left, vectors_right])
-        positions_comb = np.vstack([positions_left, positions_right])
+        vectors_comb = uvstack([vectors_left, vectors_right])
+        positions_comb = uvstack([positions_left, positions_right])
 
         image.shape = (camera.resolution[0], camera.resolution[1], 4)
         vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
@@ -313,7 +318,7 @@
         sampler_params =\
             dict(vp_pos=positions_comb,
                  vp_dir=vectors_comb,
-                 center=self.back_center.d,
+                 center=self.back_center,
                  bounds=(0.0, 1.0, 0.0, 1.0),
                  x_vec=uv,
                  y_vec=uv,
@@ -331,7 +336,8 @@
         north_vec = camera.unit_vectors[1]
         normal_vec = camera.unit_vectors[2]
 
-        angle_disparity = - np.arctan2(disparity, camera.width[2])
+        angle_disparity = - np.arctan2(disparity.in_units(camera.width.units),
+                                       camera.width[2])
         R = get_rotation_matrix(angle_disparity, north_vec)
 
         east_vec_rot = np.dot(R, east_vec)
@@ -363,8 +369,9 @@
             single_resolution_x, camera.resolution[1], 3)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width) + np.abs(self.disparity.d)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width)
+                      + np.abs(self.disparity))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = (sample_x + sample_y + normal_vecs * camera.width[2]) * \
             (max_length / camera.width[2])
@@ -394,9 +401,9 @@
         px_right, py_right, dz_right = self._get_px_py_dz(
             camera, pos, res, self.disparity)
 
-        px = np.vstack([px_left, px_right])
-        py = np.vstack([py_left, py_right])
-        dz = np.vstack([dz_left, dz_right])
+        px = uvstack([px_left, px_right])
+        py = uvstack([py_left, py_right])
+        dz = uvstack([dz_left, dz_right])
 
         return px, py, dz
 
@@ -594,8 +601,8 @@
         vectors[:, :, 2] = np.sin(py)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = vectors * max_length
 
@@ -625,7 +632,7 @@
         sampler_params = dict(
             vp_pos=positions,
             vp_dir=vectors,
-            center=self.back_center.d,
+            center=self.back_center,
             bounds=(0.0, 1.0, 0.0, 1.0),
             x_vec=dummy,
             y_vec=dummy,
@@ -706,8 +713,9 @@
         vectors[:, :, 2] = np.sin(py)
 
         # The maximum possible length of ray
-        max_length = np.linalg.norm(camera.position - camera._domain_center) \
-            + 0.5 * np.linalg.norm(camera._domain_width) + np.abs(self.disparity.d)
+        max_length = (unorm(camera.position - camera._domain_center)
+                      + 0.5 * unorm(camera._domain_width)
+                      + np.abs(self.disparity))
         # Rescale the ray to be long enough to cover the entire domain
         vectors = vectors * max_length
 
@@ -741,8 +749,8 @@
 
         dummy = np.ones(3, dtype='float64')
 
-        vectors_comb = np.vstack([vectors, vectors])
-        positions_comb = np.vstack([positions_left, positions_right])
+        vectors_comb = uvstack([vectors, vectors])
+        positions_comb = uvstack([positions_left, positions_right])
 
         image.shape = (camera.resolution[0], camera.resolution[1], 4)
         vectors_comb.shape = (camera.resolution[0], camera.resolution[1], 3)
@@ -751,7 +759,7 @@
         sampler_params = dict(
             vp_pos=positions_comb,
             vp_dir=vectors_comb,
-            center=self.back_center.d,
+            center=self.back_center,
             bounds=(0.0, 1.0, 0.0, 1.0),
             x_vec=dummy,
             y_vec=dummy,

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -13,7 +13,6 @@
 
 
 from .scene import Scene
-from .camera import Camera
 from .render_source import VolumeSource
 from .transfer_functions import ProjectionTransferFunction
 from .utils import data_source_or_all
@@ -149,7 +148,7 @@
         data_source.ds.field_dependencies.update(deps)
         fields = [weightfield, weight]
         vol.set_fields(fields)
-    camera = Camera(data_source)
+    camera = sc.add_camera(data_source)
     camera.set_width(width)
     if not iterable(resolution):
         resolution = [resolution]*2
@@ -172,7 +171,6 @@
     camera.switch_orientation(normal_vector,
                               north_vector)
 
-    sc.camera = camera
     sc.add_source(vol)
 
     vol.set_sampler(camera)

diff -r 1e60fd4794dcfdb16d73c50bfdfe7a1c53309316 -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -114,8 +114,7 @@
     >>> sc = Scene()
     >>> source = VolumeSource(ds.all_data(), 'density')
     >>> sc.add_source(source)
-    >>> cam = Camera(ds)
-    >>> sc.camera = cam
+    >>> sc.add_camera()
     >>> im = sc.render()
 
     """
@@ -203,7 +202,8 @@
         """Set the source's fields to render
 
         Parameters
-        ---------
+        ----------
+
         fields: field name or list of field names
             The field or fields to render
         no_ghost: boolean
@@ -946,10 +946,10 @@
 
     def __init__(self, data_source, alpha=0.3, cmap='algae',
                  min_level=None, max_level=None):
-        data_source = data_source_or_all(data_source)
+        self.data_source = data_source_or_all(data_source)
         corners = []
         levels = []
-        for block, mask in data_source.blocks:
+        for block, mask in self.data_source.blocks:
             block_corners = np.array([
                 [block.LeftEdge[0], block.LeftEdge[1], block.LeftEdge[2]],
                 [block.RightEdge[0], block.LeftEdge[1], block.LeftEdge[2]],
@@ -976,7 +976,7 @@
 
         colors = apply_colormap(
             levels*1.0,
-            color_bounds=[0, data_source.ds.index.max_level],
+            color_bounds=[0, self.data_source.ds.index.max_level],
             cmap_name=cmap)[0, :, :]*alpha/255.
         colors[:, 3] = alpha
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/80db1a0f9d50/
Changeset:   80db1a0f9d50
Branch:      yt
User:        ngoldbaum
Date:        2016-03-28 18:19:42+00:00
Summary:     Merging
Affected #:  107 files

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -795,8 +795,8 @@
    rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
    rather than ``SpecialGrid.__init__()``.
  * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
+   that occur on an object.  See :ref:`docstrings` below for a fiducial example
+   of a docstring.
  * Use only one top-level import per line. Unless there is a good reason not to,
    imports should happen at the top of the file, after the copyright blurb.
  * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -204,7 +204,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an apporpriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated. 
 
 .. code-block:: python
 
@@ -232,7 +232,7 @@
 as all lines with the same group number as ``group#[i]``.
 
 The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
-``wavelength`` that contains the cummulative absorption spectrum generated 
+``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
 Saving a Spectrum Fit

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -7,7 +7,7 @@
 disconnected structures within a dataset.  This works by first creating a 
 single contour over the full range of the contouring field, then continually 
 increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contoures, 
+of the field.  As disconnected structures are identified as separate contours, 
 the routine continues recursively through each object, creating a hierarchy of 
 clumps.  Individual clumps can be kept or removed from the hierarchy based on 
 the result of user-specified functions, such as checking for gravitational 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/cosmology_calculator.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/cosmology_calculator.rst
@@ -0,0 +1,75 @@
+.. _cosmology-calculator:
+
+Cosmology Calculator
+====================
+
+The cosmology calculator can be used to calculate cosmological distances and
+times given a set of cosmological parameters.  A cosmological dataset, `ds`,
+will automatically have a cosmology calculator configured with the correct
+parameters associated with it as `ds.cosmology`.  A standalone
+:class:`~yt.utilities.cosmology.Cosmology` calculator object can be created
+in the following way:
+
+.. code-block:: python
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+Once created, various distance calculations as well as conversions between
+redshift and time are available:
+
+.. notebook-cell::
+
+   from yt.utilities.cosmology import Cosmology
+
+   co = Cosmology(hubble_constant=0.7, omega_matter=0.3,
+                  omega_lambda=0.7, omega_curvature=0.0)
+
+   # Hubble distance (c / h)
+   print("hubble distance", co.hubble_distance())
+
+   # distance from z = 0 to 0.5
+   print("comoving radial distance", co.comoving_radial_distance(0, 0.5).in_units("Mpc/h"))
+
+   # transverse distance
+   print("transverse distance", co.comoving_transverse_distance(0, 0.5).in_units("Mpc/h"))
+
+   # comoving volume
+   print("comoving volume", co.comoving_volume(0, 0.5).in_units("Gpc**3"))
+
+   # angular diameter distance
+   print("angular diameter distance", co.angular_diameter_distance(0, 0.5).in_units("Mpc/h"))
+
+   # angular scale
+   print("angular scale", co.angular_scale(0, 0.5).in_units("Mpc/degree"))
+
+   # luminosity distance
+   print("luminosity distance", co.luminosity_distance(0, 0.5).in_units("Mpc/h"))
+
+   # time between two redshifts
+   print("lookback time", co.lookback_time(0, 0.5).in_units("Gyr"))
+
+   # age of the Universe at a given redshift
+   print("hubble time", co.hubble_time(0).in_units("Gyr"))
+
+   # critical density
+   print("critical density", co.critical_density(0))
+
+   # Hubble parameter at a given redshift
+   print("hubble parameter", co.hubble_parameter(0).in_units("km/s/Mpc"))
+
+   # convert time after Big Bang to redshift
+   my_t = co.quan(8, "Gyr")
+   print("z from t", co.z_from_t(my_t))
+
+   # convert redshift to time after Big Bang (same as Hubble time)
+   print("t from z", co.t_from_z(0.5).in_units("Gyr"))
+
+Note that all distances returned are comoving distances.  All of the above
+functions accept scalar values and arrays.  The helper functions `co.quan`
+and `co.arr` exist to create unitful `YTQuantity` and `YTArray` objects with
+the unit registry of the cosmology calculator.  For more information on the usage
+and meaning of each calculation, consult the reference documentation at
+:ref:`cosmology-calculator-ref`.

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -93,7 +93,7 @@
 ellipsoid's semi-principle axes. "e0" is the largest semi-principle
 axis vector direction that would have magnitude A but normalized.  
 The "tilt" is an angle measured in radians.  It can be best described
-as after the rotation about the z-axis to allign e0 to x in the x-y
+as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
 the x-axis, the angle remaining to rotate about the x-axis to align
 both e1 to the y-axis and e2 to the z-axis.

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -65,12 +65,13 @@
 
 Analysis is done by adding actions to the 
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
-Each action is represented by a callback function that will be run on each halo. 
-There are three types of actions:
+Each action is represented by a callback function that will be run on
+each halo.  There are four types of actions:
 
 * Filters
 * Quantities
 * Callbacks
+* Recipes
 
 A list of all available filters, quantities, and callbacks can be found in 
 :ref:`halo_analysis_ref`.  
@@ -213,6 +214,50 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+Recipes
+^^^^^^^
+
+Recipes allow you to create analysis tasks that consist of a series of
+callbacks, quantities, and filters that are run in succession.  An example
+of this is
+:func:`~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities`,
+which calculates virial quantities by first creating a sphere container,
+performing 1D radial profiles, and then interpolating to get values at a
+specified threshold overdensity.  All of these operations are separate
+callbacks, but the recipes allow you to add them to your analysis pipeline
+with one call.  For example,
+
+.. code-block:: python
+
+   hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
+
+The available recipes are located in
+``yt/analysis_modules/halo_analysis/halo_recipes.py``.  New recipes can be
+created in the following manner:
+
+.. code-block:: python
+
+   def my_recipe(halo_catalog, fields, weight_field=None):
+       # create a sphere
+       halo_catalog.add_callback("sphere")
+       # make profiles
+       halo_catalog.add_callback("profile", ["radius"], fields,
+                                 weight_field=weight_field)
+       # save the profile data
+       halo_catalog.add_callback("save_profiles", output_dir="profiles")
+
+   # add recipe to the registry of recipes
+   add_recipe("profile_and_save", my_recipe)
+
+
+   # ...  Later on in your script
+   hc.add_recipe("profile_and_save", ["density", "temperature"],
+                 weight_field="cell_mass")
+
+Note that, unlike callback, filter, and quantity functions, which take a ``Halo``
+object as the first argument, recipe functions should take a ``HaloCatalog``
+object as the first argument.
+
 Running Analysis
 ----------------
 
@@ -236,7 +281,7 @@
 All callbacks, quantities, and filters are stored in an actions list, 
 meaning that they are executed in the same order in which they were added. 
 This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unecessary computation by allowing 
+depend on each other. This also prevents unnecessary computation by allowing 
 the user to add filters at multiple stages to skip remaining analysis if it 
 is not warranted.
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -13,7 +13,7 @@
 
 A halo mass function can be created for the halos identified in a cosmological 
 simulation, as well as analytic fits using any arbitrary set of cosmological
-paramters. In order to create a mass function for simulated halos, they must
+parameters. In order to create a mass function for simulated halos, they must
 first be identified (using HOP, FOF, or Rockstar, see 
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additonal cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters 
 to create an analytic mass function.
 
 .. code-block:: python
@@ -106,7 +106,7 @@
 -----------------
 
 * **simulation_ds** (*Simulation dataset object*)
-  The loaded simulation dataset, used to set cosmological paramters.
+  The loaded simulation dataset, used to set cosmological parameters.
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
@@ -130,7 +130,7 @@
 
 * **omega_baryon0**  (*float*)
   The fraction of the universe made up of baryonic matter. This is not 
-  always stored in the datset and should be checked by hand.
+  always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
@@ -140,14 +140,14 @@
 * **sigma8** (*float*)
   The amplitude of the linear power spectrum at z=0 as specified by 
   the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the datset and should be 
+  8 Mpc/h. This is not always stored in the dataset and should be 
   checked by hand.
   Default : 0.86.
 
 * **primoridal_index** (*float*)
   This is the index of the mass power spectrum before modification by 
   the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the datset and 
+  primordial spectrum. This is not always stored in the dataset and 
   should be checked by hand.
   Default : 1.0.
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -40,7 +40,7 @@
 the full halo catalog documentation for further information about
 how to add these quantities and what quantities are available.
 
-You no longer have to iteratre over halos in the ``halo_list``.
+You no longer have to iterate over halos in the ``halo_list``.
 Now a halo dataset can be treated as a regular dataset and 
 all quantities are available by accessing ``all_data``.
 Specifically, all quantities can be accessed as shown:

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -12,6 +12,7 @@
 .. toctree::
    :maxdepth: 2
 
+   cosmology_calculator
    halo_analysis
    synthetic_observation
    exporting

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -50,7 +50,7 @@
   ``use_minimum_datasets`` set to False, this parameter specifies the 
   fraction of the total box size to be traversed before rerandomizing the 
   projection axis and center.  This was invented to allow light cones with 
-  thin slices to sample coherent large cale structure, but in practice does 
+  thin slices to sample coherent large scale structure, but in practice does 
   not work so well.  Try setting this parameter to 1 and see what happens.  
   Default: 0.0.
 
@@ -74,7 +74,7 @@
 
 A light cone solution consists of a list of datasets spanning a redshift 
 interval with a random orientation for each dataset.  A new solution 
-is calcuated with the 
+is calculated with the 
 :func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
 function:
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -347,7 +347,7 @@
   be used to control what vector corresponds to the "up" direction in 
   the resulting event list. 
 * ``psf_sigma`` may be specified to provide a crude representation of 
-  a PSF, and corresponds to the standard deviation (in degress) of a 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
   Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -313,7 +313,7 @@
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
       apply cuts to the data in the object.  A ``cut_region`` will work
-      for either particle fields or mesh fields, but not on both simulaneously.
+      for either particle fields or mesh fields, but not on both simultaneously.
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -49,7 +49,7 @@
 
     $ conda install mpi4py
 
-This will install `MPICH2 <https://www.mpich.org/>`_ and will interefere with
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
 other MPI libraries that are already installed. Therefore, it is preferable to
 use the ``pip`` installation method.
 
@@ -103,7 +103,7 @@
    p.save()
 
 If this script is run in parallel, two of the most expensive operations -
-finding of the maximum density and the projection will be calulcated in
+finding of the maximum density and the projection will be calculated in
 parallel.  If we save the script as ``my_script.py``, we would run it on 16 MPI
 processes using the following Bash command:
 
@@ -121,7 +121,7 @@
 
 You can set the ``communicator`` keyword in the 
 :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.enable_parallelism` 
-call to a specific MPI communicator to specify a subset of availble MPI 
+call to a specific MPI communicator to specify a subset of available MPI 
 processes.  If none is specified, it defaults to ``COMM_WORLD``.
 
 Creating Parallel and Serial Sections in a Script
@@ -251,7 +251,7 @@
 You may define an empty dictionary and include it as the keyword argument 
 ``storage`` to ``piter()``.  Then, during the processing step, you can access
 this dictionary as the ``sto`` object.  After the 
-loop is finished, the dictionary is re-aggragated from all of the processors, 
+loop is finished, the dictionary is re-aggregated from all of the processors, 
 and you can access the contents:
 
 .. code-block:: python

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,7 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Implemented for: Enzo, Gadget, OWLS.
+.. note:: Implemented for the Enzo, Gadget, OWLS, and Exodus II frontends.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/analyzing/units/comoving_units_and_code_units.rst
--- a/doc/source/analyzing/units/comoving_units_and_code_units.rst
+++ b/doc/source/analyzing/units/comoving_units_and_code_units.rst
@@ -12,7 +12,7 @@
 
 yt has additional capabilities to handle the comoving coordinate system used
 internally in cosmological simulations. Simulations that use comoving
-coordinates, all length units have three other counterparts correspoding to
+coordinates, all length units have three other counterparts corresponding to
 comoving units, scaled comoving units, and scaled proper units. In all cases
 'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
 unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,7 +60,7 @@
 
 # General information about the project.
 project = u'The yt Project'
-copyright = u'2013, the yt Project'
+copyright = u'2013-2016, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,31 +1,30 @@
 import yt
 import numpy as np
 
-# Follow the simple_volume_rendering cookbook for the first part of this.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
+ds = yt.load("MOOSE_sample_data/out.e-s010")
 sc = yt.create_scene(ds)
 cam = sc.camera
-cam.resolution = (512, 512)
-cam.set_width(ds.domain_width/20.0)
 
-# Find the maximum density location, store it in max_c
-v, max_c = ds.find_max('density')
+# save an image at the starting position
+frame = 0
+sc.save('camera_movement_%04i.png' % frame)
+frame += 1
 
-frame = 0
-# Move to the maximum density location over 5 frames
-for _ in cam.iter_move(max_c, 5):
+# Zoom out by a factor of 2 over 5 frames
+for _ in cam.iter_zoom(0.5, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Zoom in by a factor of 10 over 5 frames
-for _ in cam.iter_zoom(10.0, 5):
+# Move to the position [-10.0, 10.0, -10.0] over 5 frames
+pos = ds.arr([-10.0, 10.0, -10.0], 'code_length')
+for _ in cam.iter_move(pos, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Do a rotation over 5 frames
+# Rotate by 180 degrees over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -195,7 +195,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In this recipe, we move a camera through a domain and take multiple volume
-rendering snapshots.
+rendering snapshots. This recipe uses an unstructured mesh dataset (see
+:ref:`unstructured_mesh_rendering`), which makes it easier to visualize what 
+the Camera is doing, but you can manipulate the Camera for other dataset types 
+in exactly the same manner.
+
 See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,7 +65,7 @@
 
 .. yt_cookbook:: light_ray.py 
 
-This script demontrates how to make a light ray from a single dataset.
+This script demonstrates how to make a light ray from a single dataset.
 
 .. _cookbook-single-dataset-light-ray:
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/custom_colorbar_tickmarks.ipynb
--- a/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
@@ -64,6 +64,24 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
+    "Next, we call `_setup_plots()` to ensure the plot is properly initialized. Without this, the custom tickmarks we are adding will be ignored."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "slc._setup_plots()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
     "To set custom tickmarks, simply call the `matplotlib` [`set_ticks`](http://matplotlib.org/api/colorbar_api.html#matplotlib.colorbar.ColorbarBase.set_ticks) and [`set_ticklabels`](http://matplotlib.org/api/colorbar_api.html#matplotlib.colorbar.ColorbarBase.set_ticklabels) functions."
    ]
   },

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -12,26 +12,16 @@
 # Filter out less massive halos
 hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
 
-# attach a sphere object to each halo whose radius extends
-#   to twice the radius of the halo
-hc.add_callback("sphere", factor=2.0)
+# This recipe creates a spherical data container, computes
+# radial profiles, and calculates r_200 and M_200.
+hc.add_recipe("calculate_virial_quantities", ["radius", "matter_mass"])
 
-# use the sphere to calculate radial profiles of gas density
-# weighted by cell volume in terms of the virial radius
-hc.add_callback("profile", ["radius"],
-                [("gas", "overdensity")],
-                weight_field="cell_volume",
-                accumulation=True,
-                storage="virial_quantities_profiles")
-
-
-hc.add_callback("virial_quantities", ["radius"],
-                profile_storage="virial_quantities_profiles")
-hc.add_callback('delete_attribute', 'virial_quantities_profiles')
-
+# Create a sphere container with radius 5x r_200.
 field_params = dict(virial_radius=('quantity', 'radius_200'))
 hc.add_callback('sphere', radius_field='radius_200', factor=5,
                 field_parameters=field_params)
+
+# Compute profiles of T vs. r/r_200
 hc.add_callback('profile', ['virial_radius_fraction'], 
                 [('gas', 'temperature')],
                 storage='virial_profiles',

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/cookbook/notebook_tutorial.rst
--- a/doc/source/cookbook/notebook_tutorial.rst
+++ b/doc/source/cookbook/notebook_tutorial.rst
@@ -17,7 +17,7 @@
    $ ipython notebook
 
 Depending on your default web browser and system setup this will open a web
-browser and direct you to the notebook dahboard.  If it does not,  you might
+browser and direct you to the notebook dashboard.  If it does not,  you might
 need to connect to the notebook manually.  See the `IPython documentation
 <http://ipython.org/ipython-doc/stable/notebook/notebook.html#starting-the-notebook-server>`_
 for more details.

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -158,7 +158,7 @@
 HTML. to simplify versioning of the notebook JSON format, we store notebooks in
 an unevaluated state.
 
-To build the full documentation, you will need yt, jupyter, and all depedencies 
+To build the full documentation, you will need yt, jupyter, and all dependencies 
 needed for yt's analysis modules installed. The following dependencies were 
 used to generate the yt documentation during the release of yt 3.2 in 2015.
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -65,7 +65,7 @@
 data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
-with dimensions that are incompatibible with units specified in ``add_field``,
+with dimensions that are incompatible with units specified in ``add_field``,
 you will see an error. To clear this error, you must ensure that your field
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
@@ -75,7 +75,7 @@
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
-return value of your field function by the appropriate compbination of unit
+return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into
 :class:`~yt.units.yt_array.YTArray` or :class:`~yt.units.yt_array.YTQuantity`
 instances by making use of the

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -19,7 +19,7 @@
 checking in any code that breaks existing functionality.  To further this goal,
 an automatic buildbot runs the test suite after each code commit to confirm
 that yt hasn't broken recently.  To supplement this effort, we also maintain a
-`continuous integration server <http://tests.yt-project.org>`_ that runs the
+`continuous integration server <https://tests.yt-project.org>`_ that runs the
 tests with each commit to the yt version control repository.
 
 .. _unit_testing:
@@ -471,8 +471,87 @@
 Another good example of an image comparison test is the
 ``PlotWindowAttributeTest`` defined in the answer testing framework and used in
 ``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
-test subclass can be used to programitically test a variety of different methods
+test subclass can be used to programmatically test a variety of different methods
 of a complicated class using the same test class. This sort of image comparison
 test is more useful if you are finding yourself writing a ton of boilerplate
 code to get your image comparison test working.  The ``GenericImageTest`` is
 more useful if you only need to do a one-off image comparison test.
+
+Enabling Answer Tests on Jenkins
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Before any code is added to or modified in the yt codebase, each incoming
+changeset is run against all available unit and answer tests on our `continuous
+integration server <https://tests.yt-project.org>`_. While unit tests are
+autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
+answer tests require a definition of which set of tests constitutes a given
+answer. The configuration for the integration server is stored in
+*tests/tests_2.7.yaml* in the main yt repository:
+
+.. code-block:: yaml
+
+   answer_tests:
+      local_artio_270:
+         - yt/frontends/artio/tests/test_outputs.py
+   # ...
+   other_tests:
+      unittests:
+         - '-v'
+         - '-s'
+
+Each element under *answer_tests* defines an answer name (*local_artio_270* in
+the snippet above) and specifies a list of files/classes/methods that will be
+validated (*yt/frontends/artio/tests/test_outputs.py* in the snippet above). On
+the testing server this is translated to:
+
+.. code-block:: bash
+
+   $ nosetests --with-answer-testing --local --local-dir ... --answer-big-data \
+      --answer-name=local_artio_270 \
+      yt/frontends/artio/tests/test_outputs.py
+
+If the answer doesn't exist on the server yet, ``nosetests`` is run twice, and
+during the first pass ``--answer-store`` is added to the command line.
+
+Updating Answers
+~~~~~~~~~~~~~~~~
+
+In order to regenerate answers for a particular set of tests it is sufficient to
+change the answer name in *tests/tests_2.7.yaml*, e.g.:
+
+.. code-block:: diff
+
+   --- a/tests/tests_2.7.yaml
+   +++ b/tests/tests_2.7.yaml
+   @@ -25,7 +25,7 @@
+        - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+        - yt/frontends/owls_subfind/tests/test_outputs.py
+   
+   -  local_owls_270:
+   +  local_owls_271:
+        - yt/frontends/owls/tests/test_outputs.py
+   
+      local_pw_270:
+
+would regenerate answers for the OWLS frontend.
+
+Adding New Answer Tests
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to add a new set of answer tests, it is sufficient to extend the
+*answer_tests* list in *tests/tests_2.7.yaml*, e.g.:
+
+.. code-block:: diff
+
+   --- a/tests/tests_2.7.yaml
+   +++ b/tests/tests_2.7.yaml
+   @@ -60,6 +60,10 @@
+        - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+        - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+    
+   +  local_gdf_270:
+   +    - yt/frontends/gdf/tests/test_outputs.py
+   +
+   +
+    other_tests:
+      unittests:
+

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -369,7 +369,7 @@
 
 This particular dataset has two meshes in it, both of which are made of 8-node hexes.
 yt uses a field name convention to access these different meshes in plots and data
-objects. To see all the fields found in a particlular dataset, you can do:
+objects. To see all the fields found in a particular dataset, you can do:
 
 .. code-block:: python
     
@@ -540,7 +540,7 @@
 
 * ``CDELTx``: The pixel width in along axis ``x``
 * ``CRVALx``: The coordinate value at the reference position along axis ``x``
-* ``CRPIXx``: The the reference pixel along axis ``x``
+* ``CRPIXx``: The reference pixel along axis ``x``
 * ``CTYPEx``: The projection type of axis ``x``
 * ``CUNITx``: The units of the coordinate along axis ``x``
 * ``BTYPE``: The type of the image
@@ -870,7 +870,7 @@
 ``over_refine_factor``.  They are weak proxies for each other.  The first,
 ``n_ref``, governs how many particles in an oct results in that oct being
 refined into eight child octs.  Lower values mean higher resolution; the
-default is 64.  The secon parameter, ``over_refine_factor``, governs how many
+default is 64.  The second parameter, ``over_refine_factor``, governs how many
 cells are in a given oct; the default value of 1 corresponds to 8 cells.
 The number of cells in an oct is defined by the expression
 ``2**(3*over_refine_factor)``.
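
A minimal sketch of how these two parameters might be passed (the particle
data here are randomly generated):

.. code-block:: python

   import yt
   import numpy as np

   n = 10000
   data = {'particle_position_x': np.random.random(n),
           'particle_position_y': np.random.random(n),
           'particle_position_z': np.random.random(n),
           'particle_mass': np.ones(n)}
   bbox = np.array([[0., 1.], [0., 1.], [0., 1.]])

   # a lower n_ref refines the octree sooner; over_refine_factor=2
   # puts 2**(3*2) = 64 cells in each oct
   ds = yt.load_particles(data, length_unit=3.08e24, mass_unit=1.989e33,
                          bbox=bbox, n_ref=32, over_refine_factor=2)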
@@ -1118,8 +1118,10 @@
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
    ds = yt.load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
-where in this exampe the particle position fields have been assigned. ``number_of_particles`` must be the same size as the particle
-arrays. If no particle arrays are supplied then ``number_of_particles`` is assumed to be zero. 
+where in this example the particle position fields have been assigned.
+``number_of_particles`` must be the same size as the particle arrays. If no
+particle arrays are supplied then ``number_of_particles`` is assumed to be
+zero. 
 
 .. rubric:: Caveats
 
@@ -1153,7 +1155,7 @@
    coordinates,connectivity = yt.hexahedral_connectivity(xgrid,ygrid,zgrid)
 
 will define the (x,y,z) coordinates of the hexahedral cells and
-information about that cell's neighbors such that the celll corners
+information about that cell's neighbors such that the cell corners
 will be a grid of points constructed as the Cartesion product of
 xgrid, ygrid, and zgrid.
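
A minimal sketch of the full round trip (grid spacings and density values
are arbitrary):

.. code-block:: python

   import yt
   import numpy as np

   # cell-edge coordinates along each axis; spacing need not be uniform
   xgrid = np.linspace(0.0, 1.0, 9)
   ygrid = np.linspace(0.0, 1.0, 17)
   zgrid = np.array([0.0, 0.1, 0.3, 0.6, 1.0])

   coordinates, connectivity = yt.hexahedral_connectivity(xgrid, ygrid, zgrid)

   bbox = np.array([[xgrid[0], xgrid[-1]],
                    [ygrid[0], ygrid[-1]],
                    [zgrid[0], zgrid[-1]]])
   n_cells = (xgrid.size - 1) * (ygrid.size - 1) * (zgrid.size - 1)
   data = {'density': np.random.random(n_cells)}
   ds = yt.load_hexahedral_mesh(data, connectivity, coordinates, bbox=bbox)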
 
@@ -1386,8 +1388,8 @@
 ---------
 
 `PyNE <http://pyne.io/>`_ is an open source nuclear engineering toolkit
-maintained by the PyNE developement team (`pyne-dev at googlegroups.com
-<pyne-dev%40googlegroups.com>`_). PyNE meshes utilize the Mesh-Oriented datABase
+maintained by the PyNE development team (pyne-dev at googlegroups.com).
+PyNE meshes utilize the Mesh-Oriented datABase
 `(MOAB) <http://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/>`_ and can be
 Cartesian or tetrahedral. In addition to field data, pyne meshes store pyne
 Material objects which provide a rich set of capabilities for nuclear

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -176,7 +176,7 @@
 cells from the parent grid will be duplicated (appropriately) to fill the 
 covering grid.
 
-Let's say we now want to look at that entire data volume and sample it at the 
+Let's say we now want to look at that entire data volume and sample it at
 a higher resolution (i.e. level 2).  As stated above, we'll be oversampling
 under-refined regions, but that's OK.  We must also increase the resolution 
 of our output array by a factor of 2^2 in each direction to hold this new 
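
A short sketch of sampling at level 2 (``IsolatedGalaxy`` is a sample
dataset; its ``refine_by`` is 2, giving the factor of 2^2):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

   level = 2
   dims = ds.domain_dimensions * ds.refine_by**level
   cg = ds.covering_grid(level=level, left_edge=ds.domain_left_edge,
                         dims=dims)
   print(cg['density'].shape)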

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -141,7 +141,7 @@
   $ grep -r SlicePlot *         (or $ grin SlicePlot)
 
 This will print a number of locations in the yt source tree where ``SlicePlot``
-is mentioned.  You can now followup on this and open up the files that have
+is mentioned.  You can now follow up on this and open up the files that have
 references to ``SlicePlot`` (particularly the one that defines SlicePlot) and
 inspect their contents for problems or clarification.
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -36,7 +36,7 @@
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
   for python environments set up via Linux package managers so long as you
-  have the the necessary compilers installed (e.g. the ``build-essentials``
+  have the necessary compilers installed (e.g. the ``build-essentials``
   package on Debian and Ubuntu).
 
 .. note::
@@ -417,7 +417,7 @@
 
 With the release of version 3.0 of yt, development of the legacy yt 2.x series
 has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the forseeable future.  This makes it easy to use scripts written
+series for the foreseeable future.  This makes it easy to use scripts written
 for older versions of yt without substantially updating them to support the
 new field naming or unit systems in yt version 3.
 
@@ -431,7 +431,7 @@
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
 repository, update to the desired version, and rebuild the source (some of the
-c code requires a compilation step for big changes like this):
+C code requires a compilation step for big changes like this):
 
 .. code-block:: bash
 
@@ -439,7 +439,7 @@
   hg update <desired-version>
   python setup.py develop
 
-Valid versions to jump to are described in :ref:`branches-of-yt`).
+Valid versions to jump to are described in :ref:`branches-of-yt`.
 
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -472,6 +472,8 @@
    ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity
    ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
    ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass
+   ~yt.analysis_modules.halo_analysis.halo_recipes.HaloRecipe
+   ~yt.analysis_modules.halo_analysis.halo_recipes.calculate_virial_quantities
 
 Halo Finding
 ^^^^^^^^^^^^
@@ -789,6 +791,7 @@
    ~yt.data_objects.static_output.Dataset.box
    ~yt.funcs.deprecate
    ~yt.funcs.ensure_list
+   ~yt.funcs.enable_plugins
    ~yt.funcs.get_pbar
    ~yt.funcs.humanize_time
    ~yt.funcs.insert_ipython
@@ -859,6 +862,29 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
 
+.. _cosmology-calculator-ref:
+
+Cosmology Calculator
+--------------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.utilities.cosmology.Cosmology
+   ~yt.utilities.cosmology.Cosmology.hubble_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_radial_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_transverse_distance
+   ~yt.utilities.cosmology.Cosmology.comoving_volume
+   ~yt.utilities.cosmology.Cosmology.angular_diameter_distance
+   ~yt.utilities.cosmology.Cosmology.angular_scale
+   ~yt.utilities.cosmology.Cosmology.luminosity_distance
+   ~yt.utilities.cosmology.Cosmology.lookback_time
+   ~yt.utilities.cosmology.Cosmology.hubble_time
+   ~yt.utilities.cosmology.Cosmology.critical_density
+   ~yt.utilities.cosmology.Cosmology.hubble_parameter
+   ~yt.utilities.cosmology.Cosmology.expansion_factor
+   ~yt.utilities.cosmology.Cosmology.z_from_t
+   ~yt.utilities.cosmology.Cosmology.t_from_z
 
 Testing Infrastructure
 ----------------------

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -36,7 +36,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Gasoline              |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Grid Data Format (GDF)|     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Maestro               |   Y [#f1]_ |     N     |      Y     |   Y   |    Y     |    Y     |     N      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
@@ -48,7 +48,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | OWLS/EAGLE            |     Y      |     Y     |      Y     |   Y   | Y [#f2]_ |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Piernik               |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     N      |   Full   |
+| Piernik               |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Pluto                 |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -124,25 +124,22 @@
 objects, colormaps, and other code classes and objects to be used in future
 yt sessions without modifying the source code directly.  
 
+To force the plugin file to be parsed, call the function
+:func:`~yt.funcs.enable_plugins` at the top of your script.
 
 .. note::
 
-   The ``my_plugins.py`` is only parsed inside of ``yt.mods``, so in order
-   to use it, you must load yt with either: ``import yt.mods as yt``
-   or ``from yt.mods import *``.  You can tell that your
-   plugins file is being parsed by watching for a logging message when you
-   import yt.  Note that both the ``yt load`` and ``iyt`` command line entry
-   points invoke ``from yt.mods import *``, so the ``my_plugins.py`` file
-   will be parsed if you enter yt that way.
+   You can tell that your plugins file is being parsed by watching for a logging
+   message when you import yt.  Note that both the ``yt load`` and ``iyt``
+   command line entry points parse the plugin file, so the ``my_plugins.py``
+   file will be parsed if you enter yt that way.
 
 Plugin File Format
 ^^^^^^^^^^^^^^^^^^
 
-yt will look for and recognize the file ``$HOME/.yt/my_plugins`` as a plugin
+yt will look for and recognize the file ``$HOME/.yt/my_plugins.py`` as a plugin
 file, which should contain python code.  If accessing yt functions and classes
 they will not require the ``yt.`` prefix, because of how they are loaded.
-It is executed at the bottom of ``yt.mods``, and so
-it is provided with the entire namespace available in the module ``yt.mods``.
 
 For example, if I created a plugin file containing:
 
@@ -152,7 +149,7 @@
        return np.random.random(data["density"].shape)
    add_field("random", function=_myfunc, units='auto')
 
-then all of my data objects would have access to the field ``some_quantity``.
+then all of my data objects would have access to the field ``random``.
 
 You can also define other convenience functions in your plugin file.  For
 instance, you could define some variables or functions, and even import common
@@ -176,13 +173,19 @@
 
 .. code-block:: python
 
-   import yt.mods as yt
+   import yt
+   yt.enable_plugins()
 
    my_run = yt.load_run("hotgasflow/DD0040/DD0040")
 
-And because we have imported from ``yt.mods`` we have access to the
+And because we have used ``yt.enable_plugins``, we have access to the
 ``load_run`` function defined in our plugin file.
 
+Note that using the plugins file implies that your script is no longer fully
+reproducible. If you share your script with someone else and use some of the
+functionality of your plugins file, you will also need to share your plugins
+file for someone else to re-run your script properly.
+
 Adding Custom Colormaps
 ^^^^^^^^^^^^^^^^^^^^^^^
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -315,7 +315,7 @@
 Let's try this out with a for loop.  First type ``for i in range(10):`` and
 press enter.  This will change the prompt to be three periods, instead of three
 greater-than signs, and you will be expected to hit the tab key to indent.
-Then type "print i", press enter, and then instead of indenting again, press
+Then type "print(i)", press enter, and then instead of indenting again, press
 enter again.  The entire entry should look like this::
 
    >>> for i in range(10):

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  TransferFunctionHelper is a utility class that makes it easy to visualize he probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
+    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  Creating a custom transfer function is a process that usually involves some trial-and-error. TransferFunctionHelper is a utility class designed to help you visualize the probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
     "\n",
     "First, we set up our namespace and define a convenience function to display volume renderings inline in the notebook.  Using `%matplotlib inline` makes it so matplotlib plots display inline in the notebook."
    ]
@@ -132,8 +132,8 @@
     "tfh.set_log(True)\n",
     "tfh.build_transfer_function()\n",
     "tfh.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
-    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=10.0)\n",
-    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.)\n",
+    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds')\n",
+    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r')\n",
     "\n",
     "tfh.plot(profile_field='cell_mass')"
    ]
@@ -142,7 +142,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
+    "Let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
    ]
   },
   {
@@ -166,7 +166,55 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "That looks okay, but the red gas (associated with temperatures between 1e6 and 1e8 K) is a bit hard to see in the image. To fix this, we can make that gas contribute a larger alpha value to the image by using the ``scale`` keyword argument in ``map_to_colormap``."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "tfh2 = TransferFunctionHelper(ds)\n",
+    "tfh2.set_field('temperature')\n",
+    "tfh2.set_bounds()\n",
+    "tfh2.set_log(True)\n",
+    "tfh2.build_transfer_function()\n",
+    "tfh2.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
+    "tfh2.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=5.0)\n",
+    "tfh2.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.0)\n",
+    "\n",
+    "tfh2.plot(profile_field='cell_mass')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the height of the red portion of the transfer function has increased by a factor of 5.0. If we use this transfer function to make the final image:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "source.set_transfer_function(tfh2.tf)\n",
+    "im3 = sc.render()\n",
+    "\n",
+    "showme(im3[:,:,:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The red gas is now much more prominant in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -47,7 +47,7 @@
 store them in your :ref:`plugin-file` for access to them in every future yt 
 session.  The example below creates two custom colormaps, one that has
 three equally spaced bars of blue, white and red, and the other that 
-interpolates in increasing lengthed intervals from black to red, to green, 
+interpolates in intervals of increasing length from black to red, to green, 
 to blue.  These will be accessible for the rest of the yt session as 
 'french_flag' and 'weird'.  See 
 :func:`~yt.visualization.color_maps.make_colormap` and 
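
A minimal sketch of the 'french_flag' colormap described above (the plot at
the end simply demonstrates usage; the dataset is a yt sample):

.. code-block:: python

   import yt

   yt.make_colormap([('blue', 20), ('white', 20), ('red', 20)],
                    name='french_flag', interpolate=False)

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   p = yt.ProjectionPlot(ds, 'z', 'density')
   p.set_cmap('density', 'french_flag')
   p.save()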
@@ -97,7 +97,8 @@
 .. code-block:: python
 
     import yt
-    yt.show_colormaps(subset=['algae', 'kamae', 'spectral'], 
+    yt.show_colormaps(subset=['algae', 'kamae', 'spectral',
+                              'arbre', 'dusk', 'octarine', 'kelp'], 
                       filename="yt_native.png")
 
 Applying a Colormap to your Rendering

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
+a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -80,8 +80,8 @@
 ``export_ply``, which will write to a file and optionally sample a field at
 every face or vertex, outputting a color value to the file as well.  This file
 can then be viewed in MeshLab, Blender or on the website `Sketchfab.com
-<Sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an even
-easier way!
+<https://sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an
+even easier way!
 
 Exporting to Sketchfab
 ----------------------

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -14,7 +14,7 @@
 
 .. code-block:: bash
 
-    conda install -c http://use.yt/with_conda/ yt=3.3_dev
+    conda install -c http://use.yt/with_conda/ yt
 
 If you want to install from source, you can use the ``get_yt.sh`` script.
 Be sure to set the INST_YT_SOURCE and INST_UNSTRUCTURED flags to 1 at the 
@@ -73,7 +73,13 @@
 
 as usual. Finally, if you create a file called embree.cfg in the yt-hg directory with
 the location of the embree installation, the setup script will find this and use it, 
-provided EMBREE_DIR is not set. We recommend one of the later two methods, especially
+provided EMBREE_DIR is not set. An example embree.cfg file could look like this:
+
+.. code-block:: bash
+
+   /opt/local/
+
+We recommend one of the latter two methods, especially
 if you plan on re-compiling the cython extensions regularly. Note that none of this is
 neccessary if you installed embree into a location that is in your default path, such
 as /usr/local.
@@ -214,6 +220,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
@@ -273,7 +302,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
     
@@ -355,7 +384,7 @@
 ^^^^^^^^^^^^^
 
 Here are a couple of example scripts that show how to create image frames that 
-can later be stiched together into a movie. In the first example, we look at a 
+can later be stitched together into a movie. In the first example, we look at a 
 single dataset at a fixed time, but we move the camera around to get a different
 vantage point. We call the rotate() method 300 times, saving a new image to the 
 disk each time.
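
A minimal sketch of such a rotation loop (using a sample dataset as a
stand-in; 300 frames covering one full revolution):

.. code-block:: python

   import yt
   import numpy as np

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sc = yt.create_scene(ds)
   cam = sc.camera

   n_frames = 300
   for i in range(n_frames):
       cam.rotate(2.0 * np.pi / n_frames)
       sc.render()
       sc.save('rotation_%04i.png' % i)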

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -84,7 +84,7 @@
   external code**
   Mesh fields that exist on-disk in an output file can be read in using whatever
   name is used by the output file.  On-disk fields are always returned in code
-  units.  The full field name will be will be ``(code_name, field_name)``. See
+  units.  The full field name will be ``(code_name, field_name)``. See
   :ref:`field-list`.
 * **Particle fields are now more obviously different than mesh fields**
   Particle fields on-disk will also be in code units, and will be named
@@ -247,8 +247,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Wherever possible, we have attempted to replace the term "parameter file"
-(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of the 
-the ``pf`` atrributes of objects are now ``ds`` or ``dataset`` attributes.
+(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of
+the ``pf`` attributes of objects are now ``ds`` or ``dataset`` attributes.
 
 Hierarchy is Now Index
 ^^^^^^^^^^^^^^^^^^^^^^
@@ -262,7 +262,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Derived quantities can now be accessed via a function that hangs off of the
-``quantities`` atribute of data objects. Instead of
+``quantities`` attribute of data objects. Instead of
 ``dd.quantities['TotalMass']()``, you can now use ``dd.quantities.total_mass()``
 to do the same thing. All derived quantities can be accessed via a function that
 hangs off of the `quantities` attribute of data objects.
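
For instance, a short sketch of the new style (``IsolatedGalaxy`` is a
sample dataset):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   dd = ds.all_data()

   # attribute-style access replaces dd.quantities['TotalMass']()
   total_mass = dd.quantities.total_mass()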

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,7 @@
 #!python
-import os, re
+from __future__ import print_function
+import os
+import re
 from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import YTDataContainer
@@ -16,8 +18,8 @@
 
 try:
     import IPython
-except:
-    print 'ipython is not available. using default python interpreter.'
+except ImportError:
+    print('ipython is not available. using default python interpreter.')
     import code
     import sys
     code.interact(doc, None, namespace)
@@ -70,7 +72,7 @@
 Feel free to edit this file to customize your ipython experience.
 
 Note that as such this file does nothing, for backwards compatibility.
-Consult e.g. file 'ipy_profile_sh.py' for an example of the things 
+Consult e.g. file 'ipy_profile_sh.py' for an example of the things
 you can do here.
 
 See http://ipython.scipy.org/moin/IpythonExtensionApi for detailed
@@ -96,7 +98,7 @@
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py
 # We'll start with some fields.
 
-import re
+
 def yt_fieldname_completer(self, event):
     """Match dictionary completions"""
     #print "python_matches", event.symbol
@@ -110,7 +112,7 @@
 
     if not m:
         raise try_next
-    
+
     expr, attr = m.group(1, 3)
     #print "COMPLETING ON ", expr, attr
     #print type(self.Completer), dir(self.Completer)
@@ -122,9 +124,9 @@
         try:
             obj = eval(expr, self.Completer.global_namespace)
         except:
-            raise IPython.ipapi.TryNext 
-        
-    if isinstance(obj, (YTDataContainer, ) ):
+            raise IPython.ipapi.TryNext
+
+    if isinstance(obj, YTDataContainer):
         #print "COMPLETING ON THIS THING"
         all_fields = [f for f in sorted(
                 obj.ds.field_list + obj.ds.derived_field_list)]
@@ -135,6 +137,6 @@
 
     raise try_next
 
-ip.set_hook('complete_command', yt_fieldname_completer , re_key = ".*" )
+ip.set_hook('complete_command', yt_fieldname_completer, re_key = ".*")
 
 ip_shell.mainloop(**kwargs)

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import hglib
 import requests
 import shutil
@@ -6,6 +7,7 @@
 from datetime import datetime
 from distutils.version import LooseVersion
 from time import strptime, mktime
+from yt.extern.six.moves import input
 
 MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
                       "yt/pullrequests/?state=MERGED")
@@ -280,17 +282,17 @@
             if commit_already_on_stable(repo_path, commits[0]) is True:
                 continue
             message = "hg graft %s\n" % commits[0]
-        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
-        print "To backport, issue the following command(s):\n"
-        print message
-        raw_input('Press any key to continue')
+        print("PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc)
+        print("To backport, issue the following command(s):\n")
+        print(message)
+        input('Press any key to continue')
 
 
 if __name__ == "__main__":
-    print ""
-    print "Gathering PR information, this may take a minute."
-    print "Don't worry, yt loves you."
-    print ""
+    print("")
+    print("Gathering PR information, this may take a minute.")
+    print("Don't worry, yt loves you.")
+    print("")
     repo_path = clone_new_repo()
     try:
         last_major_release = get_first_commit_after_last_major_release(repo_path)
@@ -308,11 +310,11 @@
         del inv_map[None]
 
         inv_map = screen_already_backported(repo_path, inv_map)
-        print "In another terminal window, navigate to the following path:"
-        print "%s" % repo_path
-        raw_input("Press any key to continue")
+        print("In another terminal window, navigate to the following path:")
+        print("%s" % repo_path)
+        input("Press any key to continue")
         backport_pr_commits(repo_path, inv_map, last_stable, prs)
-        raw_input(
+        input(
             "Now you need to push your backported changes. The temporary\n"
             "repository currently being used will be deleted as soon as you\n"
             "press any key.")

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
 from setuptools.command.build_py import build_py as _build_py
 from setupext import \
     check_for_openmp, check_for_pyembree, read_embree_location, \
-    get_mercurial_changeset_id
+    get_mercurial_changeset_id, in_conda_env
 
 if sys.version_info < (2, 7):
     print("yt currently requires Python version 2.7")
@@ -127,7 +127,9 @@
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+              libraries=["m"],
+              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -177,7 +179,8 @@
                        "yt/utilities/lib/kdtree.h",
                        "yt/utilities/lib/fixed_interpolator.h",
                        "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/field_interpolation_tables.pxd"]),
+                       "yt/utilities/lib/field_interpolation_tables.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),
@@ -254,14 +257,21 @@
     ]
 
     embree_prefix = os.path.abspath(read_embree_location())
+    embree_inc_dir = [os.path.join(embree_prefix, 'include')]
+    embree_lib_dir = [os.path.join(embree_prefix, 'lib')]
+    if in_conda_env():
+        conda_basedir = os.path.dirname(os.path.dirname(sys.executable))
+        embree_inc_dir.append(os.path.join(conda_basedir, 'include'))
+        embree_lib_dir.append(os.path.join(conda_basedir, 'lib'))
+        
     if _platform == "darwin":
         embree_lib_name = "embree.2"
     else:
         embree_lib_name = "embree"
 
     for ext in embree_extensions:
-        ext.include_dirs.append(os.path.join(embree_prefix, 'include'))
-        ext.library_dirs.append(os.path.join(embree_prefix, 'lib'))
+        ext.include_dirs += embree_inc_dir
+        ext.library_dirs += embree_lib_dir
         ext.language = "c++"
         ext.libraries += ["m", embree_lib_name]
 
@@ -352,7 +362,11 @@
                  "Operating System :: POSIX :: AIX",
                  "Operating System :: POSIX :: Linux",
                  "Programming Language :: C",
-                 "Programming Language :: Python",
+                 "Programming Language :: Python :: 2",
+                 "Programming Language :: Python :: 2.7",
+                 "Programming Language :: Python :: 3",
+                 "Programming Language :: Python :: 3.4",
+                 "Programming Language :: Python :: 3.5",
                  "Topic :: Scientific/Engineering :: Astronomy",
                  "Topic :: Scientific/Engineering :: Physics",
                  "Topic :: Scientific/Engineering :: Visualization"],

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f setupext.py
--- a/setupext.py
+++ b/setupext.py
@@ -59,6 +59,8 @@
         return None
     return os.path.dirname(fn)
 
+def in_conda_env():
+    return any(s in sys.version for s in ("Anaconda", "Continuum"))
 
 def read_embree_location():
     '''
@@ -74,17 +76,58 @@
     '''
 
     rd = os.environ.get('EMBREE_DIR')
-    if rd is not None:
-        return rd
-    print("EMBREE_DIR not set. Attempting to read embree.cfg")
+    if rd is None:
+        try:
+            rd = open("embree.cfg").read().strip()
+        except IOError:
+            rd = '/usr/local'
+
+    fail_msg = ("Pyembree is installed, but I could not compile Embree test code. \n"
+               "I attempted to find Embree headers in %s. \n"
+               "If this is not correct, please set your correct embree location \n"
+               "using EMBREE_DIR environment variable or your embree.cfg file. \n"
+               "Please see http://yt-project.org/docs/dev/visualizing/unstructured_mesh_rendering.html "
+                "for more information." % rd)
+
+    # Create a temporary directory
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+
     try:
-        rd = open("embree.cfg").read().strip()
-        return rd
-    except IOError:
-        print("Reading Embree location from embree.cfg failed.")
-        print("If compilation fails, please place the base directory")
-        print("of your Embree install in embree.cfg and restart.")
-        return '/usr/local'
+        os.chdir(tmpdir)
+
+        # Get compiler invocation
+        compiler = os.getenv('CXX', 'c++')
+        compiler = compiler.split(' ')
+
+        # Attempt to compile a test script.
+        filename = r'test.cpp'
+        file = open(filename, 'wt', 1)
+        file.write(
+            '#include "embree2/rtcore.h"\n'
+            'int main() {\n'
+            'return 0;\n'
+            '}'
+        )
+        file.flush()
+        with open(os.devnull, 'w') as fnull:
+            exit_code = subprocess.call(compiler + ['-I%s/include/' % rd, filename],
+                             stdout=fnull, stderr=fnull)
+
+        # Clean up
+        file.close()
+
+    except OSError:
+        # the compiler invocation itself failed; flag the failure so the
+        # warning below is printed
+        exit_code = 1
+
+    finally:
+        os.chdir(curdir)
+        shutil.rmtree(tmpdir)
+
+    if exit_code != 0:
+        print(fail_msg)
+
+    return rd
 
 
 def get_mercurial_changeset_id(target_dir):

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -20,6 +20,9 @@
   local_gadget_270:
     - yt/frontends/gadget/tests/test_outputs.py
 
+  local_gdf_270:
+    - yt/frontends/gdf/tests/test_outputs.py
+
   local_halos_270:
     - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
     - yt/analysis_modules/halo_finding/tests/test_rockstar.py
@@ -28,7 +31,7 @@
   local_owls_270:
     - yt/frontends/owls/tests/test_outputs.py
   
-  local_pw_270:
+  local_pw_271:
     - yt/visualization/tests/test_plotwindow.py:test_attributes
     - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
     - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
@@ -39,12 +42,13 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_270:
+  local_varia_272:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
   local_orion_270:
     - yt/frontends/boxlib/tests/test_orion.py
@@ -55,6 +59,10 @@
   local_ytdata_270:
     - yt/frontends/ytdata
 
+  local_absorption_spectrum_271:
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
+
 other_tests:
   unittests:
      - '-v'

diff -r b7611c34a4cac586e94934e0c7a7dd150fffa6fa -r 80db1a0f9d5070fda36a8266153882d45bed7e0f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -52,6 +52,8 @@
     def __init__(self, lambda_min, lambda_max, n_lambda):
         self.n_lambda = n_lambda
         # lambda, flux, and tau are wavelength, flux, and optical depth
+        self.lambda_min = lambda_min
+        self.lambda_max = lambda_max
         self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max, 
                                     n_lambda), "angstrom")
         self.tau_field = None
@@ -117,8 +119,9 @@
 
     def make_spectrum(self, input_file, output_file=None,
                       line_list_file=None, output_absorbers_file=None,
-                      use_peculiar_velocity=True, 
-                      subgrid_resolution=10, njobs="auto"):
+                      use_peculiar_velocity=True,
+                      subgrid_resolution=10, observing_redshift=0.,
+                      njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -128,33 +131,38 @@
         input_file : string or dataset
            path to input ray data or a loaded ray dataset
         output_file : optional, string
-           Option to save a file containing the wavelength, flux, and optical 
-           depth fields.  File formats are chosen based on the filename extension.  
-           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+           Option to save a file containing the wavelength, flux, and optical
+           depth fields.  File formats are chosen based on the filename
+           extension. ``.h5`` for hdf5, ``.fits`` for fits, and everything
+           else is ASCII.
            Default: None
         output_absorbers_file : optional, string
-           Option to save a text file containing all of the absorbers and 
+           Option to save a text file containing all of the absorbers and
            corresponding wavelength and redshift information.
            For parallel jobs, combining the lines lists can be slow so it
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
            if True, include peculiar velocity for calculating doppler redshift
-           to shift lines.  Requires similar flag to be set in LightRay 
+           to shift lines.  Requires similar flag to be set in LightRay
            generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal
            width is less than the spectral bin width), the voigt profile of
-           the line is deposited into an array of virtual wavelength bins at 
-           higher resolution.  The optical depth from these virtual bins is 
-           integrated and then added to the coarser spectral wavelength bin.  
-           The subgrid_resolution value determines the ratio between the 
-           thermal width and the bin width of the virtual bins.  Increasing 
-           this value yields smaller virtual bins, which increases accuracy, 
-           but is more expensive.  A value of 10 yields accuracy to the 4th 
+           the line is deposited into an array of virtual wavelength bins at
+           higher resolution.  The optical depth from these virtual bins is
+           integrated and then added to the coarser spectral wavelength bin.
+           The subgrid_resolution value determines the ratio between the
+           thermal width and the bin width of the virtual bins.  Increasing
+           this value yields smaller virtual bins, which increases accuracy,
+           but is more expensive.  A value of 10 yields accuracy to the 4th
            significant digit in tau.
            Default: 10
+        observing_redshift : optional, float
+           The redshift at which the observer sits while observing
+           the absorption spectrum.
+           Default: 0
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -181,6 +189,9 @@
             input_fields.append('redshift_eff')
             field_units["velocity_los"] = "cm/s"
             field_units["redshift_eff"] = ""
+        if observing_redshift != 0.:
+            input_fields.append('redshift_dopp')
+            field_units["redshift_dopp"] = ""
         for feature in self.line_list + self.continuum_list:
             if not feature['field_name'] in input_fields:
                 input_fields.append(feature['field_name'])
@@ -202,8 +213,10 @@
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
+                                    observing_redshift=observing_redshift,
                                     njobs=njobs)
-        self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
+        self._add_continua_to_spectrum(field_data, use_peculiar_velocity,
+                                       observing_redshift=observing_redshift)
 
         self.flux_field = np.exp(-self.tau_field)
 
@@ -221,20 +234,63 @@
         del field_data
         return (self.lambda_field, self.flux_field)
 
-    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity):
+    def _apply_observing_redshift(self, field_data, use_peculiar_velocity,
+                                 observing_redshift):
+        """
+        Change the redshifts of individual absorbers to account for the 
+        redshift at which the observer sits.
+
+        The intermediate redshift that is seen by an observer
+        at a redshift other than z=0 is z12, where z1 is the
+        observing redshift and z2 is the emitted photon's redshift
+        Hogg (2000) eq. 13:
+
+        1 + z12 = (1 + z2) / (1 + z1)
+        """
+        redshift_eff = None  # only set when use_peculiar_velocity is True
+        if observing_redshift == 0.:
+            # This is already assumed in the generation of the LightRay
+            redshift = field_data['redshift']
+            if use_peculiar_velocity:
+                redshift_eff = field_data['redshift_eff']
+        else:
+            # The intermediate redshift that is seen by an observer
+            # at a redshift other than z=0 is z12, where z1 is the
+            # observing redshift and z2 is the emitted photon's redshift
+            # Hogg (2000) eq. 13:
+            # 1 + z12 = (1 + z2) / (1 + z1)
+            redshift = ((1 + field_data['redshift']) / \
+                        (1 + observing_redshift)) - 1.
+            # Combining cosmological redshift and doppler redshift
+            # into an effective redshift is found in Peacock's
+            # Cosmological Physics eqn 3.75:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
+            if use_peculiar_velocity:
+                redshift_eff = ((1 + redshift) * \
+                                (1 + field_data['redshift_dopp'])) - 1.
+
+        if not use_peculiar_velocity:
+            # guard: without peculiar velocities, redshift_eff would
+            # otherwise be returned before being defined
+            redshift_eff = redshift
+
+        return redshift, redshift_eff
+
+    def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
+                                  observing_redshift=0.):
         """
         Add continuum features to the spectrum.
         """
+        # Change the redshifts of continuum sources to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
         # Only add continuum features down to tau of 1.e-3.
         min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
+
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
-                delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+                delta_lambda = continuum['wavelength'] * redshift_eff
             else:
-                delta_lambda = continuum['wavelength'] * field_data['redshift']
+                delta_lambda = continuum['wavelength'] * redshift
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
@@ -257,13 +313,19 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               output_absorbers_file, subgrid_resolution=10, 
-                               njobs=-1):
+                               output_absorbers_file, subgrid_resolution=10,
+                               observing_redshift=0., njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Widen wavelength window until optical depth falls below this tau 
-        # value at the ends to assure that the wings of a line have been 
+
+        # Change the redshifts of individual absorbers to account for the 
+        # redshift at which the observer sits
+        redshift, redshift_eff = self._apply_observing_redshift(field_data, 
+                                 use_peculiar_velocity, observing_redshift)
+
+        # Widen wavelength window until optical depth falls below this tau
+        # value at the ends to assure that the wings of a line have been
         # fully resolved.
         min_tau = 1e-3
 
@@ -274,15 +336,31 @@
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
-            # wavelength to the observed wavelength of the transition 
+            # wavelength to the observed wavelength of the transition
             if use_peculiar_velocity:
-                delta_lambda = line['wavelength'] * field_data['redshift_eff']
+                delta_lambda = line['wavelength'] * redshift_eff
             else:
-                delta_lambda = line['wavelength'] * field_data['redshift']
+                delta_lambda = line['wavelength'] * redshift
             # lambda_obs is central wavelength of line after redshift
             lambda_obs = line['wavelength'] + delta_lambda
-            # bin index in lambda_field of central wavelength of line after z
-            center_index = np.digitize(lambda_obs, self.lambda_field)
+            # the total number of absorbers per transition
+            n_absorbers = len(lambda_obs)
+
+            # we want to know the bin index in the lambda_field array
+            # where each line has its central wavelength after being
+            # redshifted.  however, because we don't know a priori how wide
+            # a line will be (ie DLAs), we have to include bin indices 
+            # *outside* the spectral range of the AbsorptionSpectrum 
+            # object.  Thus, we find the "equivalent" bin index, which
+            # may be <0 or >the size of the array.  In the end, we deposit
+            # the bins that actually overlap with the AbsorptionSpectrum's
+            # range in lambda.
+            
+            # this equation gives us the "equivalent" bin index for each line
+            # if it were placed into the self.lambda_field array
+            center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
+                            / self.bin_width.d
+            center_index = np.ceil(center_index).astype('int')
 
             # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
@@ -290,12 +368,11 @@
                                   line['atomic_mass'])
 
             # the actual thermal width of the lines
-            thermal_width = (lambda_obs * thermal_b / 
+            thermal_width = (lambda_obs * thermal_b /
                              speed_of_light_cgs).convert_to_units("angstrom")
 
             # Sanitize units for faster runtime of the tau_profile machinery.
             lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
-            lambda_1 = lambda_obs.d # line's observed frame; angstroms
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
@@ -303,94 +380,107 @@
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
-            # the spectral bin size).  Here, we create virtual wavelength bins 
-            # small enough in width to well resolve each line, deposit the 
-            # voigt profile into them, then numerically integrate their tau 
-            # values and sum them to redeposit them into the actual spectral 
+            # the spectral bin size).  Here, we create virtual wavelength bins
+            # small enough in width to well resolve each line, deposit the
+            # voigt profile into them, then numerically integrate their tau
+            # values and sum them to redeposit them into the actual spectral
             # bins.
 
             # virtual bins (vbins) will be:
             # 1) <= the bin_width; assures at least as good as spectral bins
             # 2) <= 1/10th the thermal width; assures resolving voigt profiles
             #   (actually 1/subgrid_resolution value, default is 1/10)
-            # 3) a bin width will be divisible by vbin_width times a power of 
+            # 3) a bin width will be divisible by vbin_width times a power of
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
-            resolution = thermal_width / self.bin_width 
-            vbin_width = self.bin_width / \
-                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
-            vbin_width = vbin_width.in_units('angstrom').d
+            resolution = thermal_width / self.bin_width
+            n_vbins_per_bin = (10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))).astype('int')
+            vbin_width = self.bin_width.d / n_vbins_per_bin
 
-            # the virtual window into which the line is deposited initially 
-            # spans a region of 5 thermal_widths, but this may expand
-            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
-            vbin_window_width = n_vbins*vbin_width
-
+            # a note to the user about which line components are unresolved
             if (thermal_width < self.bin_width).any():
                 mylog.info(("%d out of %d line components will be " + \
                             "deposited as unresolved lines.") %
-                           ((thermal_width < self.bin_width).sum(), 
-                            thermal_width.size))
+                           ((thermal_width < self.bin_width).sum(),
+                            n_absorbers))
 
-            valid_lines = np.arange(len(thermal_width))
+            # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
-                            (line['label'], line['wavelength']),
-                            thermal_width.size)
+                            (line['label'], line['wavelength']), n_absorbers)
 
-            # for a given transition, step through each location in the 
+            # for a given transition, step through each location in the
             # observed spectrum where it occurs and deposit a voigt profile
-            for i in parallel_objects(valid_lines, njobs=-1):
-                my_vbin_window_width = vbin_window_width[i]
-                my_n_vbins = n_vbins[i]
-                my_vbin_width = vbin_width[i]
+            for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
+ 
+                # the virtual window into which the line is deposited
+                # initially spans a region of 2 coarse spectral bins
+                # (one on each side of the center_index), but the window
+                # can expand as necessary: it doubles in width until the
+                # tau value at the far edge of the wings falls below
+                # min_tau.  the window may extend beyond the edges of the
+                # spectrum; only the overlapping portion is deposited below.
+                window_width_in_bins = 2
 
                 while True:
+                    left_index = (center_index[i] - window_width_in_bins/2)
+                    right_index = (center_index[i] + window_width_in_bins/2)
+                    n_vbins = (right_index - left_index) * n_vbins_per_bin[i]
+                    
+                    # the array of virtual bins in lambda space
                     vbins = \
-                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
-                                    lambda_1[i]+my_vbin_window_width/2., 
-                                    my_n_vbins, endpoint=False)
+                        np.linspace(self.lambda_min + self.bin_width.d * left_index, 
+                                    self.lambda_min + self.bin_width.d * right_index, 
+                                    n_vbins, endpoint=False)
 
+                    # the virtual bins and their corresponding opacities
                     vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], thermb[i],
-                            cdens[i], delta_lambda=dlambda[i],
-                            lambda_bins=vbins)
+                            lambda_0, line['f_value'], line['gamma'], 
+                            thermb[i], cdens[i], 
+                            delta_lambda=dlambda[i], lambda_bins=vbins)
 
                     # If tau has not dropped below min tau threshold by the
-                    # edges (ie the wings), then widen the wavelength 
-                    # window and repeat process. 
+                    # edges (ie the wings), then widen the wavelength
+                    # window and repeat process.
                     if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
-                    my_vbin_window_width *= 2
-                    my_n_vbins *= 2
-
-                # identify the extrema of the vbin_window so as to speed
-                # up searching over the entire lambda_field array
-                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
-                                           self.bin_width.d) + 1
-                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
-                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
-                window_width = right_index - left_index
-
-                # run digitize to identify which vbins are deposited into which
-                # global lambda bins.
-                # shift global lambda bins over by half a bin width; 
-                # this has the effect of assuring np.digitize will place 
-                # the vbins in the closest bin center.
-                binned = np.digitize(vbins, 
-                                     self.lambda_field[left_index:right_index] \
-                                     + (0.5 * self.bin_width))
+                    window_width_in_bins *= 2
 
                 # numerically integrate the virtual bins to calculate a
                 # virtual equivalent width; then sum the virtual equivalent
                 # widths and deposit into each spectral bin
-                vEW = vtau * my_vbin_width
-                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
-                EW = np.array(EW)/self.bin_width.d
-                self.tau_field[left_index:right_index] += EW
+                vEW = vtau * vbin_width[i]
+                EW = np.zeros(right_index - left_index)
+                EW_indices = np.arange(left_index, right_index)
+                for k, val in enumerate(EW_indices):
+                    EW[k] = vEW[n_vbins_per_bin[i] * k: \
+                                n_vbins_per_bin[i] * (k + 1)].sum()
+                EW = EW/self.bin_width.d
+
+                # only deposit EW bins that actually intersect the original
+                # spectral wavelength range (i.e. lambda_field)
+
+                # if EW bins don't intersect the original spectral range at all
+                # then skip the deposition
+                if ((left_index >= self.n_lambda) or \
+                    (right_index < 0)):
+                    pbar.update(i)
+                    continue
+
+                # otherwise, determine how much of the original spectrum
+                # is intersected by the expanded line window to be deposited, 
+                # and deposit the Equivalent Width data into that intersecting
+                # window in the original spectrum's tau
+                else:
+                    intersect_left_index = max(left_index, 0)
+                    intersect_right_index = min(right_index, self.n_lambda-1)
+                    self.tau_field[intersect_left_index:intersect_right_index] \
+                        += EW[(intersect_left_index - left_index): \
+                              (intersect_right_index - left_index)]
+
 
                 # write out absorbers to file if the column density of
-                # an absorber is greater than the specified "label_threshold" 
+                # an absorber is greater than the specified "label_threshold"
                 # of that absorption line
                 if output_absorbers_file and \
                    line['label_threshold'] is not None and \
@@ -404,16 +494,15 @@
                                                 'wavelength': (lambda_0 + dlambda[i]),
                                                 'column_density': column_density[i],
                                                 'b_thermal': thermal_b[i],
-                                                'redshift': field_data['redshift'][i],
-                                                'redshift_eff': field_data['redshift_eff'][i],
+                                                'redshift': redshift[i],
+                                                'redshift_eff': redshift_eff[i],
                                                 'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()
 
             del column_density, delta_lambda, lambda_obs, center_index, \
-                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
-                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
-                valid_lines, vbins, vtau, vEW
+                thermal_b, thermal_width, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins, n_vbins_per_bin
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")

This diff is so big that we needed to truncate the remainder.
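As a quick sanity check on the redshift bookkeeping above, here is a minimal
standalone sketch of the same arithmetic (plain numpy, independent of yt; the
function name and inputs are illustrative, not part of the patch). It composes
each absorber's cosmological redshift with the observer's redshift via
Hogg (2000) eq. 13, then folds in the Doppler term via Peacock's Cosmological
Physics eqn 3.75:

    import numpy as np

    def shift_to_observer_frame(z_cosmo, z_dopp, observing_redshift=0.0):
        # Hogg (2000) eq. 13: a photon emitted at z2 and seen by an
        # observer at z1 has redshift z12, with 1 + z12 = (1 + z2)/(1 + z1)
        redshift = (1.0 + z_cosmo) / (1.0 + observing_redshift) - 1.0
        # Peacock eqn 3.75: cosmological and Doppler redshifts combine
        # multiplicatively: 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp)
        redshift_eff = (1.0 + redshift) * (1.0 + z_dopp) - 1.0
        return redshift, redshift_eff

    # an absorber at z = 0.5 with a small peculiar-velocity redshift,
    # seen by an observer sitting at z = 0.1:
    z, z_eff = shift_to_observer_frame(np.array([0.5]),
                                       np.array([1.0e-4]), 0.1)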

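Similarly, the subgrid deposition in _add_lines_to_spectrum is easier to see
with toy numbers. A hedged sketch of the core idea, with all values made up
for illustration: pick a power-of-ten number of virtual bins per coarse bin
so each line is resolved to ~1/subgrid_resolution of its thermal width,
deposit tau on the fine grid, then sum each coarse bin's worth of virtual
equivalent widths back onto the coarse grid:

    import numpy as np

    bin_width = 0.01           # coarse spectral bin width (angstroms)
    thermal_width = 0.0023     # one line's thermal width (angstroms)
    subgrid_resolution = 10
    lambda_min = 900.0
    lambda_obs = 912.3456      # line center after redshifting (angstroms)

    # "equivalent" coarse bin index of the line center; it may fall
    # outside the spectrum -- only overlapping bins get deposited
    center_index = int(np.ceil((lambda_obs - lambda_min) / bin_width))

    # power-of-ten vbin count per coarse bin, so coarse bins divide evenly
    resolution = thermal_width / bin_width
    n_vbins_per_bin = int(10**max(0.0,
                          np.ceil(np.log10(subgrid_resolution / resolution))))

    # stand-in for the Voigt profile that tau_profile() would return on
    # the virtual bins spanning a 4-coarse-bin window around center_index
    window = 4
    vtau = np.random.random(window * n_vbins_per_bin)

    # integrate: virtual equivalent widths, summed per coarse bin, then
    # renormalized by the coarse bin width to recover tau per bin
    vEW = vtau * (bin_width / n_vbins_per_bin)
    tau_per_bin = vEW.reshape(window, n_vbins_per_bin).sum(axis=1) / bin_width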
https://bitbucket.org/yt_analysis/yt/commits/16efbdab0196/
Changeset:   16efbdab0196
Branch:      yt
User:        ngoldbaum
Date:        2016-03-28 18:21:00+00:00
Summary:     Fix answer tests in mike's VR PR
Affected #:  1 file

diff -r 80db1a0f9d5070fda36a8266153882d45bed7e0f -r 16efbdab019688b3845a86aa25919bdbd96657f4 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -42,7 +42,7 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_272:
+  local_varia_273:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py


https://bitbucket.org/yt_analysis/yt/commits/b6dad5b69998/
Changeset:   b6dad5b69998
Branch:      yt
User:        ngoldbaum
Date:        2016-03-29 15:21:08+00:00
Summary:     Merged in mzingale/yt-mz (pull request #2012)

add a new function, fake_vr_orientation_test_ds() that creates a
Affected #:  3 files

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r b6dad5b69998cff732a5c3ea7847a4b01092ffc8 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -42,7 +42,7 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_272:
+  local_varia_273:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r b6dad5b69998cff732a5c3ea7847a4b01092ffc8 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -317,7 +317,75 @@
     return ds
 
 
+def fake_vr_orientation_test_ds(N = 96):
+    """
+    create a toy dataset that puts a sphere at (0,0,0), a single cube
+    on +x, two cubes on +y, and three cubes on +z in a domain from
+    [-1,1]**3.  The lower planes (x = -1, y = -1, z = -1) are also
+    given non-zero values.
+
+    This dataset allows you to easily explore orientations and
+    handedness in VR and other renderings.
+
+    """
+    from yt.frontends.stream.api import load_uniform_grid
+
+    xmin = ymin = zmin = -1.0
+    xmax = ymax = zmax = 1.0
+
+    dcoord = (xmax - xmin)/N
+
+    arr = np.zeros((N,N,N), dtype=np.float64)
+    arr[:,:,:] = 1.e-4
+
+    bbox = np.array([ [xmin, xmax], [ymin, ymax], [zmin, zmax] ])
+
+    # coordinates -- in the notation data[i, j, k]
+    x = (np.arange(N) + 0.5)*dcoord + xmin
+    y = (np.arange(N) + 0.5)*dcoord + ymin
+    z = (np.arange(N) + 0.5)*dcoord + zmin
+
+    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
+
+    # sphere at the origin
+    c = np.array( [0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax) ] )
+    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
+    arr[r < 0.05] = 1.0
+
+    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
+    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
+    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
+
+    # single cube on +x
+    xc = 0.75
+    dx = 0.05
+    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
+                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
+                                        np.logical_and(z3d > -dx, z3d < dx)) )
+    arr[idx] = 1.0
+
+    # two cubes on +y
+    dy = 0.05
+    for yc in [0.65, 0.85]:
+        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
+                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
+                                            np.logical_and(z3d > -dy, z3d < dy)) )
+        arr[idx] = 0.8
+
+    # three cubes on +z
+    dz = 0.05
+    for zc in [0.5, 0.7, 0.9]:
+        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
+                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
+                                            np.logical_and(y3d > -dz, y3d < dz)) )
+        arr[idx] = 0.6
+
+    data = dict(density = (arr, "g/cm**3"))
+    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
+    return ds
+
 def expand_keywords(keywords, full=False):
+
     """
     expand_keywords is a means for testing all possible keyword
     arguments in the nosetests.  Simply pass it a dictionary of all the

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r b6dad5b69998cff732a5c3ea7847a4b01092ffc8 yt/visualization/volume_rendering/tests/test_vr_orientation.py
--- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py
+++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py
@@ -12,8 +12,7 @@
 
 
 import numpy as np
-
-from yt import load_uniform_grid
+from yt import testing
 from yt.utilities.answer_testing.framework import \
     requires_answer_testing, \
     VRImageComparisonTest, \
@@ -25,75 +24,9 @@
     off_axis_projection
 
 
-def setup_ds():
-
-    N = 96
-
-    xmin = ymin = zmin = -1.0
-    xmax = ymax = zmax = 1.0
-
-    dcoord = (xmax - xmin)/N
-
-    arr = np.zeros((N, N, N), dtype=np.float64)
-    arr[:, :, :] = 1.e-4
-
-    bbox = np.array([[xmin, xmax], [ymin, ymax], [zmin, zmax]])
-
-    # coordinates -- in the notation data[i, j, k]
-    x = (np.arange(N) + 0.5)*(xmax - xmin)/N + xmin
-    y = (np.arange(N) + 0.5)*(ymax - ymin)/N + ymin
-    z = (np.arange(N) + 0.5)*(zmax - zmin)/N + zmin
-
-    x3d, y3d, z3d = np.meshgrid(x, y, z, indexing="ij")
-
-    # sphere at the origin
-    c = np.array([0.5*(xmin + xmax), 0.5*(ymin + ymax), 0.5*(zmin + zmax)])
-
-    r = np.sqrt((x3d - c[0])**2 + (y3d - c[1])**2 + (z3d - c[2])**2)
-    arr[r < 0.05] = 1.0
-
-    arr[abs(x3d - xmin) < 2*dcoord] = 0.3
-    arr[abs(y3d - ymin) < 2*dcoord] = 0.3
-    arr[abs(z3d - zmin) < 2*dcoord] = 0.3
-
-    # single cube on +x
-    xc = 0.75
-    dx = 0.05
-    idx = np.logical_and(np.logical_and(x3d > xc-dx, x3d < xc+dx),
-                         np.logical_and(np.logical_and(y3d > -dx, y3d < dx),
-                                        np.logical_and(z3d > -dx, z3d < dx)))
-
-    arr[idx] = 1.0
-
-    # two cubes on +y
-    dy = 0.05
-    for yc in [0.65, 0.85]:
-
-        idx = np.logical_and(np.logical_and(y3d > yc-dy, y3d < yc+dy),
-                             np.logical_and(np.logical_and(x3d > -dy, x3d < dy),
-                                            np.logical_and(z3d > -dy, z3d < dy)))
-
-        arr[idx] = 0.8
-
-    # three cubes on +z
-    dz = 0.05
-    for zc in [0.5, 0.7, 0.9]:
-
-        idx = np.logical_and(np.logical_and(z3d > zc-dz, z3d < zc+dz),
-                             np.logical_and(np.logical_and(x3d > -dz, x3d < dz),
-                                            np.logical_and(y3d > -dz, y3d < dz)))
-
-        arr[idx] = 0.6
-
-    data = dict(density=(arr, "g/cm**3"))
-    ds = load_uniform_grid(data, arr.shape, bbox=bbox)
-
-    return ds
-
-
 @requires_answer_testing()
 def test_orientation():
-    ds = setup_ds()
+    ds = testing.fake_vr_orientation_test_ds()
 
     sc = Scene()
 
@@ -113,7 +46,7 @@
     for lens_type in ['plane-parallel', 'perspective']:
         frame = 0
 
-        cam = sc.add_camera(ds, lens_type='plane-parallel')
+        cam = sc.add_camera(ds, lens_type=lens_type)
         cam.resolution = (1000, 1000)
         cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
         cam.switch_orientation(normal_vector=[1., 0., 0.],

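As a usage note on the merged helper: with this in place, the toy dataset can
be pulled straight from yt.testing for interactive orientation checks. Below
is a sketch mirroring the test above; the VolumeSource setup is an assumption
based on the Scene API this test exercises, since the test's own source setup
is not shown in the quoted hunks:

    import numpy as np
    from yt.testing import fake_vr_orientation_test_ds
    from yt.visualization.volume_rendering.api import Scene, VolumeSource

    ds = fake_vr_orientation_test_ds()

    sc = Scene()
    sc.add_source(VolumeSource(ds, field=('gas', 'density')))

    # camera on the -x axis looking toward the sphere at the origin,
    # matching the test's plane-parallel setup
    cam = sc.add_camera(ds, lens_type='plane-parallel')
    cam.resolution = (512, 512)
    cam.position = ds.arr(np.array([-4., 0., 0.]), 'code_length')
    cam.switch_orientation(normal_vector=[1., 0., 0.],
                           north_vector=[0., 0., 1.])
    sc.save('vr_orientation.png')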
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.