[yt-svn] commit/yt: 17 new changesets

commits-noreply at bitbucket.org
Wed Mar 16 09:39:43 PDT 2016


17 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/9fbe63990d78/
Changeset:   9fbe63990d78
Branch:      yt
User:        atmyers
Date:        2016-02-26 18:59:29+00:00
Summary:     change the north vector in this example script to match the others.
Affected #:  1 file

diff -r 7130b7cef71f9422cc6191b755e1bd5ca96fbaa0 -r 9fbe63990d78ace6d67a198d82eea132ae3ce49e doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -273,7 +273,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
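
For reference, here is how the corrected orientation fits into a complete script. This is a minimal sketch rather than the exact doc example; the dataset and field (the MOOSE out.e-s010 sample and ('connect1', 'diffused')) are assumptions borrowed from other examples in this digest, while the camera values are taken from the snippet above.

    import yt

    # load a sample unstructured mesh dataset and build a default scene
    ds = yt.load("MOOSE_sample_data/out.e-s010")
    sc = yt.create_scene(ds, ('connect1', 'diffused'))

    # orient the camera as in the corrected snippet
    cam = sc.camera
    camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
    cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
    cam.set_position(camera_position, north_vector)

    sc.save()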
     


https://bitbucket.org/yt_analysis/yt/commits/355a5058c8ea/
Changeset:   355a5058c8ea
Branch:      yt
User:        atmyers
Date:        2016-02-27 03:16:38+00:00
Summary:     by default, cameras should make square images
Affected #:  1 file

diff -r 9fbe63990d78ace6d67a198d82eea132ae3ce49e -r 355a5058c8ea88ba059342fc7db34ba1ac5a736c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -98,7 +98,8 @@
             data_source = data_source_or_all(data_source)
             self._focus = data_source.ds.domain_center
             self._position = data_source.ds.domain_right_edge
-            self._width = 1.5*data_source.ds.domain_width
+            self._width = data_source.ds.arr(
+                [1.5*data_source.ds.domain_width.max()]*3)
             self._domain_center = data_source.ds.domain_center
             self._domain_width = data_source.ds.domain_width
         if auto:
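
With this change the default camera width is the same along each axis (1.5 times the largest domain extent), so images come out square. The width can still be overridden per axis afterwards; a minimal sketch, with the dataset and numbers as placeholders:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)
    cam = sc.camera

    # the default is now cubic; set an explicit (possibly non-cubic) width if desired
    cam.width = ds.arr([1.5, 1.0, 1.0], 'code_length')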


https://bitbucket.org/yt_analysis/yt/commits/7f89b97cc103/
Changeset:   7f89b97cc103
Branch:      yt
User:        atmyers
Date:        2016-02-27 03:17:14+00:00
Summary:     pad the domain boundaries for Exodus Datasets
Affected #:  1 file

diff -r 355a5058c8ea88ba059342fc7db34ba1ac5a736c -r 7f89b97cc10351f96f047e6733da0915c7dc9243 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -354,6 +354,11 @@
             displaced_coords = self._apply_displacement(coords, mesh_id)
             mi = np.minimum(displaced_coords.min(axis=0), mi)
             ma = np.maximum(displaced_coords.max(axis=0), ma)
+
+        # pad domain boundaries
+        width = ma - mi
+        mi -= 0.1 * width
+        ma += 0.1 * width
         return mi, ma
 
     @classmethod
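
The padding is simply a 10% widening of the computed bounding box on each side; worked through with hypothetical numbers:

    import numpy as np

    mi = np.array([0.0, 0.0, 0.0])   # hypothetical domain minimum
    ma = np.array([1.0, 2.0, 4.0])   # hypothetical domain maximum

    width = ma - mi
    mi -= 0.1 * width
    ma += 0.1 * width
    # mi -> [-0.1, -0.2, -0.4], ma -> [1.1, 2.2, 4.4]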


https://bitbucket.org/yt_analysis/yt/commits/ff91721d5e4a/
Changeset:   ff91721d5e4a
Branch:      yt
User:        atmyers
Date:        2016-02-27 03:56:30+00:00
Summary:     adding some mesh rendering answer tests
Affected #:  1 file

diff -r 7f89b97cc10351f96f047e6733da0915c7dc9243 -r ff91721d5e4a5ad38601ae1e3f686ee11a4481c2 yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -11,11 +11,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import fake_tetrahedral_ds
-from yt.testing import fake_hexahedral_ds
-from yt.testing import requires_module
-from yt.visualization.volume_rendering.render_source import MeshSource
-from yt.visualization.volume_rendering.camera import Camera
+from yt.testing import \
+    fake_tetrahedral_ds, \
+    fake_hexahedral_ds, \
+    requires_module
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load, \
+    GenericImageTest
+from yt.visualization.volume_rendering.api import \
+    MeshSource, \
+    Camera, \
+    create_scene
 
 
 @requires_module("pyembree")
@@ -38,3 +45,60 @@
         images.append(im)
 
     return images
+
+
+hex8 = "MOOSE_sample_data/out.e-s010"
+hex8_fields = [('connect1', 'diffused'), ('connect2', 'convected')]
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_hex8_render():
+    for field in hex8_fields:
+        ds = data_dir_load(hex8, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+
+        def mesh_render_image_func(filename_prefix):
+            return im.write_image(filename_prefix)
+
+        test = GenericImageTest(ds, mesh_render_image_func, 12)
+        test.prefix = "render_answers_hex8_%s_%s" % field
+        yield test
+
+
+tet4 = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
+tet4_fields = [("connect1", "u")]
+
+@requires_ds(tet4)
+@requires_module("pyembree")
+def test_tet4_render():
+    for field in tet4_fields:
+        ds = data_dir_load(tet4, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+
+        def mesh_render_image_func(filename_prefix):
+            return im.write_image(filename_prefix)
+
+        test = GenericImageTest(ds, mesh_render_image_func, 12)
+        test.prefix = "render_answers_tet4_%s_%s" % field
+        yield test
+
+
+hex20 = "MOOSE_sample_data/mps_out.e"
+hex20_fields = [('connect2', 'temp')]
+
+@requires_ds(hex20)
+@requires_module("pyembree")
+def test_hex20_render():
+    for field in hex20_fields:
+        ds = data_dir_load(hex20, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+
+        def mesh_render_image_func(filename_prefix):
+            return im.write_image(filename_prefix)
+
+        test = GenericImageTest(ds, mesh_render_image_func, 12)
+        test.prefix = "render_answers_hex20_%s_%s" % field
+        yield test


https://bitbucket.org/yt_analysis/yt/commits/751cfc0945e9/
Changeset:   751cfc0945e9
Branch:      yt
User:        atmyers
Date:        2016-02-27 07:45:09+00:00
Summary:     adding more mesh rendering answer tests.
Affected #:  1 file

diff -r ff91721d5e4a5ad38601ae1e3f686ee11a4481c2 -r 751cfc0945e9f09757706905ef91653537882919 yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -22,6 +22,7 @@
 from yt.visualization.volume_rendering.api import \
     MeshSource, \
     Camera, \
+    Scene, \
     create_scene
 
 
@@ -47,6 +48,14 @@
     return images
 
 
+def compare(ds, im, test_prefix, decimals=12):
+    def mesh_render_image_func(filename_prefix):
+        return im.write_image(filename_prefix)
+
+    test = GenericImageTest(ds, mesh_render_image_func, decimals)
+    test.prefix = test_prefix
+    return test
+
 hex8 = "MOOSE_sample_data/out.e-s010"
 hex8_fields = [('connect1', 'diffused'), ('connect2', 'convected')]
 
@@ -57,13 +66,7 @@
         ds = data_dir_load(hex8, kwargs={'step':-1})
         sc = create_scene(ds, field)
         im = sc.render()
-
-        def mesh_render_image_func(filename_prefix):
-            return im.write_image(filename_prefix)
-
-        test = GenericImageTest(ds, mesh_render_image_func, 12)
-        test.prefix = "render_answers_hex8_%s_%s" % field
-        yield test
+        yield compare(ds, im, "render_answers_hex8_%s_%s" % field)
 
 
 tet4 = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
@@ -76,13 +79,7 @@
         ds = data_dir_load(tet4, kwargs={'step':-1})
         sc = create_scene(ds, field)
         im = sc.render()
-
-        def mesh_render_image_func(filename_prefix):
-            return im.write_image(filename_prefix)
-
-        test = GenericImageTest(ds, mesh_render_image_func, 12)
-        test.prefix = "render_answers_tet4_%s_%s" % field
-        yield test
+        yield compare(ds, im, "render_answers_tet4_%s_%s" % field)
 
 
 hex20 = "MOOSE_sample_data/mps_out.e"
@@ -95,10 +92,57 @@
         ds = data_dir_load(hex20, kwargs={'step':-1})
         sc = create_scene(ds, field)
         im = sc.render()
+        yield compare(ds, im, "render_answers_hex20_%s_%s" % field)
 
-        def mesh_render_image_func(filename_prefix):
-            return im.write_image(filename_prefix)
 
-        test = GenericImageTest(ds, mesh_render_image_func, 12)
-        test.prefix = "render_answers_hex20_%s_%s" % field
-        yield test
+wedge6 = "MOOSE_sample_data/wedge_out.e"
+wedge6_fields = [('connect1', 'diffused')]
+
+@requires_ds(wedge6)
+@requires_module("pyembree")
+def test_wedge6_render():
+    for field in wedge6_fields:
+        ds = data_dir_load(wedge6, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_wedge6_%s_%s" % field)
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_perspective_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = create_scene(ds, ("connect2", "diffused"))
+
+    cam = Camera(ds, lens_type='perspective')
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
+    sc.camera = cam
+    cam.resolution = (800, 800)
+    im = sc.render()
+    yield compare(ds, im, "perspective_mesh_render")
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_composite_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = Scene()
+    cam = Camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+    sc.camera = cam
+
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+    yield compare(ds, im, "composite_mesh_render")
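
With the compare() helper in place, an answer test for an additional mesh dataset follows the same few-line pattern. A sketch, where the dataset path and field tuple are hypothetical:

    @requires_ds("MOOSE_sample_data/another_out.e")   # hypothetical sample dataset
    @requires_module("pyembree")
    def test_another_mesh_render():
        ds = data_dir_load("MOOSE_sample_data/another_out.e", kwargs={'step': -1})
        sc = create_scene(ds, ('connect1', 'diffused'))  # field name assumed
        im = sc.render()
        yield compare(ds, im, "render_answers_another_connect1_diffused")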


https://bitbucket.org/yt_analysis/yt/commits/57e62378bcf3/
Changeset:   57e62378bcf3
Branch:      yt
User:        atmyers
Date:        2016-02-27 19:46:11+00:00
Summary:     adding a wedge element rendering example to the docs
Affected #:  1 file

diff -r 751cfc0945e9f09757706905ef91653537882919 -r 57e62378bcf352bd8962b76b18388169d8255312 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -214,6 +214,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 


https://bitbucket.org/yt_analysis/yt/commits/5bcf6c5645d1/
Changeset:   5bcf6c5645d1
Branch:      yt
User:        atmyers
Date:        2016-02-27 19:50:15+00:00
Summary:     adding a wedge element slice example to the docs
Affected #:  1 file

diff -r 57e62378bcf352bd8962b76b18388169d8255312 -r 5bcf6c5645d15a0c0fc4a5c404c6c15dbe76c02f doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
+a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 


https://bitbucket.org/yt_analysis/yt/commits/a3d319a05e3a/
Changeset:   a3d319a05e3a
Branch:      yt
User:        atmyers
Date:        2016-02-27 20:36:16+00:00
Summary:     fixing the position attribute setter for Camera.
Affected #:  1 file

diff -r 5bcf6c5645d15a0c0fc4a5c404c6c15dbe76c02f -r a3d319a05e3a56544a269d2349af4cd046d0cf73 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -125,6 +125,7 @@
             if isinstance(value, YTArray):
                 value = value.in_units("code_length")
             self._position = value
+            self.normal_vector = self.focus - self._position
             self.switch_orientation()
 
         def fdel(self):
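
After this fix, assigning to the position property re-derives the normal vector from the current focus, so a bare attribute assignment re-aims the camera much like set_position does. A small sketch, assuming a scene and dataset set up as in the tests above:

    cam = sc.camera
    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
    # this assignment now also updates cam.normal_vector to focus - position
    cam.position = ds.arr([-3.0, 3.0, -3.0], 'code_length')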


https://bitbucket.org/yt_analysis/yt/commits/cf054e982488/
Changeset:   cf054e982488
Branch:      yt
User:        atmyers
Date:        2016-03-09 20:34:01+00:00
Summary:     merging
Affected #:  136 files

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -94,16 +94,16 @@
 
 There is a third, borderline class of field in yt, as well.  This is the
 "alias" type, where a field on disk (for example, (frontend, ``Density``)) is 
-aliased into an internal yt-name (for example, (``gas``, ``density``)).  The 
+aliased into an internal yt-name (for example, (``gas``, ``density``)). The 
 aliasing process allows universally-defined derived fields to take advantage of 
 internal names, and it also provides an easy way to address what units something 
 should be returned in.  If an aliased field is requested (and aliased fields 
 will always be lowercase, with underscores separating words) it will be returned 
-in CGS units (future versions will enable global defaults to be set for MKS and 
-other unit systems), whereas if the frontend-specific field is requested, it 
-will not undergo any unit conversions from its natural units.  (This rule is 
-occasionally violated for fields which are mesh-dependent, specifically particle 
-masses in some cosmology codes.)
+in the units specified by the unit system of the dataset (see :ref:`unit_systems`
+for a guide to using the different unit systems in yt), whereas if the 
+frontend-specific field is requested, it will not undergo any unit conversions 
+from its natural units.  (This rule is occasionally violated for fields which 
+are mesh-dependent, specifically particle masses in some cosmology codes.)
 
 .. _known-field-types:
 
@@ -125,7 +125,8 @@
 * ``gas`` -- This is the usual default for simulation frontends for fluid
   types.  These fields are typically aliased to the frontend-specific mesh
   fields for grid-based codes or to the deposit fields for particle-based
-  codes.  Default units are in CGS.
+  codes.  Default units are in the unit system of the dataset (see 
+  :ref:`unit_systems` for more information).
 * particle type -- These are particle fields that exist on-disk as written 
   by individual frontends.  If the frontend designates names for these particles
   (i.e. particle type) those names are the field types. 
@@ -240,6 +241,37 @@
    print(ds.field_info["gas", "pressure"].get_units())
    print(ds.field_info["gas", "pressure"].get_source())
 
+.. _bfields:
+
+Magnetic Fields
+---------------
+
+Magnetic fields require special handling, because their dimensions are different in
+different systems of units, in particular between the CGS and MKS (SI) systems of units.
+Superficially, it would appear that they are in the same dimensions, since the units 
+of the magnetic field in the CGS and MKS system are gauss (:math:`\rm{G}`) and tesla 
+(:math:`\rm{T}`), respectively, and numerically :math:`1~\rm{G} = 10^{-4}~\rm{T}`. However, 
+if we examine the base units, we find that they do indeed have different dimensions:
+
+.. math::
+
+    \rm{1~G = 1~\frac{\sqrt{g}}{\sqrt{cm}\cdot{s}}} \\
+    \rm{1~T = 1~\frac{kg}{A\cdot{s^2}}}
+
+It is easier to see the difference between the dimensionality of the magnetic field in the two
+systems in terms of the definition of the magnetic pressure:
+
+.. math::
+
+    p_B = \frac{B^2}{8\pi}~\rm{(cgs)} \\
+    p_B = \frac{B^2}{2\mu_0}~\rm{(MKS)}
+
+where :math:`\mu_0 = 4\pi \times 10^{-7}~\rm{N/A^2}` is the vacuum permeability. yt automatically
+detects on a per-frontend basis what units the magnetic field should be in, and allows conversion between 
+different magnetic field units in the different :ref:`unit systems <unit_systems>` as well. To 
+determine how to set up special magnetic field handling when designing a new frontend, check out 
+:ref:`bfields-frontend`.
+
 Particle Fields
 ---------------
 

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,6 +246,8 @@
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
+.. _region-reference:
+
 3D Objects
 """"""""""
 
@@ -256,8 +258,6 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
-.. _region-reference:
-
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -24,9 +24,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -41,9 +39,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (maxval)"
@@ -52,9 +48,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dens)"
@@ -63,9 +57,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "mass = dd['cell_mass']\n",
@@ -79,9 +71,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "dx = dd['dx']\n",
@@ -107,9 +97,11 @@
     "* `in_units`\n",
     "* `in_cgs`\n",
     "* `in_mks`\n",
+    "* `in_base`\n",
     "* `convert_to_units`\n",
     "* `convert_to_cgs`\n",
-    "* `convert_to_mks`"
+    "* `convert_to_mks`\n",
+    "* `convert_to_base`"
    ]
   },
   {
@@ -122,9 +114,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'].in_units('Msun/pc**3'))"
@@ -134,35 +124,73 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
+    "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['pressure'])\n",
-    "print ((dd['pressure']).in_cgs())\n",
-    "print ((dd['pressure']).in_mks())"
+    "print (dd['pressure'].in_cgs())\n",
+    "print (dd['pressure'].in_mks())"
    ]
   },
   {
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The next two methods do in-place conversions:"
+    "`in_cgs` and `in_mks` are just special cases of the more general `in_base`, which can convert a `YTArray` to a number of different unit systems:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base('imperial')) # Imperial/English base units\n",
+    "print (dd['pressure'].in_base('galactic')) # Base units of kpc, Msun, Myr\n",
+    "print (dd['pressure'].in_base('planck')) # Base units in the Planck system\n",
+    "print (dd['pressure'].in_base()) # defaults to cgs if no argument given"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`in_base` can even take a dataset as the argument to convert the `YTArray` into the base units of the dataset:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print (dd['pressure'].in_base(ds)) # The IsolatedGalaxy dataset from above"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "yt defines a number of unit systems, and new unit systems may be added by the user, which can also be passed to `in_base`. To learn more about the unit systems, how to use them with datasets and other objects, and how to add new ones, see [Unit Systems](unit_systems.html)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The rest of the methods do in-place conversions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
    "outputs": [],
    "source": [
     "dens = dd['density']\n",
@@ -182,9 +210,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['density'])\n",
@@ -206,9 +232,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'])\n",
@@ -234,9 +258,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
@@ -249,9 +271,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
@@ -285,9 +305,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import numpy as np\n",
@@ -317,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (dd['cell_mass'].ndarray_view())\n",
@@ -338,9 +354,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "density_values = dd['density'].d\n",
@@ -374,23 +388,19 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from astropy import units as u\n",
     "\n",
     "x = 42.0 * u.meter\n",
-    "y = yt.YTQuantity.from_astropy(x) "
+    "y = yt.YTQuantity.from_astropy(x)"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (x, type(x))\n",
@@ -400,9 +410,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "a = np.random.random(size=10) * u.km/u.s\n",
@@ -412,9 +420,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (a, type(a))\n",
@@ -431,9 +437,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "temp = dd[\"temperature\"]\n",
@@ -443,9 +447,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -462,9 +464,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from yt.utilities.physical_constants import kboltz\n",
@@ -474,9 +474,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (kboltz, type(kboltz))\n",
@@ -493,9 +491,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "k1 = kboltz.to_astropy()\n",
@@ -506,9 +502,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "c = yt.YTArray.from_astropy(a)\n",
@@ -526,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "from pint import UnitRegistry\n",
@@ -540,9 +532,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (v, type(v))\n",
@@ -552,9 +542,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ptemp = temp.to_pint()"
@@ -563,9 +551,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (temp, type(temp))\n",
@@ -582,7 +568,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -594,4 +580,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
--- a/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
+++ b/doc/source/analyzing/units/4)_Comparing_units_from_different_datasets.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional arrays instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
+    "Units that refer to the internal simulation coordinate system will have different CGS conversion factors in different datasets.  Depending on how a unit system is implemented, this could add an element of uncertainty when we compare dimensional array instances produced by different unit systems.  Fortunately, this is not a problem for `YTArray` since all `YTArray` unit systems are defined in terms of physical CGS units.\n",
     "\n",
     "As an example, let's load up two enzo datasets from different redshifts in the same cosmology simulation."
    ]
@@ -12,9 +12,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A high redshift output from z ~ 8\n",
@@ -29,9 +27,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# A low redshift output from z ~ 0\n",
@@ -51,9 +47,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit.in_cgs()/ds1.length_unit.in_cgs() == (1+ds1.current_redshift)/(1+ds2.current_redshift))"
@@ -69,9 +63,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (ds2.length_unit/ds1.length_unit)"
@@ -89,9 +81,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -120,7 +110,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -132,4 +122,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/units/7)_Unit_Systems.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/7)_Unit_Systems.ipynb
@@ -0,0 +1,491 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default, the results of most calculations in yt are expressed in a \"centimeters-grams-seconds\" (CGS) set of units. This includes the values of derived fields and aliased fields.\n",
+    "\n",
+    "However, this system of units may not be the most natural for a given dataset or an entire class of datasets. For this reason, yt provides the ability to define new unit systems and use them in a way that is highly configurable by the end-user. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Unit Systems Available in yt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Several unit systems are already supplied for use within yt. They are:\n",
+    "\n",
+    "* `\"cgs\"`: Centimeters-grams-seconds unit system, with base of `(cm, g, s, K, radian)`. Uses the Gaussian normalization for electromagnetic units. \n",
+    "* `\"mks\"`: Meters-kilograms-seconds unit system, with base of `(m, kg, s, K, radian, A)`.\n",
+    "* `\"imperial\"`: Imperial unit system, with base of `(mile, lbm, s, R, radian)`.\n",
+    "* `\"galactic\"`: \"Galactic\" unit system, with base of `(kpc, Msun, Myr, K, radian)`.\n",
+    "* `\"solar\"`: \"Solar\" unit system, with base of `(AU, Mearth, yr, K, radian)`. \n",
+    "* `\"planck\"`: Planck natural units $(\\hbar = c = G = k_B = 1)$, with base of `(l_pl, m_pl, t_pl, T_pl, radian)`. \n",
+    "* `\"geometrized\"`: Geometrized natural units $(c = G = 1)$, with base of `(l_geom, m_geom, t_geom, K, radian)`. \n",
+    "\n",
+    "We can examine these unit systems by querying them from the `unit_system_registry`. For example, we can look at the default CGS system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "import yt\n",
+    "yt.unit_system_registry[\"cgs\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can see that we have two sets of units that this system defines: \"base\" and \"other\" units. The \"base\" units are the set of units from which all other units in the system are composed of, such as centimeters, grams, and seconds. The \"other\" units are compound units which fields with specific dimensionalities are converted to, such as ergs, dynes, gauss, and electrostatic units (esu). \n",
+    "\n",
+    "We see a similar setup for the MKS system, except that in this case, there is a base unit of current, the Ampere:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"mks\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also look at the imperial system:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"imperial\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "and the \"galactic\" system as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"galactic\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Converting `YTArrays` to the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Choosing a Unit System When Loading a Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "When a dataset is `load`ed, a unit system may be specified. When this happens, all aliased and derived fields will be converted to the units of the given system. The default is `\"cgs\"`.\n",
+    "\n",
+    "For example, we can specify that the fields from a FLASH dataset can be expressed in MKS units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_flash = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100\", unit_system=\"mks\")\n",
+    "sp = ds_flash.sphere(\"c\", (100.,\"kpc\"))\n",
+    "print (sp[\"density\"]) # This is an alias for (\"flash\",\"dens\")\n",
+    "print (sp[\"pressure\"]) # This is an alias for (\"flash\",\"pres\")\n",
+    "print (sp[\"angular_momentum_x\"]) # This is a derived field\n",
+    "print (sp[\"kinetic_energy\"]) # This is also a derived field"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Aliased fields are converted to the requested unit system, but the on-disk fields that they correspond to remain in their original (code) units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "print (sp[\"flash\",\"dens\"]) # This is aliased to (\"gas\", \"density\")\n",
+    "print (sp[\"flash\",\"pres\"]) # This is aliased to (\"gas\", \"pressure\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can take an `Enzo` dataset and express it in `\"galactic\"` units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_enzo = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\", unit_system=\"galactic\")\n",
+    "sp = ds_enzo.sphere(\"c\", (20.,\"kpc\"))\n",
+    "print (sp[\"density\"])\n",
+    "print (sp[\"pressure\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can also express all of the fields associated with a dataset in that dataset's system of \"code\" units. Though the on-disk fields are already in these units, this means that we can express even derived fields in code units as well:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "ds_chombo = yt.load(\"KelvinHelmholtz/data.0004.hdf5\", unit_system=\"code\")\n",
+    "dd = ds_chombo.all_data()\n",
+    "print (dd[\"density\"])\n",
+    "print (dd[\"kinetic_energy\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Fields So That They Can Use the Different Unit Systems"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you define a new derived field for use in yt and wish to make the different unit systems available to it, you will need to specify this when calling `add_field`. Suppose I defined a new field called `\"momentum_x\"` and wanted it to have general units. I would have to set it up in this fashion, using the `unit_system` attribute of the dataset and querying it for the appropriate dimensions:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mom_units = ds_flash.unit_system[\"velocity\"]*ds_flash.unit_system[\"density\"]\n",
+    "def _momentum_x(field, data):\n",
+    "    return data[\"density\"]*data[\"velocity_x\"]\n",
+    "ds_flash.add_field((\"gas\",\"momentum_x\"), function=_momentum_x, units=mom_units)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, the field will automatically be expressed in whatever units the dataset was called with. In this case, it was MKS:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "slc = yt.SlicePlot(ds_flash, \"z\", [\"momentum_x\"], width=(300.,\"kpc\"))\n",
+    "slc.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the momentum density has been plotted with the correct MKS units of $\\mathrm{kg/(m^2\\cdot{s})}$."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "If you don't create a derived field from a dataset but instead use `yt.add_field`, and still want to use the unit system of that dataset for the units, the only option at present is to set `units=\"auto\"` in the call to `yt.add_field` and the `dimensions` keyword to the correct dimensions for the field:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.units import clight\n",
+    "\n",
+    "def _rest_energy(field, data):\n",
+    "    return data[\"cell_mass\"]*clight*clight\n",
+    "yt.add_field((\"gas\",\"rest_energy\"), function=_rest_energy, units=\"auto\", dimensions=\"energy\")\n",
+    "\n",
+    "ds_flash2 = yt.load(\"GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150\", unit_system=\"galactic\")\n",
+    "\n",
+    "sp = ds_flash2.sphere(\"c\", (100.,\"kpc\"))\n",
+    "sp[\"rest_energy\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Obtaining Physical Constants in a Specific Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Each unit system provides the ability to obtain any physical constant in yt's physical constants database in the base units of that system via the `constants` attribute of the unit system. For example, to obtain the value of Newton's universal constant of gravitation in different base units:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "for name in [\"cgs\", \"mks\", \"imperial\", \"planck\", \"geometrized\"]:\n",
+    "    unit_system = yt.unit_system_registry[name]\n",
+    "    print (name, unit_system.constants.G)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Equivalently, one could import a physical constant from the main database and convert it using `in_base`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "from yt.utilities.physical_constants import G\n",
+    "print (G.in_base(\"mks\"))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Defining Your Own Unit System"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You are not limited to using the unit systems already defined by yt. A new unit system can be defined by creating a new `UnitSystem` instance. For example, to create a unit system where the default units are in millimeters, centigrams, and microseconds:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system = yt.UnitSystem(\"small\", \"mm\", \"cg\", \"us\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "where the required arguments are a `name` for the unit system, and the `length_unit`, `mass_unit`, and `time_unit` for the unit system, which serve as the \"base\" units to convert everything else to. Once a unit system instance is created, it is automatically added to the `unit_system_registry` so that it may be used throughout yt:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "yt.unit_system_registry[\"small\"]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the base units for the dimensions of angle and temperature have been automatically set to radians and Kelvin, respectively. If desired, these can be specified using optional arguments when creating the `UnitSystem` object:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "wacky_unit_system = yt.UnitSystem(\"wacky\", \"mile\", \"kg\", \"day\", temperature_unit=\"R\", angle_unit=\"deg\")\n",
+    "wacky_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Though it will rarely be necessary, an MKS-style system of units where a unit of current can be specified as a base unit can also be created using the `current_mks` optional argument:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "mksish_unit_system = yt.UnitSystem(\"mksish\", \"dm\", \"ug\", \"ks\", current_mks_unit=\"mA\")\n",
+    "mksish_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Initializing a `UnitSystem` object only sets up the base units. In this case, all fields will be converted to combinations of these base units based on their dimensionality. However, you may want to specify that fields of a given dimensionality use a compound unit by default instead. For example, you might prefer that in the `\"small\"` unit system that pressures be represented in microdynes per millimeter squared. To do this, set these to be the units of the `\"pressure\"` dimension explicitly:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"pressure\"] = \"udyne/mm**2\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can now look at the `small_unit_system` object and see that these units are now defined for pressure in the \"Other Units\" category:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We can do the same for a few other dimensionalities:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "small_unit_system[\"magnetic_field_cgs\"] = \"mG\"\n",
+    "small_unit_system[\"specific_energy\"] = \"cerg/ug\"\n",
+    "small_unit_system[\"velocity\"] = \"cm/s\"\n",
+    "small_unit_system"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
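
Condensed from the notebook added above, the new unit-system machinery looks roughly like this in use (a sketch; the IsolatedGalaxy sample dataset is borrowed from elsewhere in this merge):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    dd = ds.all_data()

    # convert a YTArray to the base units of a named unit system, or of a dataset
    print(dd['pressure'].in_base('imperial'))
    print(dd['pressure'].in_base('galactic'))
    print(dd['pressure'].in_base(ds))

    # user-defined unit systems register themselves automatically
    small = yt.UnitSystem("small", "mm", "cg", "us")
    small["pressure"] = "udyne/mm**2"
    print(yt.unit_system_registry["small"])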

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -34,6 +34,7 @@
    comparing_units_from_different_datasets
    units_and_plotting
    unit_equivalencies
+   unit_systems
 
 .. note::
 

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/analyzing/units/unit_systems.rst
--- /dev/null
+++ b/doc/source/analyzing/units/unit_systems.rst
@@ -0,0 +1,7 @@
+.. _unit_systems:
+
+Unit Systems
+============
+
+.. notebook:: 7)_Unit_Systems.ipynb
+   :skip_exceptions:

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,31 +1,30 @@
 import yt
 import numpy as np
 
-# Follow the simple_volume_rendering cookbook for the first part of this.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
+ds = yt.load("MOOSE_sample_data/out.e-s010")
 sc = yt.create_scene(ds)
 cam = sc.camera
-cam.resolution = (512, 512)
-cam.set_width(ds.domain_width/20.0)
 
-# Find the maximum density location, store it in max_c
-v, max_c = ds.find_max('density')
+# save an image at the starting position
+frame = 0
+sc.save('camera_movement_%04i.png' % frame)
+frame += 1
 
-frame = 0
-# Move to the maximum density location over 5 frames
-for _ in cam.iter_move(max_c, 5):
+# Zoom out by a factor of 2 over 5 frames
+for _ in cam.iter_zoom(0.5, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Zoom in by a factor of 10 over 5 frames
-for _ in cam.iter_zoom(10.0, 5):
+# Move to the position [-10.0, 10.0, -10.0] over 5 frames
+pos = ds.arr([-10.0, 10.0, -10.0], 'code_length')
+for _ in cam.iter_move(pos, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Do a rotation over 5 frames
+# Rotate by 180 degrees over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -195,7 +195,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In this recipe, we move a camera through a domain and take multiple volume
-rendering snapshots.
+rendering snapshots. This recipe uses an unstructured mesh dataset (see
+:ref:`unstructured_mesh_rendering`), which makes it easier to visualize what 
+the Camera is doing, but you can manipulate the Camera for other dataset types 
+in exactly the same manner.
+
 See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -1,5 +1,5 @@
 import yt
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 
 field = ("gas", "density")
@@ -19,7 +19,7 @@
 tf.grey_opacity = True
 
 # Plane-parallel lens
-cam = Camera(ds, lens_type='plane-parallel')
+cam = sc.add_camera(ds, lens_type='plane-parallel')
 # Set the resolution of tbe final projection.
 cam.resolution = [250, 250]
 # Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
@@ -32,13 +32,12 @@
 # Set the width of the camera, where width[0] and width[1] specify the length and
 # height of final projection, while width[2] in plane-parallel lens is not used.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
-cam = Camera(ds, lens_type='perspective')
+cam = sc.add_camera(ds, lens_type='perspective')
 cam.resolution = [250, 250]
 # Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
 # specified by camera width) along the positive x direction.
@@ -49,13 +48,12 @@
 # height of the final projection, while width[2] specifies the distance between the
 # camera and the final image.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
-cam = Camera(ds, lens_type='stereo-perspective')
+cam = sc.add_camera(ds, lens_type='stereo-perspective')
 # Set the size ratio of the final projection to be 2:1, since stereo-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [500, 250]
@@ -65,14 +63,13 @@
 cam.set_width(ds.domain_width*0.5)
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-cam = Camera(dd, lens_type='fisheye')
+cam = sc.add_camera(dd, lens_type='fisheye')
 cam.resolution = [250, 250]
 v, c = ds.find_max(field)
 cam.set_position(c - 0.0005 * ds.domain_width)
@@ -80,13 +77,12 @@
                        north_vector=north_vector)
 cam.set_width(ds.domain_width)
 cam.lens.fov = 360.0
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
-cam = Camera(ds, lens_type='spherical')
+cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
 cam.resolution = [500, 250]
@@ -97,13 +93,12 @@
                        north_vector=north_vector)
 # In (stereo)spherical camera, camera width is not used since the entire volume
 # will be rendered
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
-cam = Camera(ds, lens_type='stereo-spherical')
+cam = sc.add_camera(ds, lens_type='stereo-spherical')
 # Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [1000, 250]
@@ -114,7 +109,6 @@
 # will be rendered
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-spherical.png', sigma_clip=6.0)
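
The cookbook change above replaces direct Camera construction plus manual assignment with the Scene.add_camera helper, i.e. (sketch):

    # previously:
    #     cam = Camera(ds, lens_type='perspective')
    #     sc.camera = cam
    # now the scene creates and attaches the camera itself:
    cam = sc.add_camera(ds, lens_type='perspective')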

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -33,7 +33,10 @@
 In this example, the ``density`` field will return data with units of
 ``g/cm**3`` and the ``thermal_energy`` field will return data units of
 ``erg/g``, so the result will automatically have units of pressure,
-``erg/cm**3``.
+``erg/cm**3``. This assumes the unit system is set to the default, which is
+CGS: if a different unit system is selected, the result will be in the same
+dimensions of pressure but different units. See :ref:`unit_systems` for more
+information.
 
 Once we've defined our function, we need to notify yt that the field is
 available.  The :func:`add_field` function is the means of doing this; it has a
@@ -47,7 +50,7 @@
 
 .. code-block:: python
 
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   yt.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
 We feed it the name of the field, the name of the function, and the
 units.  Note that the units parameter is a "raw" string, in the format that yt 
@@ -59,7 +62,7 @@
 as in the ``_pressure`` example above.
 
 Field definitions return array data with units. If the field function returns
-data in a dimensionally equivalent unit (e.g. a ``dyne`` versus a ``N``), the
+data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
 with dimensions that are incompatible with units specified in ``add_field``,
@@ -67,7 +70,7 @@
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
 
-If your field definition influcdes physical constants rather than defining a
+If your field definition includes physical constants rather than defining a
 constant as a float, you can import it from ``yt.utilities.physical_constants``
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
@@ -82,7 +85,29 @@
 Lastly, if you do not know the units of your field ahead of time, you can
 specify ``units='auto'`` in the call to ``add_field`` for your field.  This will
 automatically determine the appropriate units based on the units of the data
-returned by the field function.
+returned by the field function. This is also a good way to let your derived fields
+be automatically converted to the units of the :ref:`unit system <unit_systems>` in 
+your dataset. 
+
+If ``units='auto'`` is set, it is also required to set the ``dimensions`` keyword
+argument so that error-checking can be done on the derived field to make sure that
+the dimensionality of the returned array and the field are the same:
+
+.. code-block:: python
+
+    import yt
+    from yt.units import dimensions
+    
+    def _pressure(field, data):
+        return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+              
+    yt.add_field(("gas","pressure"), function=_pressure, units="auto",
+                 dimensions=dimensions.pressure)
+
+If ``dimensions`` is not set, an error will be thrown. The ``dimensions`` keyword
+can be a SymPy ``symbol`` object imported from ``yt.units.dimensions``, a compound
+dimension of these, or a string corresponding to one of these objects. 
 
 :func:`add_field` can be invoked in two other ways. The first is by the 
 function decorator :func:`derived_field`. The following code is equivalent to 
@@ -111,10 +136,27 @@
 .. code-block:: python
 
    ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
+   ds.add_field(("gas", "pressure"), function=_pressure, units="dyne/cm**2")
 
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
+If you specify fields in this way, you can take advantage of the dataset's
+:ref:`unit system <unit_systems>` to define the units for you, so that the
+field data will be returned in the units of that system:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "pressure"), function=_pressure, units=ds.unit_system["pressure"])
+
+Since the :class:`yt.units.unit_systems.UnitSystem` object returns a
+:class:`yt.units.unit_object.Unit` object when queried, you're not limited to
+specifying units in terms of those already available. You can specify units
+for fields using basic arithmetic if necessary:
+
+.. code-block:: python
+
+    ds.add_field(("gas", "my_acceleration"), function=_my_acceleration,
+                 units=ds.unit_system["length"]/ds.unit_system["time"]**2)
+
+If you find yourself using the same custom-defined fields over and over, you
+should put them in your plugins file as described in :ref:`plugin-file`.
 
 A More Complicated Example
 --------------------------
@@ -148,7 +190,7 @@
        y_hat /= r
        z_hat /= r
        return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
+   yt.add_field(("gas","my_radial_velocity"),
                 function=_my_radial_velocity,
                 units="cm/s",
                 take_log=False,
@@ -195,8 +237,11 @@
 ``function``
      This is a function handle that defines the field
 ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
+     This is a string that describes the units, or a query to a
+     :ref:`UnitSystem <unit_systems>` object, e.g. ``ds.unit_system["energy"]``.
+     Powers must be in Python syntax (``**`` instead of ``^``). Alternatively,
+     it may be set to ``"auto"`` to have the units determined automatically.
+     In this case, the ``dimensions`` keyword must be set to the correct
+     dimensions of the field.
 ``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
@@ -219,6 +264,9 @@
 ``force_override``
      (*Advanced*) Overrides the definition of an old field if a field with the
      same name has already been defined.
+``dimensions``
+     Set this if ``units="auto"``. Can be either a string or a dimension object from
+     ``yt.units.dimensions``.
 
 Debugging a Derived Field
 -------------------------
@@ -236,7 +284,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        return data["sillythings"] + data["humorousthings"]**2.0
 
@@ -244,7 +292,7 @@
 
 .. code-block:: python
 
-   @yt.derived_field(name = "funthings")
+   @yt.derived_field(name = ("gas","funthings"))
    def funthings(field, data):
        data._debug()
        return data["sillythings"] + data["humorousthings"]**2.0

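A minimal, self-contained sketch of the ``units='auto'`` pattern documented above; ``fake_random_ds`` and the hard-coded gamma are stand-ins for a real dataset, so treat it as illustrative rather than canonical:

.. code-block:: python

    import yt
    from yt.testing import fake_random_ds
    from yt.units import dimensions

    # in-memory test dataset carrying the two fields the example assumes
    ds = fake_random_ds(16, fields=("density", "thermal_energy"),
                        units=("g/cm**3", "erg/g"))

    def _pressure(field, data):
        # gamma is hard-coded here; a real setup would read it from the dataset
        return (5.0/3.0 - 1.0) * data["density"] * data["thermal_energy"]

    ds.add_field(("gas", "pressure"), function=_pressure, units="auto",
                 dimensions=dimensions.pressure)

    ad = ds.all_data()
    # the result comes back in the pressure units of the dataset's unit system
    print(ad["gas", "pressure"].units)
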
diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -104,6 +104,43 @@
 have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
 will result in using a capitalized version of the ``"name"``.
 
+.. _bfields-frontend:
+
+Creating Aliases for Magnetic Fields
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Setting up access to the magnetic fields in your dataset requires special
+handling, because in different unit systems magnetic fields have different
+dimensions (see :ref:`bfields` for an explanation). If your dataset includes 
+magnetic fields, you should include them in ``known_other_fields``, but do
+not set up aliases for them. Instead, use the special handling function 
+:func:`~yt.fields.magnetic_field.setup_magnetic_field_aliases`. It takes
+as arguments the ``FieldInfoContainer`` instance, the field type of the 
+frontend, and the list of magnetic fields from the frontend. Here is an
+example of how this is implemented in the FLASH frontend:
+
+.. code-block:: python
+
+    class FLASHFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ...
+            ("magx", (b_units, [], "B_x")), # Note there is no alias here
+            ("magy", (b_units, [], "B_y")),
+            ("magz", (b_units, [], "B_z")),
+            ...
+        )
+
+        def setup_fluid_fields(self):
+            from yt.fields.magnetic_field import \
+                setup_magnetic_field_aliases
+            ...
+            setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"])    
+
+This function should always be imported and called from within the 
+``setup_fluid_fields`` method of the ``FieldInfoContainer``. If this 
+function is used, converting between magnetic fields in different 
+:ref:`unit systems <unit_systems>` will be handled automatically. 
+
 Data Localization Structures
 ----------------------------
 

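As a rough sketch of what the aliasing provides downstream, the frontend-agnostic field names become available regardless of the on-disk names; the dataset path below is a placeholder, and the printed units depend on the dataset's unit system:

.. code-block:: python

    import yt

    # placeholder path; any dataset from a frontend that calls
    # setup_magnetic_field_aliases (e.g. FLASH) would work the same way
    ds = yt.load("path/to/flash_plotfile")
    ad = ds.all_data()

    # the standard aliases are created by the helper function
    print(ad["gas", "magnetic_field_x"].units)
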
diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,9 +41,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import yt\n",
@@ -60,9 +58,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -78,9 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -124,9 +118,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -148,9 +140,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -177,9 +167,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -205,9 +193,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -227,9 +213,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -245,9 +229,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -264,9 +246,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -276,9 +256,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -295,9 +273,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -323,9 +299,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -345,9 +319,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
@@ -363,9 +335,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -375,7 +345,7 @@
     "# Define the width of the image\n",
     "W = 1.5*ds.domain_width[0]\n",
     "# Define the number of pixels to render\n",
-    "Npixels = 512 "
+    "Npixels = 512"
    ]
   },
   {
@@ -388,9 +358,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
@@ -404,9 +372,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "cam.show()"
@@ -429,9 +395,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -448,9 +412,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -467,9 +429,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -489,9 +449,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -509,9 +467,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -539,9 +495,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -566,9 +520,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -586,9 +538,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -611,9 +561,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -629,9 +577,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -650,7 +596,6 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "* Units will be incorrect unless the data has already been converted to cgs.\n",
     "* Particles may be difficult to integrate.\n",
     "* Data must already reside in memory before loading it in to yt, whether it is generated at runtime or loaded from disk. \n",
     "* Some functions may behave oddly, and parallelism will be disappointing or non-existent in most cases.\n",
@@ -668,7 +613,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3
+    "version": 3.0
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -680,4 +625,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
+}
\ No newline at end of file

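The notebook changes above are metadata-only, but since the surrounding cells demonstrate generic-array loading, here is a compact sketch of that pattern (the field name and units are illustrative):

.. code-block:: python

    import numpy as np
    import yt

    # attach units to the in-memory array up front; nothing has to be
    # pre-converted to cgs by hand
    arr = np.random.random(size=(64, 64, 64))
    data = dict(density=(arr, "Msun/kpc**3"))
    ds = yt.load_uniform_grid(data, arr.shape, length_unit="Mpc")

    slc = yt.SlicePlot(ds, "z", ["density"])
    slc.save()
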
diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -803,7 +803,8 @@
 
 .. rubric:: Caveats
 
-* Please be careful that the units are correctly utilized; yt assumes cgs.
+* Please be careful that the units are correctly utilized; yt assumes cgs by
+  default, but conversion to other :ref:`unit systems <unit_systems>` is also
+  possible.
 
 .. _loading-gadget-data:
 
@@ -1065,7 +1066,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Some functions may behave oddly, and parallelism will be disappointing or
   non-existent in most cases.
 * No consistency checks are performed on the index
@@ -1123,7 +1123,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Particles may be difficult to integrate.
 * Data must already reside in memory.
 
@@ -1176,7 +1175,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.
@@ -1230,7 +1228,6 @@
 
 .. rubric:: Caveats
 
-* Units will be incorrect unless the data has already been converted to cgs.
 * Integration is not implemented.
 * Some functions may behave oddly or not work at all.
 * Data must already reside in memory.

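To make the revised caveat concrete, here is a small sketch (array contents and units are illustrative) showing that non-cgs data can simply be converted on demand:

.. code-block:: python

    import numpy as np
    import yt

    arr = np.random.random(size=(32, 32, 32))
    ds = yt.load_uniform_grid({"density": (arr, "kg/m**3")}, arr.shape,
                              length_unit="Mpc")
    ad = ds.all_data()

    # convert explicitly instead of assuming the input was cgs
    print(ad["density"].in_units("g/cm**3"))
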
diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -175,6 +175,7 @@
 .. toctree::
    :hidden:
 
+   intro/index
    installing
    yt Quickstart <quickstart/index>
    yt3differences

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -19,7 +19,7 @@
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
   will probably want to use the bash all-in-one installation script.  This builds 
-  python, numpy, matplotlib, and yt from source to set up an isolated scientific 
+  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific 
   python environment inside of a single folder in your home directory. See
   :ref:`install-script` for more details.
 
@@ -35,9 +35,9 @@
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via linux package managers so long as you
+  for python environments set up via Linux package managers so long as you
   have the necessary compilers installed (e.g. the ``build-essential``
-  package on debian and ubuntu).
+  package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -199,13 +199,12 @@
 
 If you do not want to install the full anaconda python distribution, you can
 install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
+visit http://repo.continuum.io/miniconda/ and download the ``Miniconda-latest-...``
+script for your platform and system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  bash Miniconda-3.3.0-Linux-x86_64.sh
+  bash Miniconda-latest-Linux-x86_64.sh
 
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
@@ -214,7 +213,28 @@
 
   conda install yt
 
-which will install yt along with all of its dependencies.
+which will install the stable branch of yt along with all of its dependencies.
+
+If you would like to install the latest development version of yt, you can
+download it from our custom anaconda channel:
+
+.. code-block:: bash
+
+  conda install -c http://use.yt/with_conda/ yt
+
+New packages for the development branch are built after every pull request is
+merged. To make sure you are running the latest version, it's recommended to
+update frequently:
+
+.. code-block:: bash
+
+  conda update -c http://use.yt/with_conda/ yt
+
+The location of our channel can be added to ``.condarc`` to avoid retyping it
+during each *conda* invocation. Please refer to the `Conda Manual
+<http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
+detailed instructions.
+
 
 Obtaining Source Code
 ^^^^^^^^^^^^^^^^^^^^^
@@ -252,7 +272,7 @@
 
   git clone https://github.com/conda/conda-recipes
 
-Then navigate to the repository root and invoke `conda build`:
+Then navigate to the repository root and invoke ``conda build``:
 
 .. code-block:: bash
 
@@ -290,7 +310,7 @@
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython cython h5py nose sympy
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
@@ -366,7 +386,7 @@
   yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
+any changes from Bitbucket, and then recompile yt if necessary.
 
 .. _testing-installation:
 

diff -r a3d319a05e3a56544a269d2349af4cd046d0cf73 -r cf054e98248827ec731b7ccce97f9c0cb13f28dd doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -49,7 +49,7 @@
 the :ref:`units system <units>` works to tag every individual field and 
 quantity with a physical unit (e.g. cm, AU, kpc, Mpc, etc.), and it describes 
 ways of analyzing multiple chronological data outputs from the same underlying 
-dataset known as :ref:`time series <time-series-analysis`.  Lastly, it includes 
+dataset known as :ref:`time series <time-series-analysis>`.  Lastly, it includes 
 information on how to enable yt to operate :ref:`in parallel over multiple 
 processors simultaneously <parallel-computation>`.
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/d97ba62c5c5a/
Changeset:   d97ba62c5c5a
Branch:      yt
User:        atmyers
Date:        2016-03-09 20:42:42+00:00
Summary:     fixing bad merge
Affected #:  1 file

diff -r cf054e98248827ec731b7ccce97f9c0cb13f28dd -r d97ba62c5c5ac58e733742a020332fef71ae783e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -138,9 +138,8 @@
             self.scene.set_new_unit_registry(self.data_source.ds.unit_registry)
             self._focus = self.data_source.ds.domain_center
             self._position = self.data_source.ds.domain_right_edge
-            self._width = 1.5*self.data_source.ds.domain_width
             self._width = self.data_source.ds.arr(
-                [1.5*data_source.ds.domain_width.max()]*3)
+                [1.5*self.data_source.ds.domain_width.max()]*3)
             self._domain_center = self.data_source.ds.domain_center
             self._domain_width = self.data_source.ds.domain_width
         else:

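A quick sketch of the behavior this merge fix restores; ``fake_random_ds`` is used only to have something to build a scene from:

.. code-block:: python

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    sc = yt.create_scene(ds, "density")

    # the default width is now a cube based on the largest domain dimension
    print(sc.camera.width)

    # and it can still be overridden explicitly
    sc.camera.width = ds.arr([0.5, 0.5, 0.5], "code_length")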

https://bitbucket.org/yt_analysis/yt/commits/ce4bfdcf508d/
Changeset:   ce4bfdcf508d
Branch:      yt
User:        atmyers
Date:        2016-03-10 02:28:28+00:00
Summary:     making the position property setter act exactly like set_position
Affected #:  1 file

diff -r d97ba62c5c5ac58e733742a020332fef71ae783e -r ce4bfdcf508deca0d301096d898c66014d57a3cb yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -177,8 +177,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._position = position
-            self.normal_vector = self.focus - self._position
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._position

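In other words, assigning to ``camera.position`` now reorients the camera toward the current focus, just as ``set_position`` does. A brief sketch using a test dataset:

.. code-block:: python

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    sc = yt.create_scene(ds, "density")
    cam = sc.camera

    # this assignment now also recomputes the normal vector, so the camera
    # keeps pointing at cam.focus, exactly as cam.set_position() would
    cam.position = ds.arr([1.2, 1.2, 1.2], "code_length")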

https://bitbucket.org/yt_analysis/yt/commits/87d00f1dbf2f/
Changeset:   87d00f1dbf2f
Branch:      yt
User:        atmyers
Date:        2016-03-13 09:11:43+00:00
Summary:     off axis projection should set the camera position based on the input normal_vector
Affected #:  1 file

diff -r ce4bfdcf508deca0d301096d898c66014d57a3cb -r 87d00f1dbf2f2e6568ab50b8dcf2cce9bca1410a yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -155,7 +155,7 @@
     camera.resolution = resolution
     if not iterable(width):
         width = data_source.ds.arr([width]*3)
-    camera.position = center - width[2]*camera.normal_vector
+    camera.position = center - width[2]*normal_vector
     camera.focus = center
     
     # If north_vector is None, we set the default here.

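A short sketch of the call this change affects; the field, resolution, and ``fake_random_ds`` stand-in are arbitrary choices:

.. code-block:: python

    import numpy as np
    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(32)
    normal = [1.0, 1.0, 0.0]

    # the camera is now placed along the supplied normal vector,
    # looking back at the center of the projection
    image = yt.off_axis_projection(ds, ds.domain_center, normal,
                                   ds.domain_width, 256, "density")
    yt.write_image(np.log10(image), "offaxis_density.png")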

https://bitbucket.org/yt_analysis/yt/commits/5dbc29e6c000/
Changeset:   5dbc29e6c000
Branch:      yt
User:        atmyers
Date:        2016-03-13 09:12:20+00:00
Summary:     the focus property should also update the normal vector when set.
Affected #:  1 file

diff -r 87d00f1dbf2f2e6568ab50b8dcf2cce9bca1410a -r 5dbc29e6c000109dd70c281ed91ed0572360d715 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -235,7 +235,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._focus = focus
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._focus


https://bitbucket.org/yt_analysis/yt/commits/ffd2a7b9fb3e/
Changeset:   ffd2a7b9fb3e
Branch:      yt
User:        atmyers
Date:        2016-03-13 16:37:56+00:00
Summary:     merging with tip
Affected #:  5 files

diff -r 5dbc29e6c000109dd70c281ed91ed0572360d715 -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -80,8 +80,8 @@
 ``export_ply``, which will write to a file and optionally sample a field at
 every face or vertex, outputting a color value to the file as well.  This file
 can then be viewed in MeshLab, Blender or on the website `Sketchfab.com
-<Sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an even
-easier way!
+<https://sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an
+even easier way!
 
 Exporting to Sketchfab
 ----------------------

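For orientation, a rough sketch of the ``export_ply`` workflow described above; the dataset path, field names, isocontour value, and bounds are placeholders:

.. code-block:: python

    import yt
    from yt.units import Mpc

    ds = yt.load("path/to/dataset")          # placeholder path
    sphere = ds.sphere("max", (1.0, "Mpc"))

    # extract an isodensity surface and write it to a PLY file, sampling
    # another field to provide a color value for each face
    surf = ds.surface(sphere, "density", 1e-27)
    bounds = [(sphere.center[i] - 1.0*Mpc, sphere.center[i] + 1.0*Mpc)
              for i in range(3)]
    surf.export_ply("surface.ply", bounds=bounds, color_field="temperature")
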
diff -r 5dbc29e6c000109dd70c281ed91ed0572360d715 -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -281,11 +281,11 @@
         else:
             if not (vp_pos.shape[0] == vp_dir.shape[0] == image.shape[0]) or \
                not (vp_pos.shape[1] == vp_dir.shape[1] == image.shape[1]):
-                print "Bad lense shape / direction for %s" % (self.lens_type)
-                print "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
+                msg = "Bad lens shape / direction for %s\n" % (self.lens_type)
+                msg += "Shapes: (%s - %s - %s) and (%s - %s - %s)" % (
                     vp_pos.shape[0], vp_dir.shape[0], image.shape[0],
                     vp_pos.shape[1], vp_dir.shape[1], image.shape[1])
-                raise RuntimeError
+                raise RuntimeError(msg)
             self.extent_function = calculate_extent_null
             self.vector_function = generate_vector_info_null
         self.sampler = NULL

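The change above replaces a ``print`` plus a bare ``raise`` with an exception that carries its own message; a generic sketch of that pattern (the names are illustrative):

.. code-block:: python

    def check_buffer_shapes(vp_pos, vp_dir, image, lens_type="perspective"):
        # build the diagnostic string first, then attach it to the exception,
        # so callers see the details in the traceback instead of on stdout
        if vp_pos.shape[:2] != vp_dir.shape[:2] or vp_pos.shape[:2] != image.shape[:2]:
            msg = "Bad lens shape / direction for %s\n" % lens_type
            msg += "Shapes: %s, %s and %s" % (
                vp_pos.shape, vp_dir.shape, image.shape)
            raise RuntimeError(msg)
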
diff -r 5dbc29e6c000109dd70c281ed91ed0572360d715 -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -123,17 +123,23 @@
     def project_to_plane(self, camera, pos, res=None):
         if res is None:
             res = camera.resolution
-        dx = np.dot(pos - self.origin.d, camera.unit_vectors[1])
-        dy = np.dot(pos - self.origin.d, camera.unit_vectors[0])
-        dz = np.dot(pos - self.front_center.d, -camera.unit_vectors[2])
+
+        origin = self.origin.in_units('code_length').d
+        front_center = self.front_center.in_units('code_length').d
+        width = camera.width.in_units('code_length').d
+
+        dx = np.array(np.dot(pos - origin, camera.unit_vectors[1]))
+        dy = np.array(np.dot(pos - origin, camera.unit_vectors[0]))
+        dz = np.array(np.dot(pos - front_center, -camera.unit_vectors[2]))
         # Transpose into image coords.
-        py = (res[0]*(dx/camera.width[0].d)).astype('int')
-        px = (res[1]*(dy/camera.width[1].d)).astype('int')
+
+        py = (res[0]*(dx/width[0])).astype('int')
+        px = (res[1]*(dy/width[1])).astype('int')
         return px, py, dz
 
     def __repr__(self):
-        disp = "<Lens Object>:\n\tlens_type:plane-parallel\n\tviewpoint:%s" %\
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:plane-parallel\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -232,41 +238,53 @@
         if res is None:
             res = camera.resolution
 
-        # Enforce width[1] / width[0] = resolution[1] / resolution[0]
-        camera.width[1] = camera.width[0] * (res[1] /res[0])
+        width = camera.width.in_units('code_length').d
+        position = camera.position.in_units('code_length').d
 
-        sight_vector = pos - camera.position.d
+        width[1] = width[0] * res[1] / res[0]
+
+        sight_vector = pos - position
+
         pos1 = sight_vector
+
         for i in range(0, sight_vector.shape[0]):
             sight_vector_norm = np.sqrt(np.dot(sight_vector[i], sight_vector[i]))
-            sight_vector[i] = sight_vector[i] / sight_vector_norm
+            if sight_vector_norm != 0:
+                sight_vector[i] = sight_vector[i] / sight_vector_norm
+
         sight_center = camera.position + camera.width[2] * camera.unit_vectors[2]
 
+        sight_center = sight_center.in_units('code_length').d
+
         for i in range(0, sight_vector.shape[0]):
             sight_angle_cos = np.dot(sight_vector[i], camera.unit_vectors[2])
+            # clip sight_angle_cos since floating point noise might
+            # push it outside the domain of arccos
+            sight_angle_cos = np.clip(sight_angle_cos, -1.0, 1.0)
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
-                sight_length = camera.width[2] / sight_angle_cos
+                sight_length = width[2] / sight_angle_cos
             else:
                 # If the corner is behind the camera, we put it outside of
                 # the image. It cannot simply be removed because it may
                 # connect to another corner within the image, which would
                 # produce a visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = np.sqrt(width[0]**2 + width[1]**2)
                 sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
-            pos1[i] = camera.position + sight_length * sight_vector[i]
+            pos1[i] = position + sight_length * sight_vector[i]
 
-        dx = np.dot(pos1 - sight_center.d, camera.unit_vectors[0])
-        dy = np.dot(pos1 - sight_center.d, camera.unit_vectors[1])
-        dz = np.dot(pos - camera.position.d, camera.unit_vectors[2])
+        dx = np.dot(pos1 - sight_center, camera.unit_vectors[0])
+        dy = np.dot(pos1 - sight_center, camera.unit_vectors[1])
+        dz = np.dot(pos - position, camera.unit_vectors[2])
 
         # Transpose into image coords.
         px = (res[0] * 0.5 + res[0] / camera.width[0].d * dx).astype('int')
         py = (res[1] * 0.5 + res[1] / camera.width[1].d * dy).astype('int')
+
         return px, py, dz
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:perspective viewpoint:%s" % \
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:perspective\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -427,7 +445,9 @@
         normal_vec_rot = np.dot(R, normal_vec)
 
         camera_position_shift = camera.position + east_vec * disparity
-        sight_vector = pos - camera_position_shift.d
+        camera_position_shift = camera_position_shift.in_units('code_length').d
+        width = camera.width.in_units('code_length').d
+        sight_vector = pos - camera_position_shift
         pos1 = sight_vector
 
         for i in range(0, sight_vector.shape[0]):
@@ -437,21 +457,24 @@
 
         for i in range(0, sight_vector.shape[0]):
             sight_angle_cos = np.dot(sight_vector[i], normal_vec_rot)
+            # clip sight_angle_cos since floating point noise might
+            # cause it to go outside the domain of arccos
+            sight_angle_cos = np.clip(sight_angle_cos, -1.0, 1.0)
             if np.arccos(sight_angle_cos) < 0.5 * np.pi:
-                sight_length = camera.width[2] / sight_angle_cos
+                sight_length = width[2] / sight_angle_cos
             else:
                 # If the corner is behind the camera, we put it outside of
                 # the image. It cannot simply be removed because it may
                 # connect to another corner within the image, which would
                 # produce a visible domain boundary line
-                sight_length = np.sqrt(camera.width[0]**2 + camera.width[1]**2)
+                sight_length = np.sqrt(width[0]**2 + width[1]**2)
                 sight_length = sight_length / np.sqrt(1 - sight_angle_cos**2)
             pos1[i] = camera_position_shift + sight_length * sight_vector[i]
 
-        dx = np.dot(pos1 - sight_center.d, east_vec_rot)
-        dy = np.dot(pos1 - sight_center.d, north_vec)
-        dz = np.dot(pos - camera_position_shift.d, normal_vec_rot)
-        
+        dx = np.dot(pos1 - sight_center, east_vec_rot)
+        dy = np.dot(pos1 - sight_center, north_vec)
+        dz = np.dot(pos - camera_position_shift, normal_vec_rot)
+
         # Transpose into image coords.
         if disparity > 0:
             px = (res0_h * 0.5 + res0_h / camera.width[0].d * dx).astype('int')
@@ -468,8 +491,8 @@
         self.viewpoint = self.front_center
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:perspective viewpoint:%s" % \
-            (self.viewpoint)
+        disp = ("<Lens Object>:\n\tlens_type:perspective\n\tviewpoint:%s" %
+                (self.viewpoint))
         return disp
 
 
@@ -537,8 +560,9 @@
         self.viewpoint = camera.position
 
     def __repr__(self):
-        disp = "<Lens Object>: lens_type:fisheye viewpoint:%s fov:%s radius:" %\
-            (self.viewpoint, self.fov, self.radius)
+        disp = ("<Lens Object>:\n\tlens_type:fisheye\n\tviewpoint:%s"
+                "\n\tfov:%s\n\tradius:%s" %
+                (self.viewpoint, self.fov, self.radius))
         return disp
 
     def project_to_plane(self, camera, pos, res=None):
@@ -550,26 +574,31 @@
         # vectors back onto the plane.  arr_fisheye_vectors goes from px, py to
         # vector, and we need the reverse.
         # First, we transform lpos into *relative to the camera* coordinates.
-        lpos = camera.position.d - pos
+
+        position = camera.position.in_units('code_length').d
+
+        lpos = position - pos
         lpos = lpos.dot(self.rotation_matrix)
-        # lpos = lpos.dot(self.rotation_matrix)
         mag = (lpos * lpos).sum(axis=1)**0.5
+
+        # screen out NaN values that would result from dividing by mag
+        mag[mag == 0] = 1
         lpos /= mag[:, None]
-        dz = mag / self.radius
+
+        dz = (mag / self.radius).in_units('1/code_length').d
         theta = np.arccos(lpos[:, 2])
         fov_rad = self.fov * np.pi / 180.0
         r = 2.0 * theta / fov_rad
         phi = np.arctan2(lpos[:, 1], lpos[:, 0])
         px = r * np.cos(phi)
         py = r * np.sin(phi)
-        u = camera.focus.uq
-        length_unit = u / u.d
+
         # dz is distance the ray would travel
         px = (px + 1.0) * res[0] / 2.0
         py = (py + 1.0) * res[1] / 2.0
         # px and py should be dimensionless
-        px = (u * np.rint(px) / length_unit).astype("int64")
-        py = (u * np.rint(py) / length_unit).astype("int64")
+        px = np.rint(px).astype("int64")
+        py = np.rint(py).astype("int64")
         return px, py, dz
 
 
@@ -655,11 +684,15 @@
             res = camera.resolution
         # Much of our setup here is the same as in the fisheye, except for the
         # actual conversion back to the px, py values.
-        lpos = camera.position.d - pos
-        # inv_mat = np.linalg.inv(self.rotation_matrix)
-        # lpos = lpos.dot(self.rotation_matrix)
+        position = camera.position.in_units('code_length').d
+
+        lpos = position - pos
         mag = (lpos * lpos).sum(axis=1)**0.5
+
+        # screen out NaN values that would result from dividing by mag
+        mag[mag == 0] = 1
         lpos /= mag[:, None]
+
         # originally:
         #  the x vector is cos(px) * cos(py)
         #  the y vector is sin(px) * cos(py)
@@ -671,14 +704,12 @@
         px = np.arctan2(lpos[:, 1], lpos[:, 0])
         py = np.arcsin(lpos[:, 2])
         dz = mag / self.radius
-        u = camera.focus.uq
-        length_unit = u / u.d
         # dz is distance the ray would travel
         px = ((-px + np.pi) / (2.0*np.pi)) * res[0]
         py = ((-py + np.pi/2.0) / np.pi) * res[1]
         # px and py should be dimensionless
-        px = (u * np.rint(px) / length_unit).astype("int64")
-        py = (u * np.rint(py) / length_unit).astype("int64")
+        px = np.rint(px).astype("int64")
+        py = np.rint(py).astype("int64")
         return px, py, dz
 
 
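Two of the numerical guards introduced above are general enough to summarize on their own; a small NumPy sketch (the function name is illustrative):

.. code-block:: python

    import numpy as np

    def safe_view_angles(sight_vectors, view_normal):
        # guard the normalization against zero-length vectors ...
        mag = np.sqrt((sight_vectors * sight_vectors).sum(axis=1))
        mag[mag == 0] = 1.0
        unit = sight_vectors / mag[:, None]

        # ... and clamp the dot products so floating point noise cannot
        # push them outside the domain of arccos
        cos_angle = np.clip(np.dot(unit, view_normal), -1.0, 1.0)
        return np.arccos(cos_angle)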

diff -r 5dbc29e6c000109dd70c281ed91ed0572360d715 -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 yt/visualization/volume_rendering/render_source.py
--- a/yt/visualization/volume_rendering/render_source.py
+++ b/yt/visualization/volume_rendering/render_source.py
@@ -693,17 +693,7 @@
         camera.lens.setup_box_properties(camera)
         px, py, dz = camera.lens.project_to_plane(camera, vertices)
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get points plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
-        zpoints(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+        zpoints(empty, z, px, py, dz, self.colors, self.color_stride)
 
         self.zbuffer = zbuffer
         return zbuffer
@@ -819,22 +809,15 @@
         camera.lens.setup_box_properties(camera)
         px, py, dz = camera.lens.project_to_plane(camera, vertices)
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get lines plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
         if len(px.shape) == 1:
-            zlines(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
+            zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-            # For stereo-lens, two sets of pos for each eye are contained in px...pz
-            zlines(empty, z, px.d[0,:], py.d[0,:], dz.d[0,:], self.colors, self.color_stride)
-            zlines(empty, z, px.d[1,:], py.d[1,:], dz.d[1,:], self.colors, self.color_stride)
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+            # For stereo lenses, two sets of positions (one per eye) are
+            # contained in px, py, and dz
+            zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors, 
+                   self.color_stride)
+            zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors, 
+                   self.color_stride)
 
         self.zbuffer = zbuffer
         return zbuffer
@@ -1121,22 +1104,15 @@
 
         # Draw the vectors
 
-        # Non-plane-parallel lenses only support 1D array
-        # 1D array needs to be transformed to 2D to get lines plotted
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0], camera.resolution[1], 4)
-            z.shape = (camera.resolution[0], camera.resolution[1])
-
         if len(px.shape) == 1:
-            zlines(empty, z, px.d, py.d, dz.d, self.colors, self.color_stride)
+            zlines(empty, z, px, py, dz, self.colors, self.color_stride)
         else:
-            # For stereo-lens, two sets of pos for each eye are contained in px...pz
-            zlines(empty, z, px.d[0,:], py.d[0,:], dz.d[0,:], self.colors, self.color_stride)
-            zlines(empty, z, px.d[1,:], py.d[1,:], dz.d[1,:], self.colors, self.color_stride)
-
-        if 'plane-parallel' not in str(camera.lens):
-            empty.shape = (camera.resolution[0] * camera.resolution[1], 1, 4)
-            z.shape = (camera.resolution[0] * camera.resolution[1], 1)
+            # For stereo lenses, two sets of positions (one per eye) are
+            # contained in px, py, and dz
+            zlines(empty, z, px[0,:], py[0,:], dz[0,:], self.colors,
+                   self.color_stride)
+            zlines(empty, z, px[1,:], py[1,:], dz[1,:], self.colors,
+                   self.color_stride)
 
         # Set the new zbuffer
         self.zbuffer = zbuffer

diff -r 5dbc29e6c000109dd70c281ed91ed0572360d715 -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -134,10 +134,21 @@
         if keyname is None:
             keyname = 'source_%02i' % len(self.sources)
 
-        if isinstance(render_source, (VolumeSource, MeshSource, GridSource)):
+        data_sources = (VolumeSource, MeshSource, GridSource)
+
+        if isinstance(render_source, data_sources):
             self.set_new_unit_registry(
                 render_source.data_source.ds.unit_registry)
 
+        line_annotation_sources = (GridSource, BoxSource, CoordinateVectorSource)
+
+        if isinstance(render_source, line_annotation_sources):
+            lens_str = str(self.camera.lens)
+            if 'fisheye' in lens_str or 'spherical' in lens_str:
+                raise NotImplementedError(
+                    "Line annotation sources are not supported for %s."
+                    % (type(self.camera.lens).__name__), )
+
         self.sources[keyname] = render_source
 
         return self
@@ -489,7 +500,7 @@
         r"""
 
         Modifies this scene by drawing the edges of the AMR grids.
-        This adds a new BoxSource to the scene for each AMR grid 
+        This adds a new GridSource to the scene that represents the AMR grids
         and returns the resulting Scene object.
 
         Parameters

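To illustrate the new guard in ``add_source``: the dataset and lens choice below are arbitrary, and the caught exception is what the check above is expected to raise:

.. code-block:: python

    import yt
    from yt.testing import fake_random_ds
    from yt.visualization.volume_rendering.api import BoxSource

    ds = fake_random_ds(16)
    sc = yt.create_scene(ds, "density")
    sc.camera.set_lens("spherical")

    box = BoxSource(ds.domain_left_edge, ds.domain_right_edge)
    try:
        sc.add_source(box)   # line annotations are rejected for this lens
    except NotImplementedError as err:
        print(err)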

https://bitbucket.org/yt_analysis/yt/commits/eb6d32ca0215/
Changeset:   eb6d32ca0215
Branch:      yt
User:        atmyers
Date:        2016-03-13 16:40:52+00:00
Summary:     adding the new answer tests to the yaml file
Affected #:  1 file

diff -r ffd2a7b9fb3e40503948fba69467a4b7f54f4ec5 -r eb6d32ca021569923d5b33e29cdc49c918ff22f9 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -45,6 +45,7 @@
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
   local_orion_270:
     - yt/frontends/boxlib/tests/test_orion.py


https://bitbucket.org/yt_analysis/yt/commits/63f8eec60861/
Changeset:   63f8eec60861
Branch:      yt
User:        atmyers
Date:        2016-03-14 00:46:34+00:00
Summary:     bump the id number for the varia answer tests
Affected #:  1 file

diff -r eb6d32ca021569923d5b33e29cdc49c918ff22f9 -r 63f8eec60861af1a4a469df548daf2b8539506f5 tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -39,7 +39,7 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_270:
+  local_varia_271:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py


https://bitbucket.org/yt_analysis/yt/commits/5179a7111bd9/
Changeset:   5179a7111bd9
Branch:      yt
User:        chummels
Date:        2016-03-16 16:39:28+00:00
Summary:     Merged in atmyers/yt (pull request #2007)

Answer tests for unstructured mesh renderings
Affected #:  7 files

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above examples all involve 8-node hexahedral mesh elements. Here is
+another example from a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -214,6 +214,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
@@ -273,7 +296,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
     

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -39,12 +39,13 @@
   local_tipsy_270:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_270:
+  local_varia_271:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py
     - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
     - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
   local_orion_270:
     - yt/frontends/boxlib/tests/test_orion.py

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -354,6 +354,11 @@
             displaced_coords = self._apply_displacement(coords, mesh_id)
             mi = np.minimum(displaced_coords.min(axis=0), mi)
             ma = np.maximum(displaced_coords.max(axis=0), ma)
+
+        # pad domain boundaries
+        width = ma - mi
+        mi -= 0.1 * width
+        ma += 0.1 * width
         return mi, ma
 
     @classmethod

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -133,11 +133,13 @@
         self.light = None
         self.data_source = data_source_or_all(data_source)
         self._resolution = (512, 512)
+
         if self.data_source is not None:
             self.scene.set_new_unit_registry(self.data_source.ds.unit_registry)
             self._focus = self.data_source.ds.domain_center
             self._position = self.data_source.ds.domain_right_edge
-            self._width = 1.5*self.data_source.ds.domain_width
+            self._width = self.data_source.ds.arr(
+                [1.5*self.data_source.ds.domain_width.max()]*3)
             self._domain_center = self.data_source.ds.domain_center
             self._domain_width = self.data_source.ds.domain_width
         else:
@@ -175,7 +177,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._position = position
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._position
@@ -232,7 +235,8 @@
                 raise RuntimeError(
                     'Cannot set the camera focus and position to the same value')
             self._focus = focus
-            self.switch_orientation()
+            self.switch_orientation(normal_vector=self.focus - self._position,
+                                    north_vector=None)
 
         def fdel(self):
             del self._focus

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c yt/visualization/volume_rendering/off_axis_projection.py
--- a/yt/visualization/volume_rendering/off_axis_projection.py
+++ b/yt/visualization/volume_rendering/off_axis_projection.py
@@ -155,7 +155,7 @@
     camera.resolution = resolution
     if not iterable(width):
         width = data_source.ds.arr([width]*3)
-    camera.position = center - width[2]*camera.normal_vector
+    camera.position = center - width[2]*normal_vector
     camera.focus = center
     
     # If north_vector is None, we set the default here.

diff -r 02dd7d47781782a1e30561f1c4337111a02f7183 -r 5179a7111bd9bd4d9174f368a76f2acab232155c yt/visualization/volume_rendering/tests/test_mesh_render.py
--- a/yt/visualization/volume_rendering/tests/test_mesh_render.py
+++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py
@@ -11,11 +11,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import fake_tetrahedral_ds
-from yt.testing import fake_hexahedral_ds
-from yt.testing import requires_module
-from yt.visualization.volume_rendering.render_source import MeshSource
-from yt.visualization.volume_rendering.scene import Scene
+from yt.testing import \
+    fake_tetrahedral_ds, \
+    fake_hexahedral_ds, \
+    requires_module
+from yt.utilities.answer_testing.framework import \
+    requires_ds, \
+    data_dir_load, \
+    GenericImageTest
+from yt.visualization.volume_rendering.api import \
+    MeshSource, \
+    Scene, \
+    create_scene
 
 
 @requires_module("pyembree")
@@ -39,3 +46,101 @@
         images.append(im)
 
     return images
+
+
+def compare(ds, im, test_prefix, decimals=12):
+    def mesh_render_image_func(filename_prefix):
+        return im.write_image(filename_prefix)
+
+    test = GenericImageTest(ds, mesh_render_image_func, decimals)
+    test.prefix = test_prefix
+    return test
+
+hex8 = "MOOSE_sample_data/out.e-s010"
+hex8_fields = [('connect1', 'diffused'), ('connect2', 'convected')]
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_hex8_render():
+    for field in hex8_fields:
+        ds = data_dir_load(hex8, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_hex8_%s_%s" % field)
+
+
+tet4 = "MOOSE_sample_data/high_order_elems_tet4_refine_out.e"
+tet4_fields = [("connect1", "u")]
+
+@requires_ds(tet4)
+@requires_module("pyembree")
+def test_tet4_render():
+    for field in tet4_fields:
+        ds = data_dir_load(tet4, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_tet4_%s_%s" % field)
+
+
+hex20 = "MOOSE_sample_data/mps_out.e"
+hex20_fields = [('connect2', 'temp')]
+
+@requires_ds(hex20)
+@requires_module("pyembree")
+def test_hex20_render():
+    for field in hex20_fields:
+        ds = data_dir_load(hex20, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_hex20_%s_%s" % field)
+
+
+wedge6 = "MOOSE_sample_data/wedge_out.e"
+wedge6_fields = [('connect1', 'diffused')]
+
+@requires_ds(wedge6)
+@requires_module("pyembree")
+def test_wedge6_render():
+    for field in wedge6_fields:
+        ds = data_dir_load(wedge6, kwargs={'step':-1})
+        sc = create_scene(ds, field)
+        im = sc.render()
+        yield compare(ds, im, "render_answers_wedge6_%s_%s" % field)
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_perspective_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = create_scene(ds, ("connect2", "diffused"))
+
+    cam = sc.add_camera(ds, lens_type='perspective')
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
+    cam.set_position(cam_pos, north_vector)
+    cam.resolution = (800, 800)
+    im = sc.render()
+    yield compare(ds, im, "perspective_mesh_render")
+
+
+@requires_ds(hex8)
+@requires_module("pyembree")
+def test_composite_mesh_render():
+    ds = data_dir_load(hex8)
+    sc = Scene()
+    cam = sc.add_camera(ds)
+    cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
+    cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
+                     ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
+    cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
+    cam.resolution = (800, 800)
+
+    ms1 = MeshSource(ds, ('connect1', 'diffused'))
+    ms2 = MeshSource(ds, ('connect2', 'diffused'))
+
+    sc.add_source(ms1)
+    sc.add_source(ms2)
+
+    im = sc.render()
+    yield compare(ds, im, "composite_mesh_render")

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

