[yt-svn] commit/yt: 8 new changesets

commits-noreply at bitbucket.org
Fri Mar 18 07:45:37 PDT 2016


8 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/70c0c76b9b4e/
Changeset:   70c0c76b9b4e
Branch:      yt
User:        mzingale
Date:        2016-03-05 14:30:10+00:00
Summary:     port the annotation stuff over from the old camera + add the ability
to do nice text annotations
Affected #:  1 file

diff -r 169c6b14377e4e691657a18547322d3d0c87aae3 -r 70c0c76b9b4e021f7abfe1adce88d3dd47ce2729 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -22,6 +22,7 @@
 from .zbuffer_array import ZBuffer
 from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import YTNotInsideNotebook
+import matplotlib.pyplot as plt
 
 class Scene(object):
 
@@ -56,12 +57,12 @@
     >>> sc.camera = cam
     >>> im = sc.render()
 
-    Alternatively, you can use the create_scene function to set up defaults 
+    Alternatively, you can use the create_scene function to set up defaults
     and then modify the Scene later:
 
     >>> import yt
     >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-    >>> 
+    >>>
     >>> sc = yt.create_scene(ds)
     >>> # Modify camera, sources, etc...
     >>> im = sc.render()
@@ -167,7 +168,7 @@
     def save(self, fname=None, sigma_clip=None):
         r"""Saves the most recently rendered image of the Scene to disk.
 
-        Once you have created a scene and rendered that scene to an image 
+        Once you have created a scene and rendered that scene to an image
         array, this saves that image array to disk with an optional filename.
         If an image has not yet been rendered for the current scene object,
         it forces one and writes it out.
@@ -180,8 +181,8 @@
             Default: None
         sigma_clip: float, optional
             Image values greater than this number times the standard deviation
-            plus the mean of the image will be clipped before saving. Useful 
-            for enhancing images as it gets rid of rare high pixel values. 
+            plus the mean of the image will be clipped before saving. Useful
+            for enhancing images as it gets rid of rare high pixel values.
             Default: None
 
             floor(vals > std_dev*sigma_clip + mean)
@@ -227,7 +228,7 @@
                 fname = "%s_Render_%s.png" % (basename, field)
             # if no volume source present, use a default filename
             else:
-                fname = "Render_opaque.png"   
+                fname = "Render_opaque.png"
         suffix = get_image_suffix(fname)
         if suffix == '':
             suffix = '.png'
@@ -238,7 +239,129 @@
 
         mylog.info("Saving render %s", fname)
         self.last_render.write_png(fname, sigma_clip=sigma_clip)
- 
+
+
+    def save_annotated(self, fname=None, label_fmt=None,
+                       text_annotate=None, dpi=100, sigma_clip=None):
+        r"""Saves the most recently rendered image of the Scene to disk,
+        including an image of the transfer function.
+
+        Once you have created a scene and rendered that scene to an image
+        array, this saves that image array to disk with an optional filename.
+        If an image has not yet been rendered for the current scene object,
+        it forces one and writes it out.
+
+        Parameters
+        ----------
+        fname: string, optional
+            If specified, save the rendering as a bitmap to the file "fname".
+            If unspecified, it creates a default based on the dataset filename.
+            Default: None
+        sigma_clip: float, optional
+            Image values greater than this number times the standard deviation
+            plus the mean of the image will be clipped before saving. Useful
+            for enhancing images as it gets rid of rare high pixel values.
+            Default: None
+
+            floor(vals > std_dev*sigma_clip + mean)
+
+        Returns
+        -------
+            Nothing
+
+        """
+        sources = list(itervalues(self.sources))
+        rensources = [s for s in sources if isinstance(s, RenderSource)]
+
+        if fname is None:
+            # if a volume source present, use its affiliated ds for fname
+            if len(rensources) > 0:
+                rs = rensources[0]
+                basename = rs.data_source.ds.basename
+                if isinstance(rs.field, string_types):
+                    field = rs.field
+                else:
+                    field = rs.field[-1]
+                fname = "%s_Render_%s.png" % (basename, field)
+            # if no volume source present, use a default filename
+            else:
+                fname = "Render_opaque.png"
+        suffix = get_image_suffix(fname)
+        if suffix == '':
+            suffix = '.png'
+            fname = '%s%s' % (fname, suffix)
+
+        if self.last_render is None:
+            self.render()
+
+        # which transfer function?
+        rs = rensources[0]
+        tf = rs.transfer_function
+        label = rs.data_source.ds._get_field_info(rs.field).get_label()
+
+        ax = self._show_mpl(self.last_render.swapaxes(0,1),
+                            sigma_clip=sigma_clip, dpi=dpi)
+        self._annotate(ax.axes, tf, label=label, label_fmt=label_fmt)
+        plt.tight_layout()
+
+        # any text?
+        if not text_annotate is None:
+            f = plt.gcf()
+            need_keys = ["x", "y", "string"]
+            for t in text_annotate:
+                valid = True
+                for n in need_keys:
+                    if not n in t.keys():
+                        print("warning: missing key '{}' for text annotation".format(n))
+                        valid = False
+                if not valid: continue
+
+                try: align = t["horizontalalignment"]
+                except: align = "center"
+
+                try: fontsize = t["fontsize"]
+                except: fontsize = "medium"
+
+                try: color = t["color"]
+                except: color="w"
+
+                plt.text(t["x"], t["y"], t["string"], 
+                         fontsize=fontsize, color=color,
+                         horizontalalignment=align,
+                         transform=f.transFigure)
+
+        plt.savefig(fname, facecolor='black', pad_inches=0)
+
+    def _show_mpl(self, im, sigma_clip=None, dpi=100):
+        s = im.shape
+        self._render_figure = plt.figure(1, figsize=(s[1]/dpi, s[0]/dpi))
+        ax = plt.gca()
+        ax.set_position([0, 0, 1, 1])
+
+        if not sigma_clip is None:
+            print("here: sigma_clip = {}".format(sigma_clip))
+            nz = im[im > 0.0]
+            nim = im / (nz.mean() + sigma_clip * np.std(nz))
+            nim[nim > 1.0] = 1.0
+            nim[nim < 0.0] = 0.0
+            del nz
+        else:
+            nim = im
+        axim = plt.imshow(nim[:,:,:3]/nim[:,:,:3].max(), interpolation="nearest")
+
+        return axim
+
+    def _annotate(self, ax, tf, label="", label_fmt=None):
+        ax.get_xaxis().set_visible(False)
+        ax.get_xaxis().set_ticks([])
+        ax.get_yaxis().set_visible(False)
+        ax.get_yaxis().set_ticks([])
+        cb = plt.colorbar(ax.images[0], pad=0.0, fraction=0.05,
+                          drawedges=True, shrink=0.75)
+        #if self.log_fields[0]:
+        #    label = r'$\rm{log}\ $' + label
+        tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt)
+
     def _validate(self):
         r"""Validate the current state of the scene."""
 
@@ -366,7 +489,7 @@
         r"""
 
         Modifies this scene by drawing the edges of the AMR grids.
-        This adds a new BoxSource to the scene for each AMR grid 
+        This adds a new BoxSource to the scene for each AMR grid
         and returns the resulting Scene object.
 
         Parameters
@@ -451,11 +574,11 @@
 
 
     def show(self, sigma_clip=None):
-        r"""This will send the most recently rendered image to the IPython 
+        r"""This will send the most recently rendered image to the IPython
         notebook.
 
         If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send the current image of this Scene 
+        determine this, this function will send the current image of this Scene
         to the notebook for display. If there is no current image, it will
         run the render() method on this Scene before sending the result to the
         notebook.
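
For reference, a minimal sketch of calling the new method (this assumes the
IsolatedGalaxy dataset used elsewhere in these docstrings; the dict-based
text_annotate format shown here is the one introduced in this changeset, and
is reworked in a later changeset below):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    sc = yt.create_scene(ds)
    sc.render()

    # write the image plus a transfer-function colorbar and one text label;
    # "x" and "y" are normalized figure coordinates
    sc.save_annotated('annotated.png', sigma_clip=4.0,
                      text_annotate=[{"x": 0.05, "y": 0.95,
                                      "string": "t = %s" % ds.current_time,
                                      "color": "w"}])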


https://bitbucket.org/yt_analysis/yt/commits/10478bcd90a4/
Changeset:   10478bcd90a4
Branch:      yt
User:        mzingale
Date:        2016-03-05 14:35:59+00:00
Summary:     fix doc string
Affected #:  1 file

diff -r 70c0c76b9b4e021f7abfe1adce88d3dd47ce2729 -r 10478bcd90a4eb7b265b084b479afa4db1620d41 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -244,7 +244,8 @@
     def save_annotated(self, fname=None, label_fmt=None,
                        text_annotate=None, dpi=100, sigma_clip=None):
         r"""Saves the most recently rendered image of the Scene to disk,
-        including an image of the transfer function.
+        including an image of the transfer function and any user-defined
+        text.
 
         Once you have created a scene and rendered that scene to an image
         array, this saves that image array to disk with an optional filename.
@@ -264,6 +265,21 @@
             Default: None
 
             floor(vals > std_dev*sigma_clip + mean)
+        dpi: integer, optional
+            By default, the resulting image will be the same size as the camera
+            resolution.  If you supply a dpi, then the image will be scaled
+            accordingly (from the default 100 dpi).
+        label_fmt : str, optional
+           A format specifier (e.g., label_fmt="%.2g") to use in formatting 
+           the data values that label the transfer function colorbar. 
+        text_annotate : list of dictionaries
+           Any text that you wish to display on the image.  This should be a 
+           list of dictionaries, with the dictionary keys "x", "y", giving
+           the normalized figure coordinates to display the text, "string"
+           giving the text to display, and optional keys "horizontalalignment"
+           specifying the centering ("left", "right", or "center"), and
+           "fontsize" giving the size of the text.  Each list item is a 
+           separate string to write.
 
         Returns
         -------


https://bitbucket.org/yt_analysis/yt/commits/f10217b05eb0/
Changeset:   f10217b05eb0
Branch:      yt
User:        mzingale
Date:        2016-03-05 15:40:19+00:00
Summary:     address fido flake complaints
Affected #:  1 file

diff -r 10478bcd90a4eb7b265b084b479afa4db1620d41 -r f10217b05eb05ccc7fe95c7e82d65674bd587446 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -321,13 +321,13 @@
         plt.tight_layout()
 
         # any text?
-        if not text_annotate is None:
+        if text_annotate is not None:
             f = plt.gcf()
             need_keys = ["x", "y", "string"]
             for t in text_annotate:
                 valid = True
                 for n in need_keys:
-                    if not n in t.keys():
+                    if n not in t.keys():
                         print("warning: missing key '{}' for text annotation".format(n))
                         valid = False
                 if not valid: continue
@@ -354,7 +354,7 @@
         ax = plt.gca()
         ax.set_position([0, 0, 1, 1])
 
-        if not sigma_clip is None:
+        if sigma_clip is not None:
             print("here: sigma_clip = {}".format(sigma_clip))
             nz = im[im > 0.0]
             nim = im / (nz.mean() + sigma_clip * np.std(nz))
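
The change is purely stylistic: ``x is not None`` and ``n not in t`` are the
PEP 8 spellings of the same identity and membership tests, e.g.:

    sigma_clip = 4.0
    t = {"x": 0.1, "y": 0.2}

    # each pair of spellings is equivalent; flake8 prefers the second form
    assert (not sigma_clip is None) == (sigma_clip is not None)
    assert (not "string" in t) == ("string" not in t)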


https://bitbucket.org/yt_analysis/yt/commits/83fa10d780bc/
Changeset:   83fa10d780bc
Branch:      yt
User:        mzingale
Date:        2016-03-05 19:36:55+00:00
Summary:     much cleaner implementation of the options to text() -- now it is a dict
that is passed as kwargs to the plt.text() function.
Affected #:  1 file

diff -r f10217b05eb05ccc7fe95c7e82d65674bd587446 -r 83fa10d780bc02e524691e30bdcabc480f422097 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -272,19 +272,32 @@
         label_fmt : str, optional
            A format specifier (e.g., label_fmt="%.2g") to use in formatting 
            the data values that label the transfer function colorbar. 
-        text_annotate : list of dictionaries
-           Any text that you wish to display on the image.  This should be a 
-           list of dictionaries, with the dictionary keys "x", "y", giving
-           the normalized figure coordinates to display the text, "string"
-           giving the text to display, and optional keys "horizontalalignment"
-           specifying the centering ("left", "right", or "center"), and
-           "fontsize" giving the size of the text.  Each list item is a 
-           separate string to write.
+        text_annotate : list of iterables
+           Any text that you wish to display on the image.  This should be a
+           list of items, each consisting of a tuple of coordinates (in
+           normalized figure coordinates), the text to display, and,
+           optionally, a dictionary of keyword/value pairs to pass through
+           to the matplotlib text() function.
+
+           Each item in the main list is a separate string to write.
+
 
         Returns
         -------
             Nothing
 
+
+        Examples
+        --------
+
+        >>> sc.save_annotated("fig.png", 
+        >>>                   text_annotate=[[(0.05, 0.05), 
+        >>>                                   "t = {}".format(ds.current_time.d),
+        >>>                                   dict(horizontalalignment="left")],
+        >>>                                  [(0.5,0.95), 
+        >>>                                   "simulation title",
+        >>>                                   dict(color="y", fontsize="24",
+        >>>                                        horizontalalignment="center")]])
+
         """
         sources = list(itervalues(self.sources))
         rensources = [s for s in sources if isinstance(s, RenderSource)]
@@ -323,28 +336,20 @@
         # any text?
         if text_annotate is not None:
             f = plt.gcf()
-            need_keys = ["x", "y", "string"]
             for t in text_annotate:
-                valid = True
-                for n in need_keys:
-                    if n not in t.keys():
-                        print("warning: missing key '{}' for text annotation".format(n))
-                        valid = False
-                if not valid: continue
+                xy = t[0]
+                string = t[1]
+                if len(t) == 3:
+                    opt = t[2]
+                else:
+                    opt = dict()
 
-                try: align = t["horizontalalignment"]
-                except: align = "center"
+                # sane default
+                if "color" not in opt:
+                    opt["color"] = "w"
 
-                try: fontsize = t["fontsize"]
-                except: fontsize = "medium"
-
-                try: color = t["color"]
-                except: color="w"
-
-                plt.text(t["x"], t["y"], t["string"], 
-                         fontsize=fontsize, color=color,
-                         horizontalalignment=align,
-                         transform=f.transFigure)
+                plt.text(xy[0], xy[1], string,
+                         transform=f.transFigure, **opt)
 
         plt.savefig(fname, facecolor='black', pad_inches=0)
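
A sketch of the revised interface (sc and ds as above); any keyword accepted
by matplotlib's text() can now be forwarded through the optional third
element:

    # each entry: (x, y) in normalized figure coordinates, the string, and an
    # optional dict passed straight through as kwargs to plt.text()
    sc.save_annotated("fig.png",
                      text_annotate=[[(0.05, 0.05),
                                      "t = {}".format(ds.current_time.d),
                                      dict(horizontalalignment="left",
                                           fontsize=18, rotation=90)],
                                     [(0.5, 0.95), "title"]])
    # the second entry omits the dict, so it picks up the default color "w"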
 


https://bitbucket.org/yt_analysis/yt/commits/328a76a8092d/
Changeset:   328a76a8092d
Branch:      yt
User:        mzingale
Date:        2016-03-09 17:55:39+00:00
Summary:     bugfix: if we have a tick value of 0, we cannot take the log of
it to get it in scientific notation
Affected #:  1 file

diff -r 83fa10d780bc02e524691e30bdcabc480f422097 -r 328a76a8092df26ea616c2ecf649b13afa14600b yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -604,8 +604,11 @@
             val = x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size-1) + self.alpha.x[0]
             if label_fmt is None:
                 if abs(val) < 1.e-3 or abs(val) > 1.e4:
-                    e = np.floor(np.log10(abs(val)))
-                    return r"${:.2f}\times 10^{:d}$".format(val/10.0**e, int(e))
+                    if not val == 0.0:
+                        e = np.floor(np.log10(abs(val)))
+                        return r"${:.2f}\times 10^{:d}$".format(val/10.0**e, int(e))
+                    else:
+                        return r"$0$"
                 else:
                     return "%.1g" % (val)
             else:
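
The patched branch as a standalone sketch (the name x_format is illustrative,
not the one used in transfer_functions.py); previously a tick value of exactly
zero hit np.log10(0.0), and converting the resulting -inf with int() raised:

    import numpy as np

    def x_format(val):
        if abs(val) < 1.e-3 or abs(val) > 1.e4:
            if not val == 0.0:
                e = np.floor(np.log10(abs(val)))
                return r"${:.2f}\times 10^{:d}$".format(val/10.0**e, int(e))
            else:
                return r"$0$"
        else:
            return "%.1g" % (val)

    print(x_format(0.0))     # $0$ -- no longer raises
    print(x_format(4.2e-5))  # $4.20\times 10^-5$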


https://bitbucket.org/yt_analysis/yt/commits/05815443e955/
Changeset:   05815443e955
Branch:      yt
User:        mzingale
Date:        2016-03-09 18:07:42+00:00
Summary:     fix the part about labeling 'log' of the field
Affected #:  1 file

diff -r 328a76a8092df26ea616c2ecf649b13afa14600b -r 05815443e955781705cf611e912f0fa7f5b0b1cd yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -327,6 +327,8 @@
         rs = rensources[0]
         tf = rs.transfer_function
         label = rs.data_source.ds._get_field_info(rs.field).get_label()
+        if rs.data_source.ds._get_field_info(rs.field).take_log:
+            label = r'$\rm{log}\ $' + label
 
         ax = self._show_mpl(self.last_render.swapaxes(0,1),
                             sigma_clip=sigma_clip, dpi=dpi)
@@ -379,8 +381,6 @@
         ax.get_yaxis().set_ticks([])
         cb = plt.colorbar(ax.images[0], pad=0.0, fraction=0.05,
                           drawedges=True, shrink=0.75)
-        #if self.log_fields[0]:
-        #    label = r'$\rm{log}\ $' + label
         tf.vert_cbar(ax=cb.ax, label=label, label_fmt=label_fmt)
 
     def _validate(self):


https://bitbucket.org/yt_analysis/yt/commits/ed87473301fe/
Changeset:   ed87473301fe
Branch:      yt
User:        mzingale
Date:        2016-03-17 20:08:59+00:00
Summary:     merge
Affected #:  88 files

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -795,8 +795,8 @@
    rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
    rather than ``SpecialGrid.__init__()``.
  * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
+   that occur on an object.  See :ref:`docstrings` below for a fiducial example
+   of a docstring.
  * Use only one top-level import per line. Unless there is a good reason not to,
    imports should happen at the top of the file, after the copyright blurb.
  * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
@@ -843,7 +843,7 @@
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 
-.. _docstrings
+.. _docstrings:
 
 Docstrings
 ----------

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -204,7 +204,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an apporpriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated. 
 
 .. code-block:: python
 
@@ -232,7 +232,7 @@
 as all lines with the same group number as ``group#[i]``.
 
 The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
-``wavelength`` that contains the cummulative absorption spectrum generated 
+``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
 Saving a Spectrum Fit

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -7,7 +7,7 @@
 disconnected structures within a dataset.  This works by first creating a 
 single contour over the full range of the contouring field, then continually 
 increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contoures, 
+of the field.  As disconnected structures are identified as separate contours, 
 the routine continues recursively through each object, creating a hierarchy of 
 clumps.  Individual clumps can be kept or removed from the hierarchy based on 
 the result of user-specified functions, such as checking for gravitational 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -93,7 +93,7 @@
 ellipsoid's semi-principle axes. "e0" is the largest semi-principle
 axis vector direction that would have magnitude A but normalized.  
 The "tilt" is an angle measured in radians.  It can be best described
-as after the rotation about the z-axis to allign e0 to x in the x-y
+as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
 the x-axis, the angle remaining to rotate about the x-axis to align
 both e1 to the y-axis and e2 to the z-axis.

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -236,7 +236,7 @@
 All callbacks, quantities, and filters are stored in an actions list, 
 meaning that they are executed in the same order in which they were added. 
 This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unecessary computation by allowing 
+depend on each other. This also prevents unnecessary computation by allowing 
 the user to add filters at multiple stages to skip remaining analysis if it 
 is not warranted.
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -13,7 +13,7 @@
 
 A halo mass function can be created for the halos identified in a cosmological 
 simulation, as well as analytic fits using any arbitrary set of cosmological
-paramters. In order to create a mass function for simulated halos, they must
+parameters. In order to create a mass function for simulated halos, they must
 first be identified (using HOP, FOF, or Rockstar, see 
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additonal cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters 
 to create an analytic mass function.
 
 .. code-block:: python
@@ -106,7 +106,7 @@
 -----------------
 
 * **simulation_ds** (*Simulation dataset object*)
-  The loaded simulation dataset, used to set cosmological paramters.
+  The loaded simulation dataset, used to set cosmological parameters.
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
@@ -130,7 +130,7 @@
 
 * **omega_baryon0**  (*float*)
   The fraction of the universe made up of baryonic matter. This is not 
-  always stored in the datset and should be checked by hand.
+  always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
@@ -140,14 +140,14 @@
 * **sigma8** (*float*)
   The amplitude of the linear power spectrum at z=0 as specified by 
   the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the datset and should be 
+  8 Mpc/h. This is not always stored in the dataset and should be 
   checked by hand.
   Default : 0.86.
 
 * **primoridal_index** (*float*)
   This is the index of the mass power spectrum before modification by 
   the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the datset and 
+  primordial spectrum. This is not always stored in the dataset and 
   should be checked by hand.
   Default : 1.0.
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -40,7 +40,7 @@
 the full halo catalog documentation for further information about
 how to add these quantities and what quantities are available.
 
-You no longer have to iteratre over halos in the ``halo_list``.
+You no longer have to iterate over halos in the ``halo_list``.
 Now a halo dataset can be treated as a regular dataset and 
 all quantities are available by accessing ``all_data``.
 Specifically, all quantities can be accessed as shown:

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -50,7 +50,7 @@
   ``use_minimum_datasets`` set to False, this parameter specifies the 
   fraction of the total box size to be traversed before rerandomizing the 
   projection axis and center.  This was invented to allow light cones with 
-  thin slices to sample coherent large cale structure, but in practice does 
+  thin slices to sample coherent large scale structure, but in practice does 
   not work so well.  Try setting this parameter to 1 and see what happens.  
   Default: 0.0.
 
@@ -74,7 +74,7 @@
 
 A light cone solution consists of a list of datasets spanning a redshift 
 interval with a random orientation for each dataset.  A new solution 
-is calcuated with the 
+is calculated with the 
 :func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
 function:
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -347,7 +347,7 @@
   be used to control what vector corresponds to the "up" direction in 
   the resulting event list. 
 * ``psf_sigma`` may be specified to provide a crude representation of 
-  a PSF, and corresponds to the standard deviation (in degress) of a 
+  a PSF, and corresponds to the standard deviation (in degrees) of a 
   Gaussian PSF model. 
 
 Let's just take a quick look at the raw events object:

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,6 +246,8 @@
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
 
+.. _region-reference:
+
 3D Objects
 """"""""""
 
@@ -256,8 +258,6 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
-.. _region-reference:
-
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
@@ -313,7 +313,7 @@
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
       apply cuts to the data in the object.  A ``cut_region`` will work
-      for either particle fields or mesh fields, but not on both simulaneously.
+      for either particle fields or mesh fields, but not on both simultaneously.
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -49,7 +49,7 @@
 
     $ conda install mpi4py
 
-This will install `MPICH2 <https://www.mpich.org/>`_ and will interefere with
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
 other MPI libraries that are already installed. Therefore, it is preferable to
 use the ``pip`` installation method.
 
@@ -103,7 +103,7 @@
    p.save()
 
 If this script is run in parallel, two of the most expensive operations -
-finding of the maximum density and the projection will be calulcated in
+finding of the maximum density and the projection will be calculated in
 parallel.  If we save the script as ``my_script.py``, we would run it on 16 MPI
 processes using the following Bash command:
 
@@ -121,7 +121,7 @@
 
 You can set the ``communicator`` keyword in the 
 :func:`~yt.utilities.parallel_tools.parallel_analysis_interface.enable_parallelism` 
-call to a specific MPI communicator to specify a subset of availble MPI 
+call to a specific MPI communicator to specify a subset of available MPI 
 processes.  If none is specified, it defaults to ``COMM_WORLD``.
 
 Creating Parallel and Serial Sections in a Script
@@ -251,7 +251,7 @@
 You may define an empty dictionary and include it as the keyword argument 
 ``storage`` to ``piter()``.  Then, during the processing step, you can access
 this dictionary as the ``sto`` object.  After the 
-loop is finished, the dictionary is re-aggragated from all of the processors, 
+loop is finished, the dictionary is re-aggregated from all of the processors, 
 and you can access the contents:
 
 .. code-block:: python

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,7 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Implemented for: Enzo, Gadget, OWLS.
+.. note:: Implemented for the Enzo, Gadget, OWLS, and Exodus II frontends.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/analyzing/units/comoving_units_and_code_units.rst
--- a/doc/source/analyzing/units/comoving_units_and_code_units.rst
+++ b/doc/source/analyzing/units/comoving_units_and_code_units.rst
@@ -12,7 +12,7 @@
 
 yt has additional capabilities to handle the comoving coordinate system used
 internally in cosmological simulations. Simulations that use comoving
-coordinates, all length units have three other counterparts correspoding to
+coordinates, all length units have three other counterparts corresponding to
 comoving units, scaled comoving units, and scaled proper units. In all cases
 'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
 unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -60,7 +60,7 @@
 
 # General information about the project.
 project = u'The yt Project'
-copyright = u'2013, the yt Project'
+copyright = u'2013-2016, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/cookbook/camera_movement.py
--- a/doc/source/cookbook/camera_movement.py
+++ b/doc/source/cookbook/camera_movement.py
@@ -1,31 +1,30 @@
 import yt
 import numpy as np
 
-# Follow the simple_volume_rendering cookbook for the first part of this.
-ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
+ds = yt.load("MOOSE_sample_data/out.e-s010")
 sc = yt.create_scene(ds)
 cam = sc.camera
-cam.resolution = (512, 512)
-cam.set_width(ds.domain_width/20.0)
 
-# Find the maximum density location, store it in max_c
-v, max_c = ds.find_max('density')
+# save an image at the starting position
+frame = 0
+sc.save('camera_movement_%04i.png' % frame)
+frame += 1
 
-frame = 0
-# Move to the maximum density location over 5 frames
-for _ in cam.iter_move(max_c, 5):
+# Zoom out by a factor of 2 over 5 frames
+for _ in cam.iter_zoom(0.5, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Zoom in by a factor of 10 over 5 frames
-for _ in cam.iter_zoom(10.0, 5):
+# Move to the position [-10.0, 10.0, -10.0] over 5 frames
+pos = ds.arr([-10.0, 10.0, -10.0], 'code_length')
+for _ in cam.iter_move(pos, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1
 
-# Do a rotation over 5 frames
+# Rotate by 180 degrees over 5 frames
 for _ in cam.iter_rotate(np.pi, 5):
     sc.render()
-    sc.save('camera_movement_%04i.png' % frame, sigma_clip=8.0)
+    sc.save('camera_movement_%04i.png' % frame)
     frame += 1

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -195,7 +195,11 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 In this recipe, we move a camera through a domain and take multiple volume
-rendering snapshots.
+rendering snapshots. This recipe uses an unstructured mesh dataset (see
+:ref:`unstructured_mesh_rendering`), which makes it easier to visualize what 
+the Camera is doing, but you can manipulate the Camera for other dataset types 
+in exactly the same manner.
+
 See :ref:`camera_movement` for more information.
 
 .. yt_cookbook:: camera_movement.py

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,7 +65,7 @@
 
 .. yt_cookbook:: light_ray.py 
 
-This script demontrates how to make a light ray from a single dataset.
+This script demonstrates how to make a light ray from a single dataset.
 
 .. _cookbook-single-dataset-light-ray:
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/cookbook/notebook_tutorial.rst
--- a/doc/source/cookbook/notebook_tutorial.rst
+++ b/doc/source/cookbook/notebook_tutorial.rst
@@ -17,7 +17,7 @@
    $ ipython notebook
 
 Depending on your default web browser and system setup this will open a web
-browser and direct you to the notebook dahboard.  If it does not,  you might
+browser and direct you to the notebook dashboard.  If it does not,  you might
 need to connect to the notebook manually.  See the `IPython documentation
 <http://ipython.org/ipython-doc/stable/notebook/notebook.html#starting-the-notebook-server>`_
 for more details.

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/cookbook/various_lens.py
--- a/doc/source/cookbook/various_lens.py
+++ b/doc/source/cookbook/various_lens.py
@@ -1,5 +1,5 @@
 import yt
-from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource
+from yt.visualization.volume_rendering.api import Scene, VolumeSource
 import numpy as np
 
 field = ("gas", "density")
@@ -19,7 +19,7 @@
 tf.grey_opacity = True
 
 # Plane-parallel lens
-cam = Camera(ds, lens_type='plane-parallel')
+cam = sc.add_camera(ds, lens_type='plane-parallel')
 # Set the resolution of tbe final projection.
 cam.resolution = [250, 250]
 # Set the location of the camera to be (x=0.2, y=0.5, z=0.5)
@@ -32,13 +32,12 @@
 # Set the width of the camera, where width[0] and width[1] specify the length and
 # height of final projection, while width[2] in plane-parallel lens is not used.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_plane-parallel.png', sigma_clip=6.0)
 
 # Perspective lens
-cam = Camera(ds, lens_type='perspective')
+cam = sc.add_camera(ds, lens_type='perspective')
 cam.resolution = [250, 250]
 # Standing at (x=0.2, y=0.5, z=0.5), we look at the area of x>0.2 (with some open angle
 # specified by camera width) along the positive x direction.
@@ -49,13 +48,12 @@
 # height of the final projection, while width[2] specifies the distance between the
 # camera and the final image.
 cam.set_width(ds.domain_width * 0.5)
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_perspective.png', sigma_clip=6.0)
 
 # Stereo-perspective lens
-cam = Camera(ds, lens_type='stereo-perspective')
+cam = sc.add_camera(ds, lens_type='stereo-perspective')
 # Set the size ratio of the final projection to be 2:1, since stereo-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [500, 250]
@@ -65,14 +63,13 @@
 cam.set_width(ds.domain_width*0.5)
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-perspective.png', sigma_clip=6.0)
 
 # Fisheye lens
 dd = ds.sphere(ds.domain_center, ds.domain_width[0] / 10)
-cam = Camera(dd, lens_type='fisheye')
+cam = sc.add_camera(dd, lens_type='fisheye')
 cam.resolution = [250, 250]
 v, c = ds.find_max(field)
 cam.set_position(c - 0.0005 * ds.domain_width)
@@ -80,13 +77,12 @@
                        north_vector=north_vector)
 cam.set_width(ds.domain_width)
 cam.lens.fov = 360.0
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_fisheye.png', sigma_clip=6.0)
 
 # Spherical lens
-cam = Camera(ds, lens_type='spherical')
+cam = sc.add_camera(ds, lens_type='spherical')
 # Set the size ratio of the final projection to be 2:1, since spherical lens
 # will generate the final image with length of 2*pi and height of pi.
 cam.resolution = [500, 250]
@@ -97,13 +93,12 @@
                        north_vector=north_vector)
 # In (stereo)spherical camera, camera width is not used since the entire volume
 # will be rendered
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_spherical.png', sigma_clip=6.0)
 
 # Stereo-spherical lens
-cam = Camera(ds, lens_type='stereo-spherical')
+cam = sc.add_camera(ds, lens_type='stereo-spherical')
 # Set the size ratio of the final projection to be 4:1, since spherical-perspective lens
 # will generate the final image with both left-eye and right-eye ones jointed together.
 cam.resolution = [1000, 250]
@@ -114,7 +109,6 @@
 # will be rendered
 # Set the distance between left-eye and right-eye.
 cam.lens.disparity = ds.domain_width[0] * 1.e-3
-sc.camera = cam
 sc.add_source(vol)
 sc.render()
 sc.save('lens_stereo-spherical.png', sigma_clip=6.0)

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -158,7 +158,7 @@
 HTML. to simplify versioning of the notebook JSON format, we store notebooks in
 an unevaluated state.
 
-To build the full documentation, you will need yt, jupyter, and all depedencies 
+To build the full documentation, you will need yt, jupyter, and all dependencies 
 needed for yt's analysis modules installed. The following dependencies were 
 used to generate the yt documentation during the release of yt 3.2 in 2015.
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -65,7 +65,7 @@
 data in a dimensionally equivalent unit (e.g. a ``"dyne"`` versus a ``"N"``), the
 field data will be converted to the units specified in ``add_field`` before
 being returned in a data object selection. If the field function returns data
-with dimensions that are incompatibible with units specified in ``add_field``,
+with dimensions that are incompatible with units specified in ``add_field``,
 you will see an error. To clear this error, you must ensure that your field
 function returns data in the correct units. Often, this means applying units to
 a dimensionless float or array.
@@ -75,7 +75,7 @@
 to get a predefined version of the constant with the correct units. If you know
 the units your data is supposed to have ahead of time, you can import unit
 symbols like ``g`` or ``cm`` from the ``yt.units`` namespace and multiply the
-return value of your field function by the appropriate compbination of unit
+return value of your field function by the appropriate combination of unit
 symbols for your field's units. You can also convert floats or NumPy arrays into
 :class:`~yt.units.yt_array.YTArray` or :class:`~yt.units.yt_array.YTQuantity`
 instances by making use of the

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -471,7 +471,7 @@
 Another good example of an image comparison test is the
 ``PlotWindowAttributeTest`` defined in the answer testing framework and used in
 ``yt/visualization/tests/test_plotwindow.py``. This test shows how a new answer
-test subclass can be used to programitically test a variety of different methods
+test subclass can be used to programmatically test a variety of different methods
 of a complicated class using the same test class. This sort of image comparison
 test is more useful if you are finding yourself writing a ton of boilerplate
 code to get your image comparison test working.  The ``GenericImageTest`` is

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -369,7 +369,7 @@
 
 This particular dataset has two meshes in it, both of which are made of 8-node hexes.
 yt uses a field name convention to access these different meshes in plots and data
-objects. To see all the fields found in a particlular dataset, you can do:
+objects. To see all the fields found in a particular dataset, you can do:
 
 .. code-block:: python
     
@@ -540,7 +540,7 @@
 
 * ``CDELTx``: The pixel width in along axis ``x``
 * ``CRVALx``: The coordinate value at the reference position along axis ``x``
-* ``CRPIXx``: The the reference pixel along axis ``x``
+* ``CRPIXx``: The reference pixel along axis ``x``
 * ``CTYPEx``: The projection type of axis ``x``
 * ``CUNITx``: The units of the coordinate along axis ``x``
 * ``BTYPE``: The type of the image
@@ -870,7 +870,7 @@
 ``over_refine_factor``.  They are weak proxies for each other.  The first,
 ``n_ref``, governs how many particles in an oct results in that oct being
 refined into eight child octs.  Lower values mean higher resolution; the
-default is 64.  The secon parameter, ``over_refine_factor``, governs how many
+default is 64.  The second parameter, ``over_refine_factor``, governs how many
 cells are in a given oct; the default value of 1 corresponds to 8 cells.
 The number of cells in an oct is defined by the expression
 ``2**(3*over_refine_factor)``.
@@ -1118,8 +1118,10 @@
    bbox = np.array([[-1.5, 1.5], [-1.5, 1.5], [1.5, 1.5]])
    ds = yt.load_uniform_grid(data, arr.shape, 3.08e24, bbox=bbox, nprocs=12)
 
-where in this exampe the particle position fields have been assigned. ``number_of_particles`` must be the same size as the particle
-arrays. If no particle arrays are supplied then ``number_of_particles`` is assumed to be zero. 
+where in this example the particle position fields have been assigned.
+``number_of_particles`` must be the same size as the particle arrays. If no
+particle arrays are supplied then ``number_of_particles`` is assumed to be
+zero. 
 
 .. rubric:: Caveats
 
@@ -1153,7 +1155,7 @@
    coordinates,connectivity = yt.hexahedral_connectivity(xgrid,ygrid,zgrid)
 
 will define the (x,y,z) coordinates of the hexahedral cells and
-information about that cell's neighbors such that the celll corners
+information about that cell's neighbors such that the cell corners
 will be a grid of points constructed as the Cartesion product of
 xgrid, ygrid, and zgrid.
 
@@ -1386,8 +1388,8 @@
 ---------
 
 `PyNE <http://pyne.io/>`_ is an open source nuclear engineering toolkit
-maintained by the PyNE developement team (`pyne-dev at googlegroups.com
-<pyne-dev%40googlegroups.com>`_). PyNE meshes utilize the Mesh-Oriented datABase
+maintained by the PyNE development team (pyne-dev at googlegroups.com).
+PyNE meshes utilize the Mesh-Oriented datABase
 `(MOAB) <http://trac.mcs.anl.gov/projects/ITAPS/wiki/MOAB/>`_ and can be
 Cartesian or tetrahedral. In addition to field data, pyne meshes store pyne
 Material objects which provide a rich set of capabilities for nuclear

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -176,7 +176,7 @@
 cells from the parent grid will be duplicated (appropriately) to fill the 
 covering grid.
 
-Let's say we now want to look at that entire data volume and sample it at the 
+Let's say we now want to look at that entire data volume and sample it at
 a higher resolution (i.e. level 2).  As stated above, we'll be oversampling
 under-refined regions, but that's OK.  We must also increase the resolution 
 of our output array by a factor of 2^2 in each direction to hold this new 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -141,7 +141,7 @@
   $ grep -r SlicePlot *         (or $ grin SlicePlot)
 
 This will print a number of locations in the yt source tree where ``SlicePlot``
-is mentioned.  You can now followup on this and open up the files that have
+is mentioned.  You can now follow-up on this and open up the files that have
 references to ``SlicePlot`` (particularly the one that defines SlicePlot) and
 inspect their contents for problems or clarification.
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -175,6 +175,7 @@
 .. toctree::
    :hidden:
 
+   intro/index
    installing
    yt Quickstart <quickstart/index>
    yt3differences

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -19,7 +19,7 @@
 * If you do not have root access on your computer, are not comfortable managing
   python packages, or are working on a supercomputer or cluster computer, you
   will probably want to use the bash all-in-one installation script.  This builds 
-  python, numpy, matplotlib, and yt from source to set up an isolated scientific 
+  Python, NumPy, Matplotlib, and yt from source to set up an isolated scientific 
   python environment inside of a single folder in your home directory. See
   :ref:`install-script` for more details.
 
@@ -35,9 +35,9 @@
   up python using a source-based package manager like `Homebrew
   <http://brew.sh>`_ or `MacPorts <http://www.macports.org/>`_ this choice will
   let you install yt using the python installed by the package manager. Similarly
-  for python environments set up via linux package managers so long as you
-  have the the necessary compilers installed (e.g. the ``build-essentials``
-  package on debian and ubuntu).
+  for python environments set up via Linux package managers so long as you
+  have the necessary compilers installed (e.g. the ``build-essentials``
+  package on Debian and Ubuntu).
 
 .. note::
   See `Parallel Computation
@@ -199,13 +199,12 @@
 
 If you do not want to install the full anaconda python distribution, you can
 install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
+visit http://repo.continuum.io/miniconda/ and download the ``Miniconda-latest-...``
+script for your platform and system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  bash Miniconda-3.3.0-Linux-x86_64.sh
+  bash Miniconda-latest-Linux-x86_64.sh
 
 For both the Anaconda and Miniconda installations, make sure that the Anaconda
 ``bin`` directory is in your path, and then issue:
@@ -214,7 +213,28 @@
 
   conda install yt
 
-which will install yt along with all of its dependencies.
+which will install the stable branch of yt along with all of its dependencies.
+
+If you would like to install the latest development version of yt, you can
+download it from our custom anaconda channel:
+
+.. code-block:: bash
+
+  conda install -c http://use.yt/with_conda/ yt
+
+New packages for the development branch are built after every pull request is
+merged. In order to make sure you are running the latest version, it's
+recommended to update frequently:
+
+.. code-block:: bash
+
+  conda update -c http://use.yt/with_conda/ yt
+
+The location of our channel can be added to ``.condarc`` to avoid retyping it
+during each *conda* invocation. Please refer to the `Conda Manual
+<http://conda.pydata.org/docs/config.html#channel-locations-channels>`_ for
+detailed instructions.
+
 
 Obtaining Source Code
 ^^^^^^^^^^^^^^^^^^^^^
@@ -252,7 +272,7 @@
 
   git clone https://github.com/conda/conda-recipes
 
-Then navigate to the repository root and invoke `conda build`:
+Then navigate to the repository root and invoke ``conda build``:
 
 .. code-block:: bash
 
@@ -290,7 +310,7 @@
 
 .. code-block:: bash
 
-  $ pip install numpy matplotlib cython cython h5py nose sympy
+  $ pip install numpy matplotlib cython h5py nose sympy
 
 If you're using IPython notebooks, you can install its dependencies
 with ``pip`` as well:
@@ -366,7 +386,7 @@
   yt update
 
 This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
+any changes from Bitbucket, and then recompile yt if necessary.
 
 .. _testing-installation:
 
@@ -397,7 +417,7 @@
 
 With the release of version 3.0 of yt, development of the legacy yt 2.x series
 has been relegated to bugfixes.  That said, we will continue supporting the 2.x
-series for the forseeable future.  This makes it easy to use scripts written
+series for the foreseeable future.  This makes it easy to use scripts written
 for older versions of yt without substantially updating them to support the
 new field naming or unit systems in yt version 3.
 
@@ -411,7 +431,7 @@
 You already have the mercurial repository, so you simply need to switch
 which version you're using.  Navigate to the root of the yt mercurial
 repository, update to the desired version, and rebuild the source (some of the
-c code requires a compilation step for big changes like this):
+C code requires a compilation step for big changes like this):
 
 .. code-block:: bash
 
@@ -419,7 +439,7 @@
   hg update <desired-version>
   python setup.py develop
 
-Valid versions to jump to are described in :ref:`branches-of-yt`).
+Valid versions to jump to are described in :ref:`branches-of-yt`.
 
 You can check which version of yt you have installed by invoking ``yt version``
 at the command line.  If you encounter problems, see :ref:`update-errors`.

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -49,7 +49,7 @@
 the :ref:`units system <units>` works to tag every individual field and 
 quantity with a physical unit (e.g. cm, AU, kpc, Mpc, etc.), and it describes 
 ways of analyzing multiple chronological data outputs from the same underlying 
-dataset known as :ref:`time series <time-series-analysis`.  Lastly, it includes 
+dataset known as :ref:`time series <time-series-analysis>`.  Lastly, it includes 
 information on how to enable yt to operate :ref:`in parallel over multiple 
 processors simultaneously <parallel-computation>`.
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/reference/index.rst
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -14,5 +14,6 @@
    command-line
    api/api
    configuration
+   python_introduction
    field_list
    changelog

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -315,7 +315,7 @@
 Let's try this out with a for loop.  First type ``for i in range(10):`` and
 press enter.  This will change the prompt to be three periods, instead of three
 greater-than signs, and you will be expected to hit the tab key to indent.
-Then type "print i", press enter, and then instead of indenting again, press
+Then type "print(i)", press enter, and then instead of indenting again, press
 enter again.  The entire entry should look like this::
 
    >>> for i in range(10):

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -4,7 +4,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  TransferFunctionHelper is a utility class that makes it easy to visualize he probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
+    "Here, we explain how to use TransferFunctionHelper to visualize and interpret yt volume rendering transfer functions.  Creating a custom transfer function is a process that usually involves some trial-and-error. TransferFunctionHelper is a utility class designed to help you visualize the probability density functions of yt fields that you might want to volume render.  This makes it easier to choose a nice transfer function that highlights interesting physical regimes.\n",
     "\n",
     "First, we set up our namespace and define a convenience function to display volume renderings inline in the notebook.  Using `%matplotlib inline` makes it so matplotlib plots display inline in the notebook."
    ]
@@ -22,7 +22,6 @@
     "from IPython.core.display import Image\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
     "from yt.visualization.volume_rendering.render_source import VolumeSource\n",
-    "from yt.visualization.volume_rendering.camera import Camera\n",
     "\n",
     "def showme(im):\n",
     "    # screen out NaNs\n",
@@ -133,8 +132,8 @@
     "tfh.set_log(True)\n",
     "tfh.build_transfer_function()\n",
     "tfh.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
-    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=10.0)\n",
-    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.)\n",
+    "tfh.tf.map_to_colormap(6.0, 8.0, colormap='Reds')\n",
+    "tfh.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r')\n",
     "\n",
     "tfh.plot(profile_field='cell_mass')"
    ]
@@ -143,7 +142,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Finally, let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
+    "Let's take a look at the volume rendering. First use the helper function to create a default rendering, then we override this with the transfer function we just created."
    ]
   },
   {
@@ -167,7 +166,55 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "That looks okay, but the red gas (associated with temperatures between 1e6 and 1e8 K) is a bit hard to see in the image. To fix this, we can make that gas contribute a larger alpha value to the image by using the ``scale`` keyword argument in ``map_to_colormap``."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "tfh2 = TransferFunctionHelper(ds)\n",
+    "tfh2.set_field('temperature')\n",
+    "tfh2.set_bounds()\n",
+    "tfh2.set_log(True)\n",
+    "tfh2.build_transfer_function()\n",
+    "tfh2.tf.add_layers(8, w=0.01, mi=4.0, ma=8.0, col_bounds=[4.,8.], alpha=np.logspace(-1,2,7), colormap='RdBu_r')\n",
+    "tfh2.tf.map_to_colormap(6.0, 8.0, colormap='Reds', scale=5.0)\n",
+    "tfh2.tf.map_to_colormap(-1.0, 6.0, colormap='Blues_r', scale=1.0)\n",
+    "\n",
+    "tfh2.plot(profile_field='cell_mass')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note that the height of the red portion of the transfer function has increased by a factor of 5.0. If we use this transfer function to make the final image:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [],
+   "source": [
+    "source.set_transfer_function(tfh2.tf)\n",
+    "im3 = sc.render()\n",
+    "\n",
+    "showme(im3[:,:,:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The red gas is now much more prominant in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
--- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
+++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb
@@ -18,7 +18,7 @@
     "import yt\n",
     "import numpy as np\n",
     "from yt.visualization.volume_rendering.transfer_function_helper import TransferFunctionHelper\n",
-    "from yt.visualization.volume_rendering.api import Scene, Camera, VolumeSource\n",
+    "from yt.visualization.volume_rendering.api import Scene, VolumeSource\n",
     "\n",
     "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
     "sc = yt.create_scene(ds)"
@@ -199,7 +199,7 @@
    },
    "outputs": [],
    "source": [
-    "cam = Camera(ds, lens_type='perspective')\n",
+    "cam = sc.add_camera(ds, lens_type='perspective')\n",
     "\n",
     "# Standing at (x=0.05, y=0.5, z=0.5), we look at the area of x>0.05 (with some open angle\n",
     "# specified by camera width) along the positive x direction.\n",
@@ -213,7 +213,6 @@
     "# The width determines the opening angle\n",
     "cam.set_width(ds.domain_width * 0.5)\n",
     "\n",
-    "sc.camera = cam\n",
     "print (sc.camera)"
    ]
   },

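Outside of the notebook, the new ``Scene.add_camera`` pattern shown in this changeset reduces to the following (a minimal sketch using the same example dataset):

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)

    # add_camera creates the camera *and* attaches it to the scene, so
    # the old `sc.camera = cam` assignment is no longer necessary
    cam = sc.add_camera(ds, lens_type='perspective')
    cam.set_width(ds.domain_width * 0.5)

    im = sc.render()
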
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -47,7 +47,7 @@
 store them in your :ref:`plugin-file` for access to them in every future yt 
 session.  The example below creates two custom colormaps, one that has
 three equally spaced bars of blue, white and red, and the other that 
-interpolates in increasing lengthed intervals from black to red, to green, 
+interpolates in intervals of increasing length from black to red, to green, 
 to blue.  These will be accessible for the rest of the yt session as 
 'french_flag' and 'weird'.  See 
 :func:`~yt.visualization.color_maps.make_colormap` and 

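For context, the two custom colormaps described above are constructed with ``make_colormap``; a hedged sketch (the step counts below are illustrative, not taken from the documentation page):

.. code-block:: python

    from yt.visualization.color_maps import make_colormap

    # three equally spaced bars of blue, white, and red
    make_colormap([('blue', 20), ('white', 20), ('red', 20)],
                  name='french_flag')

    # black to red, to green, to blue, over intervals of increasing length
    make_colormap([('black', 5), ('red', 10), ('green', 20), ('blue', 40)],
                  name='weird')
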
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -16,7 +16,6 @@
    manual_plotting
    volume_rendering
    unstructured_mesh_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -415,9 +415,19 @@
 determined by the ``thresh`` parameter, which can be varied to make the lines thicker or
 thinner.
 
+The above examples all involve 8-node hexahedral mesh elements. Here is another example from
+a dataset that uses 6-node wedge elements:
+
+.. python-script::
+   
+   import yt
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+   sl = yt.SlicePlot(ds, 2, ('connect2', 'diffused'))
+   sl.save()
+
 Finally, slices can also be used to examine 2D unstructured mesh datasets, but the
 slices must be taken to be normal to the ``'z'`` axis, or you'll get an error. Here is
-an example using another MOOSE dataset:
+an example using another MOOSE dataset that uses triangular mesh elements:
 
 .. python-script::
 

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -80,8 +80,8 @@
 ``export_ply``, which will write to a file and optionally sample a field at
 every face or vertex, outputting a color value to the file as well.  This file
 can then be viewed in MeshLab, Blender or on the website `Sketchfab.com
-<Sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an even
-easier way!
+<https://sketchfab.com>`_.  But if you want to view it on Sketchfab, there's an
+even easier way!
 
 Exporting to Sketchfab
 ----------------------

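The ``export_ply`` workflow referenced above, in outline (a sketch; the dataset, field names, and isocontour value are placeholders):

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sphere = ds.sphere("max", (1.0, "Mpc"))

    # extract an isodensity surface and write it to a PLY file,
    # sampling temperature on the surface for color
    surface = ds.surface(sphere, "density", 1e-27)
    surface.export_ply("surface.ply", color_field="temperature")
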
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -214,6 +214,29 @@
     # render and save
     sc.save()
 
+Here is an example using 6-node wedge elements:
+
+.. python-script::
+
+   import yt
+
+   ds = yt.load("MOOSE_sample_data/wedge_out.e")
+
+   # create a default scene
+   sc = yt.create_scene(ds, ('connect2', 'diffused'))
+
+   # override the default colormap
+   ms = sc.get_source(0)
+   ms.cmap = 'Eos A'
+
+   # adjust the camera position and orientation
+   cam = sc.camera
+   cam.set_position(ds.arr([1.0, -1.0, 1.0], 'code_length'))
+   cam.width = ds.arr([1.5, 1.5, 1.5], 'code_length')
+
+   # render and save
+   sc.save()
+
 Another example, this time plotting the temperature field from a 20-node hex 
 MOOSE dataset:
 
@@ -273,7 +296,7 @@
     # adjust the camera position and orientation
     cam = sc.camera
     camera_position = ds.arr([-1.0, 1.0, -0.5], 'code_length')
-    north_vector = ds.arr([0.0, 1.0, 1.0], 'dimensionless')
+    north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.width = ds.arr([0.05, 0.05, 0.05], 'code_length')
     cam.set_position(camera_position, north_vector)
     
@@ -292,7 +315,6 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import Camera
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -304,15 +326,12 @@
     ms.cmap = 'Eos A'
    
     # Create a perspective Camera
-    cam = Camera(ds, lens_type='perspective')
+    cam = sc.add_camera(ds, lens_type='perspective')
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam_pos = ds.arr([-4.5, 4.5, -4.5], 'code_length')
     north_vector = ds.arr([0.0, -1.0, -1.0], 'dimensionless')
     cam.set_position(cam_pos, north_vector)
    
-    # tell our scene to use it
-    sc.camera = cam
-   
     # increase the default resolution
     cam.resolution = (800, 800)
    
@@ -329,7 +348,7 @@
 .. python-script::
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera, Scene
+    from yt.visualization.volume_rendering.api import MeshSource, Scene
 
     ds = yt.load("MOOSE_sample_data/out.e-s010")
 
@@ -337,16 +356,13 @@
     sc = Scene()
 
     # set up our Camera
-    cam = Camera(ds)
+    cam = sc.add_camera(ds)
     cam.focus = ds.arr([0.0, 0.0, 0.0], 'code_length')
     cam.set_position(ds.arr([-3.0, 3.0, -3.0], 'code_length'),
                      ds.arr([0.0, -1.0, 0.0], 'dimensionless'))
     cam.set_width(ds.arr([8.0, 8.0, 8.0], 'code_length'))
     cam.resolution = (800, 800)
 
-    # tell the scene to use it
-    sc.camera = cam
-
     # create two distinct MeshSources from 'connect1' and 'connect2'
     ms1 = MeshSource(ds, ('connect1', 'diffused'))
     ms2 = MeshSource(ds, ('connect2', 'diffused'))
@@ -362,7 +378,7 @@
 ^^^^^^^^^^^^^
 
 Here are a couple of example scripts that show how to create image frames that 
-can later be stiched together into a movie. In the first example, we look at a 
+can later be stitched together into a movie. In the first example, we look at a 
 single dataset at a fixed time, but we move the camera around to get a different
 vantage point. We call the rotate() method 300 times, saving a new image to the 
 disk each time.
@@ -407,7 +423,7 @@
 .. code-block:: python
 
     import yt
-    from yt.visualization.volume_rendering.api import MeshSource, Camera
+    from yt.visualization.volume_rendering.api import MeshSource
     import pylab as plt
 
     NUM_STEPS = 127
@@ -432,7 +448,7 @@
 	# set up the camera here. these values were arrived at by
 	# calling pitch, yaw, and roll in the notebook until I
 	# got the angle I wanted.
-	cam = Camera(ds)
+	cam = sc.add_camera(ds)
 	camera_position = ds.arr([0.1, 0.0, 0.1], 'code_length')
 	cam.focus = ds.domain_center
 	north_vector = ds.arr([-0.3032476, -0.71782557, 0.62671153], 'dimensionless')

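The frame-generation loop described in this section boils down to the pattern below (a sketch; the dataset, frame count, and filenames are illustrative):

.. code-block:: python

    import numpy as np
    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)
    cam = sc.camera

    num_frames = 300
    for i in range(num_frames):
        # rotate the camera by a small angle, then render and write
        # one frame per step
        cam.rotate(2.0 * np.pi / num_frames)
        sc.render()
        sc.save("frame_%04d.png" % i)
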
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -236,12 +236,13 @@
 The :class:`~yt.visualization.volume_rendering.camera.Camera` object
 is what it sounds like, a camera within the Scene.  It possesses the 
 quantities:
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
- * :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
- * ``north_vector`` - a vector defining the "up" direction in an image
- * :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
+ 
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.position` - the position of the camera in scene-space
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.width` - the width of the plane the camera can see
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.focus` - the point in space the camera is looking at
+* :meth:`~yt.visualization.volume_rendering.camera.Camera.resolution` - the image resolution
+* ``north_vector`` - a vector defining the "up" direction in an image
+* :ref:`lens <lenses>` - an object controlling how rays traverse the Scene
 
 .. _camera_movement:
 
@@ -482,7 +483,7 @@
 their combination, are described below.
 
 MPI Parallelization
-+++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^
 
 Currently the volume renderer is parallelized using MPI to decompose the volume
 by attempting to split up the
@@ -516,7 +517,7 @@
 For more information about enabling parallelism, see :ref:`parallel-computation`.
 
 OpenMP Parallelization
-++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^
 
 The volume rendering is also parallelized using the OpenMP interface in Cython.
 While the MPI parallelization is done using domain decomposition, the OpenMP
@@ -532,7 +533,7 @@
 by default by modifying the environment variable OMP_NUM_THREADS. 
 
 Running in Hybrid MPI + OpenMP
-++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The two methods for volume rendering parallelization can be used together to
 leverage large supercomputing resources.  When choosing how to balance the

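Putting the MPI decomposition described above into practice is mostly a matter of enabling parallelism before rendering; a minimal sketch (launch with e.g. ``mpirun -np 4 python script.py``; the dataset path is illustrative):

.. code-block:: python

    import yt

    # decompose the volume across the available MPI ranks
    yt.enable_parallelism()

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds)
    sc.render()
    sc.save("parallel_render.png")
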
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac doc/source/yt3differences.rst
--- a/doc/source/yt3differences.rst
+++ b/doc/source/yt3differences.rst
@@ -84,7 +84,7 @@
   external code**
   Mesh fields that exist on-disk in an output file can be read in using whatever
   name is used by the output file.  On-disk fields are always returned in code
-  units.  The full field name will be will be ``(code_name, field_name)``. See
+  units.  The full field name will be ``(code_name, field_name)``. See
   :ref:`field-list`.
 * **Particle fields are now more obviously different than mesh fields**
   Particle fields on-disk will also be in code units, and will be named
@@ -247,8 +247,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Wherever possible, we have attempted to replace the term "parameter file"
-(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of the 
-the ``pf`` atrributes of objects are now ``ds`` or ``dataset`` attributes.
+(i.e., ``pf``) with the term "dataset."  In yt-3.0, all of
+the ``pf`` attributes of objects are now ``ds`` or ``dataset`` attributes.
 
 Hierarchy is Now Index
 ^^^^^^^^^^^^^^^^^^^^^^
@@ -262,7 +262,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Derived quantities can now be accessed via a function that hangs off of the
-``quantities`` atribute of data objects. Instead of
+``quantities`` attribute of data objects. Instead of
 ``dd.quantities['TotalMass']()``, you can now use ``dd.quantities.total_mass()``
 to do the same thing.

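The old and new derived-quantity spellings, side by side (a minimal sketch):

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    dd = ds.all_data()

    # yt-2 style: dd.quantities['TotalMass']()
    # yt-3 style: a function hanging off the quantities attribute
    print(dd.quantities.total_mass())
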
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,7 @@
 #!python
-import os, re
+from __future__ import print_function
+import os
+import re
 from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import YTDataContainer
@@ -16,8 +18,8 @@
 
 try:
     import IPython
-except:
-    print 'ipython is not available. using default python interpreter.'
+except ImportError:
+    print('ipython is not available. using default python interpreter.')
     import code
     import sys
     code.interact(doc, None, namespace)
@@ -70,7 +72,7 @@
 Feel free to edit this file to customize your ipython experience.
 
 Note that as such this file does nothing, for backwards compatibility.
-Consult e.g. file 'ipy_profile_sh.py' for an example of the things 
+Consult e.g. file 'ipy_profile_sh.py' for an example of the things
 you can do here.
 
 See http://ipython.scipy.org/moin/IpythonExtensionApi for detailed
@@ -96,7 +98,7 @@
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py
 # We'll start with some fields.
 
-import re
+
 def yt_fieldname_completer(self, event):
     """Match dictionary completions"""
     #print "python_matches", event.symbol
@@ -110,7 +112,7 @@
 
     if not m:
         raise try_next
-    
+
     expr, attr = m.group(1, 3)
     #print "COMPLETING ON ", expr, attr
     #print type(self.Completer), dir(self.Completer)
@@ -122,9 +124,9 @@
         try:
             obj = eval(expr, self.Completer.global_namespace)
         except:
-            raise IPython.ipapi.TryNext 
-        
-    if isinstance(obj, (YTDataContainer, ) ):
+            raise IPython.ipapi.TryNext
+
+    if isinstance(obj, YTDataContainer):
         #print "COMPLETING ON THIS THING"
         all_fields = [f for f in sorted(
                 obj.ds.field_list + obj.ds.derived_field_list)]
@@ -135,6 +137,6 @@
 
     raise try_next
 
-ip.set_hook('complete_command', yt_fieldname_completer , re_key = ".*" )
+ip.set_hook('complete_command', yt_fieldname_completer, re_key = ".*")
 
 ip_shell.mainloop(**kwargs)

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import hglib
 import requests
 import shutil
@@ -6,6 +7,7 @@
 from datetime import datetime
 from distutils.version import LooseVersion
 from time import strptime, mktime
+from yt.extern.six.moves import input
 
 MERGED_PR_ENDPOINT = ("http://bitbucket.org/api/2.0/repositories/yt_analysis/"
                       "yt/pullrequests/?state=MERGED")
@@ -280,17 +282,17 @@
             if commit_already_on_stable(repo_path, commits[0]) is True:
                 continue
             message = "hg graft %s\n" % commits[0]
-        print "PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc
-        print "To backport, issue the following command(s):\n"
-        print message
-        raw_input('Press any key to continue')
+        print("PR #%s\nTitle: %s\nCreated on: %s\nLink: %s\n%s" % pr_desc)
+        print("To backport, issue the following command(s):\n")
+        print(message)
+        input('Press any key to continue')
 
 
 if __name__ == "__main__":
-    print ""
-    print "Gathering PR information, this may take a minute."
-    print "Don't worry, yt loves you."
-    print ""
+    print("")
+    print("Gathering PR information, this may take a minute.")
+    print("Don't worry, yt loves you.")
+    print("")
     repo_path = clone_new_repo()
     try:
         last_major_release = get_first_commit_after_last_major_release(repo_path)
@@ -308,11 +310,11 @@
         del inv_map[None]
 
         inv_map = screen_already_backported(repo_path, inv_map)
-        print "In another terminal window, navigate to the following path:"
-        print "%s" % repo_path
-        raw_input("Press any key to continue")
+        print("In another terminal window, navigate to the following path:")
+        print("%s" % repo_path)
+        input("Press any key to continue")
         backport_pr_commits(repo_path, inv_map, last_stable, prs)
-        raw_input(
+        input(
             "Now you need to push your backported changes. The temporary\n"
             "repository currently being used will be deleted as soon as you\n"
             "press any key.")

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac setup.py
--- a/setup.py
+++ b/setup.py
@@ -127,7 +127,9 @@
               ["yt/utilities/lib/bounding_volume_hierarchy.pyx"],
               extra_compile_args=omp_args,
               extra_link_args=omp_args,
-              libraries=["m"], depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd"]),
+              libraries=["m"],
+              depends=["yt/utilities/lib/bounding_volume_hierarchy.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.contour_finding",
               ["yt/utilities/lib/contour_finding.pyx"],
               include_dirs=["yt/utilities/lib/",
@@ -177,7 +179,8 @@
                        "yt/utilities/lib/kdtree.h",
                        "yt/utilities/lib/fixed_interpolator.h",
                        "yt/utilities/lib/fixed_interpolator.pxd",
-                       "yt/utilities/lib/field_interpolation_tables.pxd"]),
+                       "yt/utilities/lib/field_interpolation_tables.pxd",
+                       "yt/utilities/lib/vec3_ops.pxd"]),
     Extension("yt.utilities.lib.element_mappings",
               ["yt/utilities/lib/element_mappings.pyx"],
               libraries=["m"], depends=["yt/utilities/lib/element_mappings.pxd"]),

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac tests/nose_runner.py
--- a/tests/nose_runner.py
+++ b/tests/nose_runner.py
@@ -1,54 +1,100 @@
 import sys
 import os
 import yaml
-import multiprocessing as mp
+import multiprocessing
 import nose
-import glob
-from contextlib import closing
+from yt.extern.six.moves import cStringIO as StringIO
 from yt.config import ytcfg
 from yt.utilities.answer_testing.framework import AnswerTesting
 
 
-def run_job(argv):
-    with closing(open(str(os.getpid()) + ".out", "w")) as fstderr:
-        cur_stderr = sys.stderr
-        sys.stderr = fstderr
-        answer = argv[0]
+class NoseWorker(multiprocessing.Process):
+
+    def __init__(self, task_queue, result_queue):
+        multiprocessing.Process.__init__(self)
+        self.task_queue = task_queue
+        self.result_queue = result_queue
+
+    def run(self):
+        proc_name = self.name
+        while True:
+            next_task = self.task_queue.get()
+            if next_task is None:
+                print("%s: Exiting" % proc_name)
+                self.task_queue.task_done()
+                break
+            print('%s: %s' % (proc_name, next_task))
+            result = next_task()
+            self.task_queue.task_done()
+            self.result_queue.put(result)
+        return
+
+class NoseTask(object):
+    def __init__(self, argv):
+        self.argv = argv
+        self.name = argv[0]
+
+    def __call__(self):
+        old_stderr = sys.stderr
+        sys.stderr = mystderr = StringIO()
         test_dir = ytcfg.get("yt", "test_data_dir")
         answers_dir = os.path.join(test_dir, "answers")
-        if not os.path.isdir(os.path.join(answers_dir, answer)):
-            nose.run(argv=argv + ['--answer-store'],
+        if '--with-answer-testing' in self.argv and \
+                not os.path.isdir(os.path.join(answers_dir, self.name)):
+            nose.run(argv=self.argv + ['--answer-store'],
                      addplugins=[AnswerTesting()], exit=False)
-        nose.run(argv=argv, addplugins=[AnswerTesting()], exit=False)
-    sys.stderr = cur_stderr
+        nose.run(argv=self.argv, addplugins=[AnswerTesting()], exit=False)
+        sys.stderr = old_stderr
+        return mystderr.getvalue()
 
-if __name__ == "__main__":
+    def __str__(self):
+        return 'WILL DO self.name = %s' % self.name
+
+
+def generate_tasks_input():
     test_dir = ytcfg.get("yt", "test_data_dir")
     answers_dir = os.path.join(test_dir, "answers")
     with open('tests/tests_%i.%i.yaml' % sys.version_info[:2], 'r') as obj:
         tests = yaml.load(obj)
 
-    base_argv = ['--local-dir=%s' % answers_dir, '-v', '-s', '--nologcapture',
+    base_argv = ['--local-dir=%s' % answers_dir, '-v',
                  '--with-answer-testing', '--answer-big-data', '--local']
-    args = [['unittests', '-v', '-s', '--nologcapture']]
-    for answer in list(tests.keys()):
+    args = []
+
+    for test in list(tests["other_tests"].keys()):
+        args.append([test] + tests["other_tests"][test])
+    for answer in list(tests["answer_tests"].keys()):
         argv = [answer]
         argv += base_argv
-        argv.append('--xunit-file=%s.xml' % answer)
         argv.append('--answer-name=%s' % answer)
-        argv += tests[answer]
+        argv += tests["answer_tests"][answer]
         args.append(argv)
-    
-    processes = [mp.Process(target=run_job, args=(args[i],))
-                 for i in range(len(args))]
-    for p in processes:
-        p.start()
-    for p in processes:
-        p.join(timeout=7200)
-        if p.is_alive():
-            p.terminate()
-            p.join(timeout=30)
-    for fname in glob.glob("*.out"):
-        with open(fname, 'r') as fin:
-            print(fin.read())
-        os.remove(fname)
+
+    args = [item + ['-s', '--nologcapture', '--xunit-file=%s.xml' % item[0]]
+            for item in args]
+    return args
+
+if __name__ == "__main__":
+    # multiprocessing.log_to_stderr(logging.DEBUG)
+    tasks = multiprocessing.JoinableQueue()
+    results = multiprocessing.Queue()
+
+    num_consumers = 6  # TODO 
+    consumers = [NoseWorker(tasks, results) for i in range(num_consumers)]
+    for w in consumers:
+        w.start()
+
+    num_jobs = 0
+    for job in generate_tasks_input():
+        tasks.put(NoseTask(job))
+        num_jobs += 1
+
+    for i in range(num_consumers):
+        tasks.put(None)
+
+    tasks.join()
+
+    while num_jobs:
+        result = results.get()
+        print(result)
+        num_jobs -= 1

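The worker/queue structure adopted in ``nose_runner.py`` follows the standard ``multiprocessing`` producer-consumer recipe; a stripped-down, self-contained sketch of the same pattern (task names here are illustrative, not from the commit):

.. code-block:: python

    import multiprocessing


    class SquareTask(object):
        """A picklable task object, mirroring NoseTask's callable interface."""
        def __init__(self, value):
            self.value = value

        def __call__(self):
            return self.value * self.value


    class Worker(multiprocessing.Process):
        """Pulls tasks off a JoinableQueue until it sees a poison pill (None)."""
        def __init__(self, task_queue, result_queue):
            multiprocessing.Process.__init__(self)
            self.task_queue = task_queue
            self.result_queue = result_queue

        def run(self):
            while True:
                task = self.task_queue.get()
                if task is None:
                    self.task_queue.task_done()
                    break
                self.result_queue.put(task())
                self.task_queue.task_done()


    if __name__ == "__main__":
        tasks = multiprocessing.JoinableQueue()
        results = multiprocessing.Queue()

        workers = [Worker(tasks, results) for _ in range(2)]
        for w in workers:
            w.start()

        num_jobs = 5
        for i in range(num_jobs):
            tasks.put(SquareTask(i))
        for _ in workers:
            tasks.put(None)  # one poison pill per worker

        tasks.join()
        while num_jobs:
            print(results.get())
            num_jobs -= 1
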
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac tests/tests_2.7.yaml
--- a/tests/tests_2.7.yaml
+++ b/tests/tests_2.7.yaml
@@ -1,51 +1,68 @@
-local_artio_270:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_270:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_270:
-  - yt/frontends/athena
+  local_athena_270:
+    - yt/frontends/athena
 
-local_chombo_270:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_270:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_270:
-  - yt/frontends/enzo
+  local_enzo_270:
+    - yt/frontends/enzo
 
-local_fits_270:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_270:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_270:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_270:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_270:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_270:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_270:
-  - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
-  - yt/analysis_modules/halo_finding/tests/test_rockstar.py
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_270:
+    - yt/analysis_modules/halo_analysis/tests/test_halo_finders.py
+    - yt/analysis_modules/halo_finding/tests/test_rockstar.py
+    - yt/frontends/owls_subfind/tests/test_outputs.py
+  
+  local_owls_270:
+    - yt/frontends/owls/tests/test_outputs.py
+  
+  local_pw_270:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+    - yt/visualization/tests/test_profile_plots.py:test_phase_plot_attributes
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers
+    - yt/visualization/tests/test_particle_plot.py:test_particle_projection_filter
+    - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers
+  
+  local_tipsy_270:
+    - yt/frontends/tipsy/tests/test_outputs.py
+  
+  local_varia_271:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+    - yt/visualization/volume_rendering/tests/test_mesh_render.py
 
-local_owls_270:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_orion_270:
+    - yt/frontends/boxlib/tests/test_orion.py
+  
+  local_ramses_270:
+    - yt/frontends/ramses/tests/test_outputs.py
+  
+  local_ytdata_270:
+    - yt/frontends/ytdata
 
-local_pw_270:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_absorption_spectrum_271:
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
 
-local_tipsy_270:
-  - yt/frontends/tipsy/tests/test_outputs.py
-
-local_varia_270:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
-
-local_orion_270:
-  - yt/frontends/boxlib/tests/test_orion.py
-
-local_ramses_270:
-  - yt/frontends/ramses/tests/test_outputs.py
-
-local_ytdata_270:
-  - yt/frontends/ytdata
\ No newline at end of file
+other_tests:
+  unittests:
+     - '-v'
+  cookbook:
+     - '-v'
+     - 'doc/source/cookbook/tests/test_cookbook.py'

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac tests/tests_3.4.yaml
--- a/tests/tests_3.4.yaml
+++ b/tests/tests_3.4.yaml
@@ -1,49 +1,57 @@
-local_artio_340:
-  - yt/frontends/artio/tests/test_outputs.py
+answer_tests:
+  local_artio_340:
+    - yt/frontends/artio/tests/test_outputs.py
 
-local_athena_340:
-  - yt/frontends/athena
+  local_athena_340:
+    - yt/frontends/athena
 
-local_chombo_340:
-  - yt/frontends/chombo/tests/test_outputs.py
+  local_chombo_340:
+    - yt/frontends/chombo/tests/test_outputs.py
 
-local_enzo_340:
-  - yt/frontends/enzo
+  local_enzo_340:
+    - yt/frontends/enzo
 
-local_fits_340:
-  - yt/frontends/fits/tests/test_outputs.py
+  local_fits_340:
+    - yt/frontends/fits/tests/test_outputs.py
 
-local_flash_340:
-  - yt/frontends/flash/tests/test_outputs.py
+  local_flash_340:
+    - yt/frontends/flash/tests/test_outputs.py
 
-local_gadget_340:
-  - yt/frontends/gadget/tests/test_outputs.py
+  local_gadget_340:
+    - yt/frontends/gadget/tests/test_outputs.py
 
-local_halos_340:
-  - yt/frontends/owls_subfind/tests/test_outputs.py
+  local_halos_340:
+    - yt/frontends/owls_subfind/tests/test_outputs.py
 
-local_owls_340:
-  - yt/frontends/owls/tests/test_outputs.py
+  local_owls_340:
+    - yt/frontends/owls/tests/test_outputs.py
 
-local_pw_340:
-  - yt/visualization/tests/test_plotwindow.py:test_attributes
-  - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
+  local_pw_340:
+    - yt/visualization/tests/test_plotwindow.py:test_attributes
+    - yt/visualization/tests/test_plotwindow.py:test_attributes_wt
 
-local_tipsy_340:
-  - yt/frontends/tipsy/tests/test_outputs.py
+  local_tipsy_340:
+    - yt/frontends/tipsy/tests/test_outputs.py
 
-local_varia_340:
-  - yt/analysis_modules/radmc3d_export
-  - yt/frontends/moab/tests/test_c5.py
-  - yt/analysis_modules/photon_simulator/tests/test_spectra.py
-  - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
-  - yt/visualization/volume_rendering/tests/test_vr_orientation.py
+  local_varia_340:
+    - yt/analysis_modules/radmc3d_export
+    - yt/frontends/moab/tests/test_c5.py
+    - yt/analysis_modules/photon_simulator/tests/test_spectra.py
+    - yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+    - yt/visualization/volume_rendering/tests/test_vr_orientation.py
 
-local_orion_340:
-  - yt/frontends/boxlib/tests/test_orion.py
+  local_orion_340:
+    - yt/frontends/boxlib/tests/test_orion.py
 
-local_ramses_340:
-  - yt/frontends/ramses/tests/test_outputs.py
+  local_ramses_340:
+    - yt/frontends/ramses/tests/test_outputs.py
 
-local_ytdata_340:
-  - yt/frontends/ytdata
\ No newline at end of file
+  local_ytdata_340:
+    - yt/frontends/ytdata
+
+other_tests:
+  unittests:
+    - '-v'
+  cookbook:
+    - 'doc/source/cookbook/tests/test_cookbook.py'
+    - '-P'

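With the new two-level layout, consumers index into ``answer_tests`` and ``other_tests`` explicitly; a sketch of how ``generate_tasks_input``-style code reads such a file:

.. code-block:: python

    import yaml

    with open('tests/tests_2.7.yaml', 'r') as obj:
        tests = yaml.load(obj)

    # answer tests map a test name to a list of test modules
    for name, modules in tests["answer_tests"].items():
        print(name, modules)

    # other tests map a runner name to extra nose arguments
    for name, argv in tests["other_tests"].items():
        print(name, argv)
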
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -52,6 +52,8 @@
     def __init__(self, lambda_min, lambda_max, n_lambda):
         self.n_lambda = n_lambda
         # lambda, flux, and tau are wavelength, flux, and optical depth
+        self.lambda_min = lambda_min
+        self.lambda_max = lambda_max
         self.lambda_field = YTArray(np.linspace(lambda_min, lambda_max, 
                                     n_lambda), "angstrom")
         self.tau_field = None
@@ -281,8 +283,24 @@
                 delta_lambda = line['wavelength'] * field_data['redshift']
             # lambda_obs is central wavelength of line after redshift
             lambda_obs = line['wavelength'] + delta_lambda
-            # bin index in lambda_field of central wavelength of line after z
-            center_index = np.digitize(lambda_obs, self.lambda_field)
+            # the total number of absorbers per transition
+            n_absorbers = len(lambda_obs)
+
+            # we want to know the bin index in the lambda_field array
+            # where each line has its central wavelength after being
+            # redshifted.  however, because we don't know a priori how wide
+            # a line will be (ie DLAs), we have to include bin indices 
+            # *outside* the spectral range of the AbsorptionSpectrum 
+            # object.  Thus, we find the "equivalent" bin index, which
+            # may be <0 or >the size of the array.  In the end, we deposit
+            # the bins that actually overlap with the AbsorptionSpectrum's
+            # range in lambda.
+            
+            # this equation gives us the "equivalent" bin index for each line
+            # if it were placed into the self.lambda_field array
+            center_index = (lambda_obs.in_units('Angstrom').d - self.lambda_min) \
+                            / self.bin_width.d
+            center_index = np.ceil(center_index).astype('int')
 
             # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
@@ -295,7 +313,6 @@
 
             # Sanitize units for faster runtime of the tau_profile machinery.
             lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
-            lambda_1 = lambda_obs.d # line's observed frame; angstroms
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
@@ -317,77 +334,92 @@
             #    10; this will assure we don't get spikes in the deposited
             #    spectra from uneven numbers of vbins per bin
             resolution = thermal_width / self.bin_width 
-            vbin_width = self.bin_width / \
-                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
-            vbin_width = vbin_width.in_units('angstrom').d
+            n_vbins_per_bin = 10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            vbin_width = self.bin_width.d / n_vbins_per_bin
 
-            # the virtual window into which the line is deposited initially 
-            # spans a region of 5 thermal_widths, but this may expand
-            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
-            vbin_window_width = n_vbins*vbin_width
-
+            # a note to the user about which lines components are unresolved
             if (thermal_width < self.bin_width).any():
                 mylog.info(("%d out of %d line components will be " + \
                             "deposited as unresolved lines.") %
                            ((thermal_width < self.bin_width).sum(), 
-                            thermal_width.size))
+                            n_absorbers))
 
-            valid_lines = np.arange(len(thermal_width))
+            # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
-                            (line['label'], line['wavelength']),
-                            thermal_width.size)
+                            (line['label'], line['wavelength']), n_absorbers)
 
             # for a given transition, step through each location in the 
             # observed spectrum where it occurs and deposit a voigt profile
-            for i in parallel_objects(valid_lines, njobs=-1):
-                my_vbin_window_width = vbin_window_width[i]
-                my_n_vbins = n_vbins[i]
-                my_vbin_width = vbin_width[i]
+            for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
+
+                # the virtual window into which the line is deposited initially 
+                # spans a region of 2 coarse spectral bins 
+                # (one on each side of the center_index) but the window
+                # can expand as necessary.
+                # it will continue to expand until the tau value in the far
+                # edge of the wings is less than the min_tau value or it 
+                # reaches the edge of the spectrum
+                window_width_in_bins = 2
 
                 while True:
+                    left_index = (center_index[i] - \
+                            window_width_in_bins/2)
+                    right_index = (center_index[i] + \
+                            window_width_in_bins/2)
+                    n_vbins = (right_index - left_index) * \
+                              n_vbins_per_bin[i]
+                    
+                    # the array of virtual bins in lambda space
                     vbins = \
-                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
-                                    lambda_1[i]+my_vbin_window_width/2., 
-                                    my_n_vbins, endpoint=False)
+                        np.linspace(self.lambda_min + self.bin_width.d * left_index, 
+                                    self.lambda_min + self.bin_width.d * right_index, 
+                                    n_vbins, endpoint=False)
 
+                    # the virtual bins and their corresponding opacities
                     vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], thermb[i],
-                            cdens[i], delta_lambda=dlambda[i],
-                            lambda_bins=vbins)
+                            lambda_0, line['f_value'], line['gamma'], 
+                            thermb[i], cdens[i], 
+                            delta_lambda=dlambda[i], lambda_bins=vbins)
 
                     # If tau has not dropped below min tau threshold by the
-                    # edges (ie the wings), then widen the wavelength 
+                    # edges (ie the wings), then widen the wavelength
                     # window and repeat the process. 
-                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
+                    if ((vtau[0] < min_tau) and (vtau[-1] < min_tau)):
                         break
-                    my_vbin_window_width *= 2
-                    my_n_vbins *= 2
-
-                # identify the extrema of the vbin_window so as to speed
-                # up searching over the entire lambda_field array
-                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
-                                           self.bin_width.d) + 1
-                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
-                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
-                window_width = right_index - left_index
-
-                # run digitize to identify which vbins are deposited into which
-                # global lambda bins.
-                # shift global lambda bins over by half a bin width; 
-                # this has the effect of assuring np.digitize will place 
-                # the vbins in the closest bin center.
-                binned = np.digitize(vbins, 
-                                     self.lambda_field[left_index:right_index] \
-                                     + (0.5 * self.bin_width))
+                    window_width_in_bins *= 2
 
                 # numerically integrate the virtual bins to calculate a
                 # virtual equivalent width; then sum the virtual equivalent
                 # widths and deposit into each spectral bin
-                vEW = vtau * my_vbin_width
-                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
-                EW = np.array(EW)/self.bin_width.d
-                self.tau_field[left_index:right_index] += EW
+                vEW = vtau * vbin_width[i]
+                EW = np.zeros(right_index - left_index)
+                EW_indices = np.arange(left_index, right_index)
+                for k, val in enumerate(EW_indices):
+                    EW[k] = vEW[n_vbins_per_bin[i] * k: \
+                                n_vbins_per_bin[i] * (k + 1)].sum()
+                EW = EW/self.bin_width.d
+
+                # only deposit EW bins that actually intersect the original
+                # spectral wavelength range (i.e. lambda_field)
+
+                # if EW bins don't intersect the original spectral range at all
+                # then skip the deposition
+                if ((left_index >= self.n_lambda) or \
+                    (right_index < 0)):
+                    pbar.update(i)
+                    continue
+
+                # otherwise, determine how much of the original spectrum
+                # is intersected by the expanded line window to be deposited, 
+                # and deposit the Equivalent Width data into that intersecting
+                # window in the original spectrum's tau
+                else:
+                    intersect_left_index = max(left_index, 0)
+                    intersect_right_index = min(right_index, self.n_lambda-1)
+                    self.tau_field[intersect_left_index:intersect_right_index] \
+                        += EW[(intersect_left_index - left_index): \
+                              (intersect_right_index - left_index)]
 
                 # write out absorbers to file if the column density of
                 # an absorber is greater than the specified "label_threshold" 
@@ -411,9 +443,8 @@
             pbar.finish()
 
             del column_density, delta_lambda, lambda_obs, center_index, \
-                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
-                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
-                valid_lines, vbins, vtau, vEW
+                thermal_b, thermal_width, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins_per_bin
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")

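Conceptually, the new ``center_index`` computation just converts a wavelength into an "equivalent" bin index on a uniform grid, allowing indices outside [0, n_lambda); a minimal numpy sketch of that arithmetic (the values are illustrative):

.. code-block:: python

    import numpy as np

    lambda_min, lambda_max, n_lambda = 900.0, 1800.0, 900
    bin_width = (lambda_max - lambda_min) / n_lambda  # 1 angstrom per bin

    # observed line centers; the first lies below the spectral range,
    # the last above it
    lambda_obs = np.array([850.0, 1216.0, 2100.0])

    # equivalent bin index: may be < 0 or >= n_lambda for out-of-range lines
    center_index = np.ceil((lambda_obs - lambda_min) / bin_width).astype('int')
    print(center_index)  # [-50, 316, 1200]
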
diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -13,34 +13,31 @@
 import numpy as np
 from yt.testing import \
     assert_allclose_units, requires_file, requires_module, \
-    assert_almost_equal, assert_array_almost_equal
+    assert_almost_equal
 from yt.analysis_modules.absorption_spectrum.absorption_line import \
     voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.cosmological_observation.api import LightRay
-from yt.config import ytcfg
+from yt.utilities.answer_testing.framework import \
+    GenericArrayTest, \
+    requires_answer_testing
 import tempfile
 import os
 import shutil
 from yt.utilities.on_demand_imports import \
     _h5py as h5
 
-test_dir = ytcfg.get("yt", "test_data_dir")
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
-HI_SPECTRUM_COSMO = "absorption_spectrum_data/enzo_lyman_alpha_cosmo_spec.h5"
-HI_SPECTRUM_COSMO_FILE = os.path.join(test_dir, HI_SPECTRUM_COSMO)
-HI_SPECTRUM = "absorption_spectrum_data/enzo_lyman_alpha_spec.h5"
-HI_SPECTRUM_FILE = os.path.join(test_dir, HI_SPECTRUM)
+
 
 @requires_file(COSMO_PLUS)
- at requires_file(HI_SPECTRUM_COSMO)
+ at requires_answer_testing()
 def test_absorption_spectrum_cosmo():
     """
     This test generates an absorption spectrum from a cosmological light ray
     """
-
     # Set up in a temp dir
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
@@ -78,22 +75,21 @@
                                         use_peculiar_velocity=True)
 
     # load just-generated hdf5 file of spectral data (for consistency)
-    f_new = h5.File('spectrum.h5', 'r')
+    data = h5.File('spectrum.h5', 'r')
 
-    # load standard data for comparison
-    f_old = h5.File(HI_SPECTRUM_COSMO_FILE, 'r')
-
-    # compare between standard data and current data for each array saved 
-    # (wavelength, flux, tau)
-    for key in f_old.keys():
-        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_cosmo".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_cosmo.__name__ = test.description
+        yield test
 
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
- at requires_file(HI_SPECTRUM)
+ at requires_answer_testing()
 def test_absorption_spectrum_non_cosmo():
     """
     This test generates an absorption spectrum from a non-cosmological light ray
@@ -130,15 +126,14 @@
                                         use_peculiar_velocity=True)
 
     # load just-generated hdf5 file of spectral data (for consistency)
-    f_new = h5.File('spectrum.h5', 'r')
-
-    # load standard data for comparison
-    f_old = h5.File(HI_SPECTRUM_FILE, 'r')
-
-    # compare between standard data and current data for each array saved 
-    # (wavelength, flux, tau)
-    for key in f_old.keys():
-        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo.__name__ = test.description
+        yield test
 
     # clean up
     os.chdir(curdir)
@@ -185,9 +180,9 @@
         wavelength, flux = sp.make_spectrum('lightray.h5')
         total_tau.append((lambda_bin_width * sp.tau_field).sum())
         
-    # assure that the total tau values are all within 1e-5 of each other
+    # assure that the total tau values are all within 1e-3 of each other
     for tau in total_tau:
-        assert_almost_equal(tau, total_tau[0], 5)
+        assert_almost_equal(tau, total_tau[0], 3)
 
     # clean up
     os.chdir(curdir)

diff -r 05815443e955781705cf611e912f0fa7f5b0b1cd -r ed87473301feb44201bc66a801138f43749109ac yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -480,6 +480,9 @@
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
                     cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    # Protect against situations where velocity mag is exactly
+                    # zero, in which case zero / zero = NaN.
+                    cos_theta = np.nan_to_num(cos_theta)
                     redshift_dopp = \
                         (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
                          np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1

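The guard added above addresses the 0/0 -> NaN case when the gas is exactly at rest; a minimal numpy sketch of the failure mode and the fix:

.. code-block:: python

    import numpy as np

    line_of_sight = np.array([1.0, 0.0, 0.0])
    sub_vel = np.zeros(3)                        # gas at rest
    sub_vel_mag = np.sqrt((sub_vel ** 2).sum())  # exactly zero

    with np.errstate(invalid='ignore'):
        cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag  # 0/0 -> nan

    # nan_to_num maps the NaN to 0.0, i.e. no Doppler contribution
    cos_theta = np.nan_to_num(cos_theta)
    print(cos_theta)  # 0.0
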
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/ff8cdfb114c3/
Changeset:   ff8cdfb114c3
Branch:      yt
User:        mzingale
Date:        2016-03-17 22:03:52+00:00
Summary:     move import of matplotlib into functions
Affected #:  1 file

diff -r ed87473301feb44201bc66a801138f43749109ac -r ff8cdfb114c333124655223ef7b0aaffd01455bc yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -36,7 +36,6 @@
 from .zbuffer_array import ZBuffer
 from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import YTNotInsideNotebook
-import matplotlib.pyplot as plt
 
 class Scene(object):
 
@@ -346,6 +345,8 @@
         >>>                                        horizontalalignment="center")]])
 
         """
+        import matplotlib.pyplot as plt
+
         sources = list(itervalues(self.sources))
         rensources = [s for s in sources if isinstance(s, RenderSource)]
 
@@ -403,6 +404,7 @@
         plt.savefig(fname, facecolor='black', pad_inches=0)
 
     def _show_mpl(self, im, sigma_clip=None, dpi=100):
+        import matplotlib.pyplot as plt
         s = im.shape
         self._render_figure = plt.figure(1, figsize=(s[1]/dpi, s[0]/dpi))
         ax = plt.gca()
@@ -422,6 +424,7 @@
         return axim
 
     def _annotate(self, ax, tf, label="", label_fmt=None):
+        import matplotlib.pyplot as plt
         ax.get_xaxis().set_visible(False)
         ax.get_xaxis().set_ticks([])
         ax.get_yaxis().set_visible(False)

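The pattern in this changeset, deferring a heavyweight import until first use, looks like this in isolation (a sketch; the function and variable names are illustrative, not the actual Scene methods):

.. code-block:: python

    def save_figure(image, fname, dpi=100):
        # deferred import: matplotlib is only loaded when a figure is
        # actually saved, keeping `import yt` fast and headless-safe
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(image.shape[1] / dpi, image.shape[0] / dpi))
        ax = fig.add_axes([0, 0, 1, 1])
        ax.imshow(image)
        fig.savefig(fname, dpi=dpi)
        plt.close(fig)
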
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


