[yt-svn] commit/yt: 2 new changesets

commits-noreply at bitbucket.org
Wed Mar 30 11:23:10 PDT 2016


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/1ad674e40250/
Changeset:   1ad674e40250
Branch:      yt
User:        migueldvb
Date:        2016-03-24 17:27:01+00:00
Summary:     Strip unnecessary whitespace in docs

Affected #:  107 files

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -17,7 +17,7 @@
 #
 # By default this will install yt from source.
 #
-# If you experience problems, please visit the Help section at 
+# If you experience problems, please visit the Help section at
 # http://yt-project.org.
 #
 DEST_SUFFIX="yt-conda"
@@ -298,7 +298,7 @@
 
 if [ $INST_UNSTRUCTURED -eq 1 ]
 then
-  YT_DEPS+=('netcdf4')   
+  YT_DEPS+=('netcdf4')
 fi
 
 # Here is our dependency list for yt
@@ -361,7 +361,7 @@
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
 echo "You must now modify your PATH variable by prepending:"
-echo 
+echo
 echo "   $DEST_DIR/bin"
 echo
 echo "On Bash-style shells you can copy/paste the following command to "

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/helper_scripts/code_support.py
--- a/doc/helper_scripts/code_support.py
+++ b/doc/helper_scripts/code_support.py
@@ -85,7 +85,7 @@
 print("|| . ||", end=' ')
 for c in code_names:
     print("%s || " % (c), end=' ')
-print() 
+print()
 
 for vn in vals:
     print("|| !%s ||" % (vn), end=' ')

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/helper_scripts/table.py
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -44,7 +44,7 @@
       "A bunch of illustrated examples of how to do things"),
      ("reference/index.html", "Reference Materials",
       "A list of all bundled fields, API documentation, the Change Log..."),
-     ("faq/index.html", "FAQ", 
+     ("faq/index.html", "FAQ",
       "Frequently Asked Questions: answered for you!")
   ]),
 ]

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -66,7 +66,7 @@
             written = cond_output(output, written)
             ofn = "%s/%s_%s" % (ndir, fn, os.path.basename(ifn))
             open(ofn, "wb").write(open(ifn, "rb").read())
-            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) + 
+            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
                          "   :width: 240\n" +
                          "   :target: ../_images/%s_%s\n" % (fn, os.path.basename(ifn))
                         )

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,13 +1,13 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
-# First things first, if you experience problems, please visit the Help 
+# First things first, if you experience problems, please visit the Help
 # section at http://yt-project.org.
 #
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them, which is 
+# There are a few options, but you only need to set *one* of them, which is
 # the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
@@ -307,7 +307,7 @@
         echo "  * gcc-{,c++,gfortran}"
         echo "  * make"
         echo "  * patch"
-        echo 
+        echo
         echo "You can accomplish this by executing:"
         echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
         echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
@@ -495,7 +495,7 @@
 if [ $INST_PY3 -eq 1 ]
 then
      PYTHON_EXEC='python3.4'
-else 
+else
      PYTHON_EXEC='python2.7'
 fi
 
@@ -513,7 +513,7 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
-    if [[ $LIB =~ .*mercurial.* ]] 
+    if [[ $LIB =~ .*mercurial.* ]]
     then
         PYEXE="python2.7"
     else
@@ -620,9 +620,9 @@
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12' 
+FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.5.0'
-HDF5='hdf5-1.8.14' 
+HDF5='hdf5-1.8.14'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.4.3'
@@ -880,7 +880,7 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
- 
+
 echo "Installing pip"
 ( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 ( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -1006,7 +1006,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]]
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -40,7 +40,7 @@
         padding-bottom: 10px;
     }
     /* since 3.1.0 */
-    .navbar-collapse.collapse.in { 
+    .navbar-collapse.collapse.in {
         display: block!important;
     }
     .collapsing {
@@ -48,7 +48,7 @@
     }
 }
 
-/* 
+/*
 
 Sphinx code literals conflict with the notebook code tag, so we special-case
 literals that are inside text.
@@ -56,7 +56,7 @@
 */
 
 p code {
-    color:  #d14;    
+    color:  #d14;
     white-space: nowrap;
     font-size: 90%;
     background-color: #f9f2f4;
@@ -93,16 +93,16 @@
 */
 
 *[id]:before :not(p) {
-  display: block; 
-  content: " "; 
-  margin-top: -45px; 
-  height: 45px; 
-  visibility: hidden; 
+  display: block;
+  content: " ";
+  margin-top: -45px;
+  height: 45px;
+  visibility: hidden;
 }
 
 /*
 
-Make tables span only half the page. 
+Make tables span only half the page.
 
 */
 

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -12,10 +12,10 @@
 -----------
 
 yt is a toolkit for analyzing and visualizing quantitative data.  Originally
-written to analyze 3D grid-based astrophysical simulation data, 
+written to analyze 3D grid-based astrophysical simulation data,
 it has grown to handle any kind of data represented in a 2D or 3D volume.
-yt is an Python-based open source project and is open for anyone to use or 
-contribute code.  The entire source code and history is available to all 
+yt is a Python-based open source project and is open for anyone to use or
+contribute code.  The entire source code and history is available to all
 at https://bitbucket.org/yt_analysis/yt .
 
 .. _who-is-yt:
@@ -23,16 +23,16 @@
 Who is yt?
 ----------
 
-As an open-source project, yt has a large number of user-developers.  
-In September of 2014, the yt developer community collectively decided to endow 
-the title of *member* on individuals who had contributed in a significant way 
-to the project.  For a list of those members and a description of their 
-contributions to the code, see 
+As an open-source project, yt has a large number of user-developers.
+In September of 2014, the yt developer community collectively decided to endow
+the title of *member* on individuals who had contributed in a significant way
+to the project.  For a list of those members and a description of their
+contributions to the code, see
 `our members website. <http://yt-project.org/members.html>`_
 
-For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
-For a more detailed breakup of contributions made by individual users, see out 
+For an up-to-date list of everyone who has contributed to the yt codebase,
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.
+For a more detailed breakdown of contributions made by individual users, see our
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 
 History of yt
@@ -40,17 +40,17 @@
 
 yt was originally begun by Matthew Turk in 2007 in the course of his graduate
 studies in computational astrophysics.  The code was developed
-as a simple data-reader and exporter for grid-based hydrodynamical simulation 
-data outputs from the *Enzo* code.  Over the next few years, he invited 
+as a simple data-reader and exporter for grid-based hydrodynamical simulation
+data outputs from the *Enzo* code.  Over the next few years, he invited
 collaborators and friends to contribute and use yt.  As the community grew,
-so did the capabilities of yt.  It is now a community-developed project with 
-contributions from many people, the hospitality of several institutions, and 
-benefiting from numerous grants.  With this community-driven approach 
-and contributions from a sizeable population of developers, it has evolved 
-into a fully-featured toolkit for analysis and visualization of 
-multidimensional data.  It relies on no proprietary software -- although it 
-can be and has been extended to interface with proprietary software and 
-libraries -- and has been designed from the ground up to enable users to be 
+so did the capabilities of yt.  It is now a community-developed project with
+contributions from many people, the hospitality of several institutions, and
+benefiting from numerous grants.  With this community-driven approach
+and contributions from a sizeable population of developers, it has evolved
+into a fully-featured toolkit for analysis and visualization of
+multidimensional data.  It relies on no proprietary software -- although it
+can be and has been extended to interface with proprietary software and
+libraries -- and has been designed from the ground up to enable users to be
 as immersed in the data as they desire.
 
 How do I contact yt?
@@ -58,7 +58,7 @@
 
 If you have any questions about the code, please contact the `yt users email
 list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.  If
-you're having other problems, please follow the steps in 
+you're having other problems, please follow the steps in
 :ref:`asking-for-help`.
 
 How do I cite yt?
@@ -70,7 +70,7 @@
 entry: ::
 
    @ARTICLE{2011ApJS..192....9T,
-      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and 
+      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
      journal = {\apjs},

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -5,23 +5,23 @@
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created 
-by the (:ref:`light-ray-generator`).  For each element of the ray, column 
-densities are calculated multiplying the number density within a grid cell 
-with the path length of the ray through the cell.  Line profiles are 
-generated using a voigt profile based on the temperature field.  The lines 
-are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
+Absorption line spectra, such as shown below, can be made with data created
+by the (:ref:`light-ray-generator`).  For each element of the ray, column
+densities are calculated by multiplying the number density within a grid cell
+with the path length of the ray through the cell.  Line profiles are
+generated using a Voigt profile based on the temperature field.  The lines
+are then shifted according to the redshift recorded by the light ray tool
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
-The spectrum generator will output a file containing the wavelength and 
+The spectrum generator will output a file containing the wavelength and
 normalized flux.  It will also output a text file listing all important lines.
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms 
+An absorption spectrum for the wavelength range from 900 to 1800 Angstroms
 made with a light ray extending from z = 0 to z = 0.4.
 
 .. image:: _images/spectrum_zoom.png
@@ -32,7 +32,7 @@
 Creating an Absorption Spectrum
 -------------------------------
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the 
+To instantiate an AbsorptionSpectrum object, the arguments required are the
 minimum and maximum wavelengths, and the number of wavelength bins.
 
 .. code-block:: python
@@ -44,33 +44,33 @@
 Adding Features to the Spectrum
 -------------------------------
 
-Absorption lines and continuum features can then be added to the spectrum.  
-To add a line, you must know some properties of the line: the rest wavelength, 
-f-value, gamma value, and the atomic mass in amu of the atom.  That line must 
+Absorption lines and continuum features can then be added to the spectrum.
+To add a line, you must know some properties of the line: the rest wavelength,
+f-value, gamma value, and the atomic mass in amu of the atom.  That line must
 be tied in some way to a field in the dataset you are loading, and this field
-must be added to the LightRay object when it is created.  Below, we will 
-add the H Lyman-alpha line, which is tied to the neutral hydrogen field 
+must be added to the LightRay object when it is created.  Below, we will
+add the H Lyman-alpha line, which is tied to the neutral hydrogen field
 ('H_number_density').
 
 .. code-block:: python
-  
+
   my_label = 'HI Lya'
   field = 'H_number_density'
   wavelength = 1215.6700 # Angstroms
   f_value = 4.164E-01
   gamma = 6.265e+08
   mass = 1.00794
-  
+
   sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
 
-In the above example, the *field* argument tells the spectrum generator which 
-field from the ray data to use to calculate the column density.  The 
-``label_threshold`` keyword tells the spectrum generator to add all lines 
-above a column density of 10 :superscript:`10` cm :superscript:`-2` to the 
-text line list.  If None is provided, as is the default, no lines of this 
+In the above example, the *field* argument tells the spectrum generator which
+field from the ray data to use to calculate the column density.  The
+``label_threshold`` keyword tells the spectrum generator to add all lines
+above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
+text line list.  If None is provided, as is the default, no lines of this
 type will be added to the text list.
 
-Continuum features with optical depths that follow a power law can also be 
+Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
 Below, we will add H Lyman continuum.
@@ -82,29 +82,29 @@
   wavelength = 912.323660 # Angstroms
   normalization = 1.6e17
   index = 3.0
-  
+
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
 -------------------
 
-Once all the lines and continuum are added, it is time to make a spectrum out 
+Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python
 
-  wavelength, flux = sp.make_spectrum('lightray.h5', 
-                                      output_file='spectrum.fits', 
+  wavelength, flux = sp.make_spectrum('lightray.h5',
+                                      output_file='spectrum.fits',
                                       line_list_file='lines.txt',
                                       use_peculiar_velocity=True)
 
-A spectrum will be made using the specified ray data and the wavelength and 
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to 
+A spectrum will be made using the specified ray data and the wavelength and
+flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to
 False, the lines will only be shifted according to the redshift.
 
-Three output file formats are supported for writing out the spectrum: fits, 
-hdf5, and ascii.  The file format used is based on the extension provided 
-in the ``output_file`` keyword: ``.fits`` for a fits file, 
+Three output file formats are supported for writing out the spectrum: fits,
+hdf5, and ascii.  The file format used is based on the extension provided
+in the ``output_file`` keyword: ``.fits`` for a fits file,
 ``.h5`` for an hdf5 file, and anything else for an ascii file.
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
@@ -112,11 +112,11 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures 
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.  
-Spectrum generation is parallelized using a multi-level strategy where each 
-absorption line is deposited by a different processor.  If the number of available 
-processors is greater than the number of lines, then the deposition of 
+The spectrum generator can be run in parallel simply by following the procedures
+laid out in :ref:`parallel-computation` for running yt scripts in parallel.
+Spectrum generation is parallelized using a multi-level strategy where each
+absorption line is deposited by a different processor.  If the number of available
+processors is greater than the number of lines, then the deposition of
 individual lines will be divided over multiple processors.
 
 Fitting an Absorption Spectrum
@@ -127,14 +127,14 @@
 This tool can be used to fit absorption spectra, particularly those
 generated using the (``AbsorptionSpectrum``) tool. For more details
 on its uses and implementation please see (`Egan et al. (2013)
-<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we 
+<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we
 encourage you to cite accordingly.
 
 Loading an Absorption Spectrum
 ------------------------------
 
-To load an absorption spectrum created by 
-(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``), 
+To load an absorption spectrum created by
+(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``),
 we specify the output file name. It is advisable to use either an .h5
 or .fits file, rather than an ascii file to save the spectrum as rounding
 errors produced in saving to a ascii file will negatively impact fit quality.
@@ -149,7 +149,7 @@
 Specifying Species Properties
 -----------------------------
 
-Before fitting a spectrum, you must specify the properties of all the 
+Before fitting a spectrum, you must specify the properties of all the
 species included when generating the spectrum.
 
 The physical properties needed for each species are the rest wavelength,
@@ -160,7 +160,7 @@
 
 To fine tune the fitting procedure and give results in a minimal
 number of optimizing steps, we specify expected maximum and minimum
-values for the column density, doppler parameter, and redshift. These 
+values for the column density, doppler parameter, and redshift. These
 values can be well outside the range of expected values for a typical line
 and are mostly to prevent the algorithm from fitting to negative values
 or becoming numerically unstable.
@@ -204,7 +204,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an appropriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated.
 
 .. code-block:: python
 
@@ -219,19 +219,19 @@
 recommended to fit species the generate multiple lines first, as a fit
 will only be accepted if all of the lines are fit appropriately using
 a single set of parameters. At the moment no cross correlation between
-lines of different species is performed. 
+lines of different species is performed.
 
-The parameters of the lines that are needed to fit the spectrum are contained 
+The parameters of the lines that are needed to fit the spectrum are contained
 in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
-be a key in the ``fitted_lines`` dictionary. The entry for each species 
-key will be another dictionary containing entries for 'N','b','z', and 
+be a key in the ``fitted_lines`` dictionary. The entry for each species
+key will be another dictionary containing entries for 'N','b','z', and
 'group#' which are the column density, doppler parameter, redshift,
-and associate line complex respectively. The i :superscript:`th` line 
-of a given species is then given by the parameters ``N[i]``, ``b[i]``, 
+and associated line complex respectively. The i :superscript:`th` line
+of a given species is then given by the parameters ``N[i]``, ``b[i]``,
 and ``z[i]`` and is part of the same complex (and was fitted at the same time)
 as all lines with the same group number as ``group#[i]``.
 
-The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
+The ``fitted_flux`` is an ndarray of the same size as ``flux`` and
 ``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
@@ -250,8 +250,8 @@
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
-To generate a fit for a spectrum 
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` 
+To generate a fit for a spectrum
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
 is called.
 This function controls the identification of line complexes, the fit
 of a series of absorption lines for each appropriate species, checks of
@@ -260,14 +260,14 @@
 Finding Line Complexes
 ----------------------
 
-Line complexes are found using the 
+Line complexes are found using the
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
-function. The process by which line complexes are found involves walking 
-through the array of flux in order from minimum to maximum wavelength, and 
-finding series of spatially contiguous cells whose flux is less than some 
-limit.  These regions are then checked in terms of an additional flux limit 
-and size.  The bounds of all the passing regions are then listed and returned. 
-Those bounds that cover an exceptionally large region of wavelength space will 
+function. The process by which line complexes are found involves walking
+through the array of flux in order from minimum to maximum wavelength, and
+finding series of spatially contiguous cells whose flux is less than some
+limit.  These regions are then checked in terms of an additional flux limit
+and size.  The bounds of all the passing regions are then listed and returned.
+Those bounds that cover an exceptionally large region of wavelength space will
 be broken up if a suitable cut point is found. This method is only appropriate
 for noiseless spectra.
 
@@ -280,25 +280,25 @@
 unstable when optimizing.
 
 The ``fitLim`` parameter controls what is the maximum flux that the trough
-of the region can have and still be considered a line complex. This 
+of the region can have and still be considered a line complex. This
 effectively controls the sensitivity to very low column absorbers. Default
-value is ``fitLim`` = 0.99. If a region is identified where the flux of the 
+value is ``fitLim`` = 0.99. If a region is identified where the flux of the
 trough is greater than this value, the region is simply ignored.
 
-The ``minLength`` parameter controls the minimum number of array elements 
+The ``minLength`` parameter controls the minimum number of array elements
 that an identified region must have. This value must be greater than or
 equal to 3 as there are a minimum of 3 free parameters that must be fit.
 Default is ``minLength`` = 3.
 
 The ``maxLength`` parameter controls the maximum number of array elements
 that an identified region can have before it is split into separate regions.
-Default is ``maxLength`` = 1000. This should be adjusted based on the 
+Default is ``maxLength`` = 1000. This should be adjusted based on the
 resolution of the spectrum to remain appropriate. The value correspond
-to a wavelength of roughly 50 angstroms. 
+to a wavelength of roughly 50 angstroms.
 
 The ``splitLim`` parameter controls how exceptionally large regions are split.
 When such a region is identified by having more array elements than
-``maxLength``, the point of maximum flux (or minimum absorption) in the 
+``maxLength``, the point of maximum flux (or minimum absorption) in the
 middle two quartiles is identified. If that point has a flux greater than
 or equal to ``splitLim``, then two separate complexes are created: one from
 the lower wavelength edge to the minimum absorption point and the other from
@@ -309,7 +309,7 @@
 Fitting a Line Complex
 ----------------------
 
-After a complex is identified, it is fitted by iteratively adding and 
+After a complex is identified, it is fitted by iteratively adding and
 optimizing a set of Voigt Profiles for a particular species until the
 region is considered successfully fit. The optimizing is accomplished
 using scipy's least squares optimizer. This requires an initial estimate
@@ -326,36 +326,36 @@
 smaller initial guess is given. These values are chosen to make optimization
 faster and more stable by being closer to the actual value, but the final
 results of fitting should not depend on them as they merely provide a
-starting point. 
+starting point.
 
-After the parameters for a line are optimized for the first time, the 
-optimized parameters are then used for the initial guess on subsequent 
-iterations with more lines. 
+After the parameters for a line are optimized for the first time, the
+optimized parameters are then used for the initial guess on subsequent
+iterations with more lines.
 
-The complex is considered successfully fit when the sum of the squares of 
+The complex is considered successfully fit when the sum of the squares of
 the difference between the flux generated from the fit and the desired flux
 profile is less than ``errBound``. ``errBound`` is related to the optional
-parameter to 
+parameter to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.generate_total_fit`,
-``maxAvgError`` by the number of array elements in the region such that 
+``maxAvgError`` by the number of array elements in the region such that
 ``errBound`` = number of elements * ``maxAvgError``.
 
-There are several other conditions under which the cycle of adding and 
+There are several other conditions under which the cycle of adding and
 optimizing lines will halt. If the error of the optimized fit from adding
 a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and 
+that line, then it is assumed that the fitting has become unstable and
 the latest line is removed. Lines are also prevented from being added if
 the total number of lines is greater than the number of elements in the flux
 array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them. 
+parameters in a fit than the number of points to constrain them.
 
 Checking Fit Results
 --------------------
 
 After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits. 
+the algorithm must go through to validate the fits.
 
-First, the parameters must be in a reasonable range. This is a check to make 
+First, the parameters must be in a reasonable range. This is a check to make
 sure that the optimization did not become unstable and generate a fit that
 diverges wildly outside the region where the fit was performed. This way, even
 if particular complex cannot be fit, the rest of the spectrum fitting still
@@ -363,13 +363,13 @@
 in the species parameter dictionary. These are merely broad limits that will
 prevent numerical instability rather than physical limits.
 
-In cases where a single species generates multiple lines (as in the OVI 
+In cases where a single species generates multiple lines (as in the OVI
 doublet), the fits are then checked for higher wavelength lines. Originally
 the fits are generated only considering the lowest wavelength fit to a region.
 This is because we perform the fitting of complexes in order from the lowest
 wavelength to the highest, so any contribution to a complex being fit must
 come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength. 
+already have been subtracted out after fitting the lower wavelength.
 
 Saturated Lyman Alpha Fitting Tools
 -----------------------------------
@@ -380,8 +380,8 @@
 The basic approach is to simply try a much wider range of initial parameter
 guesses in order to find the true optimization minimum, rather than getting
 stuck in a local minimum. A set of hard coded initial parameter guesses
-for Lyman alpha lines is given by the function 
+for Lyman alpha lines is given by the function
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
 Also included in these parameter guesses is an an initial guess of a high
-column cool line overlapping a lower column warm line, indictive of a 
+column cool line overlapping a lower column warm line, indicative of a
 broad Lyman alpha (BLA) absorber.

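Since the absorption_spectrum.rst hunks above only strip whitespace, the workflow that file documents is unchanged.  For reference, here is a minimal sketch assembled from the code snippets shown in those hunks; the number of wavelength bins, the continuum label, and the pre-existing ``lightray.h5`` ray file are assumptions rather than part of the diff.

.. code-block:: python

   from yt.analysis_modules.absorption_spectrum.absorption_spectrum import AbsorptionSpectrum

   # 900-1800 Angstrom range from the docs; 10000 bins is an assumed value.
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

   # H Lyman-alpha line tied to the neutral hydrogen field, values as given in the docs.
   sp.add_line('HI Lya', 'H_number_density', 1215.6700, 4.164E-01,
               6.265e+08, 1.00794, label_threshold=1.e10)

   # H Lyman continuum with a power-law optical depth; this label is illustrative.
   sp.add_continuum('HI Lya Continuum', 'H_number_density', 912.323660, 1.6e17, 3.0)

   # 'lightray.h5' is assumed to come from a prior LightRay.make_light_ray() call.
   wavelength, flux = sp.make_spectrum('lightray.h5',
                                       output_file='spectrum.fits',
                                       line_list_file='lines.txt',
                                       use_peculiar_velocity=True)
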
diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -3,17 +3,17 @@
 Clump Finding
 =============
 
-The clump finder uses a contouring algorithm to identified topologically 
-disconnected structures within a dataset.  This works by first creating a 
-single contour over the full range of the contouring field, then continually 
-increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contours, 
-the routine continues recursively through each object, creating a hierarchy of 
-clumps.  Individual clumps can be kept or removed from the hierarchy based on 
-the result of user-specified functions, such as checking for gravitational 
+The clump finder uses a contouring algorithm to identify topologically
+disconnected structures within a dataset.  This works by first creating a
+single contour over the full range of the contouring field, then continually
+increasing the lower value of the contour until it reaches the maximum value
+of the field.  As disconnected structures are identified as separate contours,
+the routine continues recursively through each object, creating a hierarchy of
+clumps.  Individual clumps can be kept or removed from the hierarchy based on
+the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder requires a data object (see :ref:`data-objects`) and a field 
+The clump finder requires a data object (see :ref:`data-objects`) and a field
 over which the contouring is to be performed.
 
 .. code:: python
@@ -28,11 +28,11 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
-At this point, every isolated contour will be considered a clump, 
-whether this is physical or not.  Validator functions can be added to 
-determine if an individual contour should be considered a real clump.  
-These functions are specified with the ``Clump.add_validator`` function.  
-Current, two validators exist: a minimum number of cells and gravitational 
+At this point, every isolated contour will be considered a clump,
+whether this is physical or not.  Validator functions can be added to
+determine if an individual contour should be considered a real clump.
+These functions are specified with the ``Clump.add_validator`` function.
+Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -41,9 +41,9 @@
 
    master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-As many validators as desired can be added, and a clump is only kept if all 
-return True.  If not, a clump is remerged into its parent.  Custom validators 
-can easily be added.  A validator function must only accept a ``Clump`` object 
+As many validators as desired can be added, and a clump is only kept if all
+return True.  If not, a clump is remerged into its parent.  Custom validators
+can easily be added.  A validator function must only accept a ``Clump`` object
 and either return True or False.
 
 .. code:: python
@@ -52,16 +52,16 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can 
-be accessed by the clump finder.  Then, the validator can be added to the 
+The ``add_validator`` function adds the validator to a registry that can
+be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
 .. code:: python
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
-and maximum of the contouring field, and the step size.  The lower value of the 
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum
+and maximum of the contouring field, and the step size.  The lower value of the
 contour finder will be continually multiplied by the step size.
 
 .. code:: python
@@ -71,9 +71,9 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top 
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
 with its own ``children`` attribute, and so on.
 
 A number of helper routines exist for examining the clump hierarchy.
@@ -96,15 +96,15 @@
    print(leaf_clumps[0]["gas", "density"])
    print(leaf_clumps[0].quantities.total_mass())
 
-The writing functions will write out a series or properties about each 
-clump by default.  Additional properties can be appended with the 
+The writing functions will write out a series of properties about each
+clump by default.  Additional properties can be appended with the
 ``Clump.add_info_item`` function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
-Just like the validators, custom info items can be added by defining functions 
+Just like the validators, custom info items can be added by defining functions
 that minimally accept a ``Clump`` object and return a string to be printed.
 
 .. code:: python
@@ -121,16 +121,16 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**, 
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
-**max_grid_level**, **min_number_density**, **max_number_density**, and 
+By default, the following info items are activated: **total_cells**,
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
+**max_grid_level**, **min_number_density**, **max_number_density**, and
 **distance_to_main_clump**.
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
 .. code:: python
 
-   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"),
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')

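The clump_finding.rst hunks above are likewise whitespace-only.  As a convenience, here is a minimal sketch of the clump-finding workflow those docs describe, stitched together from their snippets; the import path, the ``get_lowest_clumps`` helper, the sample dataset, and the contour bounds are assumptions, not part of the diff.

.. code-block:: python

   import yt
   # Import path and the get_lowest_clumps helper are assumed, not shown in the hunks above.
   from yt.analysis_modules.level_sets.api import Clump, find_clumps, get_lowest_clumps

   # Hypothetical dataset and data object; any data container would do.
   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   data_source = ds.sphere("max", (10.0, "kpc"))

   master_clump = Clump(data_source, ("gas", "density"))
   master_clump.add_validator("gravitationally_bound", use_particles=False)
   master_clump.add_info_item("total_cells")

   # Contour bounds taken from the field extrema; a step of 2.0 as in the docs.
   c_min = data_source["gas", "density"].min()
   c_max = data_source["gas", "density"].max()
   find_clumps(master_clump, c_min, c_max, 2.0)

   leaf_clumps = get_lowest_clumps(master_clump)

   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), center='c', width=(20, 'kpc'))
   prj.annotate_clumps(leaf_clumps)
   prj.save('clumps')
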
diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -91,7 +91,7 @@
 The center of mass would be the same one as returned by the halo
 finder.  The A, B, C are the largest to smallest magnitude of the
 ellipsoid's semi-principle axes. "e0" is the largest semi-principle
-axis vector direction that would have magnitude A but normalized.  
+axis vector direction that would have magnitude A but normalized.
 The "tilt" is an angle measured in radians.  It can be best described
 as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
@@ -128,7 +128,7 @@
 Since this is a first attempt, there are many drawbacks and corners
 cut.  Many things listed here will be amended when I have time.
 
-* The ellipsoid 3D container like the boolean object, do not contain 
+* The ellipsoid 3D container, like the boolean object, does not contain
   particle position and velocity information.
 * This currently assume periodic boundary condition, so if an
   ellipsoid center is at the edge, it will return part of the opposite
@@ -136,7 +136,7 @@
   periodicity in the future.
 * This method gives a minimalistic ellipsoid centered around the
   center of mass that contains all the particles, but sometimes people
-  prefer an inertial tensor triaxial ellipsoid described in 
+  prefer an inertial tensor triaxial ellipsoid described in
   `Dubinski, Carlberg 1991
   <http://adsabs.harvard.edu/abs/1991ApJ...378..496D>`_.  I have that
   method composed but it is not fully tested yet.

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,21 +7,21 @@
 ----------------------
 
 In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought 
+merger tree creation, and individual halo analysis) are all brought
 together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is 
-entirely backward incompatible.  
+different from the halo analysis machinery available in yt-2.x and is
+entirely backward incompatible.
 For a direct translation of various halo analysis tasks using yt-2.x
 to yt-3.0 please see :ref:`halo-transition`.
 
-A catalog of halos can be created from any initial dataset given to halo 
+A catalog of halos can be created from any initial dataset given to halo
 catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`. 
-For more details on the relative differences between these halo finders see 
+use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
+For more details on the relative differences between these halo finders see
 :ref:`halo_finding`.
 
-The class which holds all of the halo information is the 
+The class which holds all of the halo information is the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
@@ -32,11 +32,11 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
-A halo catalog may also be created from already run rockstar outputs. 
-This method is not implemented for previously run friends-of-friends or 
-HOP finders. Even though rockstar creates one file per processor, 
-specifying any one file allows the full catalog to be loaded. Here we 
-only specify the file output by the processor with ID 0. Note that the 
+A halo catalog may also be created from already run rockstar outputs.
+This method is not implemented for previously run friends-of-friends or
+HOP finders. Even though rockstar creates one file per processor,
+specifying any one file allows the full catalog to be loaded. Here we
+only specify the file output by the processor with ID 0. Note that the
 argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
@@ -44,10 +44,10 @@
    halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(halos_ds=halos_ds)
 
-Although supplying only the binary output of the rockstar halo finder 
-is sufficient for creating a halo catalog, it is not possible to find 
-any new information about the identified halos. To associate the halos 
-with the dataset from which they were found, supply arguments to both 
+Although supplying only the binary output of the rockstar halo finder
+is sufficient for creating a halo catalog, it is not possible to find
+any new information about the identified halos. To associate the halos
+with the dataset from which they were found, supply arguments to both
 halos_ds and data_ds.
 
 .. code-block:: python
@@ -56,14 +56,14 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
-A data object can also be supplied via the keyword ``data_source``, 
-associated with either dataset, to control the spatial region in 
+A data object can also be supplied via the keyword ``data_source``,
+associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
 Analysis Using Halo Catalogs
 ----------------------------
 
-Analysis is done by adding actions to the 
+Analysis is done by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
@@ -73,18 +73,18 @@
 * Callbacks
 * Recipes
 
-A list of all available filters, quantities, and callbacks can be found in 
-:ref:`halo_analysis_ref`.  
-All interaction with this analysis can be performed by importing from 
+A list of all available filters, quantities, and callbacks can be found in
+:ref:`halo_analysis_ref`.
+All interaction with this analysis can be performed by importing from
 halo_analysis.
 
 Filters
 ^^^^^^^
 
-A filter is a function that returns True or False. If the return value 
-is True, any further queued analysis will proceed and the halo in 
-question will be added to the final catalog. If the return value False, 
-further analysis will not be performed and the halo will not be included 
+A filter is a function that returns True or False. If the return value
+is True, any further queued analysis will proceed and the halo in
+question will be added to the final catalog. If the return value is False,
+further analysis will not be performed and the halo will not be included
 in the final catalog.
 
 An example of adding a filter:
@@ -93,11 +93,11 @@
 
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
-Currently quantity_value is the only available filter, but more can be 
-added by the user by defining a function that accepts a halo object as 
-the first argument and then adding it as an available filter. If you 
-think that your filter may be of use to the general community, you can 
-add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a 
+Currently quantity_value is the only available filter, but more can be
+added by the user by defining a function that accepts a halo object as
+the first argument and then adding it as an available filter. If you
+think that your filter may be of use to the general community, you can
+add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a
 pull request.
 
 An example of defining your own filter:
@@ -105,11 +105,11 @@
 .. code-block:: python
 
    def my_filter_function(halo):
-       
+
        # Define condition for filter
        filter_value = True
-       
-       # Return a boolean value 
+
+       # Return a boolean value
        return filter_value
 
    # Add your filter to the filter registry
@@ -121,17 +121,17 @@
 Quantities
 ^^^^^^^^^^
 
-A quantity is a call back that returns a value or values. The return values 
-are stored within the halo object in a dictionary called “quantities.” At 
-the end of the analysis, all of these quantities will be written to disk as 
+A quantity is a callback that returns a value or values. The return values
+are stored within the halo object in a dictionary called “quantities.” At
+the end of the analysis, all of these quantities will be written to disk as
 the final form of the generated halo catalog.
 
-Quantities may be available in the initial fields found in the halo catalog, 
-or calculated from a function after supplying a definition. An example 
-definition of center of mass is shown below. Currently available quantities 
-are center_of_mass and bulk_velocity. Their definitions are available in 
-``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
-your quantity may be of use to the general community, add it to 
+Quantities may be available in the initial fields found in the halo catalog,
+or calculated from a function after supplying a definition. An example
+definition of center of mass is shown below. Currently available quantities
+are center_of_mass and bulk_velocity. Their definitions are available in
+``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
+your quantity may be of use to the general community, add it to
 ``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
 
 * ``particle_identifier`` -- Halo ID (e.g. 0 to N)
@@ -154,7 +154,7 @@
    def my_quantity_function(halo):
        # Define quantity to return
        quantity = 5
-       
+
        return quantity
 
    # Add your filter to the filter registry
@@ -162,9 +162,9 @@
 
 
    # ... Later on in your script
-   hc.add_quantity("my_quantity") 
+   hc.add_quantity("my_quantity")
 
-This quantity will then be accessible for functions called later via the 
+This quantity will then be accessible for functions called later via the
 *quantities* dictionary that is associated with the halo object.
 
 .. code-block:: python
@@ -179,23 +179,23 @@
 Callbacks
 ^^^^^^^^^
 
-A callback is actually the super class for quantities and filters and 
-is a general purpose function that does something, anything, to a Halo 
-object. This can include hanging new attributes off the Halo object, 
-performing analysis and writing to disk, etc. A callback does not return 
+A callback is actually the super class for quantities and filters and
+is a general purpose function that does something, anything, to a Halo
+object. This can include hanging new attributes off the Halo object,
+performing analysis and writing to disk, etc. A callback does not return
 anything.
 
-An example of using a pre-defined callback where we create a sphere for 
+An example of using a pre-defined callback where we create a sphere for
 each halo with a radius that is twice the saved ``radius``.
 
 .. code-block:: python
 
    hc.add_callback("sphere", factor=2.0)
-    
-Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
-be added by using the syntax shown below. If you think that your 
-callback may be of use to the general community, add it to 
+
+Currently available callbacks are located in
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may
+be added by using the syntax shown below. If you think that your
+callback may be of use to the general community, add it to
 halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
@@ -261,37 +261,37 @@
 Running Analysis
 ----------------
 
-After all callbacks, quantities, and filters have been added, the 
+After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
 
 .. code-block:: python
 
    hc.create()
 
-The save_halos keyword determines whether the actual Halo objects 
-are saved after analysis on them has completed or whether just the 
-contents of their quantities dicts will be retained for creating the 
-final catalog. The looping over halos uses a call to parallel_objects 
-allowing the user to control how many processors work on each halo. 
-The final catalog is written to disk in the output directory given 
-when the 
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+The save_halos keyword determines whether the actual Halo objects
+are saved after analysis on them has completed or whether just the
+contents of their quantities dicts will be retained for creating the
+final catalog. The looping over halos uses a call to parallel_objects
+allowing the user to control how many processors work on each halo.
+The final catalog is written to disk in the output directory given
+when the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 object was created.
 
-All callbacks, quantities, and filters are stored in an actions list, 
-meaning that they are executed in the same order in which they were added. 
-This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unnecessary computation by allowing 
-the user to add filters at multiple stages to skip remaining analysis if it 
+All callbacks, quantities, and filters are stored in an actions list,
+meaning that they are executed in the same order in which they were added.
+This enables the use of simple, reusable, single action callbacks that
+depend on each other. This also prevents unnecessary computation by allowing
+the user to add filters at multiple stages to skip remaining analysis if it
 is not warranted.
 
 Saving and Reloading Halo Catalogs
 ----------------------------------
 
-A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-saved to disk can be reloaded as a yt dataset with the 
-standard call to load. Any side data, such as profiles, can be reloaded 
-with a ``load_profiles`` callback and a call to 
+A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+saved to disk can be reloaded as a yt dataset with the
+standard call to load. Any side data, such as profiles, can be reloaded
+with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
 .. code-block:: python
@@ -306,5 +306,5 @@
 Worked Example of Halo Catalog in Action
 ----------------------------------------
 
-For a full example of how to use these methods together see 
+For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

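The halo_catalogs.rst hunks above are again whitespace-only.  For reference, a minimal sketch assembled from that document's snippets is shown below; the import path and the use of the built-in ``center_of_mass`` quantity are assumptions on my part.

.. code-block:: python

   import yt
   # Import path assumed; the hunks above reference the class as
   # yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')

   # Keep only halos above 1e13 Msun, then attach a sphere of twice the halo radius.
   hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
   hc.add_callback("sphere", factor=2.0)

   # 'center_of_mass' is one of the built-in quantities listed in the docs above.
   hc.add_quantity("center_of_mass")

   hc.create()
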
diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -3,16 +3,16 @@
 Halo Finding
 ============
 
-There are three methods of finding particle haloes in yt. The 
-default method is called HOP, a method described 
-in `Eisenstein and Hut (1998) 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
-friends-of-friends (e.g. `Efstathiou et al. (1985) 
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
-finder is also implemented. Finally Rockstar (`Behroozi et a. 
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
-a 6D-phase space halo finder developed by Peter Behroozi that 
-excels in finding subhalos and substrcture, but does not allow 
+There are three methods of finding particle haloes in yt. The
+default method is called HOP, a method described
+in `Eisenstein and Hut (1998)
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
+friends-of-friends (e.g. `Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
+finder is also implemented. Finally, Rockstar (`Behroozi et al.
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
+a 6D-phase space halo finder developed by Peter Behroozi that
+excels in finding subhalos and substructure, but does not allow
 multiple particle masses.
 
 .. _hop:
@@ -20,30 +20,30 @@
 HOP
 ---
 
-The version of HOP used in yt is an upgraded version of the 
-`publicly available HOP code 
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
-for 64-bit floats and integers has been added, as well as 
-parallel analysis through spatial decomposition. HOP builds 
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
 groups in this fashion:
 
-#. Estimates the local density at each particle using a 
+#. Estimates the local density at each particle using a
    smoothing kernel.
 
-#. Builds chains of linked particles by 'hopping' from one 
-   particle to its densest neighbor. A particle which is 
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
    its own densest neighbor is the end of the chain.
 
-#. All chains that share the same densest particle are 
+#. All chains that share the same densest particle are
    grouped together.
 
-#. Groups are included, linked together, or discarded 
+#. Groups are included, linked together, or discarded
    depending on the user-supplied over density
    threshold parameter. The default is 160.0.
 
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
+Please see the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details and the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -61,28 +61,28 @@
 Rockstar Halo Finding
 ---------------------
 
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
-groups in six phase-space dimensions and one time dimension, which 
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
 allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt, 
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead 
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
 developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_. 
-In order to run the Rockstar halo finder in yt, make sure you've 
+et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
-At the moment, Rockstar does not support multiple particle masses, 
-instead using a fixed particle mass. This will not affect most dark matter 
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
 simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the 
+mass. In simulations where the highest-resolution particles all have the
 same mass (i.e., zoom-in grid-based simulations), one can set up a particle
 filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example: 
+only on those.  See this cookbook recipe for an example:
 :ref:`cookbook-rockstar-nested-grid`.
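
 A rough sketch of such a filter is given below.  The filter name, mass cut,
 and dataset path are purely illustrative; the cookbook recipe linked above
 remains the authoritative example.

 .. code-block:: python

   import yt

   def high_res_dm(pfilter, data):
       # Keep only particles below an illustrative mass cut of 3e6 Msun,
       # i.e. the highest-resolution (lowest-mass) particles.
       return (data[(pfilter.filtered_type, "particle_mass")] <
               data.ds.quan(3.0e6, "Msun"))

   yt.add_particle_filter("high_res_dm", function=high_res_dm,
                          filtered_type="all", requires=["particle_mass"])

   ds = yt.load("my_zoom_in/RD0009/RD0009")  # hypothetical dataset path
   ds.add_particle_filter("high_res_dm")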
 
-To run the Rockstar Halo finding, you must launch python with MPI and 
-parallelization enabled. While Rockstar itself does not require MPI to run, 
-the MPI libraries allow yt to distribute particle information across multiple 
+To run the Rockstar halo finder, you must launch Python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
 nodes.
 
 .. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
@@ -92,23 +92,23 @@
    For example, here is how Rockstar might be called using 24 cores:
    ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
 
-The script above configures the Halo finder, launches a server process which 
-disseminates run information and coordinates writer-reader processes. 
-Afterwards, it launches reader and writer tasks, filling the available MPI 
-slots, which alternately read particle information and analyze for halo 
+The script above configures the halo finder and launches a server process that
+disseminates run information and coordinates the reader and writer processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
 content.
 
-The RockstarHaloFinder class has these options that can be supplied to the 
+The RockstarHaloFinder class has these options that can be supplied to the
 halo catalog through the ``finder_kwargs`` argument:
 
-* ``dm_type``, the index of the dark matter particle. Default is 1. 
+* ``dm_type``, the index of the dark matter particle. Default is 1.
 * ``outbase``, this is where the out*list files that Rockstar makes should be
   placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the 
+* ``num_readers``, the number of reader tasks (which are idle most of the
   time). Default is 1.
 * ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1. 
-  If left undefined, the above options are automatically 
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
   configured from the number of available MPI tasks.
 * ``force_res``, the resolution that Rockstar uses for various calculations
   and smoothing lengths. This is in units of Mpc/h.
@@ -130,14 +130,14 @@
   this option can save disk access time if there are no star particles
   (or other non-dark matter particles) in the simulation. Default: ``False``.
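
 For reference, here is a minimal sketch of how a few of these options might
 be supplied; the values are illustrative only, and the script is assumed to
 be launched with ``mpirun`` as in the warning above:

 .. code-block:: python

   import yt
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   yt.enable_parallelism()

   ds = yt.load("Enzo_64/DD0043/data0043")  # hypothetical dataset path
   hc = HaloCatalog(data_ds=ds, finder_method='rockstar',
                    finder_kwargs={'num_readers': 1,
                                   'num_writers': 4,
                                   'outbase': 'rockstar_halos'})
   hc.create()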
 
-Rockstar dumps halo information in a series of text (halo*list and 
-out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
-We use the halo list classes to recover the information. 
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
 
 Inside the ``outbase`` directory there is a text file named ``datasets.txt``
 that records the connection between ds names and the Rockstar file names.
 
-For more information, see the 
+For more information, see the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -146,9 +146,9 @@
 Parallel HOP and FOF
 --------------------
 
-Both the HOP and FoF halo finders can run in parallel using simple 
-spatial decomposition. In order to run them in parallel it is helpful 
-to understand how it works. Below in the first plot (i) is a simplified 
+Both the HOP and FoF halo finders can run in parallel using simple
+spatial decomposition. In order to run them in parallel it is helpful
+to understand how it works. Below in the first plot (i) is a simplified
 depiction of three haloes labeled 1, 2 and 3:
 
 .. image:: _images/ParallelHaloFinder.png
@@ -156,35 +156,35 @@
 
 Halo 3 is twice reflected around the periodic boundary conditions.
 
-In (ii), the volume has been sub-divided into four equal subregions, 
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in 
-two different subregions, C and D, and that halo 3 is now in three, 
+In (ii), the volume has been sub-divided into four equal subregions,
+A, B, C and D, shown with dotted lines. Notice that halo 2 is now in
+two different subregions, C and D, and that halo 3 is now in three,
 A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split 
-up into multiple haloes, which is incorrect. The solution is to give 
+halo 1 is identified as a single halo, but haloes 2 and 3 are split
+up into multiple haloes, which is incorrect. The solution is to give
 each subregion padding to oversample into neighboring regions.
 
-In (iii), subregion C has oversampled into the other three regions, 
-with the periodic boundary conditions taken into account, shown by 
+In (iii), subregion C has oversampled into the other three regions,
+with the periodic boundary conditions taken into account, shown by
 dot-dashed lines. The other subregions oversample in a similar way.
 
-The halo finder is then run on each padded subregion independently 
-and simultaneously. By oversampling like this, haloes 2 and 3 will 
-both be enclosed fully in at least one subregion and identified 
+The halo finder is then run on each padded subregion independently
+and simultaneously. By oversampling like this, haloes 2 and 3 will
+both be enclosed fully in at least one subregion and identified
 completely.
 
-Haloes identified with centers of mass inside the padded part of a 
-subregion are thrown out, eliminating the problem of halo duplication. 
+Haloes identified with centers of mass inside the padded part of a
+subregion are thrown out, eliminating the problem of halo duplication.
 The centers for the three haloes are shown with stars. Halo 1 will
 belong to subregion A, 2 to C and 3 to B.
 
-To run with parallel halo finding, you must supply a value for 
+padding in the ``finder_kwargs`` argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter is how 
-much padding is added to each of the six sides of a subregion. 
-This value should be 2x-3x larger than the largest expected halo 
-in the simulation. It is unlikely, of course, that the largest 
-object in the simulation will be on a subregion boundary, but there 
+To run with parallel halo finding, you must supply a value for
+padding in the finder_kwargs argument. The ``padding`` parameter
+is in simulation units and defaults to 0.02. This parameter is how
+much padding is added to each of the six sides of a subregion.
+This value should be 2x-3x larger than the largest expected halo
+in the simulation. It is unlikely, of course, that the largest
+object in the simulation will be on a subregion boundary, but there
 is no way of knowing before the halo finder is run.
 
 .. code-block:: python
@@ -197,10 +197,10 @@
   # --or--
   hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
 
-In general, a little bit of padding goes a long way, and too much 
-just slows down the analysis and doesn't improve the answer (but 
-doesn't change it).  It may be worth your time to run the parallel 
-halo finder at a few paddings to find the right amount, especially 
+In general, a little bit of padding goes a long way, and too much
+just slows down the analysis and doesn't improve the answer (but
+doesn't change it).  It may be worth your time to run the parallel
+halo finder at a few paddings to find the right amount, especially
 if you're analyzing many similar datasets.
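
 If you do experiment with the padding, a simple loop like the sketch below
 (with illustrative values and a hypothetical dataset path) is one way to
 compare the resulting catalogs:

 .. code-block:: python

   import yt
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   yt.enable_parallelism()

   ds = yt.load("Enzo_64/DD0043/data0043")  # hypothetical dataset path
   for padding in (0.02, 0.04, 0.06):
       # Write each run to its own directory so the catalogs can be compared.
       hc = HaloCatalog(data_ds=ds, finder_method='hop',
                        finder_kwargs={'padding': padding},
                        output_dir='halo_catalogs/padding_%g' % padding)
       hc.create()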
 
 .. _rockstar-installation:
@@ -209,15 +209,15 @@
 ---------------------
 
 Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been 
-slightly patched and modified to run as a library inside of yt. By default it 
-is not installed with yt, but installation is very easy.  The 
-:ref:`install-script` used to install yt from source has a line: 
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
 ``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
 rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.  
+it will only install components missing from the existing installation.
 You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of 
+the parent directory of the yt installation directory (e.g. the parent of
 ``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
 
 .. code-block:: bash

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -11,21 +11,21 @@
 General Overview
 ----------------
 
-A halo mass function can be created for the halos identified in a cosmological 
+A halo mass function can be created for the halos identified in a cosmological
 simulation, as well as analytic fits using any arbitrary set of cosmological
 parameters. In order to create a mass function for simulated halos, they must
-first be identified (using HOP, FOF, or Rockstar, see 
+first be identified (using HOP, FOF, or Rockstar, see
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
 at the same redshift and using the same cosmological parameters as were used
 in the simulation. Care should be taken in this regard, as the analytic fit
-requires the specification of cosmological parameters that are not necessarily 
+requires the specification of cosmological parameters that are not necessarily
 stored in the halo or simulation datasets, and must be specified by the user.
-Efforts have been made to set reasonable defaults for these parameters, but 
+Efforts have been made to set reasonable defaults for these parameters, but
 setting them to identically match those used in the simulation will produce a
 much better comparison.
 
-Analytic halo mass functions can also be created without a halo dataset by 
+Analytic halo mass functions can also be created without a halo dataset by
 providing either a simulation dataset or specifying cosmological parameters by
 hand. yt includes 5 analytic fits for the halo mass function which can be
 selected.
@@ -65,8 +65,8 @@
 
 This will create a HaloMassFcn object off of which arrays holding the information
 about the analytic mass function hang. Creating the halo mass function for a set
-of simulated halos requires only the loaded halo dataset to be passed as an 
-argument. This also creates the analytic mass function using all parameters that 
+of simulated halos requires only the loaded halo dataset to be passed as an
+argument. This also creates the analytic mass function using all parameters that
 can be extracted from the halo dataset, at the same redshift, spanning a similar
 range of halo masses.
 
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additional cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters
 to create an analytic mass function.
 
 .. code-block:: python
@@ -87,10 +87,10 @@
   from yt.analysis_modules.halo_mass_function.api import *
 
   my_ds = load("RD0027/RedshiftOutput0027")
-  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96,
                     sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
 
-The analytic mass function can be created for a set of arbitrary cosmological 
+The analytic mass function can be created for a set of arbitrary cosmological
 parameters without any dataset being passed as an argument.
 
 .. code-block:: python
@@ -98,7 +98,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
-  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27,
                     omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
                     log_mass_min=5, log_mass_max=9, fitting_function=5)
 
@@ -110,95 +110,95 @@
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
-  The halos from a simulation to be used for creation of the 
+  The halos from a simulation to be used for creation of the
   halo mass function in the simulation.
   Default : None.
 
 * **make_analytic** (*bool*)
-  Whether or not to calculate the analytic mass function to go with 
-  the simulated halo mass function.  Automatically set to true if a 
+  Whether or not to calculate the analytic mass function to go with
+  the simulated halo mass function.  Automatically set to true if a
   simulation dataset is provided.
   Default : True.
 
 * **omega_matter0** (*float*)
-  The fraction of the universe made up of matter (dark and baryonic). 
+  The fraction of the universe made up of matter (dark and baryonic).
   Default : 0.2726.
 
 * **omega_lambda0** (*float*)
-  The fraction of the universe made up of dark energy. 
+  The fraction of the universe made up of dark energy.
   Default : 0.7274.
 
 * **omega_baryon0**  (*float*)
-  The fraction of the universe made up of baryonic matter. This is not 
+  The fraction of the universe made up of baryonic matter. This is not
   always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
-  The expansion rate of the universe in units of 100 km/s/Mpc. 
+  The expansion rate of the universe in units of 100 km/s/Mpc.
   Default : 0.704.
 
 * **sigma8** (*float*)
-  The amplitude of the linear power spectrum at z=0 as specified by 
-  the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the dataset and should be 
+  The amplitude of the linear power spectrum at z=0 as specified by
+  the rms amplitude of mass-fluctuations in a top-hat sphere of radius
+  8 Mpc/h. This is not always stored in the dataset and should be
   checked by hand.
   Default : 0.86.
 
 * **primordial_index** (*float*)
-  This is the index of the mass power spectrum before modification by 
-  the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the dataset and 
+  This is the index of the mass power spectrum before modification by
+  the transfer function. A value of 1 corresponds to the scale-free
+  primordial spectrum. This is not always stored in the dataset and
   should be checked by hand.
   Default : 1.0.
 
 * **this_redshift** (*float*)
-  The current redshift. 
+  The current redshift.
   Default : 0.
 
 * **log_mass_min** (*float*)
   The log10 of the mass of the minimum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
   dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 5. Units: M_solar
   Default : None.
 
 * **log_mass_max** (*float*)
   The log10 of the mass of the maximum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
   dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 16. Units: M_solar
   Default : None.
 
 * **num_sigma_bins** (*float*)
-  The number of bins (points) to use for the calculation of the 
-  analytic mass function. 
+  The number of bins (points) to use for the calculation of the
+  analytic mass function.
   Default : 360.
 
 * **fitting_function** (*int*)
-  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins,
   3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
   Default : 4.
 
 Outputs
 -------
 
-A HaloMassFnc object has several arrays hanging off of it containing the 
+A HaloMassFcn object has several arrays hanging off of it containing the following:
 
 * **masses_sim**: Halo masses from simulated halos. Units: M_solar
 
-* **n_cumulative_sim**: Number density of halos with mass greater than the 
+* **n_cumulative_sim**: Number density of halos with mass greater than the
   corresponding mass in masses_sim. Units: comoving Mpc^-3
 
-* **masses_analytic**: Masses used for the generation of the analytic mass 
+* **masses_analytic**: Masses used for the generation of the analytic mass
   function. Units: M_solar
 
-* **n_cumulative_analytic**: Number density of halos with mass greater then 
+* **n_cumulative_analytic**: Number density of halos with mass greater than
   the corresponding mass in masses_analytic. Units: comoving Mpc^-3
 
 * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
 
 After the mass function has been created for both simulated halos and the
-corresponding analytic fits, they can be plotted though something along the 
+corresponding analytic fits, they can be plotted through something along the
 lines of
 
 .. code-block:: python
@@ -213,7 +213,7 @@
   plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
   plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the
 halo mass function to a text file (continued from above):

 .. code-block:: python
 

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -5,20 +5,20 @@
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure. 
+the new halo catalog infrastructure.
 
 Finding Halos
 -------------
 
-Previously, halos were found using calls to ``HaloFinder``, 
-``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is 
-encouraged that you find the halos upon creation of the halo catalog 
+Previously, halos were found using calls to ``HaloFinder``,
+``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is
+encouraged that you find the halos upon creation of the halo catalog
 by supplying a value to the ``finder_method`` keyword when calling
-``HaloCatalog``. Currently, only halos found using rockstar or a 
-previous instance of a halo catalog are able to be loaded 
+``HaloCatalog``. Currently, only halos found using Rockstar or a
+previous instance of a halo catalog can be loaded
 using the ``halos_ds`` keyword.
 
-To pass additional arguments to the halo finders 
+To pass additional arguments to the halo finders
 themselves, supply a dictionary to ``finder_kwargs`` where
 each key in the dictionary is a keyword of the halo finder
 and the corresponding value is the value to be passed for
@@ -41,7 +41,7 @@
 how to add these quantities and what quantities are available.
 
 You no longer have to iterate over halos in the ``halo_list``.
-Now a halo dataset can be treated as a regular dataset and 
+Now a halo dataset can be treated as a regular dataset and
 all quantities are available by accessing ``all_data``.
 Specifically, all quantities can be accessed as shown:
 
@@ -77,14 +77,14 @@
 
 The halo profiler available in yt-2.x has been removed, and
 profiling functionality is now completely contained within the
-halo catalog. A complete example of how to profile halos by 
-radius using the new infrastructure is given in 
-:ref:`halo-analysis-example`. 
+halo catalog. A complete example of how to profile halos by
+radius using the new infrastructure is given in
+:ref:`halo-analysis-example`.
 
 Plotting Halos
 --------------
 
-Annotating halo locations onto a slice or projection works in 
+Annotating halo locations onto a slice or projection works in
 the same way as in yt-2.x, but now a halo catalog must be
 passed to the annotate halo call rather than a halo list.
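
 A minimal sketch of the annotation (with a hypothetical dataset path, and
 assuming the halo catalog is built from the same dataset) might look like:

 .. code-block:: python

   import yt
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   ds = yt.load("Enzo_64/DD0043/data0043")  # hypothetical dataset path
   hc = HaloCatalog(data_ds=ds, finder_method='hop')
   hc.create()

   p = yt.ProjectionPlot(ds, 'z', 'density')
   p.annotate_halos(hc)
   p.save()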
 
@@ -105,6 +105,6 @@
 ------------
 
 Data is now written out in the form of h5 files rather than
-text files. The directory they are written out to is 
+text files. The directory they are written out to is
 controlled by the keyword ``output_dir``. Each quantity
 is a field in the file.
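
 For example (a sketch with an illustrative directory name and dataset path):

 .. code-block:: python

   import yt
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   ds = yt.load("Enzo_64/DD0043/data0043")  # hypothetical dataset path
   hc = HaloCatalog(data_ds=ds, finder_method='hop',
                    output_dir='halo_catalogs/catalog_0043')
   hc.create()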

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -5,7 +5,7 @@
 
 These semi-autonomous analysis modules are unique to specific subject matter
 like tracking halos, generating synthetic observations, exporting output to
-external visualization routines, and more.  Because they are somewhat 
+external visualization routines, and more.  Because they are somewhat
 specialized, they exist in their own corners of yt, and they do not get loaded
 by default when you "import yt".  Read up on these advanced tools below.
 

diff -r 86db031f1f56a6f396d4440188707f1fdb8e6bfa -r 1ad674e40250ed9c2d21e51b5f0d3263d5150522 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -3,29 +3,29 @@
 Light Cone Generator
 ====================
 
-Light cones are created by stacking multiple datasets together to 
-continuously span a given redshift interval.  To make a projection of a 
-field through a light cone, the width of individual slices is adjusted 
-such that each slice has the same angular size.  
-Each slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  A 
-recipe for creating a simple light cone projection can be found in 
+Light cones are created by stacking multiple datasets together to
+continuously span a given redshift interval.  To make a projection of a
+field through a light cone, the width of individual slices is adjusted
+such that each slice has the same angular size.
+Each slice is randomly shifted and projected along a random axis to
+ensure that the same structures are not sampled multiple times.  A
+recipe for creating a simple light cone projection can be found in
 the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
 
-A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from 
-z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual 
-slices.  The panels shows the contributions from the 9 individual slices with 
+A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from
+z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual
+slices.  The panels show the contributions from the 9 individual slices, with
 the final light cone image shown in the bottom right.
 
 Configuring the Light Cone Generator
 ------------------------------------
 
-The required arguments to instantiate a 
+The required arguments to instantiate a
 :class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone`
-object are the path to the simulation parameter file, the simulation type, the 
+object are the path to the simulation parameter file, the simulation type, the
 nearest redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
@@ -38,29 +38,29 @@
 
 The additional keyword arguments are:
 
-* ``use_minimum_datasets`` (*bool*):  If True, the minimum number of 
-  datasets is used to connect the initial and final redshift.  If False, 
-  the light cone solution will contain as many entries as possible within 
+* ``use_minimum_datasets`` (*bool*):  If True, the minimum number of
+  datasets is used to connect the initial and final redshift.  If False,
+  the light cone solution will contain as many entries as possible within
   the redshift interval.  Default: True.
 
-* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between 
+* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with 
-  ``use_minimum_datasets`` set to False, this parameter specifies the 
-  fraction of the total box size to be traversed before rerandomizing the 
-  projection axis and center.  This was invented to allow light cones with 
-  thin slices to sample coherent large scale structure, but in practice does 
-  not work so well.  Try setting this parameter to 1 and see what happens.  
+* ``minimum_coherent_box_fraction`` (*float*): Used with
+  ``use_minimum_datasets`` set to False, this parameter specifies the
+  fraction of the total box size to be traversed before rerandomizing the
+  projection axis and center.  This was invented to allow light cones with
+  thin slices to sample coherent large scale structure, but in practice does
+  not work so well.  Try setting this parameter to 1 and see what happens.
   Default: 0.0.
 
-* ``time_data`` (*bool*): Whether or not to include time outputs when 
+* ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
 
-* ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
+* ``redshift_data`` (*bool*): Whether or not to include redshift outputs
   when gathering datasets for time series.  Default: True.
 
-* ``set_parameters`` (*dict*): Dictionary of parameters to attach to 
+* ``set_parameters`` (*dict*): Dictionary of parameters to attach to
   ds.parameters.  Default: None.
 
 * ``output_dir`` (*string*): The directory in which images and data files
@@ -72,9 +72,9 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets spanning a redshift 
-interval with a random orientation for each dataset.  A new solution 
-is calculated with the 
+A light cone solution consists of a list of datasets spanning a redshift
+interval with a random orientation for each dataset.  A new solution
+is calculated with the
 :func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
 function:
 
@@ -84,17 +84,17 @@
 
 The keyword arguments are:
 
-* ``seed`` (*int*): the seed for the random number generator.  Any light 
-  cone solution can be reproduced by giving the same random seed.  
+* ``seed`` (*int*): the seed for the random number generator.  Any light
+  cone solution can be reproduced by giving the same random seed.
   Default: None.
 
-* ``filename`` (*str*): if given, a text file detailing the solution will be 
+* ``filename`` (*str*): if given, a text file detailing the solution will be
   written out.  Default: None.
 
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution in place, projections with a given field of 
+With the light cone solution in place, projections with a given field of
 view and resolution can be made of any available field:
 
 .. code-block:: python
@@ -103,38 +103,38 @@
   field_of_view = (600.0, "arcmin")
   resolution = (60.0, "arcsec")
   lc.project_light_cone(field_of_view, resolution,
-                        field , weight_field=None, 
-                        save_stack=True, 
+                        field, weight_field=None,
+                        save_stack=True,
                         save_slice_images=True)
 
-The field of view and resolution can be specified either as a tuple of 
-value and unit string or as a unitful ``YTQuantity``.  
+The field of view and resolution can be specified either as a tuple of
+value and unit string or as a unitful ``YTQuantity``.
 Additional keyword arguments:
 
-* ``weight_field`` (*str*): the weight field of the projection.  This has 
+* ``weight_field`` (*str*): the weight field of the projection.  This has
   the same meaning as in standard projections.  Default: None.
 
-* ``photon_field`` (*bool*): if True, the projection data for each slice is 
-  decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+* ``photon_field`` (*bool*): if True, the projection data for each slice is
+  decremented by 4 pi R :superscript:`2` , where R is the luminosity
   distance between the observer and the slice redshift.  Default: False.
 
-* ``save_stack`` (*bool*): if True, the unflatted light cone data including 
+* ``save_stack`` (*bool*): if True, the unflattened light cone data including
   each individual slice is written to an hdf5 file.  Default: True.
 
-* ``save_final_image`` (*bool*): if True, save an image of the final light 
+* ``save_final_image`` (*bool*): if True, save an image of the final light
   cone projection.  Default: True.
 
-* ``save_slice_images`` (*bool*): save images for each individual projection 
+* ``save_slice_images`` (*bool*): save images for each individual projection
   slice.  Default: False.
 
 * ``cmap_name`` (*string*): color map for images.  Default: "algae".
 
-* ``njobs`` (*int*): The number of parallel jobs over which the light cone 
+* ``njobs`` (*int*): The number of parallel jobs over which the light cone
   projection will be split.  Choose -1 for one processor per individual
   projection and 1 to have all processors work together on each projection.
   Default: 1.
 
-* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the 
+* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the
   projections.  Default: False.
 
 .. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a307a8a74445/
Changeset:   a307a8a74445
Branch:      yt
User:        xarthisius
Date:        2016-03-30 18:22:56+00:00
Summary:     Merged in migueldvb/yt (pull request #2093)

Remove unnecessary whitespace in docs
Affected #:  107 files

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -17,7 +17,7 @@
 #
 # By default this will install yt from source.
 #
-# If you experience problems, please visit the Help section at 
+# If you experience problems, please visit the Help section at
 # http://yt-project.org.
 #
 DEST_SUFFIX="yt-conda"
@@ -298,7 +298,7 @@
 
 if [ $INST_UNSTRUCTURED -eq 1 ]
 then
-  YT_DEPS+=('netcdf4')   
+  YT_DEPS+=('netcdf4')
 fi
 
 # Here is our dependency list for yt
@@ -361,7 +361,7 @@
 echo "yt and the Conda system are now installed in $DEST_DIR ."
 echo
 echo "You must now modify your PATH variable by prepending:"
-echo 
+echo
 echo "   $DEST_DIR/bin"
 echo
 echo "On Bash-style shells you can copy/paste the following command to "

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/helper_scripts/code_support.py
--- a/doc/helper_scripts/code_support.py
+++ b/doc/helper_scripts/code_support.py
@@ -85,7 +85,7 @@
 print("|| . ||", end=' ')
 for c in code_names:
     print("%s || " % (c), end=' ')
-print() 
+print()
 
 for vn in vals:
     print("|| !%s ||" % (vn), end=' ')

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/helper_scripts/table.py
--- a/doc/helper_scripts/table.py
+++ b/doc/helper_scripts/table.py
@@ -44,7 +44,7 @@
       "A bunch of illustrated examples of how to do things"),
      ("reference/index.html", "Reference Materials",
       "A list of all bundled fields, API documentation, the Change Log..."),
-     ("faq/index.html", "FAQ", 
+     ("faq/index.html", "FAQ",
       "Frequently Asked Questions: answered for you!")
   ]),
 ]

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -66,7 +66,7 @@
             written = cond_output(output, written)
             ofn = "%s/%s_%s" % (ndir, fn, os.path.basename(ifn))
             open(ofn, "wb").write(open(ifn, "rb").read())
-            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) + 
+            output.write(".. image:: _%s/%s_%s\n" % (fn, fn, os.path.basename(ifn)) +
                          "   :width: 240\n" +
                          "   :target: ../_images/%s_%s\n" % (fn, os.path.basename(ifn))
                         )

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,13 +1,13 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
-# First things first, if you experience problems, please visit the Help 
+# First things first, if you experience problems, please visit the Help
 # section at http://yt-project.org.
 #
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them, which is 
+# There are a few options, but you only need to set *one* of them, which is
 # the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
@@ -307,7 +307,7 @@
         echo "  * gcc-{,c++,gfortran}"
         echo "  * make"
         echo "  * patch"
-        echo 
+        echo
         echo "You can accomplish this by executing:"
         echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
         echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
@@ -495,7 +495,7 @@
 if [ $INST_PY3 -eq 1 ]
 then
      PYTHON_EXEC='python3.4'
-else 
+else
      PYTHON_EXEC='python2.7'
 fi
 
@@ -513,7 +513,7 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
-    if [[ $LIB =~ .*mercurial.* ]] 
+    if [[ $LIB =~ .*mercurial.* ]]
     then
         PYEXE="python2.7"
     else
@@ -620,9 +620,9 @@
 CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12' 
+FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.5.0'
-HDF5='hdf5-1.8.14' 
+HDF5='hdf5-1.8.14'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.4.3'
@@ -880,7 +880,7 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
- 
+
 echo "Installing pip"
 ( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 ( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -1006,7 +1006,7 @@
 cd $MY_PWD
 
 if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
-    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
+    [[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]]
 then
     if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -40,7 +40,7 @@
         padding-bottom: 10px;
     }
     /* since 3.1.0 */
-    .navbar-collapse.collapse.in { 
+    .navbar-collapse.collapse.in {
         display: block!important;
     }
     .collapsing {
@@ -48,7 +48,7 @@
     }
 }
 
-/* 
+/*
 
 Sphinx code literals conflict with the notebook code tag, so we special-case
 literals that are inside text.
@@ -56,7 +56,7 @@
 */
 
 p code {
-    color:  #d14;    
+    color:  #d14;
     white-space: nowrap;
     font-size: 90%;
     background-color: #f9f2f4;
@@ -93,16 +93,16 @@
 */
 
 *[id]:before :not(p) {
-  display: block; 
-  content: " "; 
-  margin-top: -45px; 
-  height: 45px; 
-  visibility: hidden; 
+  display: block;
+  content: " ";
+  margin-top: -45px;
+  height: 45px;
+  visibility: hidden;
 }
 
 /*
 
-Make tables span only half the page. 
+Make tables span only half the page.
 
 */
 

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -12,10 +12,10 @@
 -----------
 
 yt is a toolkit for analyzing and visualizing quantitative data.  Originally
-written to analyze 3D grid-based astrophysical simulation data, 
+written to analyze 3D grid-based astrophysical simulation data,
 it has grown to handle any kind of data represented in a 2D or 3D volume.
-yt is an Python-based open source project and is open for anyone to use or 
-contribute code.  The entire source code and history is available to all 
+yt is a Python-based open source project and is open for anyone to use or
+contribute code.  The entire source code and history are available to all
 at https://bitbucket.org/yt_analysis/yt .
 
 .. _who-is-yt:
@@ -23,16 +23,16 @@
 Who is yt?
 ----------
 
-As an open-source project, yt has a large number of user-developers.  
-In September of 2014, the yt developer community collectively decided to endow 
-the title of *member* on individuals who had contributed in a significant way 
-to the project.  For a list of those members and a description of their 
-contributions to the code, see 
+As an open-source project, yt has a large number of user-developers.
+In September of 2014, the yt developer community collectively decided to endow
+the title of *member* on individuals who had contributed in a significant way
+to the project.  For a list of those members and a description of their
+contributions to the code, see
 `our members website. <http://yt-project.org/members.html>`_
 
-For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
-For a more detailed breakup of contributions made by individual users, see out 
+For an up-to-date list of everyone who has contributed to the yt codebase,
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.
+For a more detailed breakdown of contributions made by individual users, see our
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 
 History of yt
@@ -40,17 +40,17 @@
 
 yt was originally begun by Matthew Turk in 2007 in the course of his graduate
 studies in computational astrophysics.  The code was developed
-as a simple data-reader and exporter for grid-based hydrodynamical simulation 
-data outputs from the *Enzo* code.  Over the next few years, he invited 
+as a simple data-reader and exporter for grid-based hydrodynamical simulation
+data outputs from the *Enzo* code.  Over the next few years, he invited
 collaborators and friends to contribute and use yt.  As the community grew,
-so did the capabilities of yt.  It is now a community-developed project with 
-contributions from many people, the hospitality of several institutions, and 
-benefiting from numerous grants.  With this community-driven approach 
-and contributions from a sizeable population of developers, it has evolved 
-into a fully-featured toolkit for analysis and visualization of 
-multidimensional data.  It relies on no proprietary software -- although it 
-can be and has been extended to interface with proprietary software and 
-libraries -- and has been designed from the ground up to enable users to be 
+so did the capabilities of yt.  It is now a community-developed project with
+contributions from many people, the hospitality of several institutions, and
+benefiting from numerous grants.  With this community-driven approach
+and contributions from a sizeable population of developers, it has evolved
+into a fully-featured toolkit for analysis and visualization of
+multidimensional data.  It relies on no proprietary software -- although it
+can be and has been extended to interface with proprietary software and
+libraries -- and has been designed from the ground up to enable users to be
 as immersed in the data as they desire.
 
 How do I contact yt?
@@ -58,7 +58,7 @@
 
 If you have any questions about the code, please contact the `yt users email
 list <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_.  If
-you're having other problems, please follow the steps in 
+you're having other problems, please follow the steps in
 :ref:`asking-for-help`.
 
 How do I cite yt?
@@ -70,7 +70,7 @@
 entry: ::
 
    @ARTICLE{2011ApJS..192....9T,
-      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and 
+      author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
    	{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
        title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
      journal = {\apjs},

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -5,23 +5,23 @@
 
 .. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Absorption line spectra, such as shown below, can be made with data created 
-by the (:ref:`light-ray-generator`).  For each element of the ray, column 
-densities are calculated multiplying the number density within a grid cell 
-with the path length of the ray through the cell.  Line profiles are 
-generated using a voigt profile based on the temperature field.  The lines 
-are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
-peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
+Absorption line spectra, such as shown below, can be made with data created
+by the (:ref:`light-ray-generator`).  For each element of the ray, column
+densities are calculated by multiplying the number density within a grid cell
+with the path length of the ray through the cell.  Line profiles are
+generated using a Voigt profile based on the temperature field.  The lines
+are then shifted according to the redshift recorded by the light ray tool
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
-The spectrum generator will output a file containing the wavelength and 
+The spectrum generator will output a file containing the wavelength and
 normalized flux.  It will also output a text file listing all important lines.
 
 .. image:: _images/spectrum_full.png
    :width: 500
 
-An absorption spectrum for the wavelength range from 900 to 1800 Angstroms 
+An absorption spectrum for the wavelength range from 900 to 1800 Angstroms
 made with a light ray extending from z = 0 to z = 0.4.
 
 .. image:: _images/spectrum_zoom.png
@@ -32,7 +32,7 @@
 Creating an Absorption Spectrum
 -------------------------------
 
-To instantiate an AbsorptionSpectrum object, the arguments required are the 
+To instantiate an AbsorptionSpectrum object, the arguments required are the
 minimum and maximum wavelengths, and the number of wavelength bins.
 
 .. code-block:: python
@@ -44,33 +44,33 @@
 Adding Features to the Spectrum
 -------------------------------
 
-Absorption lines and continuum features can then be added to the spectrum.  
-To add a line, you must know some properties of the line: the rest wavelength, 
-f-value, gamma value, and the atomic mass in amu of the atom.  That line must 
+Absorption lines and continuum features can then be added to the spectrum.
+To add a line, you must know some properties of the line: the rest wavelength,
+f-value, gamma value, and the atomic mass in amu of the atom.  That line must
 be tied in some way to a field in the dataset you are loading, and this field
-must be added to the LightRay object when it is created.  Below, we will 
-add the H Lyman-alpha line, which is tied to the neutral hydrogen field 
+must be added to the LightRay object when it is created.  Below, we will
+add the H Lyman-alpha line, which is tied to the neutral hydrogen field
 ('H_number_density').
 
 .. code-block:: python
-  
+
   my_label = 'HI Lya'
   field = 'H_number_density'
   wavelength = 1215.6700 # Angstroms
   f_value = 4.164E-01
   gamma = 6.265e+08
   mass = 1.00794
-  
+
   sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
 
-In the above example, the *field* argument tells the spectrum generator which 
-field from the ray data to use to calculate the column density.  The 
-``label_threshold`` keyword tells the spectrum generator to add all lines 
-above a column density of 10 :superscript:`10` cm :superscript:`-2` to the 
-text line list.  If None is provided, as is the default, no lines of this 
+In the above example, the *field* argument tells the spectrum generator which
+field from the ray data to use to calculate the column density.  The
+``label_threshold`` keyword tells the spectrum generator to add all lines
+above a column density of 10 :superscript:`10` cm :superscript:`-2` to the
+text line list.  If None is provided, as is the default, no lines of this
 type will be added to the text list.
 
-Continuum features with optical depths that follow a power law can also be 
+Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
 Below, we will add H Lyman continuum.
@@ -82,29 +82,29 @@
   wavelength = 912.323660 # Angstroms
   normalization = 1.6e17
   index = 3.0
-  
+
   sp.add_continuum(my_label, field, wavelength, normalization, index)
 
 Making the Spectrum
 -------------------
 
-Once all the lines and continuum are added, it is time to make a spectrum out 
+Once all the lines and continuum are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python
 
-  wavelength, flux = sp.make_spectrum('lightray.h5', 
-                                      output_file='spectrum.fits', 
+  wavelength, flux = sp.make_spectrum('lightray.h5',
+                                      output_file='spectrum.fits',
                                       line_list_file='lines.txt',
                                       use_peculiar_velocity=True)
 
-A spectrum will be made using the specified ray data and the wavelength and 
-flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to 
+A spectrum will be made using the specified ray data and the wavelength and
+flux arrays will also be returned.  If ``use_peculiar_velocity`` is set to
 False, the lines will only be shifted according to the redshift.
 
-Three output file formats are supported for writing out the spectrum: fits, 
-hdf5, and ascii.  The file format used is based on the extension provided 
-in the ``output_file`` keyword: ``.fits`` for a fits file, 
+Three output file formats are supported for writing out the spectrum: fits,
+hdf5, and ascii.  The file format used is based on the extension provided
+in the ``output_file`` keyword: ``.fits`` for a fits file,
 ``.h5`` for an hdf5 file, and anything else for an ascii file.
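
 For instance, simply changing the extension selects the format; this sketch
 repeats the call above but writes an hdf5 file instead:

 .. code-block:: python

   wavelength, flux = sp.make_spectrum('lightray.h5',
                                       output_file='spectrum.h5',
                                       line_list_file='lines.txt',
                                       use_peculiar_velocity=True)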
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
@@ -112,11 +112,11 @@
 Generating Spectra in Parallel
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The spectrum generator can be run in parallel simply by following the procedures 
-laid out in :ref:`parallel-computation` for running yt scripts in parallel.  
-Spectrum generation is parallelized using a multi-level strategy where each 
-absorption line is deposited by a different processor.  If the number of available 
-processors is greater than the number of lines, then the deposition of 
+The spectrum generator can be run in parallel simply by following the procedures
+laid out in :ref:`parallel-computation` for running yt scripts in parallel.
+Spectrum generation is parallelized using a multi-level strategy where each
+absorption line is deposited by a different processor.  If the number of available
+processors is greater than the number of lines, then the deposition of
 individual lines will be divided over multiple processors.
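
 In practice this just means following the usual yt parallel pattern.  Below
 is a minimal sketch; the constructor values are illustrative and the script
 is assumed to be launched with mpirun:

 .. code-block:: python

   # Save as make_spectrum_parallel.py and run with, e.g.,
   #   mpirun -np 8 python make_spectrum_parallel.py
   import yt
   from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

   yt.enable_parallelism()

   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
   sp.add_line('HI Lya', 'H_number_density', 1215.6700,
               4.164E-01, 6.265e+08, 1.00794)
   wavelength, flux = sp.make_spectrum('lightray.h5',
                                       output_file='spectrum.h5')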
 
 Fitting an Absorption Spectrum
@@ -127,14 +127,14 @@
 This tool can be used to fit absorption spectra, particularly those
 generated using the (``AbsorptionSpectrum``) tool. For more details
 on its uses and implementation please see (`Egan et al. (2013)
-<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we 
+<http://arxiv.org/abs/1307.2244>`_). If you find this tool useful we
 encourage you to cite accordingly.
 
 Loading an Absorption Spectrum
 ------------------------------
 
-To load an absorption spectrum created by 
-(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum``), 
+To load an absorption spectrum created by
+(:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`),
 we specify the output file name. It is advisable to use either an .h5
 or .fits file, rather than an ascii file to save the spectrum as rounding
 errors produced in saving to an ascii file will negatively impact fit quality.
@@ -149,7 +149,7 @@
 Specifying Species Properties
 -----------------------------
 
-Before fitting a spectrum, you must specify the properties of all the 
+Before fitting a spectrum, you must specify the properties of all the
 species included when generating the spectrum.
 
 The physical properties needed for each species are the rest wavelength,
@@ -160,7 +160,7 @@
 
 To fine tune the fitting procedure and give results in a minimal
 number of optimizing steps, we specify expected maximum and minimum
-values for the column density, doppler parameter, and redshift. These 
+values for the column density, doppler parameter, and redshift. These
 values can be well outside the range of expected values for a typical line
 and are mostly to prevent the algorithm from fitting to negative values
 or becoming numerically unstable.
@@ -204,7 +204,7 @@
 --------------------------
 
 After loading a spectrum and specifying the properties of the species
-used to generate the spectrum, an appropriate fit can be generated. 
+used to generate the spectrum, an appropriate fit can be generated.
 
 .. code-block:: python
 
@@ -219,19 +219,19 @@
 recommended to fit species that generate multiple lines first, as a fit
 will only be accepted if all of the lines are fit appropriately using
 a single set of parameters. At the moment no cross correlation between
-lines of different species is performed. 
+lines of different species is performed.
 
-The parameters of the lines that are needed to fit the spectrum are contained 
+The parameters of the lines that are needed to fit the spectrum are contained
 in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
-be a key in the ``fitted_lines`` dictionary. The entry for each species 
-key will be another dictionary containing entries for 'N','b','z', and 
+be a key in the ``fitted_lines`` dictionary. The entry for each species
+key will be another dictionary containing entries for 'N','b','z', and
 'group#' which are the column density, doppler parameter, redshift,
-and associate line complex respectively. The i :superscript:`th` line 
-of a given species is then given by the parameters ``N[i]``, ``b[i]``, 
+and associated line complex, respectively. The i :superscript:`th` line
+of a given species is then given by the parameters ``N[i]``, ``b[i]``,
 and ``z[i]`` and is part of the same complex (and was fitted at the same time)
 as all lines with the same group number as ``group#[i]``.
 
-The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
+The ``fitted_flux`` is an ndarray of the same size as ``flux`` and
 ``wavelength`` that contains the cumulative absorption spectrum generated
 by the lines contained in ``fitted_lines``.
 
@@ -250,8 +250,8 @@
 
 .. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
 
-To generate a fit for a spectrum 
-:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` 
+To generate a fit for a spectrum
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`
 is called.
 This function controls the identification of line complexes, the fit
 of a series of absorption lines for each appropriate species, checks of
@@ -260,14 +260,14 @@
 Finding Line Complexes
 ----------------------
 
-Line complexes are found using the 
+Line complexes are found using the
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
-function. The process by which line complexes are found involves walking 
-through the array of flux in order from minimum to maximum wavelength, and 
-finding series of spatially contiguous cells whose flux is less than some 
-limit.  These regions are then checked in terms of an additional flux limit 
-and size.  The bounds of all the passing regions are then listed and returned. 
-Those bounds that cover an exceptionally large region of wavelength space will 
+function. The process by which line complexes are found involves walking
+through the array of flux in order from minimum to maximum wavelength, and
+finding series of spatially contiguous cells whose flux is less than some
+limit.  These regions are then checked in terms of an additional flux limit
+and size.  The bounds of all the passing regions are then listed and returned.
+Those bounds that cover an exceptionally large region of wavelength space will
 be broken up if a suitable cut point is found. This method is only appropriate
 for noiseless spectra.
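
 The core idea (contiguous stretches of flux below a limit) is easy to
 illustrate.  The sketch below is not the yt implementation, only a toy
 version of the walk described above:

 .. code-block:: python

   import numpy as np

   def toy_find_complexes(flux, flux_limit=0.99, min_length=3):
       """Return (start, stop) index bounds of contiguous regions
       where the flux drops below flux_limit."""
       below = flux < flux_limit
       bounds = []
       start = None
       for i, flag in enumerate(below):
           if flag and start is None:
               start = i
           elif not flag and start is not None:
               if i - start >= min_length:
                   bounds.append((start, i))
               start = None
       if start is not None and len(flux) - start >= min_length:
           bounds.append((start, len(flux)))
       return bounds

   flux = np.ones(20)
   flux[5:12] = 0.5  # a fake absorption trough
   print(toy_find_complexes(flux))  # [(5, 12)]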
 
@@ -280,25 +280,25 @@
 unstable when optimizing.
 
 The ``fitLim`` parameter controls what is the maximum flux that the trough
-of the region can have and still be considered a line complex. This 
+of the region can have and still be considered a line complex. This
 effectively controls the sensitivity to very low column absorbers. Default
-value is ``fitLim`` = 0.99. If a region is identified where the flux of the 
+value is ``fitLim`` = 0.99. If a region is identified where the flux of the
 trough is greater than this value, the region is simply ignored.
 
-The ``minLength`` parameter controls the minimum number of array elements 
+The ``minLength`` parameter controls the minimum number of array elements
 that an identified region must have. This value must be greater than or
 equal to 3 as there are a minimum of 3 free parameters that must be fit.
 Default is ``minLength`` = 3.
 
 The ``maxLength`` parameter controls the maximum number of array elements
 that an identified region can have before it is split into separate regions.
-Default is ``maxLength`` = 1000. This should be adjusted based on the 
+Default is ``maxLength`` = 1000. This should be adjusted based on the
 resolution of the spectrum to remain appropriate. The value corresponds
-to a wavelength of roughly 50 angstroms. 
+to a wavelength of roughly 50 angstroms.
 
 The ``splitLim`` parameter controls how exceptionally large regions are split.
 When such a region is identified by having more array elements than
-``maxLength``, the point of maximum flux (or minimum absorption) in the 
+``maxLength``, the point of maximum flux (or minimum absorption) in the
 middle two quartiles is identified. If that point has a flux greater than
 or equal to ``splitLim``, then two separate complexes are created: one from
 the lower wavelength edge to the minimum absorption point and the other from
@@ -309,7 +309,7 @@
 Fitting a Line Complex
 ----------------------
 
-After a complex is identified, it is fitted by iteratively adding and 
+After a complex is identified, it is fitted by iteratively adding and
 optimizing a set of Voigt Profiles for a particular species until the
 region is considered successfully fit. The optimizing is accomplished
 using scipy's least squares optimizer. This requires an initial estimate
@@ -326,36 +326,36 @@
 smaller initial guess is given. These values are chosen to make optimization
 faster and more stable by being closer to the actual value, but the final
 results of fitting should not depend on them as they merely provide a
-starting point. 
+starting point.
 
-After the parameters for a line are optimized for the first time, the 
-optimized parameters are then used for the initial guess on subsequent 
-iterations with more lines. 
+After the parameters for a line are optimized for the first time, the
+optimized parameters are then used as the initial guess on subsequent
+iterations with more lines.
 
-The complex is considered successfully fit when the sum of the squares of 
+The complex is considered successfully fit when the sum of the squares of
 the difference between the flux generated from the fit and the desired flux
 profile is less than ``errBound``. ``errBound`` is related to the optional
-parameter to 
+parameter to
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.generate_total_fit`,
-``maxAvgError`` by the number of array elements in the region such that 
+``maxAvgError`` by the number of array elements in the region such that
 ``errBound`` = number of elements * ``maxAvgError``.
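
In other words, the stopping criterion is a per-region bound on the sum of
squared residuals. A small sketch of the relation, with hypothetical arrays
standing in for the observed and fitted flux of one complex:

.. code-block:: python

   import numpy as np

   # Hypothetical stand-ins for one line complex.
   region_flux = np.array([0.95, 0.60, 0.30, 0.55, 0.92])
   region_fit = np.array([0.94, 0.62, 0.31, 0.53, 0.93])
   maxAvgError = 1.0e-4

   errBound = region_flux.size * maxAvgError
   error = np.sum((region_fit - region_flux) ** 2)
   fit_accepted = error < errBound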
 
-There are several other conditions under which the cycle of adding and 
+There are several other conditions under which the cycle of adding and
 optimizing lines will halt. If the error of the optimized fit from adding
 a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and 
+that line, then it is assumed that the fitting has become unstable and
 the latest line is removed. Lines are also prevented from being added if
 the total number of lines is greater than the number of elements in the flux
 array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them. 
+parameters in a fit than the number of points to constrain them.
 
 Checking Fit Results
 --------------------
 
 After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits. 
+the algorithm must go through to validate the fits.
 
-First, the parameters must be in a reasonable range. This is a check to make 
+First, the parameters must be in a reasonable range. This is a check to make
 sure that the optimization did not become unstable and generate a fit that
 diverges wildly outside the region where the fit was performed. This way, even
 if a particular complex cannot be fit, the rest of the spectrum fitting still
@@ -363,13 +363,13 @@
 in the species parameter dictionary. These are merely broad limits that will
 prevent numerical instability rather than physical limits.
 
-In cases where a single species generates multiple lines (as in the OVI 
+In cases where a single species generates multiple lines (as in the OVI
 doublet), the fits are then checked for higher wavelength lines. Originally
 the fits are generated only considering the lowest wavelength fit to a region.
 This is because we perform the fitting of complexes in order from the lowest
 wavelength to the highest, so any contribution to a complex being fit must
 come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength. 
+already have been subtracted out after fitting the lower wavelength.
 
 Saturated Lyman Alpha Fitting Tools
 -----------------------------------
@@ -380,8 +380,8 @@
 The basic approach is to simply try a much wider range of initial parameter
 guesses in order to find the true optimization minimum, rather than getting
 stuck in a local minimum. A set of hard coded initial parameter guesses
-for Lyman alpha lines is given by the function 
+for Lyman alpha lines is given by the function
 :func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
 Also included in these parameter guesses is an initial guess of a high
-column cool line overlapping a lower column warm line, indictive of a 
+column cool line overlapping a lower column warm line, indicative of a
 broad Lyman alpha (BLA) absorber.

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -3,17 +3,17 @@
 Clump Finding
 =============
 
-The clump finder uses a contouring algorithm to identified topologically 
-disconnected structures within a dataset.  This works by first creating a 
-single contour over the full range of the contouring field, then continually 
-increasing the lower value of the contour until it reaches the maximum value 
-of the field.  As disconnected structures are identified as separate contours, 
-the routine continues recursively through each object, creating a hierarchy of 
-clumps.  Individual clumps can be kept or removed from the hierarchy based on 
-the result of user-specified functions, such as checking for gravitational 
+The clump finder uses a contouring algorithm to identify topologically
+disconnected structures within a dataset.  This works by first creating a
+single contour over the full range of the contouring field, then continually
+increasing the lower value of the contour until it reaches the maximum value
+of the field.  As disconnected structures are identified as separate contours,
+the routine continues recursively through each object, creating a hierarchy of
+clumps.  Individual clumps can be kept or removed from the hierarchy based on
+the result of user-specified functions, such as checking for gravitational
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder requires a data object (see :ref:`data-objects`) and a field 
+The clump finder requires a data object (see :ref:`data-objects`) and a field
 over which the contouring is to be performed.
 
 .. code:: python
@@ -28,11 +28,11 @@
 
    master_clump = Clump(data_source, ("gas", "density"))
 
-At this point, every isolated contour will be considered a clump, 
-whether this is physical or not.  Validator functions can be added to 
-determine if an individual contour should be considered a real clump.  
-These functions are specified with the ``Clump.add_validator`` function.  
-Current, two validators exist: a minimum number of cells and gravitational 
+At this point, every isolated contour will be considered a clump,
+whether this is physical or not.  Validator functions can be added to
+determine if an individual contour should be considered a real clump.
+These functions are specified with the ``Clump.add_validator`` function.
+Currently, two validators exist: a minimum number of cells and gravitational
 boundedness.
 
 .. code:: python
@@ -41,9 +41,9 @@
 
    master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-As many validators as desired can be added, and a clump is only kept if all 
-return True.  If not, a clump is remerged into its parent.  Custom validators 
-can easily be added.  A validator function must only accept a ``Clump`` object 
+As many validators as desired can be added, and a clump is only kept if all
+return True.  If not, a clump is remerged into its parent.  Custom validators
+can easily be added.  A validator function must only accept a ``Clump`` object
 and either return True or False.
 
 .. code:: python
@@ -52,16 +52,16 @@
        return (clump["gas", "cell_mass"].sum() >= min_mass)
    add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-The ``add_validator`` function adds the validator to a registry that can 
-be accessed by the clump finder.  Then, the validator can be added to the 
+The ``add_validator`` function adds the validator to a registry that can
+be accessed by the clump finder.  Then, the validator can be added to the
 clump finding just like the others.
 
 .. code:: python
 
    master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
-and maximum of the contouring field, and the step size.  The lower value of the 
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum
+and maximum of the contouring field, and the step size.  The lower value of the
 contour finder will be continually multiplied by the step size.
 
 .. code:: python
@@ -71,9 +71,9 @@
    step = 2.0
    find_clumps(master_clump, c_min, c_max, step)
 
-After the clump finding has finished, the master clump will represent the top 
-of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
-contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+After the clump finding has finished, the master clump will represent the top
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object
 with its own ``children`` attribute, and so on.
 
 A number of helper routines exist for examining the clump hierarchy.
@@ -96,15 +96,15 @@
    print(leaf_clumps[0]["gas", "density"])
    print(leaf_clumps[0].quantities.total_mass())
 
-The writing functions will write out a series or properties about each 
-clump by default.  Additional properties can be appended with the 
+The writing functions will write out a series of properties about each
+clump by default.  Additional properties can be appended with the
 ``Clump.add_info_item`` function.
 
 .. code:: python
 
    master_clump.add_info_item("total_cells")
 
-Just like the validators, custom info items can be added by defining functions 
+Just like the validators, custom info items can be added by defining functions
 that minimally accept a ``Clump`` object and return a string to be printed.
 
 .. code:: python
@@ -121,16 +121,16 @@
 
    master_clump.add_info_item("mass_weighted_jeans_mass")
 
-By default, the following info items are activated: **total_cells**, 
-**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
-**max_grid_level**, **min_number_density**, **max_number_density**, and 
+By default, the following info items are activated: **total_cells**,
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**,
+**max_grid_level**, **min_number_density**, **max_number_density**, and
 **distance_to_main_clump**.
 
 Clumps can be visualized using the ``annotate_clumps`` callback.
 
 .. code:: python
 
-   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"),
                            center='c', width=(20,'kpc'))
    prj.annotate_clumps(leaf_clumps)
    prj.save('clumps')

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -91,7 +91,7 @@
 The center of mass would be the same one as returned by the halo
 finder.  The A, B, C are the largest to smallest magnitude of the
 ellipsoid's semi-principal axes. "e0" is the largest semi-principal
-axis vector direction that would have magnitude A but normalized.  
+axis direction vector, normalized (it would otherwise have magnitude A).
 The "tilt" is an angle measured in radians.  It can be best described
 as after the rotation about the z-axis to align e0 to x in the x-y
 plane, and then rotating about the y-axis to align e0 completely to
@@ -128,7 +128,7 @@
 Since this is a first attempt, there are many drawbacks and corners
 cut.  Many things listed here will be amended when I have time.
 
-* The ellipsoid 3D container like the boolean object, do not contain 
+* The ellipsoid 3D container, like the boolean object, does not contain
   particle position and velocity information.
 * This currently assumes periodic boundary conditions, so if an
   ellipsoid center is at the edge, it will return part of the opposite
@@ -136,7 +136,7 @@
   periodicity in the future.
 * This method gives a minimalistic ellipsoid centered around the
   center of mass that contains all the particles, but sometimes people
-  prefer an inertial tensor triaxial ellipsoid described in 
+  prefer an inertial tensor triaxial ellipsoid described in
   `Dubinski, Carlberg 1991
   <http://adsabs.harvard.edu/abs/1991ApJ...378..496D>`_.  I have that
   method composed but it is not fully tested yet.

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,21 +7,21 @@
 ----------------------
 
 In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought 
+merger tree creation, and individual halo analysis) are all brought
 together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is 
-entirely backward incompatible.  
+different from the halo analysis machinery available in yt-2.x and is
+entirely backward incompatible.
 For a direct translation of various halo analysis tasks using yt-2.x
 to yt-3.0 please see :ref:`halo-transition`.
 
-A catalog of halos can be created from any initial dataset given to halo 
+A catalog of halos can be created from any initial dataset given to the halo
 catalog through ``data_ds``. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`. 
-For more details on the relative differences between these halo finders see 
+use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
+For more details on the relative differences between these halo finders see
 :ref:`halo_finding`.
 
-The class which holds all of the halo information is the 
+The class which holds all of the halo information is the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
@@ -32,11 +32,11 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
-A halo catalog may also be created from already run rockstar outputs. 
-This method is not implemented for previously run friends-of-friends or 
-HOP finders. Even though rockstar creates one file per processor, 
-specifying any one file allows the full catalog to be loaded. Here we 
-only specify the file output by the processor with ID 0. Note that the 
+A halo catalog may also be created from the outputs of a previous Rockstar run.
+This method is not implemented for previously run friends-of-friends or
+HOP finders. Even though rockstar creates one file per processor,
+specifying any one file allows the full catalog to be loaded. Here we
+only specify the file output by the processor with ID 0. Note that the
 argument for supplying a rockstar output is ``halos_ds``, not ``data_ds``.
 
 .. code-block:: python
@@ -44,10 +44,10 @@
    halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(halos_ds=halos_ds)
 
-Although supplying only the binary output of the rockstar halo finder 
-is sufficient for creating a halo catalog, it is not possible to find 
-any new information about the identified halos. To associate the halos 
-with the dataset from which they were found, supply arguments to both 
+Although supplying only the binary output of the rockstar halo finder
+is sufficient for creating a halo catalog, it is not possible to find
+any new information about the identified halos. To associate the halos
+with the dataset from which they were found, supply arguments to both
 halos_ds and data_ds.
 
 .. code-block:: python
@@ -56,14 +56,14 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
-A data object can also be supplied via the keyword ``data_source``, 
-associated with either dataset, to control the spatial region in 
+A data object can also be supplied via the keyword ``data_source``,
+associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
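
For example, restricting the analysis to a sphere might look like the
following sketch (the center and radius here are arbitrary):

.. code-block:: python

   # Restrict halo analysis to a sphere around the domain center.
   sp = data_ds.sphere("c", (50.0, "Mpc"))
   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, data_source=sp)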
 
 Analysis Using Halo Catalogs
 ----------------------------
 
-Analysis is done by adding actions to the 
+Analysis is done by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
@@ -73,18 +73,18 @@
 * Callbacks
 * Recipes
 
-A list of all available filters, quantities, and callbacks can be found in 
-:ref:`halo_analysis_ref`.  
-All interaction with this analysis can be performed by importing from 
+A list of all available filters, quantities, and callbacks can be found in
+:ref:`halo_analysis_ref`.
+All interaction with this analysis can be performed by importing from
 halo_analysis.
 
 Filters
 ^^^^^^^
 
-A filter is a function that returns True or False. If the return value 
-is True, any further queued analysis will proceed and the halo in 
-question will be added to the final catalog. If the return value False, 
-further analysis will not be performed and the halo will not be included 
+A filter is a function that returns True or False. If the return value
+is True, any further queued analysis will proceed and the halo in
+question will be added to the final catalog. If the return value is False,
+further analysis will not be performed and the halo will not be included
 in the final catalog.
 
 An example of adding a filter:
@@ -93,11 +93,11 @@
 
    hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
 
-Currently quantity_value is the only available filter, but more can be 
-added by the user by defining a function that accepts a halo object as 
-the first argument and then adding it as an available filter. If you 
-think that your filter may be of use to the general community, you can 
-add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a 
+Currently, ``quantity_value`` is the only available filter, but more can be
+added by the user by defining a function that accepts a halo object as
+the first argument and then adding it as an available filter. If you
+think that your filter may be of use to the general community, you can
+add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a
 pull request.
 
 An example of defining your own filter:
@@ -105,11 +105,11 @@
 .. code-block:: python
 
    def my_filter_function(halo):
-       
+
        # Define condition for filter
        filter_value = True
-       
-       # Return a boolean value 
+
+       # Return a boolean value
        return filter_value
 
    # Add your filter to the filter registry
@@ -121,17 +121,17 @@
 Quantities
 ^^^^^^^^^^
 
-A quantity is a call back that returns a value or values. The return values 
-are stored within the halo object in a dictionary called “quantities.” At 
-the end of the analysis, all of these quantities will be written to disk as 
+A quantity is a callback that returns a value or values. The return values
+are stored within the halo object in a dictionary called “quantities.” At
+the end of the analysis, all of these quantities will be written to disk as
 the final form of the generated halo catalog.
 
-Quantities may be available in the initial fields found in the halo catalog, 
-or calculated from a function after supplying a definition. An example 
-definition of center of mass is shown below. Currently available quantities 
-are center_of_mass and bulk_velocity. Their definitions are available in 
-``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
-your quantity may be of use to the general community, add it to 
+Quantities may be available in the initial fields found in the halo catalog,
+or calculated from a function after supplying a definition. An example
+definition of center of mass is shown below. Currently available quantities
+are center_of_mass and bulk_velocity. Their definitions are available in
+``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that
+your quantity may be of use to the general community, add it to
 ``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
 
 * ``particle_identifier`` -- Halo ID (e.g. 0 to N)
@@ -154,7 +154,7 @@
    def my_quantity_function(halo):
        # Define quantity to return
        quantity = 5
-       
+
        return quantity
 
    # Add your filter to the filter registry
@@ -162,9 +162,9 @@
 
 
    # ... Later on in your script
-   hc.add_quantity("my_quantity") 
+   hc.add_quantity("my_quantity")
 
-This quantity will then be accessible for functions called later via the 
+This quantity will then be accessible for functions called later via the
 *quantities* dictionary that is associated with the halo object.
 
 .. code-block:: python
@@ -179,23 +179,23 @@
 Callbacks
 ^^^^^^^^^
 
-A callback is actually the super class for quantities and filters and 
-is a general purpose function that does something, anything, to a Halo 
-object. This can include hanging new attributes off the Halo object, 
-performing analysis and writing to disk, etc. A callback does not return 
+A callback is actually the super class for quantities and filters and
+is a general purpose function that does something, anything, to a Halo
+object. This can include hanging new attributes off the Halo object,
+performing analysis and writing to disk, etc. A callback does not return
 anything.
 
-An example of using a pre-defined callback where we create a sphere for 
+An example of using a pre-defined callback where we create a sphere for
 each halo with a radius that is twice the saved ``radius``.
 
 .. code-block:: python
 
    hc.add_callback("sphere", factor=2.0)
-    
-Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
-be added by using the syntax shown below. If you think that your 
-callback may be of use to the general community, add it to 
+
+Currently available callbacks are located in
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may
+be added by using the syntax shown below. If you think that your
+callback may be of use to the general community, add it to
 halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
@@ -261,37 +261,37 @@
 Running Analysis
 ----------------
 
-After all callbacks, quantities, and filters have been added, the 
+After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
 
 .. code-block:: python
 
    hc.create()
 
-The save_halos keyword determines whether the actual Halo objects 
-are saved after analysis on them has completed or whether just the 
-contents of their quantities dicts will be retained for creating the 
-final catalog. The looping over halos uses a call to parallel_objects 
-allowing the user to control how many processors work on each halo. 
-The final catalog is written to disk in the output directory given 
-when the 
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+The save_halos keyword determines whether the actual Halo objects
+are saved after analysis on them has completed or whether just the
+contents of their quantities dicts will be retained for creating the
+final catalog. The looping over halos uses a call to parallel_objects
+allowing the user to control how many processors work on each halo.
+The final catalog is written to disk in the output directory given
+when the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 object was created.
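
A minimal sketch of these options in use, assuming ``create`` accepts the
``save_halos`` keyword described above:

.. code-block:: python

   # Keep only the quantities dictionaries (not the full Halo objects)
   # when building the final catalog.
   hc.create(save_halos=False)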
 
-All callbacks, quantities, and filters are stored in an actions list, 
-meaning that they are executed in the same order in which they were added. 
-This enables the use of simple, reusable, single action callbacks that 
-depend on each other. This also prevents unnecessary computation by allowing 
-the user to add filters at multiple stages to skip remaining analysis if it 
+All callbacks, quantities, and filters are stored in an actions list,
+meaning that they are executed in the same order in which they were added.
+This enables the use of simple, reusable, single action callbacks that
+depend on each other. This also prevents unnecessary computation by allowing
+the user to add filters at multiple stages to skip remaining analysis if it
 is not warranted.
 
 Saving and Reloading Halo Catalogs
 ----------------------------------
 
-A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-saved to disk can be reloaded as a yt dataset with the 
-standard call to load. Any side data, such as profiles, can be reloaded 
-with a ``load_profiles`` callback and a call to 
+A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+saved to disk can be reloaded as a yt dataset with the
+standard call to load. Any side data, such as profiles, can be reloaded
+with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
 .. code-block:: python
@@ -306,5 +306,5 @@
 Worked Example of Halo Catalog in Action
 ----------------------------------------
 
-For a full example of how to use these methods together see 
+For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -3,16 +3,16 @@
 Halo Finding
 ============
 
-There are three methods of finding particle haloes in yt. The 
-default method is called HOP, a method described 
-in `Eisenstein and Hut (1998) 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
-friends-of-friends (e.g. `Efstathiou et al. (1985) 
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
-finder is also implemented. Finally Rockstar (`Behroozi et a. 
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
-a 6D-phase space halo finder developed by Peter Behroozi that 
-excels in finding subhalos and substrcture, but does not allow 
+There are three methods of finding particle haloes in yt. The
+default method is called HOP, a method described
+in `Eisenstein and Hut (1998)
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
+friends-of-friends (e.g. `Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
+finder is also implemented. Finally, Rockstar (`Behroozi et al.
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
+a 6D-phase space halo finder developed by Peter Behroozi that
+excels in finding subhalos and substructure, but does not allow
 multiple particle masses.
 
 .. _hop:
@@ -20,30 +20,30 @@
 HOP
 ---
 
-The version of HOP used in yt is an upgraded version of the 
-`publicly available HOP code 
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
-for 64-bit floats and integers has been added, as well as 
-parallel analysis through spatial decomposition. HOP builds 
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
 groups in this fashion (an illustrative sketch follows the list below):
 
-#. Estimates the local density at each particle using a 
+#. Estimates the local density at each particle using a
    smoothing kernel.
 
-#. Builds chains of linked particles by 'hopping' from one 
-   particle to its densest neighbor. A particle which is 
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
    its own densest neighbor is the end of the chain.
 
-#. All chains that share the same densest particle are 
+#. All chains that share the same densest particle are
    grouped together.
 
-#. Groups are included, linked together, or discarded 
+#. Groups are included, linked together, or discarded
    depending on the user-supplied overdensity
    threshold parameter. The default is 160.0.
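
As a purely illustrative sketch (not the yt implementation), the
chain-building and grouping steps above can be written as follows, assuming
``densest_neighbor[i]`` already holds the index of particle ``i``'s densest
neighbor:

.. code-block:: python

   def hop_groups(densest_neighbor):
       """Illustrative only: follow each particle's 'hops' to its densest
       neighbor until a particle is its own densest neighbor; chains that
       end at the same particle form one group."""
       groups = {}
       for i in range(len(densest_neighbor)):
           j = i
           while densest_neighbor[j] != j:
               j = densest_neighbor[j]
           groups.setdefault(j, []).append(i)
       return groups

   # Two chains ending at particles 2 and 4 -> two groups.
   print(hop_groups([1, 2, 2, 4, 4]))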
 
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
+Please see the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details and the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -61,28 +61,28 @@
 Rockstar Halo Finding
 ---------------------
 
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
-groups in six phase-space dimensions and one time dimension, which 
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
 allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt, 
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead 
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
 developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_. 
-In order to run the Rockstar halo finder in yt, make sure you've 
+et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
-At the moment, Rockstar does not support multiple particle masses, 
-instead using a fixed particle mass. This will not affect most dark matter 
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
 simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the 
+mass. In simulations where the highest-resolution particles all have the
 same mass (i.e. zoom-in grid-based simulations), one can set up a particle
 filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example: 
+only on those.  See this cookbook recipe for an example:
 :ref:`cookbook-rockstar-nested-grid`.
 
-To run the Rockstar Halo finding, you must launch python with MPI and 
-parallelization enabled. While Rockstar itself does not require MPI to run, 
-the MPI libraries allow yt to distribute particle information across multiple 
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
 nodes.
 
 .. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
@@ -92,23 +92,23 @@
    For example, here is how Rockstar might be called using 24 cores:
    ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
 
-The script above configures the Halo finder, launches a server process which 
-disseminates run information and coordinates writer-reader processes. 
-Afterwards, it launches reader and writer tasks, filling the available MPI 
-slots, which alternately read particle information and analyze for halo 
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
 content.
 
-The RockstarHaloFinder class has these options that can be supplied to the 
+The RockstarHaloFinder class has these options that can be supplied to the
 halo catalog through the ``finder_kwargs`` argument (a short example follows the list below):
 
-* ``dm_type``, the index of the dark matter particle. Default is 1. 
+* ``dm_type``, the index of the dark matter particle. Default is 1.
 * ``outbase``, This is where the out*list files that Rockstar makes should be
   placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the 
+* ``num_readers``, the number of reader tasks (which are idle most of the
   time). Default is 1.
 * ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1. 
-  If left undefined, the above options are automatically 
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
   configured from the number of available MPI tasks.
 * ``force_res``, the resolution that Rockstar uses for various calculations
   and smoothing lengths. This is in units of Mpc/h.
@@ -130,14 +130,14 @@
   this option can save disk access time if there are no star particles
   (or other non-dark matter particles) in the simulation. Default: ``False``.
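
As a sketch, a few of the options above might be passed through
``finder_kwargs`` like this (the specific values are arbitrary):

.. code-block:: python

   hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
                    finder_kwargs={'num_readers': 1, 'num_writers': 4,
                                   'outbase': 'rockstar_halos'})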
 
-Rockstar dumps halo information in a series of text (halo*list and 
-out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
-We use the halo list classes to recover the information. 
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
 
 Inside the ``outbase`` directory there is a text file named ``datasets.txt``
 that records the connection between ds names and the Rockstar file names.
 
-For more information, see the 
+For more information, see the
 :class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
 :class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
 
@@ -146,9 +146,9 @@
 Parallel HOP and FOF
 --------------------
 
-Both the HOP and FoF halo finders can run in parallel using simple 
-spatial decomposition. In order to run them in parallel it is helpful 
-to understand how it works. Below in the first plot (i) is a simplified 
+Both the HOP and FoF halo finders can run in parallel using simple
+spatial decomposition. In order to run them in parallel it is helpful
+to understand how it works. Below in the first plot (i) is a simplified
 depiction of three haloes labeled 1, 2 and 3:
 
 .. image:: _images/ParallelHaloFinder.png
@@ -156,35 +156,35 @@
 
 Halo 3 is twice reflected around the periodic boundary conditions.
 
-In (ii), the volume has been sub-divided into four equal subregions, 
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in 
-two different subregions, C and D, and that halo 3 is now in three, 
+In (ii), the volume has been sub-divided into four equal subregions,
+A, B, C and D, shown with dotted lines. Notice that halo 2 is now in
+two different subregions, C and D, and that halo 3 is now in three,
 A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split 
-up into multiple haloes, which is incorrect. The solution is to give 
+halo 1 will be identified as a single halo, but haloes 2 and 3 are split
+up into multiple haloes, which is incorrect. The solution is to give
 each subregion padding to oversample into neighboring regions.
 
-In (iii), subregion C has oversampled into the other three regions, 
-with the periodic boundary conditions taken into account, shown by 
+In (iii), subregion C has oversampled into the other three regions,
+with the periodic boundary conditions taken into account, shown by
 dot-dashed lines. The other subregions oversample in a similar way.
 
-The halo finder is then run on each padded subregion independently 
-and simultaneously. By oversampling like this, haloes 2 and 3 will 
-both be enclosed fully in at least one subregion and identified 
+The halo finder is then run on each padded subregion independently
+and simultaneously. By oversampling like this, haloes 2 and 3 will
+both be enclosed fully in at least one subregion and identified
 completely.
 
-Haloes identified with centers of mass inside the padded part of a 
-subregion are thrown out, eliminating the problem of halo duplication. 
+Haloes identified with centers of mass inside the padded part of a
+subregion are thrown out, eliminating the problem of halo duplication.
 The centers for the three haloes are shown with stars. Halo 1 will
 belong to subregion A, 2 to C and 3 to B.
 
-To run with parallel halo finding, you must supply a value for 
-padding in the finder_kwargs argument. The ``padding`` parameter 
-is in simulation units and defaults to 0.02. This parameter is how 
-much padding is added to each of the six sides of a subregion. 
-This value should be 2x-3x larger than the largest expected halo 
-in the simulation. It is unlikely, of course, that the largest 
-object in the simulation will be on a subregion boundary, but there 
+To run with parallel halo finding, you must supply a value for
+padding in the ``finder_kwargs`` argument. The ``padding`` parameter
+is in simulation units and defaults to 0.02. This parameter is how
+much padding is added to each of the six sides of a subregion.
+This value should be 2x-3x larger than the largest expected halo
+in the simulation. It is unlikely, of course, that the largest
+object in the simulation will be on a subregion boundary, but there
 is no way of knowing before the halo finder is run.
 
 .. code-block:: python
@@ -197,10 +197,10 @@
   # --or--
   hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
 
-In general, a little bit of padding goes a long way, and too much 
-just slows down the analysis and doesn't improve the answer (but 
-doesn't change it).  It may be worth your time to run the parallel 
-halo finder at a few paddings to find the right amount, especially 
+In general, a little bit of padding goes a long way, and too much
+just slows down the analysis and doesn't improve the answer (but
+doesn't change it).  It may be worth your time to run the parallel
+halo finder at a few paddings to find the right amount, especially
 if you're analyzing many similar datasets.
 
 .. _rockstar-installation:
@@ -209,15 +209,15 @@
 ---------------------
 
 Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been 
-slightly patched and modified to run as a library inside of yt. By default it 
-is not installed with yt, but installation is very easy.  The 
-:ref:`install-script` used to install yt from source has a line: 
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
 ``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
 rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.  
+it will only install components missing from the existing installation.
 You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of 
+the parent directory of the yt installation directory (e.g. the parent of
 ``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
 
 .. code-block:: bash

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -11,21 +11,21 @@
 General Overview
 ----------------
 
-A halo mass function can be created for the halos identified in a cosmological 
+A halo mass function can be created for the halos identified in a cosmological
 simulation, as well as analytic fits using any arbitrary set of cosmological
 parameters. In order to create a mass function for simulated halos, they must
-first be identified (using HOP, FOF, or Rockstar, see 
+first be identified (using HOP, FOF, or Rockstar, see
 :ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
 at the same redshift and using the same cosmological parameters as were used
 in the simulation. Care should be taken in this regard, as the analytic fit
-requires the specification of cosmological parameters that are not necessarily 
+requires the specification of cosmological parameters that are not necessarily
 stored in the halo or simulation datasets, and must be specified by the user.
-Efforts have been made to set reasonable defaults for these parameters, but 
+Efforts have been made to set reasonable defaults for these parameters, but
 setting them to identically match those used in the simulation will produce a
 much better comparison.
 
-Analytic halo mass functions can also be created without a halo dataset by 
+Analytic halo mass functions can also be created without a halo dataset by
 providing either a simulation dataset or specifying cosmological parameters by
 hand. yt includes 5 analytic fits for the halo mass function which can be
 selected.
@@ -65,8 +65,8 @@
 
 This will create a HaloMassFcn object off of which arrays holding the information
 about the analytic mass function hang. Creating the halo mass function for a set
-of simulated halos requires only the loaded halo dataset to be passed as an 
-argument. This also creates the analytic mass function using all parameters that 
+of simulated halos requires only the loaded halo dataset to be passed as an
+argument. This also creates the analytic mass function using all parameters that
 can be extracted from the halo dataset, at the same redshift, spanning a similar
 range of halo masses.
 
@@ -78,7 +78,7 @@
   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
 
-A simulation dataset can be passed along with additional cosmological parameters 
+A simulation dataset can be passed along with additional cosmological parameters
 to create an analytic mass function.
 
 .. code-block:: python
@@ -87,10 +87,10 @@
   from yt.analysis_modules.halo_mass_function.api import *
 
   my_ds = load("RD0027/RedshiftOutput0027")
-  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96,
                     sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
 
-The analytic mass function can be created for a set of arbitrary cosmological 
+The analytic mass function can be created for a set of arbitrary cosmological
 parameters without any dataset being passed as an argument.
 
 .. code-block:: python
@@ -98,7 +98,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
-  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27,
                     omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
                     log_mass_min=5, log_mass_max=9, fitting_function=5)
 
@@ -110,95 +110,95 @@
   Default : None.
 
 * **halos_ds** (*Halo dataset object*)
-  The halos from a simulation to be used for creation of the 
+  The halos from a simulation to be used for creation of the
   halo mass function in the simulation.
   Default : None.
 
 * **make_analytic** (*bool*)
-  Whether or not to calculate the analytic mass function to go with 
-  the simulated halo mass function.  Automatically set to true if a 
+  Whether or not to calculate the analytic mass function to go with
+  the simulated halo mass function.  Automatically set to true if a
   simulation dataset is provided.
   Default : True.
 
 * **omega_matter0** (*float*)
-  The fraction of the universe made up of matter (dark and baryonic). 
+  The fraction of the universe made up of matter (dark and baryonic).
   Default : 0.2726.
 
 * **omega_lambda0** (*float*)
-  The fraction of the universe made up of dark energy. 
+  The fraction of the universe made up of dark energy.
   Default : 0.7274.
 
 * **omega_baryon0**  (*float*)
-  The fraction of the universe made up of baryonic matter. This is not 
+  The fraction of the universe made up of baryonic matter. This is not
   always stored in the dataset and should be checked by hand.
   Default : 0.0456.
 
 * **hubble0** (*float*)
-  The expansion rate of the universe in units of 100 km/s/Mpc. 
+  The expansion rate of the universe in units of 100 km/s/Mpc.
   Default : 0.704.
 
 * **sigma8** (*float*)
-  The amplitude of the linear power spectrum at z=0 as specified by 
-  the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-  8 Mpc/h. This is not always stored in the dataset and should be 
+  The amplitude of the linear power spectrum at z=0 as specified by
+  the rms amplitude of mass-fluctuations in a top-hat sphere of radius
+  8 Mpc/h. This is not always stored in the dataset and should be
   checked by hand.
   Default : 0.86.
 
 * **primordial_index** (*float*)
-  This is the index of the mass power spectrum before modification by 
-  the transfer function. A value of 1 corresponds to the scale-free 
-  primordial spectrum. This is not always stored in the dataset and 
+  This is the index of the mass power spectrum before modification by
+  the transfer function. A value of 1 corresponds to the scale-free
+  primordial spectrum. This is not always stored in the dataset and
   should be checked by hand.
   Default : 1.0.
 
 * **this_redshift** (*float*)
-  The current redshift. 
+  The current redshift.
   Default : 0.
 
 * **log_mass_min** (*float*)
   The log10 of the mass of the minimum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
   dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 5. Units: M_solar
   Default : None.
 
 * **log_mass_max** (*float*)
   The log10 of the mass of the maximum of the halo mass range. This is
-  set automatically by the range of halo masses if a simulated halo 
+  set automatically by the range of halo masses if a simulated halo
   dataset is provided. If a halo dataset is not provided and no value
   is specified, it will be set to 16. Units: M_solar
   Default : None.
 
 * **num_sigma_bins** (*float*)
-  The number of bins (points) to use for the calculation of the 
-  analytic mass function. 
+  The number of bins (points) to use for the calculation of the
+  analytic mass function.
   Default : 360.
 
 * **fitting_function** (*int*)
-  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+  Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins,
   3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
   Default : 4.
 
 Outputs
 -------
 
-A HaloMassFnc object has several arrays hanging off of it containing the 
+A HaloMassFcn object has several arrays hanging off of it, containing the following:
 
 * **masses_sim**: Halo masses from simulated halos. Units: M_solar
 
-* **n_cumulative_sim**: Number density of halos with mass greater than the 
+* **n_cumulative_sim**: Number density of halos with mass greater than the
   corresponding mass in masses_sim. Units: comoving Mpc^-3
 
-* **masses_analytic**: Masses used for the generation of the analytic mass 
+* **masses_analytic**: Masses used for the generation of the analytic mass
   function. Units: M_solar
 
-* **n_cumulative_analytic**: Number density of halos with mass greater then 
+* **n_cumulative_analytic**: Number density of halos with mass greater than
   the corresponding mass in masses_analytic. Units: comoving Mpc^-3
 
 * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
 
 After the mass function has been created for both simulated halos and the
-corresponding analytic fits, they can be plotted though something along the 
+corresponding analytic fits, they can be plotted through something along the
 lines of
 
 .. code-block:: python
@@ -213,7 +213,7 @@
   plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
   plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the
 halo mass function to a text file (continuing from the example above):
 
 .. code-block:: python
 

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -5,20 +5,20 @@
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure. 
+the new halo catalog infrastructure.
 
 Finding Halos
 -------------
 
-Previously, halos were found using calls to ``HaloFinder``, 
-``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is 
-encouraged that you find the halos upon creation of the halo catalog 
+Previously, halos were found using calls to ``HaloFinder``,
+``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is
+encouraged that you find the halos upon creation of the halo catalog
 by supplying a value to the ``finder_method`` keyword when calling
-``HaloCatalog``. Currently, only halos found using rockstar or a 
-previous instance of a halo catalog are able to be loaded 
+``HaloCatalog``. Currently, only halos found using rockstar or a
+previous instance of a halo catalog are able to be loaded
 using the ``halos_ds`` keyword.
 
-To pass additional arguments to the halo finders 
+To pass additional arguments to the halo finders
 themselves, supply a dictionary to ``finder_kwargs`` where
 each key in the dictionary is a keyword of the halo finder
 and the corresponding value is the value to be passed for
@@ -41,7 +41,7 @@
 how to add these quantities and what quantities are available.
 
 You no longer have to iterate over halos in the ``halo_list``.
-Now a halo dataset can be treated as a regular dataset and 
+Now a halo dataset can be treated as a regular dataset and
 all quantities are available by accessing ``all_data``.
 Specifically, all quantities can be accessed as shown:
 
@@ -77,14 +77,14 @@
 
 The halo profiler available in yt-2.x has been removed, and
 profiling functionality is now completely contained within the
-halo catalog. A complete example of how to profile halos by 
-radius using the new infrastructure is given in 
-:ref:`halo-analysis-example`. 
+halo catalog. A complete example of how to profile halos by
+radius using the new infrastructure is given in
+:ref:`halo-analysis-example`.
 
 Plotting Halos
 --------------
 
-Annotating halo locations onto a slice or projection works in 
+Annotating halo locations onto a slice or projection works in
 the same way as in yt-2.x, but now a halo catalog must be
 passed to the annotate halo call rather than a halo list.
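
A minimal sketch of what that might look like, assuming the halo annotation
callback is ``annotate_halos`` and ``hc`` is the halo catalog built earlier:

.. code-block:: python

   import yt

   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
   prj = yt.ProjectionPlot(ds, 'z', ("gas", "density"))
   prj.annotate_halos(hc)   # pass the HaloCatalog, not a halo list
   prj.save()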
 
@@ -105,6 +105,6 @@
 ------------
 
 Data is now written out in the form of h5 files rather than
-text files. The directory they are written out to is 
+text files. The directory they are written out to is
 controlled by the keyword ``output_dir``. Each quantity
 is a field in the file.

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -5,7 +5,7 @@
 
 These semi-autonomous analysis modules are unique to specific subject matter
 like tracking halos, generating synthetic observations, exporting output to
-external visualization routines, and more.  Because they are somewhat 
+external visualization routines, and more.  Because they are somewhat
 specialized, they exist in their own corners of yt, and they do not get loaded
 by default when you "import yt".  Read up on these advanced tools below.
 

diff -r 0b0c899118f446392e27a4624b997e50b797d707 -r a307a8a744455f53b0fb2339dc551ea9d37d3d8d doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -3,29 +3,29 @@
 Light Cone Generator
 ====================
 
-Light cones are created by stacking multiple datasets together to 
-continuously span a given redshift interval.  To make a projection of a 
-field through a light cone, the width of individual slices is adjusted 
-such that each slice has the same angular size.  
-Each slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  A 
-recipe for creating a simple light cone projection can be found in 
+Light cones are created by stacking multiple datasets together to
+continuously span a given redshift interval.  To make a projection of a
+field through a light cone, the width of individual slices is adjusted
+such that each slice has the same angular size.
+Each slice is randomly shifted and projected along a random axis to
+ensure that the same structures are not sampled multiple times.  A
+recipe for creating a simple light cone projection can be found in
 the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
 
-A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from 
-z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual 
-slices.  The panels shows the contributions from the 9 individual slices with 
+A light cone projection of the thermal Sunyaev-Zeldovich Y parameter from
+z = 0 to 0.4 with a 450x450 arcminute field of view using 9 individual
+slices.  The panels show the contributions from the 9 individual slices with
 the final light cone image shown at the bottom right.
 
 Configuring the Light Cone Generator
 ------------------------------------
 
-The required arguments to instantiate a 
+The required arguments to instantiate a
 :class:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone`
-object are the path to the simulation parameter file, the simulation type, the 
+object are the path to the simulation parameter file, the simulation type, the
 nearest redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
@@ -38,29 +38,29 @@
 
 The additional keyword arguments are:
 
-* ``use_minimum_datasets`` (*bool*):  If True, the minimum number of 
-  datasets is used to connect the initial and final redshift.  If False, 
-  the light cone solution will contain as many entries as possible within 
+* ``use_minimum_datasets`` (*bool*):  If True, the minimum number of
+  datasets is used to connect the initial and final redshift.  If False,
+  the light cone solution will contain as many entries as possible within
   the redshift interval.  Default: True.
 
-* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between 
+* ``deltaz_min`` (*float*): Specifies the minimum Delta-z between
   consecutive datasets in the returned list.  Default: 0.0.
 
-* ``minimum_coherent_box_fraction`` (*float*): Used with 
-  ``use_minimum_datasets`` set to False, this parameter specifies the 
-  fraction of the total box size to be traversed before rerandomizing the 
-  projection axis and center.  This was invented to allow light cones with 
-  thin slices to sample coherent large scale structure, but in practice does 
-  not work so well.  Try setting this parameter to 1 and see what happens.  
+* ``minimum_coherent_box_fraction`` (*float*): Used with
+  ``use_minimum_datasets`` set to False, this parameter specifies the
+  fraction of the total box size to be traversed before rerandomizing the
+  projection axis and center.  This was invented to allow light cones with
+  thin slices to sample coherent large scale structure, but in practice does
+  not work so well.  Try setting this parameter to 1 and see what happens.
   Default: 0.0.
 
-* ``time_data`` (*bool*): Whether or not to include time outputs when 
+* ``time_data`` (*bool*): Whether or not to include time outputs when
   gathering datasets for time series.  Default: True.
 
-* ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
+* ``redshift_data`` (*bool*): Whether or not to include redshift outputs
   when gathering datasets for time series.  Default: True.
 
-* ``set_parameters`` (*dict*): Dictionary of parameters to attach to 
+* ``set_parameters`` (*dict*): Dictionary of parameters to attach to
   ds.parameters.  Default: None.
 
 * ``output_dir`` (*string*): The directory in which images and data files
@@ -72,9 +72,9 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets spanning a redshift 
-interval with a random orientation for each dataset.  A new solution 
-is calculated with the 
+A light cone solution consists of a list of datasets spanning a redshift
+interval with a random orientation for each dataset.  A new solution
+is calculated with the
 :func:`~yt.analysis_modules.cosmological_observation.light_cone.light_cone.LightCone.calculate_light_cone_solution`
 function:
 
@@ -84,17 +84,17 @@
 
 The keyword arguments are:
 
-* ``seed`` (*int*): the seed for the random number generator.  Any light 
-  cone solution can be reproduced by giving the same random seed.  
+* ``seed`` (*int*): the seed for the random number generator.  Any light
+  cone solution can be reproduced by giving the same random seed.
   Default: None.
 
-* ``filename`` (*str*): if given, a text file detailing the solution will be 
+* ``filename`` (*str*): if given, a text file detailing the solution will be
   written out.  Default: None.
 
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution in place, projections with a given field of 
+With the light cone solution in place, projections with a given field of
 view and resolution can be made of any available field:
 
 .. code-block:: python
@@ -103,38 +103,38 @@
   field_of_view = (600.0, "arcmin")
   resolution = (60.0, "arcsec")
  lc.project_light_cone(field_of_view, resolution,
-                        field , weight_field=None, 
-                        save_stack=True, 
+                        field, weight_field=None,
+                        save_stack=True,
                         save_slice_images=True)
 
-The field of view and resolution can be specified either as a tuple of 
-value and unit string or as a unitful ``YTQuantity``.  
+The field of view and resolution can be specified either as a tuple of
+value and unit string or as a unitful ``YTQuantity``.
 Additional keyword arguments:
 
-* ``weight_field`` (*str*): the weight field of the projection.  This has 
+* ``weight_field`` (*str*): the weight field of the projection.  This has
   the same meaning as in standard projections.  Default: None.
 
-* ``photon_field`` (*bool*): if True, the projection data for each slice is 
-  decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+* ``photon_field`` (*bool*): if True, the projection data for each slice is
+  decremented by 4 pi R :superscript:`2`, where R is the luminosity
   distance between the observer and the slice redshift.  Default: False.
 
-* ``save_stack`` (*bool*): if True, the unflatted light cone data including 
+* ``save_stack`` (*bool*): if True, the unflattened light cone data including
   each individual slice is written to an hdf5 file.  Default: True.
 
-* ``save_final_image`` (*bool*): if True, save an image of the final light 
+* ``save_final_image`` (*bool*): if True, save an image of the final light
   cone projection.  Default: True.
 
-* ``save_slice_images`` (*bool*): save images for each individual projection 
+* ``save_slice_images`` (*bool*): save images for each individual projection
   slice.  Default: False.
 
 * ``cmap_name`` (*string*): color map for images.  Default: "algae".
 
-* ``njobs`` (*int*): The number of parallel jobs over which the light cone 
+* ``njobs`` (*int*): The number of parallel jobs over which the light cone
   projection will be split.  Choose -1 for one processor per individual
   projection and 1 to have all processors work together on each projection.
   Default: 1.
 
-* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the 
+* ``dynamic`` (*bool*): If True, use dynamic load balancing to create the
   projections.  Default: False.
 
 .. note:: As of :code:`yt-3.0`, the halo mask and unique light cone functionality no longer exist.  These are still available in :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`, help is needed to port them over.  Contact the yt-users mailing list if you are interested in doing this.
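+
+Putting the pieces above together, a rough end-to-end sketch (the simulation
+path, seed, and projected field below are placeholders) might look like:
+
+.. code-block:: python
+
+  from yt.analysis_modules.cosmological_observation.api import LightCone
+
+  # Parameter file, simulation type, and near/far redshifts of the cone.
+  lc = LightCone("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo", 0.0, 0.1)
+
+  # Fix the seed so the solution is reproducible.
+  lc.calculate_light_cone_solution(seed=123456789)
+
+  lc.project_light_cone((600.0, "arcmin"), (60.0, "arcsec"),
+                        "density", weight_field=None,
+                        save_slice_images=True)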

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.