[yt-svn] commit/yt: 23 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Nov 9 10:36:41 PST 2017


23 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/a281df8a261f/
Changeset:   a281df8a261f
User:        ngoldbaum
Date:        2017-10-18 21:28:10+00:00
Summary:     Handle the case where the weight field has values equal to zero
Affected #:  1 file

diff -r 89cf5553a4699f5583704674b5bb578b9a1766b4 -r a281df8a261fe85c04ddd9a740034b8e9bb20c6e yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -53,6 +53,9 @@
     for n in range(nb):
         bin = bins_x[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin]
         wresult[bin] += wval
         for fi in range(nf):
@@ -87,6 +90,9 @@
         bin_x = bins_x[n]
         bin_y = bins_y[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin_x, bin_y]
         wresult[bin_x,bin_y] += wval
         for fi in range(nf):
@@ -123,6 +129,9 @@
         bin_y = bins_y[n]
         bin_z = bins_z[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin_x, bin_y, bin_z]
         wresult[bin_x,bin_y,bin_z] += wval
         for fi in range(nf):


https://bitbucket.org/yt_analysis/yt/commits/f0a40f79fe5a/
Changeset:   f0a40f79fe5a
User:        ngoldbaum
Date:        2017-10-18 21:39:08+00:00
Summary:     add a test for profiles with weight fields that have zeros
Affected #:  1 file

diff -r a281df8a261fe85c04ddd9a740034b8e9bb20c6e -r f0a40f79fe5aacd455a1ba425cdf408594f3d538 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,6 +1,8 @@
 import yt
 import numpy as np
 
+from yt.data_objects.particle_filters import \
+    add_particle_filter
 from yt.data_objects.profiles import \
     Profile1D, \
     Profile2D, \
@@ -10,7 +12,8 @@
     fake_random_ds, \
     assert_equal, \
     assert_raises, \
-    assert_rel_equal
+    assert_rel_equal, \
+    requires_file
 from yt.utilities.exceptions import \
     YTIllDefinedProfile
 from yt.visualization.profile_plotter import \
@@ -270,3 +273,31 @@
                     'particle_position_z': False},
             weight_field=None, deposition='cic',
             accumulation=True, fractional=True)
+
+@requires_file("IsolatedGalaxy/galaxy0030/galaxy0030")
+def test_profile_zero_weight():
+    def DMparticles(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 1
+        return filter
+
+    def DM_in_cell_mass(field, data):
+        return data['deposit', 'DM_density']*data['index', 'cell_volume']
+
+    add_particle_filter("DM", function=DMparticles,
+                        filtered_type='io', requires=["particle_type"])
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+    ds.add_particle_filter('DM')
+
+    ds.add_field(("gas", "DM_cell_mass"), units="g", function=DM_in_cell_mass,
+                 sampling_type='cell')
+
+    sp = ds.sphere(ds.domain_center, (10, 'kpc'))
+
+    profile = yt.create_profile(sp,
+                                [("gas", "density")],
+                                [("gas", "temperature")],
+                                weight_field=("gas", "DM_cell_mass"))
+
+    assert not np.any(np.isnan(profile['gas', 'temperature']))


https://bitbucket.org/yt_analysis/yt/commits/52dadcaf4324/
Changeset:   52dadcaf4324
User:        ngoldbaum
Date:        2017-10-21 12:42:38+00:00
Summary:     Merge pull request #1590 from ngoldbaum/profile-zero-weight

 Handle the case where the profile weight field has values equal to zero
Affected #:  2 files

diff -r 89cf5553a4699f5583704674b5bb578b9a1766b4 -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,6 +1,8 @@
 import yt
 import numpy as np
 
+from yt.data_objects.particle_filters import \
+    add_particle_filter
 from yt.data_objects.profiles import \
     Profile1D, \
     Profile2D, \
@@ -10,7 +12,8 @@
     fake_random_ds, \
     assert_equal, \
     assert_raises, \
-    assert_rel_equal
+    assert_rel_equal, \
+    requires_file
 from yt.utilities.exceptions import \
     YTIllDefinedProfile
 from yt.visualization.profile_plotter import \
@@ -270,3 +273,31 @@
                     'particle_position_z': False},
             weight_field=None, deposition='cic',
             accumulation=True, fractional=True)
+
+@requires_file("IsolatedGalaxy/galaxy0030/galaxy0030")
+def test_profile_zero_weight():
+    def DMparticles(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 1
+        return filter
+
+    def DM_in_cell_mass(field, data):
+        return data['deposit', 'DM_density']*data['index', 'cell_volume']
+
+    add_particle_filter("DM", function=DMparticles,
+                        filtered_type='io', requires=["particle_type"])
+
+    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+
+    ds.add_particle_filter('DM')
+
+    ds.add_field(("gas", "DM_cell_mass"), units="g", function=DM_in_cell_mass,
+                 sampling_type='cell')
+
+    sp = ds.sphere(ds.domain_center, (10, 'kpc'))
+
+    profile = yt.create_profile(sp,
+                                [("gas", "density")],
+                                [("gas", "temperature")],
+                                weight_field=("gas", "DM_cell_mass"))
+
+    assert not np.any(np.isnan(profile['gas', 'temperature']))

diff -r 89cf5553a4699f5583704674b5bb578b9a1766b4 -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -53,6 +53,9 @@
     for n in range(nb):
         bin = bins_x[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin]
         wresult[bin] += wval
         for fi in range(nf):
@@ -87,6 +90,9 @@
         bin_x = bins_x[n]
         bin_y = bins_y[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin_x, bin_y]
         wresult[bin_x,bin_y] += wval
         for fi in range(nf):
@@ -123,6 +129,9 @@
         bin_y = bins_y[n]
         bin_z = bins_z[n]
         wval = wsource[n]
+        # Skip field value entries where the weight field is zero
+        if wval == 0:
+            continue
         oldwr = wresult[bin_x, bin_y, bin_z]
         wresult[bin_x,bin_y,bin_z] += wval
         for fi in range(nf):


https://bitbucket.org/yt_analysis/yt/commits/0b314fc49d8e/
Changeset:   0b314fc49d8e
User:        Josh Soref
Date:        2017-10-02 02:57:55+00:00
Summary:     Spelling fixes

* execution
* explicitly
* expression
* extracting
* facilitate
* finding
* function
* gamma
* hierarchy
* independent
* inferred
* information
* initial
* initialize
* instances
* instructive
* integer
* interactive
* interpreted
* irrelevant
* iterable
* iteration
* iterator
* lineage
* magnetic
* matplotlib
* maximum
* merely
* necessary
* newton
* object
* obscured
* octree
* omitted
* optional
* orientation
* origin
* overestimate
* overridden
* overwrite
* parameter
* particles
* partition
* performance
* perspective
* plotting
* positions
* prepended
* preserves
* primitives
* processed
* prominent
* providing
* purposes
* quantities
* quantity
* radius
* recipes
* reconstructed
* reddening
* redshift
* relationship
* remember
* reorganize
* retrieve
* sample
* selector
* simulated
* situations
* slightly
* specific
* stretching
* subclasses
* substantially
* successfully
* the
* time
* transfer
* translates
* triangles
* unnecessarily
* variables
* varied
* visualization
* whether
* writes
* absorption
* accommodate
* additional
* additionally
* ancestor
* angstroms
* arbitrary
* associate
* associated
* attachment
* attempting
* attribute
* calculating
* capabilities
* checker
* compare
* connect
* consistent
* consisting
* constant
* containers
* coordinate
* corresponding
* cosmological
* cumulative
* current
* cylindrical
* dataset
* densthresh
* dictionary
* directly
* directory
* discussed
* domain
* endianness
* enzotools
* equality
* equally
* equivalency
* equivalent
* evaluate
Affected #:  114 files

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -758,7 +758,7 @@
    ``is`` or ``is not``.
  * If you are comparing with a numpy boolean array, just refer to the array.
    Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
- * Never comapre with None using ``==`` or ``!=``, use ``is None`` or
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
    ``is not None``.
  * Use ``statement is not True`` instead of ``not statement is True``
  * Only one statement per line, do not use semicolons to put two or more

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 benchmarks/benchmarks/small_enzo.py
--- a/benchmarks/benchmarks/small_enzo.py
+++ b/benchmarks/benchmarks/small_enzo.py
@@ -40,7 +40,7 @@
         dd.quantities.extrema("particle_velocity_magnitude")
         dd.quantities.extrema(["particle_velocity_%s" % ax for ax in 'xyz'])
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 benchmarks/benchmarks/small_flash.py
--- a/benchmarks/benchmarks/small_flash.py
+++ b/benchmarks/benchmarks/small_flash.py
@@ -25,7 +25,7 @@
                             self.ds.domain_width[0] * 0.25)
         dd["velocity_divergence"]
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 benchmarks/benchmarks/small_ramses.py
--- a/benchmarks/benchmarks/small_ramses.py
+++ b/benchmarks/benchmarks/small_ramses.py
@@ -36,7 +36,7 @@
         dd.quantities.extrema("particle_velocity_magnitude")
         dd.quantities.extrema(["particle_velocity_%s" % ax for ax in 'xyz'])
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/activate
--- a/doc/activate
+++ b/doc/activate
@@ -54,7 +54,7 @@
     fi
 }
 
-# unset irrelavent variables
+# unset irrelevant variables
 deactivate nondestructive
 
 VIRTUAL_ENV="__YT_DIR__"

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -4,7 +4,7 @@
 
 alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
 
-# Unset irrelavent variables.
+# Unset irrelevant variables.
 deactivate nondestructive
 
 setenv VIRTUAL_ENV "__YT_DIR__"

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -251,7 +251,7 @@
 \texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{additional params})} \textemdash\ Make a projection. \\
 \texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -205,7 +205,7 @@
             else:
                 field_info_names.append("BoxlibFieldInfo")
     elif frontend == "chombo":
-        # remove low dimensional field info containters for ChomboPIC
+        # remove low dimensional field info containers for ChomboPIC
         field_info_names = [f for f in field_info_names if '1D' not in f
                             and '2D' not in f]
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -50,7 +50,7 @@
 The ``finder_method`` options should be given as "fof", "hop", or
 "rockstar".  Each of these methods has their own set of keyword
 arguments to control functionality.  These can specified in the form
-of a dictinoary using the ``finder_kwargs`` keyword.
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -119,7 +119,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Most people will interact with the new unit system using `YTArray` and `YTQuantity`.  These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`.  `YTArray` is intended to store array data, while `YTQuantitity` is intended to store scalars in a particular unit system.\n",
+    "Most people will interact with the new unit system using `YTArray` and `YTQuantity`.  These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`.  `YTArray` is intended to store array data, while `YTQuantity` is intended to store scalars in a particular unit system.\n",
     "\n",
     "There are two ways to create arrays and quantities. The first is to explicitly create it by calling the class constructor and supplying a unit string:"
    ]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -361,7 +361,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "When working with views, rememeber that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
+    "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
    ]
   },
   {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/Halo_Analysis.ipynb
--- a/doc/source/cookbook/Halo_Analysis.ipynb
+++ b/doc/source/cookbook/Halo_Analysis.ipynb
@@ -76,7 +76,7 @@
    },
    "outputs": [],
    "source": [
-    "# Instantiate a catalog using those two paramter files\n",
+    "# Instantiate a catalog using those two parameter files\n",
     "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
     "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
    ]
@@ -137,7 +137,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
+    "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cumulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
    ]
   },
   {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -42,7 +42,7 @@
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 
-# This operation was substantiall faster.  Now lets modify the low resolution
+# This operation was substantially faster.  Now lets modify the low resolution
 # rendering until we find something we like.
 
 tf = render_source.transfer_function

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/custom_colorbar_tickmarks.ipynb
--- a/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
@@ -46,7 +46,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The plot has a few attributes that point to underlying `matplotlib` plot primites.  For example, the `colorbar` object corresponds to the `cb` attribute of the plot."
+    "The plot has a few attributes that point to underlying `matplotlib` plot primitives.  For example, the `colorbar` object corresponds to the `cb` attribute of the plot."
    ]
   },
   {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -16,7 +16,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "This notebook demonstrates some of the capabilties of yt on some FITS \"position-position-spectrum\" cubes of radio data.\n",
+    "This notebook demonstrates some of the capabilities of yt on some FITS \"position-position-spectrum\" cubes of radio data.\n",
     "\n",
     "Note that it depends on some external dependencies, including `astropy`, `wcsaxes`, and `pyregion`."
    ]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/fits_xray_images.ipynb
--- a/doc/source/cookbook/fits_xray_images.ipynb
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -160,7 +160,7 @@
    },
    "outputs": [],
    "source": [
-    "v, c = ds.find_max(\"flux\") # Find the maxmimum flux and its center\n",
+    "v, c = ds.find_max(\"flux\") # Find the maximum flux and its center\n",
     "my_sphere = ds.sphere(c, (100.,\"code_length\")) # Radius of 150 pixels\n",
     "my_sphere.set_field_parameter(\"exposure_time\", exposure_time)"
    ]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -6,7 +6,7 @@
 data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
 halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
 
-# Instantiate a catalog using those two paramter files
+# Instantiate a catalog using those two parameter files
 hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 # Filter out less massive halos

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
--- a/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
+++ b/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
@@ -38,7 +38,7 @@
     # Since there are only two colorbar axes, we need to make sure we don't try
     # to set the temperature colorbar to cbar_axes[4], which would if we used i
     # to index cbar_axes, yielding a plot without a temperature colorbar.
-    # This unecessarily redraws the Density colorbar three times, but that has
+    # This unnecessarily redraws the Density colorbar three times, but that has
     # no effect on the final plot.
     if field == 'density':
         plot.cax = grid.cbar_axes[0]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -25,7 +25,7 @@
 
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
-# slighly lower quality image.
+# slightly lower quality image.
 image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -27,7 +27,7 @@
 sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
-# start to be obcured
+# start to be obscured
 
 tf.grey_opacity = True
 sc.render()

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/opengl_vr.py
--- a/doc/source/cookbook/opengl_vr.py
+++ b/doc/source/cookbook/opengl_vr.py
@@ -14,7 +14,7 @@
 dd = ds.all_data()
 collection.add_data(dd, "density")
 
-# Initiliaze basic Scene and pass the data
+# Initialize basic Scene and pass the data
 scene = SceneGraph()
 scene.add_collection(collection)
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/tipsy_and_yt.ipynb
--- a/doc/source/cookbook/tipsy_and_yt.ipynb
+++ b/doc/source/cookbook/tipsy_and_yt.ipynb
@@ -29,7 +29,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We will be looking at a fairly low resolution dataset.  In the next cell, the `ds` object has an atribute called `n_ref` that tells the oct-tree how many particles to refine on.  The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8.  Just passing the argument `n_ref=8` to load does this for us."
+    "We will be looking at a fairly low resolution dataset.  In the next cell, the `ds` object has an attribute called `n_ref` that tells the oct-tree how many particles to refine on.  The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8.  Just passing the argument `n_ref=8` to load does this for us."
    ]
   },
   {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/cookbook/yt_gadget_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_analysis.ipynb
@@ -209,7 +209,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating thte total mass in gas and particles contained in the subvolume:"
+    "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating the total mass in gas and particles contained in the subvolume:"
    ]
   },
   {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -20,7 +20,7 @@
 If you run into problems with yt and you're writing to the mailing list
 or contacting developers on IRC, they will likely want to know what version of
 yt you're using.  Oftentimes, you'll want to know both the yt version,
-as well as the last changeset that was comitted to the branch you're using.
+as well as the last changeset that was committed to the branch you're using.
 To reveal this, go to a command line and type:
 
 .. code-block:: bash

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -416,7 +416,7 @@
   you might need ``sudo`` depending on where python is installed. See `This
   StackOverflow discussion
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
-  if you are curious why ``--install-option="--prefix="`` is neccessary on some systems.
+  if you are curious why ``--install-option="--prefix="`` is necessary on some systems.
 
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/quickstart/3)_Simple_Visualization.ipynb
--- a/doc/source/quickstart/3)_Simple_Visualization.ipynb
+++ b/doc/source/quickstart/3)_Simple_Visualization.ipynb
@@ -64,7 +64,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
+    "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be prepended to the filename and can be used to name it based on the width or to supply a location.\n",
     "\n",
     "Now we'll zoom and pan a bit."
    ]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -214,7 +214,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The red gas is now much more prominant in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "The red gas is now much more prominent in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1332,7 +1332,7 @@
 
 will create a plot with the particle mass used to set the colorbar.
 Specifically, :class:`~yt.visualization.particle_plots.ParticlePlot`
-shows the total ``z_field`` for all the partices in each pixel on the
+shows the total ``z_field`` for all the particles in each pixel on the
 colorbar axis; to plot average quantities instead, one can supply a
 ``weight_field`` argument.
 
@@ -1453,7 +1453,7 @@
 Under the hood, the :class:`~yt.data_objects.profiles.ParticleProfile` class works a lot like a
 :class:`~yt.data_objects.profiles.Profile2D` object, except that instead of just binning the
 particle field, you can also use higher-order deposition functions like the cloud-in-cell
-interpolant to spread out the particle quantites over a few cells in the profile. The
+interpolant to spread out the particle quantities over a few cells in the profile. The
 :func:`~yt.data_objects.profiles.create_profile` will automatically detect when all the fields
 you pass in are particle fields, and return a :class:`~yt.data_objects.profiles.ParticleProfile`
 if that is the case. For a complete description of the :class:`~yt.data_objects.profiles.ParticleProfile`
@@ -1591,7 +1591,7 @@
    fn = p.data_source.save_as_dataset()
 
 This function will optionally take a ``filename`` keyword that follows
-the same logic as dicussed above in :ref:`saving_plots`.  The filename
+the same logic as discussed above in :ref:`saving_plots`.  The filename
 to which the dataset was written will be returned.
 
 Once saved, this file can be reloaded completely independently of the

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -87,7 +87,7 @@
 
 We recommend one of the later two methods, especially
 if you plan on re-compiling the cython extensions regularly. Note that none of this is
-neccessary if you installed embree into a location that is in your default path, such
+necessary if you installed embree into a location that is in your default path, such
 as /usr/local.
 
 Examples

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -158,7 +158,7 @@
     return None
 
 
-def create_commits_to_prs_mapping(linege, prs):
+def create_commits_to_prs_mapping(lineage, prs):
     """create a mapping from commits to the pull requests that the commit is
     part of
     """

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -81,7 +81,7 @@
         f_value  : float
            line f-value.
         gamma : float
-           line gamme value.
+           line gamma value.
         atomic_mass : float
            mass of atom in amu.
         """
@@ -471,7 +471,7 @@
                             (thermal_width < self.bin_width).sum(),
                             n_absorbers)
 
-            # provide a progress bar with information about lines processsed
+            # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
                             (line['label'], line['wavelength']), n_absorbers)
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -872,7 +872,7 @@
 
     """
     Check to see if any of the parameters in p are the
-    same as initial paramters and if so, attempt to
+    same as initial parameters and if so, attempt to
     split the region and refit it.
 
     Parameters

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -58,7 +58,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -118,7 +118,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -170,7 +170,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -220,7 +220,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wave = 1215.6700  # Angstromss
+    wave = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -271,7 +271,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -326,7 +326,7 @@
 
     my_label = 'HI Lya'
     field = ('gas', 'H_number_density')
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -387,7 +387,7 @@
 
     my_label = 'HI Lya'
     field = ('gas', 'H_number_density')
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -440,7 +440,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -183,7 +183,7 @@
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
-            # Sort data outputs by proximity to current redsfhit.
+            # Sort data outputs by proximity to current redshift.
             self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
                                                            obj['redshift']))
             # For first data dump, choose closest to desired redshift.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -288,13 +288,13 @@
             field_of_view = self.simulation.quan(field_of_view[0],
                                                  field_of_view[1])
         elif not isinstance(field_of_view, YTArray):
-          raise RuntimeError("field_of_view argument must be either a YTQauntity " +
+          raise RuntimeError("field_of_view argument must be either a YTQuantity " +
                              "or a tuple of type (float, str).")
         if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
             image_resolution = self.simulation.quan(image_resolution[0],
                                                     image_resolution[1])
         elif not isinstance(image_resolution, YTArray):
-          raise RuntimeError("image_resolution argument must be either a YTQauntity " +
+          raise RuntimeError("image_resolution argument must be either a YTQuantity " +
                              "or a tuple of type (float, str).")
         
         # Calculate number of pixels on a side.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -582,7 +582,7 @@
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
                     cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
-                    # Protect against stituations where velocity mag is exactly
+                    # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
                     redshift_dopp = \

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -30,7 +30,7 @@
 # 7. For every halo in ball-query result, execute numpy's intersect1d on
 #    particle IDs
 # 8. Parentage is described by a fraction of particles that pass from one to
-#    the other; we have both descendent fractions and ancestory fractions. 
+#    the other; we have both descendent fractions and ancestor fractions. 
 
 
 import numpy as np

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -402,7 +402,7 @@
         calculate virial quantities.
         Default: ("gas", "overdensity")
     critical_overdensity : float
-        The value of the overdensity at which to evaulate the virial quantities.  
+        The value of the overdensity at which to evaluate the virial quantities.  
         Overdensity is with respect to the critical density.
         Default: 200
     profile_storage : string

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_analysis/halo_recipes.py
--- a/yt/analysis_modules/halo_analysis/halo_recipes.py
+++ b/yt/analysis_modules/halo_analysis/halo_recipes.py
@@ -78,7 +78,7 @@
         calculate virial quantities.
         Default: ("gas", "overdensity")
     critical_overdensity : float
-        The value of the overdensity at which to evaulate the virial quantities.
+        The value of the overdensity at which to evaluate the virial quantities.
         Overdensity is with respect to the critical density.
         Default: 200
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -387,7 +387,7 @@
         Returns
         -------
         radius : float
-            The virial raius in code units of the particles in the halo.  -1
+            The virial radius in code units of the particles in the halo.  -1
             if not virialized.
 
         Examples
@@ -1332,7 +1332,7 @@
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
 
-        This function wirtes out the names of all the HDF5 files that would
+        This function writes out the names of all the HDF5 files that would
         contain halo particle data.  Only the root processor writes out.
 
         Parameters
@@ -1445,7 +1445,7 @@
         used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
-        with duplicated particles for halo finidng to work. This number
+        with duplicated particles for halo finding to work. This number
         must be no smaller than the radius of the largest halo in the box
         in code units. Default = 0.02.
     total_mass : float
@@ -1574,7 +1574,7 @@
         used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
-        with duplicated particles for halo finidng to work. This number
+        with duplicated particles for halo finding to work. This number
         must be no smaller than the radius of the largest halo in the box
         in code units. Default = 0.02.
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -31,7 +31,7 @@
 int kdMedianJst(KD kd, int d, int l, int u);
 void kdUpPass(KD kd, int iCell);
 void initgrouplist(Grouplist *g);
-void hop_main(KD kd, HC *my_comm, float densthres);
+void hop_main(KD kd, HC *my_comm, float densthresh);
 void regroup_main(float dens_outer, HC *my_comm);
 static PyObject *_HOPerror;
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -50,11 +50,11 @@
 void ReSizeSMX(SMX smx, int nSmooth);
  
 void PrepareKD(KD kd);
-void binOutHop(SMX smx, HC *my_comm, float densthres);
+void binOutHop(SMX smx, HC *my_comm, float densthresh);
 void outGroupMerge(SMX smx, HC *my_comm);
 
 /* void main(int argc,char **argv) */
-void hop_main(KD kd, HC *my_comm, float densthres)
+void hop_main(KD kd, HC *my_comm, float densthresh)
 {
   /*	KD kd; */
 	SMX smx;
@@ -152,7 +152,7 @@
 	if (bMerge) free(smx->hash);
  
 	if (bGroup&2) {
-	    binOutHop(smx, my_comm, densthres);
+	    binOutHop(smx, my_comm, densthresh);
 	}
 	if (bGroup) {free(smx->densestingroup); free(smx->nmembers);}
 	smFinish(smx);
@@ -520,7 +520,7 @@
     return;
 }
  
-void binOutHop(SMX smx, HC *my_comm, float densthres)
+void binOutHop(SMX smx, HC *my_comm, float densthresh)
 /* Write Group tag for each particle.  Particles should be ordered. */
 /* Binary file: nActive, nGroups, list of Groups */
 {
@@ -535,7 +535,7 @@
     //s->ID = ivector(1,s->numlist);
     for (j=0;j<smx->kd->nActive;j++) {
       //s->ID[1+j] = smx->kd->p[j].iID; /* S Skory's addition */
-      if (NP_DENS(smx->kd,j) < densthres) s->ntag[j+1] = -1;
+      if (NP_DENS(smx->kd,j) < densthresh) s->ntag[j+1] = -1;
       else s->ntag[j+1] = smx->kd->p[j].iHop;
 
     }

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/hop/hop_regroup.c
--- a/yt/analysis_modules/halo_finding/hop/hop_regroup.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_regroup.c
@@ -22,7 +22,7 @@
  
 /* #define MINDENS (-FLT_MAX/3.0) */
 #define MINDENS (-1.e+30/3.0)
-/* This is the most negative density that can be accomodated.  Note
+/* This is the most negative density that can be accommodated.  Note
 that MINDENS*2.0 is referenced in the code and so must be properly
 represented by the machine.  There's no reason for this to be close to
 the actual minimum of the density. */

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/hop/hop_slice.c
--- a/yt/analysis_modules/halo_finding/hop/hop_slice.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_slice.c
@@ -94,7 +94,7 @@
 }
  
 /* =================================================================== */
-/* The following are public-domain routines from Numerical Repices in C,
+/* The following are public-domain routines from Numerical Recipes in C,
 2nd edition, by Press, Teulkolsky, Vetterling, & Flannery, 1992, Cambridge
 Univ. Press */
  

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/hop/hop_smooth.c
--- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_smooth.c
@@ -102,7 +102,7 @@
 	cell = ROOT;
 	/*
 	 ** First find the "local" Bucket.
-	 ** This could mearly be the closest bucket to ri[3].
+	 ** This could merely be the closest bucket to ri[3].
 	 */
 	while (cell < smx->kd->nSplit) {
 		if (ri[c[cell].iDim] < c[cell].fSplit) cell = LOWER(cell);
@@ -282,7 +282,7 @@
 			/* printf("%"ISYM": %"GSYM" %"GSYM" %"GSYM"\n", pi, x, y, z); */
 			/*
 			 ** First find the "local" Bucket.
-			 ** This could mearly be the closest bucket to ri[3].
+			 ** This could merely be the closest bucket to ri[3].
 			 */
 			cell = ROOT;
 			while (cell < smx->kd->nSplit) {

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -89,7 +89,7 @@
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
-    # definition of their vaiables.
+    # definition of their variables.
     char *FILE_FORMAT
     np.float64_t PARTICLE_MASS
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -74,7 +74,7 @@
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
-    # definition of their vaiables.
+    # definition of their variables.
     char *FILE_FORMAT
     np.float64_t PARTICLE_MASS
 
@@ -244,7 +244,7 @@
 
     def __cinit__(self, ts):
         self.ts = ts
-        self.tsl = ts.__iter__() #timseries generator used by read
+        self.tsl = ts.__iter__() #timeseries generator used by read
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -25,13 +25,13 @@
 
 class HaloMassFcn():
     r"""
-    Initalize a HaloMassFcn object to analyze the distribution of halos as 
+    Initialize a HaloMassFcn object to analyze the distribution of halos as 
     a function of mass.  A mass function can be created for a set of 
     simulated halos, an analytic fit to can be created for a redshift and 
     set of cosmological parameters, or both can be created.
 
     Provided with a halo dataset object, this will make a the mass function 
-    for simulated halos.  Prodiving a simulation dataset will set as many 
+    for simulated halos.  Providing a simulation dataset will set as many 
     of the cosmological parameters as possible for the creation of the 
     analytic mass function.
 
@@ -56,12 +56,12 @@
     will write out the data to disk.
 
     Creating a HaloMassFcn object with no arguments will produce an analytic
-    mass function at redshift = 0 using default cosmolocigal values.
+    mass function at redshift = 0 using default cosmological values.
 
     Parameters
     ----------
     simulation_ds : Simulation dataset object
-        The loaded simulation dataset, used to set cosmological paramters.
+        The loaded simulation dataset, used to set cosmological parameters.
         Default : None.
     halos_ds : Halo dataset object
         The halos from a simulation to be used for creation of the 
@@ -80,7 +80,7 @@
         Default : 0.7274.
     omega_baryon0  : float 
         The fraction of the universe made up of baryonic matter. This is not 
-        always stored in the datset and should be checked by hand.
+        always stored in the dataset and should be checked by hand.
         Default : 0.0456.
     hubble0 : float 
         The expansion rate of the universe in units of 100 km/s/Mpc. 
@@ -88,13 +88,13 @@
     sigma8 : float 
         The amplitude of the linear power spectrum at z=0 as specified by 
         the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-        8 Mpc/h. This is not always stored in the datset and should be 
+        8 Mpc/h. This is not always stored in the dataset and should be 
         checked by hand.
         Default : 0.86.
     primoridal_index : float 
         This is the index of the mass power spectrum before modification by 
         the transfer function. A value of 1 corresponds to the scale-free 
-        primordial spectrum. This is not always stored in the datset and 
+        primordial spectrum. This is not always stored in the dataset and 
         should be checked by hand.
         Default : 1.0.
     this_redshift : float 
@@ -135,7 +135,7 @@
     >>> plt.savefig("mass_function.png")
 
     This creates only the analytic halo mass function for a simulation
-    dataset, with default values for cosmological paramters not stored in 
+    dataset, with default values for cosmological parameters not stored in 
     the dataset.
 
     >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
@@ -240,7 +240,7 @@
         If a halo file has been supplied, make a mass function for the simulated halos.
         """
         if halos_ds is not None:
-            # Used to check if a simulated halo mass funciton exists to write out
+            # Used to check if a simulated halo mass function exists to write out
             self.make_simulated=True
             # Calculate the simulated halo mass function
             self.create_sim_hmf()
@@ -622,7 +622,7 @@
 many times as you want. 
 
    TFmdm_set_cosm() -- User passes all the cosmological parameters as
-   arguments; the routine sets up all of the scalar quantites needed 
+   arguments; the routine sets up all of the scalar quantities needed 
    computation of the fitting formula.  The input parameters are: 
    1) omega_matter -- Density of CDM, baryons, and massive neutrinos,
                       in units of the critical density. 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -206,7 +206,7 @@
                     if cn == 0: continue
                     # The rather verbose form of the few next statements is a
                     # result of code optimization and shouldn't be changed
-                    # without checking for perfomance degradation. See
+                    # without checking for performance degradation. See
                     # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
                     # for details.
                     if self.method == "invert_cdf":

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -1,5 +1,5 @@
 """
-Photon emission and absoprtion models for use with the
+Photon emission and absorption models for use with the
 photon simulator.
 """
 
@@ -195,7 +195,7 @@
         "2.0.2"
     thermal_broad : boolean, optional
         Whether to apply thermal broadening to spectral lines. Only should
-        be used if you are attemping to simulate a high-spectral resolution
+        be used if you are attempting to simulate a high-spectral resolution
         detector.
 
     Examples

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -362,7 +362,7 @@
                                        source.position[2]))
             fhandle.write('\n')
 
-        # write wavelength informaton
+        # write wavelength information
         for wavelength in wavelengths:
             fhandle.write('%f \n' % wavelength)
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -435,7 +435,7 @@
                      star_metallicity_constant is None):
                 mylog.error(
                     """
-                If data_source is not provided, all of these paramters
+                If data_source is not provided, all of these parameters
                 need to be set:
                    star_mass (array, Msun),
                    star_creation_time (array, code units),

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -122,7 +122,7 @@
         both of which are assumed to be in unitary length units.
     frvir (optional) : float
         Ensure that CoM +/- frvir*Rvir is contained within each domain
-    domains_list (optiona): dict of halos
+    domains_list (optional): dict of halos
         Organize halos into a dict of domains. Keys are DLE/DRE tuple
         values are a list of halos
     """
@@ -221,7 +221,7 @@
         grids[g.id] = og
         #how many refinement cells will we have?
         #measure the 'volume' of each mesh, but many
-        #cells do not exist. an overstimate
+        #cells do not exist. an overestimate
         levels_all[g.Level] += g.ActiveDimensions.prod()
         #how many leaves do we have?
         #this overestimates. a child of -1 means no child,
@@ -442,7 +442,7 @@
     width = 0.0
     if nwide is None:
         #expand until borders are included and
-        #we have an equaly-sized, non-zero box
+        #we have an equally-sized, non-zero box
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -52,7 +52,7 @@
         parameter. Default: 10.
     length_range : Float
         A min/max pair for the range of values to search the over
-        the simulational volume. Default: [sqrt(3)dx, 1/2*shortest box edge],
+        the simulated volume. Default: [sqrt(3)dx, 1/2*shortest box edge],
         where dx is the smallest grid cell size.
     vol_ratio : Integer
         How to multiply-assign subvolumes to the parallel

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -81,7 +81,7 @@
     InternalGeographicCoordinateHandler
 
 # We want to support the movie format in the future.
-# When such a thing comes to pass, I'll move all the stuff that is contant up
+# When such a thing comes to pass, I'll move all the stuff that is constant up
 # to here, and then have it instantiate EnzoDatasets as appropriate.
 
 _cached_datasets = weakref.WeakValueDictionary()

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -51,7 +51,7 @@
     test_load = cPickle.load(open(cpklfile.name, "rb"))
 
     assert_equal.description = \
-        "%s: File was pickle-loaded succesfully" % __name__
+        "%s: File was pickle-loaded successfully" % __name__
     assert_equal(test_load is not None, True)
     assert_equal.description = \
         "%s: Length of pickle-loaded connected set object" % __name__

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -690,7 +690,7 @@
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
         over this while domain. It then uses oct_handler fill
-        to reorgnize values from IO read index order to
+        to reorganize values from IO read index order to
         the order they are in in the octhandler.
         """
         oct_handler = self.oct_handler
@@ -742,7 +742,7 @@
     # and since these headers are defined in only a single file it's
     # best to leave them in the static output
     _last_mask = None
-    _last_seletor_id = None
+    _last_selector_id = None
 
     def __init__(self, ds, nvar, oct_handler, domain_id):
         self.nvar = nvar

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -273,7 +273,7 @@
 int artio_grid_write_oct(artio_fileset *handle, float *variables, int *refined);
 
 /*
- * Description:	Read the variables of the root level cell and the index of the Octtree
+ * Description:	Read the variables of the root level cell and the index of the Octree
  *              correlated with this root level cell
  *
  *  handle			The File handle
@@ -406,7 +406,7 @@
 			double* primary_variables, float *secondary_variables);
 
 /*
- * Description:	Read the variables of the root level cell and the index of the Octtree
+ * Description:	Read the variables of the root level cell and the index of the Octree
  *              correlated with this root level cell
  *
  *  handle			The File handle

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -247,7 +247,7 @@
       x = c->aUni[i]/aeq;
 
       c->tPhys[i] = tPhysFac*2*x*x*(2+sqrt(x+1))/(3*pow(1+sqrt(x+1),2.0));
-      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after euality; it is very small for x > 10, I keep ot just for completeness; */
+      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after equality; it is very small for x > 10, I keep ot just for completeness; */
       c->qPlus[i] = c->aUni[i]*cosmology_mu(c,c->aUni[i])*(1 + ((2+6*x)/(x*sqrt(1+x))+3*log(x)-6*log(1+sqrt(1+x)))/(log(64)-9)); /* this is a^2*dDPlus/dt/H0 */
 
       c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -477,7 +477,7 @@
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may have been overriden
+            # We set these to cgs for now, but they may have been overridden
             if getattr(self, unit+'_unit', None) is not None:
                 continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -272,7 +272,7 @@
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("temperature", "K")]:
-            # We set these to cgs for now, but they may have been overriden
+            # We set these to cgs for now, but they may have been overridden
             if getattr(self, unit+'_unit', None) is not None:
                 continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -779,7 +779,7 @@
             # We use a default of two, as Nyx doesn't always output this value
             ref_factors = [2] * (self._max_level + 1)
         # We can't vary refinement factors based on dimension, or whatever else
-        # they are vaied on.  In one curious thing, I found that some Castro 3D
+        # they are varied on.  In one curious thing, I found that some Castro 3D
         # data has only two refinement factors, which I don't know how to
         # understand.
         self.ref_factors = ref_factors

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -668,7 +668,7 @@
         to the index to pre-determine the style of data-output.  However,
         it is not strictly necessary.  Optionally you may specify a
         *parameter_override* dictionary that will override anything in the
-        paarmeter file and a *conversion_override* dictionary that consists
+        parameter file and a *conversion_override* dictionary that consists
         of {fieldname : conversion_to_cgs} that will override the #DataCGS.
         """
         self.fluid_types += ("enzo",)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/enzo_p/data_structures.py
--- a/yt/frontends/enzo_p/data_structures.py
+++ b/yt/frontends/enzo_p/data_structures.py
@@ -267,7 +267,7 @@
         to the index to pre-determine the style of data-output.  However,
         it is not strictly necessary.  Optionally you may specify a
         *parameter_override* dictionary that will override anything in the
-        paarmeter file and a *conversion_override* dictionary that consists
+        parameter file and a *conversion_override* dictionary that consists
         of {fieldname : conversion_to_cgs} that will override the #DataCGS.
         """
         self.fluid_types += ("enzop",)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/exodus_ii/simulation_handling.py
--- a/yt/frontends/exodus_ii/simulation_handling.py
+++ b/yt/frontends/exodus_ii/simulation_handling.py
@@ -21,7 +21,7 @@
     r"""
     Initialize an ExodusII Simulation object.
 
-    Upon creation, the input directoy is searched for valid ExodusIIDatasets.
+    Upon creation, the input directory is searched for valid ExodusIIDatasets.
     The get_time_series can be used to generate a DatasetSeries object.
 
     simulation_directory : str

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -290,7 +290,7 @@
         # First we load all of the parameters
         hns = ["simulation parameters"]
         # note the ordering here is important: runtime parameters should
-        # ovewrite scalars with the same name.
+        # overwrite scalars with the same name.
         for ptype in ['scalars', 'runtime parameters']:
             for vtype in ['integer', 'real', 'logical', 'string']:
                 hns.append("%s %s" % (vtype, ptype))

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -348,7 +348,7 @@
         except struct.error:
             f.close()
             return False, 1
-        # Use value to check endianess
+        # Use value to check endianness
         if rhead == 256:
             endianswap = '<'
         elif rhead == 65536:

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -1,5 +1,5 @@
 """
-Gadget-specfic fields
+Gadget-specific fields
 
 
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -141,10 +141,10 @@
 
         # validate the parent-children relationship in the debug mode
         if self.dataset._debug:
-            self._validate_parent_children_relasionship()
+            self._validate_parent_children_relationship()
 
     # for _debug mode only
-    def _validate_parent_children_relasionship(self):
+    def _validate_parent_children_relationship(self):
         mylog.info('Validating the parent-children relationship ...')
 
         father_list = self._handle["Tree/Father"].value

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1283,9 +1283,9 @@
 
     def _initialize_mesh(self):
         coords = self.stream_handler.fields.pop('coordinates')
-        connec = self.stream_handler.fields.pop('connectivity')
+        connect = self.stream_handler.fields.pop('connectivity')
         self.meshes = [StreamHexahedralMesh(0,
-          self.index_filename, connec, coords, self)]
+          self.index_filename, connect, coords, self)]
 
     def _setup_data_io(self):
         if self.stream_handler.io is not None:
@@ -1683,10 +1683,10 @@
 
     def _initialize_mesh(self):
         coords = ensure_list(self.stream_handler.fields.pop("coordinates"))
-        connec = ensure_list(self.stream_handler.fields.pop("connectivity"))
+        connect = ensure_list(self.stream_handler.fields.pop("connectivity"))
         self.meshes = [StreamUnstructuredMesh(
                        i, self.index_filename, c1, c2, self)
-                       for i, (c1, c2) in enumerate(zip(connec, repeat(coords[0])))]
+                       for i, (c1, c2) in enumerate(zip(connect, repeat(coords[0])))]
         self.mesh_union = MeshUnion("mesh_union", self.meshes)
 
     def _setup_data_io(self):

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -68,7 +68,7 @@
 from yt.data_objects.data_containers import \
     GenerationInProgress
 
-_grid_data_containers = ["abritrary_grid",
+_grid_data_containers = ["arbitrary_grid",
                          "covering_grid",
                          "smoothed_covering_grid"]
 
@@ -271,7 +271,7 @@
         """
 
         if self._data_obj is None:
-            # Some data containers can't be recontructed in the same way
+            # Some data containers can't be reconstructed in the same way
             # since this is now particle-like data.
             data_type = self.parameters.get("data_type")
             container_type = self.parameters.get("container_type")
@@ -308,7 +308,7 @@
 
     def _restore_light_ray_solution(self):
         """
-        Restore all information asssociate with the light ray solution
+        Restore all information associate with the light ray solution
         to its original form.
         """
         key = "light_ray_solution"

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -48,7 +48,7 @@
 
 def iterable(obj):
     """
-    Grabbed from Python Cookbook / matploblib.cbook.  Returns true/false for
+    Grabbed from Python Cookbook / matplotlib.cbook.  Returns true/false for
     *obj* iterable.
     """
     try: len(obj)
@@ -289,7 +289,7 @@
     """
     Placed inside a function, this will insert an IPython interpreter at that
     current location.  This will enabled detailed inspection of the current
-    exeuction environment, as well as (optional) modification of that environment.
+    execution environment, as well as (optional) modification of that environment.
     *num_up* refers to how many frames of the stack get stripped off, and
     defaults to 1 so that this function itself is stripped off.
     """

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/geometry/tests/test_grid_container.py
--- a/yt/geometry/tests/test_grid_container.py
+++ b/yt/geometry/tests/test_grid_container.py
@@ -100,7 +100,7 @@
 
     assert_equal(point_grid_inds, grid_inds)
 
-    # Test wheter find_points works for lists
+    # Test whether find_points works for lists
     point_grids, point_grid_inds = test_ds.index._find_points(randx.tolist(),
                                                               randy.tolist(),
                                                               randz.tolist())

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -847,7 +847,7 @@
     ... def field_checker(dd, field_name):
     ...     return dd[field_name]
 
-    >>> field_cheker(ds.all_data(), 'density', result_basename='density')
+    >>> field_checker(ds.all_data(), 'density', result_basename='density')
 
     """
     def compute_results(func):
@@ -973,7 +973,7 @@
         Array obtained (possibly with attached units)
     desired : array-like
         Array to compare with (possibly with attached units)
-    rtol : float, oprtional
+    rtol : float, optional
         Relative tolerance, defaults to 1e-7
     atol : float or quantity, optional
         Absolute tolerance. If units are attached, they must be consistent

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -658,7 +658,7 @@
 
 def test_iteration():
     """
-    Test that iterating over a YTArray returns a sequence of YTQuantity insances
+    Test that iterating over a YTArray returns a sequence of YTQuantity instances
     """
     a = np.arange(3)
     b = YTArray(np.arange(3), 'cm')

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -122,7 +122,7 @@
             invert_symbols[value].append(key)
 
     # if there are any units with identical latex representations, substitute
-    # units to avoid  uncanceled terms in the final latex expresion.
+    # units to avoid  uncanceled terms in the final latex expression.
     for val in invert_symbols:
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -18,7 +18,7 @@
 
 class UnitSystemConstants(object):
     """
-    A class to faciliate conversions of physical constants into a given unit
+    A class to facilitate conversions of physical constants into a given unit
     system specified by *name*.
     """
     def __init__(self, name):
@@ -52,7 +52,7 @@
         The base temperature unit of this unit system. Defaults to "K".
     angle_unit : string, optional
         The base angle unit of this unit system. Defaults to "rad".
-    curent_mks_unit : string, optional
+    current_mks_unit : string, optional
         The base current unit of this unit system. Only used in MKS 
         or MKS-based unit systems.
     registry : :class:`yt.units.unit_registry.UnitRegistry` object

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1627,7 +1627,7 @@
     return YTArray(norm, data.units)
 
 def udot(op1, op2):
-    """Matrix or vector dot product that preservs units
+    """Matrix or vector dot product that preserves units
 
     This is a wrapper around np.dot that preserves units.
     """

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -366,7 +366,7 @@
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
-        center cell (i,j,k) is ommitted.
+        center cell (i,j,k) is omitted.
 
         """
         ci = np.array(ci)
@@ -422,7 +422,7 @@
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
-        center cell (i,j,k) is ommitted.
+        center cell (i,j,k) is omitted.
 
         """
         position = np.array(position)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -779,7 +779,7 @@
                   help="Use syntax highlighter for the file in language"),
              dict(short="-L", longname="--languages", action="store_true",
                   default = False, dest="languages",
-                  help="Retrive a list of supported languages"),
+                  help="Retrieve a list of supported languages"),
              dict(short="-e", longname="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -64,7 +64,7 @@
 
 
 def factorize_number(pieces):
-    """ Return array consiting of prime, its power and number of different
+    """ Return array consisting of prime, its power and number of different
         decompositions in three dimensions for this prime
     """
     factors = [factor for factor in decompose_to_primes(pieces)]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -127,7 +127,7 @@
                       (np.prod(grid['dimensions']), grid['ncells']))
                 raise TypeError
 
-            # Append all hierachy info before reading this grid's data
+            # Append all hierarchy info before reading this grid's data
             grid_dims[i]=grid['dimensions']
             grid_left_edges[i]=grid['left_edge']
             grid_dds[i]=grid['dds']

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/grid_data_format/docs/gdf_specification.txt
--- a/yt/utilities/grid_data_format/docs/gdf_specification.txt
+++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt
@@ -28,7 +28,7 @@
    this format.)
 #. All fluid fields in this version of the format are assumed to have the
    dimensionality of the grid they reside in plus any ghost zones, plus any
-   additionaly dimensionality required by the staggering property.
+   additionally dimensionality required by the staggering property.
 #. Particles may have dataspaces affiliated with them.  (See Enzo's
    OutputParticleTypeGrouping for more information.)  This enables a light
    wrapper around data formats with interspersed particle types.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -13,7 +13,7 @@
     Parameters
     ----------
     candidates : Iterable
-        An interable object that is a collection of classes to find the lowest
+        An iterable object that is a collection of classes to find the lowest
         subclass of.
 
     Returns

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -90,7 +90,7 @@
     s : 1d float ndarray
         ray parametric distance on range [0,len(ray)]
     rztheta : 2d float ndarray
-        ray grid cell intersections in cylidrical coordinates
+        ray grid cell intersections in cylindrical coordinates
     inds : 1d int ndarray
         indexes into the grid cells which the ray crosses in order.
 
@@ -145,7 +145,7 @@
     # compute first cut of indexes and thetas, which 
     # have been filtered by those values for which intersection
     # times are impossible (see above masks). Note that this is
-    # still independnent of z.
+    # still independent of z.
     inds = np.unique(np.concatenate([np.argwhere(tmmleft).flat, 
                                      np.argwhere(tpmleft).flat, 
                                      np.argwhere(tmmright).flat, 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -69,7 +69,7 @@
 
     Currently, 2nd-order meshes are only supported for 20-node hexahedral elements.
     There, the primitive type is a bi-quadratic patch instead of a triangle, and
-    each intersection involves computing a Netwon-Raphson solve.
+    each intersection involves computing a Newton-Raphson solve.
 
     See yt/utilities/lib/primitives.pyx for the definitions of both of these primitive
     types.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -308,7 +308,7 @@
                     np.ndarray[np.uint8_t, ndim=4] art_child_masks,
                     np.ndarray[np.uint8_t, ndim=3] child_mask):
 
-    #loop over file_locations, for each row exracting the index & LE
+    #loop over file_locations, for each row extracting the index & LE
     #of the oct we will pull pull from art_child_masks
     #then use the art_child_masks info to fill in child_mask
     cdef int i,ioct,x,y,z

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -794,7 +794,7 @@
     vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
     field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
 
-    # fill the image bounds and pixel size informaton here
+    # fill the image bounds and pixel size information here
     for i in range(ndim):
         pLE[i] = extents[i][0]
         pRE[i] = extents[i][1]

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -267,7 +267,7 @@
     cdef cython.floating fv = dot(N2, S) + d2
     cdef cython.floating err = fmax(fabs(fu), fabs(fv))
 
-    # begin Newton interation
+    # begin Newton iteration
     cdef cython.floating tol = 1.0e-5
     cdef int iterations = 0
     cdef int max_iter = 10
@@ -483,7 +483,7 @@
     cdef cython.floating fv = dot(N2, S) + d2
     cdef cython.floating err = fmax(fabs(fu), fabs(fv))
 
-    # begin Newton interation
+    # begin Newton iteration
     cdef cython.floating tol = 1.0e-5
     cdef int iterations = 0
     cdef int max_iter = 10

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/lodgeit.py
--- a/yt/utilities/lodgeit.py
+++ b/yt/utilities/lodgeit.py
@@ -5,7 +5,7 @@
     ~~~~~~~~
 
     A script that pastes stuff into the enzotools pastebin on
-    paste.enztools.org.
+    paste.enzotools.org.
 
     Modified (very, very slightly) from the original script by the authors
     below.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -262,7 +262,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -376,7 +376,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -431,7 +431,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -477,7 +477,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -531,7 +531,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -809,7 +809,7 @@
     -----
     The usage of 4D homogeneous coordinates is for OpenGL and GPU
     hardware that automatically performs the divide by w operation.
-    See the following for more details about the OpenGL perpective matrices.
+    See the following for more details about the OpenGL perspective matrices.
 
     http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/
     http://www.songho.ca/opengl/gl_projectionmatrix.html
@@ -880,7 +880,7 @@
     -----
     The usage of 4D homogeneous coordinates is for OpenGL and GPU
     hardware that automatically performs the divide by w operation.
-    See the following for more details about the OpenGL perpective matrices.
+    See the following for more details about the OpenGL perspective matrices.
 
     http://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/orthographic-projection-matrix
     http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -203,7 +203,7 @@
 class ParallelObjectIterator(ObjectIterator):
     """
     This takes an object, *pobj*, that implements ParallelAnalysisInterface,
-    and then does its thing, calling initliaze and finalize on the object.
+    and then does its thing, calling initialize and finalize on the object.
     """
     def __init__(self, pobj, just_list = False, attr='_grids',
                  round_robin=False):
@@ -1059,7 +1059,7 @@
         tmp_send = send.view(self.__tocast)
         recv = np.empty(total_size, dtype=send.dtype)
         if isinstance(send, YTArray):
-            # We assume send.units is consitent with the units
+            # We assume send.units is consistent with the units
             # on the receiving end.
             if isinstance(send, ImageArray):
                 recv = ImageArray(recv, input_units=send.units)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/poster/encode.py
--- a/yt/utilities/poster/encode.py
+++ b/yt/utilities/poster/encode.py
@@ -1,6 +1,6 @@
 """multipart/form-data encoding module
 
-This module provides functions that faciliate encoding name/value pairs
+This module provides functions that facilitate encoding name/value pairs
 as multipart/form-data suitable for a HTTP POST or PUT request.
 
 multipart/form-data is the standard way to upload files over HTTP"""
@@ -385,7 +385,7 @@
 
     Returns a tuple of `datagen`, `headers`, where `datagen` is a
     generator that will yield blocks of data that make up the encoded
-    parameters, and `headers` is a dictionary with the assoicated
+    parameters, and `headers` is a dictionary with the associated
     Content-Type and Content-Length headers.
 
     Examples:

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/poster/streaminghttp.py
--- a/yt/utilities/poster/streaminghttp.py
+++ b/yt/utilities/poster/streaminghttp.py
@@ -10,7 +10,7 @@
 
 **N.B.** You must specify a Content-Length header if using an iterable object
 since there is no way to determine in advance the total size that will be
-yielded, and there is no way to reset an interator.
+yielded, and there is no way to reset an iterator.
 
 Example usage:
 
@@ -159,7 +159,7 @@
 
     def http_request(self, req):
         """Handle a HTTP request.  Make sure that Content-Length is specified
-        if we're using an interable value"""
+        if we're using an iterable value"""
         # Make sure that if we're using an iterable object as the request
         # body, that we've also specified Content-Length
         if request_has_data(req):

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -51,7 +51,7 @@
     assert_array_almost_equal(calc_theta, real_theta)
     assert_array_almost_equal(calc_phi, real_phi)
 
-def test_cylindrical_coordiante_conversion():
+def test_cylindrical_coordinate_conversion():
     normal = [0, 0, 1]
     real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
                    0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/utilities/tests/test_periodicity.py
--- a/yt/utilities/tests/test_periodicity.py
+++ b/yt/utilities/tests/test_periodicity.py
@@ -31,7 +31,7 @@
     dist = euclidean_dist(a,b)
     assert_almost_equal(dist, 1.3856406460551021)
 
-    # Now test the more complicated cases where we're calculaing radii based 
+    # Now test the more complicated cases where we're calculating radii based 
     # on data objects
     ds = fake_random_ds(64)
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -422,7 +422,7 @@
     Notes
     -----
     This is a simple implementation for a common use case.  Viewing the source
-    can be instructure, and is encouraged to see how to generate more
+    can be instructive, and is encouraged to see how to generate more
     complicated or more specific sets of multiplots for your own purposes.
     """
     import matplotlib.figure

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -301,7 +301,7 @@
 
         Parameters
         ----------
-        plot : `yt.visalization.plot_window.PlotWindow`
+        plot : `yt.visualization.plot_window.PlotWindow`
             yt plot on which the axes are based.
         units : string
             Unit description that overrides yt's unit description.  Only
@@ -490,7 +490,7 @@
 
         Parameters
         ----------
-        plot : `yt.visalization.plot_window.PlotWindow`
+        plot : `yt.visualization.plot_window.PlotWindow`
             yt plot that provides the image
         pos : tuple of floats
             Position of the origin of the image in centimeters.
@@ -627,7 +627,7 @@
 
         # Scale the colorbar
         shift = (0.5*(1.0-shrink[0])*size[0], 0.5*(1.0-shrink[1])*size[1])
-        # To facilitate strething rather than shrinking
+        # To facilitate stretching rather than shrinking
         # If stretched in both directions (makes no sense?) then y dominates. 
         if(shrink[0] > 1.0):
             shift = (0.05*self.figsize[0], 0.5*(1.0-shrink[1])*size[1])
@@ -1314,12 +1314,12 @@
 def single_plot(plot, field=None, figsize=(12,12), cb_orient="right", 
                 bare_axes=False, savefig=None, colorbar=True, 
                 file_format='eps', **kwargs):
-    r"""Wrapper for DualEPS routines to create a figure directy from a yt
+    r"""Wrapper for DualEPS routines to create a figure directly from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
     Parameters
     ----------
-    plot : `yt.visalization.plot_window.PlotWindow`
+    plot : `yt.visualization.plot_window.PlotWindow`
         yt plot that provides the image and metadata
     figsize : tuple of floats
         Size of the figure in centimeters.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -204,7 +204,7 @@
 
     def convert_distance_x(self, distance):
         r"""This function converts code-space distance into pixel-space
-        distance in the x-coordiante.
+        distance in the x-coordinate.
 
         Parameters
         ----------
@@ -222,7 +222,7 @@
 
     def convert_distance_y(self, distance):
         r"""This function converts code-space distance into pixel-space
-        distance in the y-coordiante.
+        distance in the y-coordinate.
 
         Parameters
         ----------
@@ -252,7 +252,7 @@
         equivalency : string, optional
            If set, the equivalency to use to convert the current units to
            the new requested unit. If None, the unit conversion will be done
-           without an equivelancy
+           without an equivalency
 
         equivalency_kwargs : string, optional
            Keyword arguments to be passed to the equivalency. Only used if
@@ -268,7 +268,7 @@
                 unit, equivalency, **equivalency_kwargs)
             # equiv_array isn't necessarily an ImageArray. This is an issue
             # inherent to the way the unit system handles YTArray
-            # sublcasses and I don't see how to modify the unit system to
+            # subclasses and I don't see how to modify the unit system to
             # fix this. Instead, we paper over this issue and hard code
             # that equiv_array is an ImageArray
             self[field] = ImageArray(

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/particle_plots.py
--- a/yt/visualization/particle_plots.py
+++ b/yt/visualization/particle_plots.py
@@ -152,7 +152,7 @@
          space, plot 'window' space or 'native' simulation coordinate system
          is given. For example, both 'upper-right-domain' and ['upper',
          'right', 'domain'] both place the origin in the upper right hand
-         corner of domain space. If x or y are not given, a value is inffered.
+         corner of domain space. If x or y are not given, a value is inferred.
          For instance, 'left-domain' corresponds to the lower-left hand corner
          of the simulation domain, 'center-domain' corresponds to the center
          of the simulation domain, or 'center-window' for the center of the
@@ -445,7 +445,7 @@
          space, plot 'window' space or 'native' simulation coordinate system
          is given. For example, both 'upper-right-domain' and ['upper',
          'right', 'domain'] both place the origin in the upper right hand
-         corner of domain space. If x or y are not given, a value is inffered.
+         corner of domain space. If x or y are not given, a value is inferred.
          For instance, 'left-domain' corresponds to the lower-left hand corner
          of the simulation domain, 'center-domain' corresponds to the center
          of the simulation domain, or 'center-window' for the center of the

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -302,7 +302,7 @@
         return self
 
     def _setup_plots(self):
-        # Left blank to be overriden in subclasses
+        # Left blank to be overridden in subclasses
         pass
 
     def _initialize_dataset(self, ts):

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -632,7 +632,7 @@
             if self.edgecolors is not None:
                 edgecolors = colorConverter.to_rgba(
                     self.edgecolors, alpha=self.alpha)
-            else:  # use colormap if not explicity overridden by edgecolors
+            else:  # use colormap if not explicitly overridden by edgecolors
                 if self.cmap is not None:
                     color_bounds = [0,plot.data.ds.index.max_level]
                     edgecolors = apply_colormap(
@@ -1698,7 +1698,7 @@
     Intended for representing a slice of a triangular faceted
     geometry in a slice plot.
 
-    Uses a set of *triangle_vertices* to find all trangles the plane of a
+    Uses a set of *triangle_vertices* to find all triangles the plane of a
     SlicePlot intersects with. The lines between the intersection points
     of the triangles are then added to the plot to create an outline
     of the geometry represented by the triangles.
@@ -2271,7 +2271,7 @@
     """
     Add the line integral convolution to the plot for vector fields
     visualization. Two component of vector fields needed to be provided
-    (i.e., velocity_x and velocity_y, magentic_field_x and magnetic_field_y).
+    (i.e., velocity_x and velocity_y, magnetic_field_x and magnetic_field_y).
 
     Parameters
     ----------

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -141,7 +141,7 @@
 
 class PlotWindow(ImagePlotContainer):
     r"""
-    A ploting mechanism based around the concept of a window into a
+    A plotting mechanism based around the concept of a window into a
     data source. It can have arbitrary fields, each of which will be
     centered on the same viewpoint, but will have individual zlimits.
 
@@ -376,7 +376,7 @@
         equivalency : string, optional
            If set, the equivalency to use to convert the current units to
            the new requested unit. If None, the unit conversion will be done
-           without an equivelancy
+           without an equivalency
 
         equivalency_kwargs : string, optional
            Keyword arguments to be passed to the equivalency. Only used if
@@ -733,7 +733,7 @@
         y_in_bounds = yc >= yllim and yc <= yrlim
 
         if not x_in_bounds and not y_in_bounds:
-            msg = ('orgin inputs not in bounds of specified coordinate sytem' +
+            msg = ('origin inputs not in bounds of specified coordinate sytem' +
                    'domain.')
             msg = msg.format(self.origin)
             raise RuntimeError(msg)
@@ -1800,7 +1800,7 @@
         This specifies the normal vector to the slice.  If given as an integer
         or a coordinate string (0=x, 1=y, 2=z), this function will return an
         :class:`AxisAlignedSlicePlot` object.  If given as a sequence of floats,
-        this is interpretted as an off-axis vector and an
+        this is interpreted as an off-axis vector and an
         :class:`OffAxisSlicePlot` object is returned.
     fields : string
          The name of the field(s) to be plotted.

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -30,7 +30,7 @@
 
     def base(self,base):
         """
-        set the base of the log scaling (major tick every base**i, i interger)
+        set the base of the log scaling (major tick every base**i, i integer)
         """
         self._base=base+0.0
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -44,7 +44,7 @@
         niter : integer, optional
             Maximum number of iterations to find solution. Default: 50000
         init_temp : float, optional
-            Intital temperature for simulated annealing when finding a
+            Initial temperature for simulated annealing when finding a
             solution.  Lower initial temperatures result in an initial solution
             in first several iterations that changes more rapidly. Default: 10.0
         alpha : float, optional
@@ -109,7 +109,7 @@
         niter : integer, optional
             Maximum number of iterations to find solution. Default: 50000
         init_temp : float, optional
-            Intital temperature for simulated annealing when finding a
+            Initial temperature for simulated annealing when finding a
             solution.  Lower initial temperatures result in an initial solution
             in first several iterations that changes more rapidly. Default: 10.0
         alpha : float, optional
@@ -138,7 +138,7 @@
         
     def rand_seq(self):
         r"""
-        Generates values in random order, equivlanet to using shuffle
+        Generates values in random order, equivalent to using shuffle
         in random without generation all values at once.
         """
         values = range(self.nframes)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/glfw_inputhook.py
--- a/yt/visualization/volume_rendering/glfw_inputhook.py
+++ b/yt/visualization/volume_rendering/glfw_inputhook.py
@@ -1,6 +1,6 @@
 # encoding: utf-8
 """
-Enable pyglet to be used interacive by setting PyOS_InputHook.
+Enable pyglet to be used interactive by setting PyOS_InputHook.
 
 Authors
 -------

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -183,7 +183,7 @@
 
     This class implements a basic "Trackball" or "Arcball" camera control system
     that allows for unconstrained 3D rotations without suffering from Gimbal lock.
-    Following Ken Shoemake's orginal C implementation (Graphics Gems IV, III.1)
+    Following Ken Shoemake's original C implementation (Graphics Gems IV, III.1)
     we project mouse movements onto the unit sphere and use quaternions to
     represent the corresponding rotation.
 
@@ -346,7 +346,7 @@
         self.set_shader("max_intensity.f")
         self.data_source = None
 
-        self.blocks = {} # A collection of PartionedGrid objects
+        self.blocks = {} # A collection of PartitionedGrid objects
         self.block_order = []
 
         self.gl_texture_names = []
@@ -785,7 +785,7 @@
 
         # --- end texture init
 
-        # Set "fb_texture" as our colour attachement #0
+        # Set "fb_texture" as our colour attachment #0
         GL.glFramebufferTexture2D(
             GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D,
             self.fb_texture,

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -291,7 +291,7 @@
     """A lens that includes two sources for perspective rays, for 3D viewing
 
     The degree of differences between the left and right images is controlled by 
-    the disparity (the maximum distance between cooresponding points in the left
+    the disparity (the maximum distance between corresponding points in the left
     and right images). By default, the disparity is set to be 3 pixels.
     """
 

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -1307,7 +1307,7 @@
             else:
                 # The corner is on the backwards, then put it outside of the
                 # image It can not be simply removed because it may connect to
-                # other corner within the image, which produces visible domian
+                # other corner within the image, which produces visible domain
                 # boundary line
                 sight_length = np.sqrt(self.width[0]**2+self.width[1]**2) / \
                                np.sqrt(1 - sight_angle_cos**2)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -332,7 +332,7 @@
             out = np.clip(out[:, :, :3] / max_val, 0.0, 1.0) * 255
             out = np.concatenate(
                 [out.astype('uint8'), alpha[..., None]], axis=-1)
-            # not sure why we need rot90, but this makes the orentation
+            # not sure why we need rot90, but this makes the orientation
             # match the png writer
             ax.imshow(np.rot90(out), origin='lower')
             canvas.print_figure(fname, dpi=100)

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -242,7 +242,7 @@
 
 class PassthroughFragmentShader(FragmentShader):
     '''A first pass fragment shader that performs no operation. Used for debug
-    puproses. It's distinct from NoOpFragmentShader, because of the number of
+    purposes. It's distinct from NoOpFragmentShader, because of the number of
     uniforms'''
     _source = "passthrough.fragmentshader"
     _shader_name = "passthrough.f"
@@ -261,7 +261,7 @@
     _shader_name = "transfer_function.f"
 
 class DefaultVertexShader(VertexShader):
-    '''A first pass vertex shader that tranlates the location of vertices from
+    '''A first pass vertex shader that translates the location of vertices from
     the world coordinates to the viewing plane coordinates'''
     _source = "default.vertexshader"
     _shader_name = "default.v"

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -121,7 +121,7 @@
         """
         if self.bounds is None:
             mylog.info('Calculating data bounds. This may take a while.' +
-                       '  Set the TranferFunctionHelper.bounds to avoid this.')
+                       '  Set the TransferFunctionHelper.bounds to avoid this.')
             self.set_bounds()
 
         if self.log:
@@ -138,7 +138,7 @@
         """Setup a default colormap
 
         Creates a ColorTransferFunction including 10 gaussian layers whose
-        colors smaple the 'spectral' colormap. Also attempts to scale the
+        colors sample the 'spectral' colormap. Also attempts to scale the
         transfer function to produce a natural contrast ratio.
 
         """

diff -r f292c74a3a4f6b535647dd138465dc2561b68850 -r 0b314fc49d8ec31fcf0495cff2a5656ff2b76c93 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -873,7 +873,7 @@
     defines the somewhat arbitrary normalization to the scattering
     approximation: because everything is done largely unit-free, and is
     really not terribly accurate anyway, feel free to adjust this to change
-    the relative amount of reddenning.  Maybe in some future version this
+    the relative amount of reddening.  Maybe in some future version this
     will be unitful.
     """
     def __init__(self, T_bounds, rho_bounds, nbins=256,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/5b1e0e11e01b/
Changeset:   5b1e0e11e01b
User:        ngoldbaum
Date:        2017-10-23 02:16:47+00:00
Summary:     Merge pull request #1575 from jsoref/spelling

Spelling
Affected #:  114 files

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -758,7 +758,7 @@
    ``is`` or ``is not``.
  * If you are comparing with a numpy boolean array, just refer to the array.
    Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
- * Never comapre with None using ``==`` or ``!=``, use ``is None`` or
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
    ``is not None``.
  * Use ``statement is not True`` instead of ``not statement is True``
  * Only one statement per line, do not use semicolons to put two or more

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 benchmarks/benchmarks/small_enzo.py
--- a/benchmarks/benchmarks/small_enzo.py
+++ b/benchmarks/benchmarks/small_enzo.py
@@ -40,7 +40,7 @@
         dd.quantities.extrema("particle_velocity_magnitude")
         dd.quantities.extrema(["particle_velocity_%s" % ax for ax in 'xyz'])
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 benchmarks/benchmarks/small_flash.py
--- a/benchmarks/benchmarks/small_flash.py
+++ b/benchmarks/benchmarks/small_flash.py
@@ -25,7 +25,7 @@
                             self.ds.domain_width[0] * 0.25)
         dd["velocity_divergence"]
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 benchmarks/benchmarks/small_ramses.py
--- a/benchmarks/benchmarks/small_ramses.py
+++ b/benchmarks/benchmarks/small_ramses.py
@@ -36,7 +36,7 @@
         dd.quantities.extrema("particle_velocity_magnitude")
         dd.quantities.extrema(["particle_velocity_%s" % ax for ax in 'xyz'])
 
-    def time_gas_quantites(self):
+    def time_gas_quantities(self):
         dd = self.ds.all_data()
         dd.quantities.extrema("density")
         dd.quantities.extrema(["velocity_x", "velocity_y", "velocity_z"])

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/activate
--- a/doc/activate
+++ b/doc/activate
@@ -54,7 +54,7 @@
     fi
 }
 
-# unset irrelavent variables
+# unset irrelevant variables
 deactivate nondestructive
 
 VIRTUAL_ENV="__YT_DIR__"

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -4,7 +4,7 @@
 
 alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; test $?_OLD_VIRTUAL_YT_DEST != 0 && setenv YT_DEST "$_OLD_VIRTUAL_YT_DEST" && unset _OLD_VIRTUAL_YT_DEST; test $?_OLD_VIRTUAL_PYTHONPATH != 0 && setenv PYTHONPATH "$_OLD_VIRTUAL_PYTHONPATH" && unset _OLD_VIRTUAL_PYTHONPATH; test $?_OLD_VIRTUAL_LD_LIBRARY_PATH != 0 && setenv LD_LIBRARY_PATH "$_OLD_VIRTUAL_LD_LIBRARY_PATH" && unset _OLD_VIRTUAL_LD_LIBRARY_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
 
-# Unset irrelavent variables.
+# Unset irrelevant variables.
 deactivate nondestructive
 
 setenv VIRTUAL_ENV "__YT_DIR__"

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -251,7 +251,7 @@
 \texttt{slc.save(\textit{file\_prefix})} \textemdash\ Save the slice to a png with name prefix \textit{file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.ProjectionPlot(ds, \textit{axis}, \textit{field}, \textit{additional params})} \textemdash\ Make a projection. \\
 \texttt{prj = yt.OffAxisProjectionPlot(ds, \textit{normal}, \textit{fields}, \textit{center=}, \textit{width=}, \textit{depth=},\textit{north\_vector=},\textit{weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -205,7 +205,7 @@
             else:
                 field_info_names.append("BoxlibFieldInfo")
     elif frontend == "chombo":
-        # remove low dimensional field info containters for ChomboPIC
+        # remove low dimensional field info containers for ChomboPIC
         field_info_names = [f for f in field_info_names if '1D' not in f
                             and '2D' not in f]
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -50,7 +50,7 @@
 The ``finder_method`` options should be given as "fof", "hop", or
 "rockstar".  Each of these methods has their own set of keyword
 arguments to control functionality.  These can specified in the form
-of a dictinoary using the ``finder_kwargs`` keyword.
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -119,7 +119,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Most people will interact with the new unit system using `YTArray` and `YTQuantity`.  These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`.  `YTArray` is intended to store array data, while `YTQuantitity` is intended to store scalars in a particular unit system.\n",
+    "Most people will interact with the new unit system using `YTArray` and `YTQuantity`.  These are both subclasses of numpy's fast array type, `ndarray`, and can be used interchangeably with other NumPy arrays. These new classes make use of the unit system to append unit metadata to the underlying `ndarray`.  `YTArray` is intended to store array data, while `YTQuantity` is intended to store scalars in a particular unit system.\n",
     "\n",
     "There are two ways to create arrays and quantities. The first is to explicitly create it by calling the class constructor and supplying a unit string:"
    ]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -361,7 +361,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "When working with views, rememeber that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
+    "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
    ]
   },
   {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/Halo_Analysis.ipynb
--- a/doc/source/cookbook/Halo_Analysis.ipynb
+++ b/doc/source/cookbook/Halo_Analysis.ipynb
@@ -76,7 +76,7 @@
    },
    "outputs": [],
    "source": [
-    "# Instantiate a catalog using those two paramter files\n",
+    "# Instantiate a catalog using those two parameter files\n",
     "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
     "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
    ]
@@ -137,7 +137,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
+    "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cumulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
    ]
   },
   {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -42,7 +42,7 @@
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 
-# This operation was substantiall faster.  Now lets modify the low resolution
+# This operation was substantially faster.  Now lets modify the low resolution
 # rendering until we find something we like.
 
 tf = render_source.transfer_function

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/custom_colorbar_tickmarks.ipynb
--- a/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.ipynb
@@ -46,7 +46,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The plot has a few attributes that point to underlying `matplotlib` plot primites.  For example, the `colorbar` object corresponds to the `cb` attribute of the plot."
+    "The plot has a few attributes that point to underlying `matplotlib` plot primitives.  For example, the `colorbar` object corresponds to the `cb` attribute of the plot."
    ]
   },
   {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -16,7 +16,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "This notebook demonstrates some of the capabilties of yt on some FITS \"position-position-spectrum\" cubes of radio data.\n",
+    "This notebook demonstrates some of the capabilities of yt on some FITS \"position-position-spectrum\" cubes of radio data.\n",
     "\n",
     "Note that it depends on some external dependencies, including `astropy` and `pyregion`."
    ]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/fits_xray_images.ipynb
--- a/doc/source/cookbook/fits_xray_images.ipynb
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -160,7 +160,7 @@
    },
    "outputs": [],
    "source": [
-    "v, c = ds.find_max(\"flux\") # Find the maxmimum flux and its center\n",
+    "v, c = ds.find_max(\"flux\") # Find the maximum flux and its center\n",
     "my_sphere = ds.sphere(c, (100.,\"code_length\")) # Radius of 150 pixels\n",
     "my_sphere.set_field_parameter(\"exposure_time\", exposure_time)"
    ]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -6,7 +6,7 @@
 data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
 halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
 
-# Instantiate a catalog using those two paramter files
+# Instantiate a catalog using those two parameter files
 hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 # Filter out less massive halos

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
--- a/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
+++ b/doc/source/cookbook/multiplot_2x2_coordaxes_slice.py
@@ -38,7 +38,7 @@
     # Since there are only two colorbar axes, we need to make sure we don't try
     # to set the temperature colorbar to cbar_axes[4], which would if we used i
     # to index cbar_axes, yielding a plot without a temperature colorbar.
-    # This unecessarily redraws the Density colorbar three times, but that has
+    # This unnecessarily redraws the Density colorbar three times, but that has
     # no effect on the final plot.
     if field == 'density':
         plot.cax = grid.cbar_axes[0]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -25,7 +25,7 @@
 
 # Create the off axis projection.
 # Setting no_ghost to False speeds up the process, but makes a
-# slighly lower quality image.
+# slightly lower quality image.
 image = yt.off_axis_projection(ds, c, L, W, Npixels, "density", no_ghost=False)
 
 # Write out the final image and give it a name

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -27,7 +27,7 @@
 sc.save("v2.png", sigma_clip=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
-# start to be obcured
+# start to be obscured
 
 tf.grey_opacity = True
 sc.render()

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/opengl_vr.py
--- a/doc/source/cookbook/opengl_vr.py
+++ b/doc/source/cookbook/opengl_vr.py
@@ -14,7 +14,7 @@
 dd = ds.all_data()
 collection.add_data(dd, "density")
 
-# Initiliaze basic Scene and pass the data
+# Initialize basic Scene and pass the data
 scene = SceneGraph()
 scene.add_collection(collection)
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/tipsy_and_yt.ipynb
--- a/doc/source/cookbook/tipsy_and_yt.ipynb
+++ b/doc/source/cookbook/tipsy_and_yt.ipynb
@@ -29,7 +29,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "We will be looking at a fairly low resolution dataset.  In the next cell, the `ds` object has an atribute called `n_ref` that tells the oct-tree how many particles to refine on.  The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8.  Just passing the argument `n_ref=8` to load does this for us."
+    "We will be looking at a fairly low resolution dataset.  In the next cell, the `ds` object has an attribute called `n_ref` that tells the oct-tree how many particles to refine on.  The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8.  Just passing the argument `n_ref=8` to load does this for us."
    ]
   },
   {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/cookbook/yt_gadget_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_analysis.ipynb
@@ -209,7 +209,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating thte total mass in gas and particles contained in the subvolume:"
+    "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating the total mass in gas and particles contained in the subvolume:"
    ]
   },
   {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -20,7 +20,7 @@
 If you run into problems with yt and you're writing to the mailing list
 or contacting developers on IRC, they will likely want to know what version of
 yt you're using.  Oftentimes, you'll want to know both the yt version,
-as well as the last changeset that was comitted to the branch you're using.
+as well as the last changeset that was committed to the branch you're using.
 To reveal this, go to a command line and type:
 
 .. code-block:: bash

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -416,7 +416,7 @@
   you might need ``sudo`` depending on where python is installed. See `This
   StackOverflow discussion
   <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
-  if you are curious why ``--install-option="--prefix="`` is neccessary on some systems.
+  if you are curious why ``--install-option="--prefix="`` is necessary on some systems.
 
 This will install yt into a folder in your home directory
 (``$HOME/.local/lib64/python2.7/site-packages`` on Linux,

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/quickstart/3)_Simple_Visualization.ipynb
--- a/doc/source/quickstart/3)_Simple_Visualization.ipynb
+++ b/doc/source/quickstart/3)_Simple_Visualization.ipynb
@@ -64,7 +64,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
+    "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be prepended to the filename and can be used to name it based on the width or to supply a location.\n",
     "\n",
     "Now we'll zoom and pan a bit."
    ]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -214,7 +214,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "The red gas is now much more prominant in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
+    "The red gas is now much more prominent in the image. We can clearly see that the hot gas is mostly associated with bound structures while the cool gas is associated with low-density voids."
    ]
   }
  ],

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -1332,7 +1332,7 @@
 
 will create a plot with the particle mass used to set the colorbar.
 Specifically, :class:`~yt.visualization.particle_plots.ParticlePlot`
-shows the total ``z_field`` for all the partices in each pixel on the
+shows the total ``z_field`` for all the particles in each pixel on the
 colorbar axis; to plot average quantities instead, one can supply a
 ``weight_field`` argument.
 
@@ -1453,7 +1453,7 @@
 Under the hood, the :class:`~yt.data_objects.profiles.ParticleProfile` class works a lot like a
 :class:`~yt.data_objects.profiles.Profile2D` object, except that instead of just binning the
 particle field, you can also use higher-order deposition functions like the cloud-in-cell
-interpolant to spread out the particle quantites over a few cells in the profile. The
+interpolant to spread out the particle quantities over a few cells in the profile. The
 :func:`~yt.data_objects.profiles.create_profile` will automatically detect when all the fields
 you pass in are particle fields, and return a :class:`~yt.data_objects.profiles.ParticleProfile`
 if that is the case. For a complete description of the :class:`~yt.data_objects.profiles.ParticleProfile`
@@ -1591,7 +1591,7 @@
    fn = p.data_source.save_as_dataset()
 
 This function will optionally take a ``filename`` keyword that follows
-the same logic as dicussed above in :ref:`saving_plots`.  The filename
+the same logic as discussed above in :ref:`saving_plots`.  The filename
 to which the dataset was written will be returned.
 
 Once saved, this file can be reloaded completely independently of the

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -87,7 +87,7 @@
 
 We recommend one of the later two methods, especially
 if you plan on re-compiling the cython extensions regularly. Note that none of this is
-neccessary if you installed embree into a location that is in your default path, such
+necessary if you installed embree into a location that is in your default path, such
 as /usr/local.
 
 Examples

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -158,7 +158,7 @@
     return None
 
 
-def create_commits_to_prs_mapping(linege, prs):
+def create_commits_to_prs_mapping(lineage, prs):
     """create a mapping from commits to the pull requests that the commit is
     part of
     """

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -81,7 +81,7 @@
         f_value  : float
            line f-value.
         gamma : float
-           line gamme value.
+           line gamma value.
         atomic_mass : float
            mass of atom in amu.
         """
@@ -471,7 +471,7 @@
                             (thermal_width < self.bin_width).sum(),
                             n_absorbers)
 
-            # provide a progress bar with information about lines processsed
+            # provide a progress bar with information about lines processed
             pbar = get_pbar("Adding line - %s [%f A]: " % \
                             (line['label'], line['wavelength']), n_absorbers)
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -872,7 +872,7 @@
 
     """
     Check to see if any of the parameters in p are the
-    same as initial paramters and if so, attempt to
+    same as initial parameters and if so, attempt to
     split the region and refit it.
 
     Parameters

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -58,7 +58,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -118,7 +118,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -170,7 +170,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -220,7 +220,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wave = 1215.6700  # Angstromss
+    wave = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -271,7 +271,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -326,7 +326,7 @@
 
     my_label = 'HI Lya'
     field = ('gas', 'H_number_density')
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -387,7 +387,7 @@
 
     my_label = 'HI Lya'
     field = ('gas', 'H_number_density')
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794
@@ -440,7 +440,7 @@
 
     my_label = 'HI Lya'
     field = 'H_number_density'
-    wavelength = 1215.6700  # Angstromss
+    wavelength = 1215.6700  # Angstroms
     f_value = 4.164E-01
     gamma = 6.265e+08
     mass = 1.00794

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -183,7 +183,7 @@
 
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
-            # Sort data outputs by proximity to current redsfhit.
+            # Sort data outputs by proximity to current redshift.
             self.splice_outputs.sort(key=lambda obj:np.abs(far_redshift -
                                                            obj['redshift']))
             # For first data dump, choose closest to desired redshift.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -288,13 +288,13 @@
             field_of_view = self.simulation.quan(field_of_view[0],
                                                  field_of_view[1])
         elif not isinstance(field_of_view, YTArray):
-          raise RuntimeError("field_of_view argument must be either a YTQauntity " +
+          raise RuntimeError("field_of_view argument must be either a YTQuantity " +
                              "or a tuple of type (float, str).")
         if isinstance(image_resolution, tuple) and len(image_resolution) == 2:
             image_resolution = self.simulation.quan(image_resolution[0],
                                                     image_resolution[1])
         elif not isinstance(image_resolution, YTArray):
-          raise RuntimeError("image_resolution argument must be either a YTQauntity " +
+          raise RuntimeError("image_resolution argument must be either a YTQuantity " +
                              "or a tuple of type (float, str).")
         
         # Calculate number of pixels on a side.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -582,7 +582,7 @@
 
                     sub_vel_mag = sub_ray['velocity_magnitude']
                     cos_theta = line_of_sight.dot(sub_vel) / sub_vel_mag
-                    # Protect against stituations where velocity mag is exactly
+                    # Protect against situations where velocity mag is exactly
                     # zero, in which case zero / zero = NaN.
                     cos_theta = np.nan_to_num(cos_theta)
                     redshift_dopp = \

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_analysis/enzofof_merger_tree.py
@@ -30,7 +30,7 @@
 # 7. For every halo in ball-query result, execute numpy's intersect1d on
 #    particle IDs
 # 8. Parentage is described by a fraction of particles that pass from one to
-#    the other; we have both descendent fractions and ancestory fractions. 
+#    the other; we have both descendent fractions and ancestor fractions. 
 
 
 import numpy as np

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -402,7 +402,7 @@
         calculate virial quantities.
         Default: ("gas", "overdensity")
     critical_overdensity : float
-        The value of the overdensity at which to evaulate the virial quantities.  
+        The value of the overdensity at which to evaluate the virial quantities.  
         Overdensity is with respect to the critical density.
         Default: 200
     profile_storage : string

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_analysis/halo_recipes.py
--- a/yt/analysis_modules/halo_analysis/halo_recipes.py
+++ b/yt/analysis_modules/halo_analysis/halo_recipes.py
@@ -78,7 +78,7 @@
         calculate virial quantities.
         Default: ("gas", "overdensity")
     critical_overdensity : float
-        The value of the overdensity at which to evaulate the virial quantities.
+        The value of the overdensity at which to evaluate the virial quantities.
         Overdensity is with respect to the critical density.
         Default: 200
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -387,7 +387,7 @@
         Returns
         -------
         radius : float
-            The virial raius in code units of the particles in the halo.  -1
+            The virial radius in code units of the particles in the halo.  -1
             if not virialized.
 
         Examples
@@ -1332,7 +1332,7 @@
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file.
 
-        This function wirtes out the names of all the HDF5 files that would
+        This function writes out the names of all the HDF5 files that would
         contain halo particle data.  Only the root processor writes out.
 
         Parameters
@@ -1445,7 +1445,7 @@
         used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
-        with duplicated particles for halo finidng to work. This number
+        with duplicated particles for halo finding to work. This number
         must be no smaller than the radius of the largest halo in the box
         in code units. Default = 0.02.
     total_mass : float
@@ -1574,7 +1574,7 @@
         used when dm_only is set to True.
     padding : float
         When run in parallel, the finder needs to surround each subvolume
-        with duplicated particles for halo finidng to work. This number
+        with duplicated particles for halo finding to work. This number
         must be no smaller than the radius of the largest halo in the box
         in code units. Default = 0.02.
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -31,7 +31,7 @@
 int kdMedianJst(KD kd, int d, int l, int u);
 void kdUpPass(KD kd, int iCell);
 void initgrouplist(Grouplist *g);
-void hop_main(KD kd, HC *my_comm, float densthres);
+void hop_main(KD kd, HC *my_comm, float densthresh);
 void regroup_main(float dens_outer, HC *my_comm);
 static PyObject *_HOPerror;
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/hop/hop_hop.c
--- a/yt/analysis_modules/halo_finding/hop/hop_hop.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_hop.c
@@ -50,11 +50,11 @@
 void ReSizeSMX(SMX smx, int nSmooth);
  
 void PrepareKD(KD kd);
-void binOutHop(SMX smx, HC *my_comm, float densthres);
+void binOutHop(SMX smx, HC *my_comm, float densthresh);
 void outGroupMerge(SMX smx, HC *my_comm);
 
 /* void main(int argc,char **argv) */
-void hop_main(KD kd, HC *my_comm, float densthres)
+void hop_main(KD kd, HC *my_comm, float densthresh)
 {
   /*	KD kd; */
 	SMX smx;
@@ -152,7 +152,7 @@
 	if (bMerge) free(smx->hash);
  
 	if (bGroup&2) {
-	    binOutHop(smx, my_comm, densthres);
+	    binOutHop(smx, my_comm, densthresh);
 	}
 	if (bGroup) {free(smx->densestingroup); free(smx->nmembers);}
 	smFinish(smx);
@@ -520,7 +520,7 @@
     return;
 }
  
-void binOutHop(SMX smx, HC *my_comm, float densthres)
+void binOutHop(SMX smx, HC *my_comm, float densthresh)
 /* Write Group tag for each particle.  Particles should be ordered. */
 /* Binary file: nActive, nGroups, list of Groups */
 {
@@ -535,7 +535,7 @@
     //s->ID = ivector(1,s->numlist);
     for (j=0;j<smx->kd->nActive;j++) {
       //s->ID[1+j] = smx->kd->p[j].iID; /* S Skory's addition */
-      if (NP_DENS(smx->kd,j) < densthres) s->ntag[j+1] = -1;
+      if (NP_DENS(smx->kd,j) < densthresh) s->ntag[j+1] = -1;
       else s->ntag[j+1] = smx->kd->p[j].iHop;
 
     }

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/hop/hop_regroup.c
--- a/yt/analysis_modules/halo_finding/hop/hop_regroup.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_regroup.c
@@ -22,7 +22,7 @@
  
 /* #define MINDENS (-FLT_MAX/3.0) */
 #define MINDENS (-1.e+30/3.0)
-/* This is the most negative density that can be accomodated.  Note
+/* This is the most negative density that can be accommodated.  Note
 that MINDENS*2.0 is referenced in the code and so must be properly
 represented by the machine.  There's no reason for this to be close to
 the actual minimum of the density. */

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/hop/hop_slice.c
--- a/yt/analysis_modules/halo_finding/hop/hop_slice.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_slice.c
@@ -94,7 +94,7 @@
 }
  
 /* =================================================================== */
-/* The following are public-domain routines from Numerical Repices in C,
+/* The following are public-domain routines from Numerical Recipes in C,
 2nd edition, by Press, Teulkolsky, Vetterling, & Flannery, 1992, Cambridge
 Univ. Press */
  

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/hop/hop_smooth.c
--- a/yt/analysis_modules/halo_finding/hop/hop_smooth.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_smooth.c
@@ -102,7 +102,7 @@
 	cell = ROOT;
 	/*
 	 ** First find the "local" Bucket.
-	 ** This could mearly be the closest bucket to ri[3].
+	 ** This could merely be the closest bucket to ri[3].
 	 */
 	while (cell < smx->kd->nSplit) {
 		if (ri[c[cell].iDim] < c[cell].fSplit) cell = LOWER(cell);
@@ -282,7 +282,7 @@
 			/* printf("%"ISYM": %"GSYM" %"GSYM" %"GSYM"\n", pi, x, y, z); */
 			/*
 			 ** First find the "local" Bucket.
-			 ** This could mearly be the closest bucket to ri[3].
+			 ** This could merely be the closest bucket to ri[3].
 			 */
 			cell = ROOT;
 			while (cell < smx->kd->nSplit) {

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -89,7 +89,7 @@
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
-    # definition of their vaiables.
+    # definition of their variables.
     char *FILE_FORMAT
     np.float64_t PARTICLE_MASS
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -74,7 +74,7 @@
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
-    # definition of their vaiables.
+    # definition of their variables.
     char *FILE_FORMAT
     np.float64_t PARTICLE_MASS
 
@@ -244,7 +244,7 @@
 
     def __cinit__(self, ts):
         self.ts = ts
-        self.tsl = ts.__iter__() #timseries generator used by read
+        self.tsl = ts.__iter__() #timeseries generator used by read
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -25,13 +25,13 @@
 
 class HaloMassFcn():
     r"""
-    Initalize a HaloMassFcn object to analyze the distribution of halos as 
+    Initialize a HaloMassFcn object to analyze the distribution of halos as 
     a function of mass.  A mass function can be created for a set of 
     simulated halos, an analytic fit to can be created for a redshift and 
     set of cosmological parameters, or both can be created.
 
     Provided with a halo dataset object, this will make a the mass function 
-    for simulated halos.  Prodiving a simulation dataset will set as many 
+    for simulated halos.  Providing a simulation dataset will set as many 
     of the cosmological parameters as possible for the creation of the 
     analytic mass function.
 
@@ -56,12 +56,12 @@
     will write out the data to disk.
 
     Creating a HaloMassFcn object with no arguments will produce an analytic
-    mass function at redshift = 0 using default cosmolocigal values.
+    mass function at redshift = 0 using default cosmological values.
 
     Parameters
     ----------
     simulation_ds : Simulation dataset object
-        The loaded simulation dataset, used to set cosmological paramters.
+        The loaded simulation dataset, used to set cosmological parameters.
         Default : None.
     halos_ds : Halo dataset object
         The halos from a simulation to be used for creation of the 
@@ -80,7 +80,7 @@
         Default : 0.7274.
     omega_baryon0  : float 
         The fraction of the universe made up of baryonic matter. This is not 
-        always stored in the datset and should be checked by hand.
+        always stored in the dataset and should be checked by hand.
         Default : 0.0456.
     hubble0 : float 
         The expansion rate of the universe in units of 100 km/s/Mpc. 
@@ -88,13 +88,13 @@
     sigma8 : float 
         The amplitude of the linear power spectrum at z=0 as specified by 
         the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
-        8 Mpc/h. This is not always stored in the datset and should be 
+        8 Mpc/h. This is not always stored in the dataset and should be 
         checked by hand.
         Default : 0.86.
     primoridal_index : float 
         This is the index of the mass power spectrum before modification by 
         the transfer function. A value of 1 corresponds to the scale-free 
-        primordial spectrum. This is not always stored in the datset and 
+        primordial spectrum. This is not always stored in the dataset and 
         should be checked by hand.
         Default : 1.0.
     this_redshift : float 
@@ -135,7 +135,7 @@
     >>> plt.savefig("mass_function.png")
 
     This creates only the analytic halo mass function for a simulation
-    dataset, with default values for cosmological paramters not stored in 
+    dataset, with default values for cosmological parameters not stored in 
     the dataset.
 
     >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
@@ -240,7 +240,7 @@
         If a halo file has been supplied, make a mass function for the simulated halos.
         """
         if halos_ds is not None:
-            # Used to check if a simulated halo mass funciton exists to write out
+            # Used to check if a simulated halo mass function exists to write out
             self.make_simulated=True
             # Calculate the simulated halo mass function
             self.create_sim_hmf()
@@ -622,7 +622,7 @@
 many times as you want. 
 
    TFmdm_set_cosm() -- User passes all the cosmological parameters as
-   arguments; the routine sets up all of the scalar quantites needed 
+   arguments; the routine sets up all of the scalar quantities needed 
    computation of the fitting formula.  The input parameters are: 
    1) omega_matter -- Density of CDM, baryons, and massive neutrinos,
                       in units of the critical density. 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -206,7 +206,7 @@
                     if cn == 0: continue
                     # The rather verbose form of the few next statements is a
                     # result of code optimization and shouldn't be changed
-                    # without checking for perfomance degradation. See
+                    # without checking for performance degradation. See
                     # https://bitbucket.org/yt_analysis/yt/pull-requests/1766
                     # for details.
                     if self.method == "invert_cdf":

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -1,5 +1,5 @@
 """
-Photon emission and absoprtion models for use with the
+Photon emission and absorption models for use with the
 photon simulator.
 """
 
@@ -195,7 +195,7 @@
         "2.0.2"
     thermal_broad : boolean, optional
         Whether to apply thermal broadening to spectral lines. Only should
-        be used if you are attemping to simulate a high-spectral resolution
+        be used if you are attempting to simulate a high-spectral resolution
         detector.
 
     Examples

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -362,7 +362,7 @@
                                        source.position[2]))
             fhandle.write('\n')
 
-        # write wavelength informaton
+        # write wavelength information
         for wavelength in wavelengths:
             fhandle.write('%f \n' % wavelength)
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -435,7 +435,7 @@
                      star_metallicity_constant is None):
                 mylog.error(
                     """
-                If data_source is not provided, all of these paramters
+                If data_source is not provided, all of these parameters
                 need to be set:
                    star_mass (array, Msun),
                    star_creation_time (array, code units),

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -122,7 +122,7 @@
         both of which are assumed to be in unitary length units.
     frvir (optional) : float
         Ensure that CoM +/- frvir*Rvir is contained within each domain
-    domains_list (optiona): dict of halos
+    domains_list (optional): dict of halos
         Organize halos into a dict of domains. Keys are DLE/DRE tuple
         values are a list of halos
     """
@@ -221,7 +221,7 @@
         grids[g.id] = og
         #how many refinement cells will we have?
         #measure the 'volume' of each mesh, but many
-        #cells do not exist. an overstimate
+        #cells do not exist. an overestimate
         levels_all[g.Level] += g.ActiveDimensions.prod()
         #how many leaves do we have?
         #this overestimates. a child of -1 means no child,
@@ -442,7 +442,7 @@
     width = 0.0
     if nwide is None:
         #expand until borders are included and
-        #we have an equaly-sized, non-zero box
+        #we have an equally-sized, non-zero box
         idxq,out=False,True
         while not out or not idxq:
             cfle,cfre = fc-width, fc+width

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -52,7 +52,7 @@
         parameter. Default: 10.
     length_range : Float
         A min/max pair for the range of values to search the over
-        the simulational volume. Default: [sqrt(3)dx, 1/2*shortest box edge],
+        the simulated volume. Default: [sqrt(3)dx, 1/2*shortest box edge],
         where dx is the smallest grid cell size.
     vol_ratio : Integer
         How to multiply-assign subvolumes to the parallel

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -81,7 +81,7 @@
     InternalGeographicCoordinateHandler
 
 # We want to support the movie format in the future.
-# When such a thing comes to pass, I'll move all the stuff that is contant up
+# When such a thing comes to pass, I'll move all the stuff that is constant up
 # to here, and then have it instantiate EnzoDatasets as appropriate.
 
 _cached_datasets = weakref.WeakValueDictionary()

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -51,7 +51,7 @@
     test_load = cPickle.load(open(cpklfile.name, "rb"))
 
     assert_equal.description = \
-        "%s: File was pickle-loaded succesfully" % __name__
+        "%s: File was pickle-loaded successfully" % __name__
     assert_equal(test_load is not None, True)
     assert_equal.description = \
         "%s: Length of pickle-loaded connected set object" % __name__

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -690,7 +690,7 @@
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
         over this while domain. It then uses oct_handler fill
-        to reorgnize values from IO read index order to
+        to reorganize values from IO read index order to
         the order they are in in the octhandler.
         """
         oct_handler = self.oct_handler
@@ -742,7 +742,7 @@
     # and since these headers are defined in only a single file it's
     # best to leave them in the static output
     _last_mask = None
-    _last_seletor_id = None
+    _last_selector_id = None
 
     def __init__(self, ds, nvar, oct_handler, domain_id):
         self.nvar = nvar

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/artio/artio_headers/artio.h
--- a/yt/frontends/artio/artio_headers/artio.h
+++ b/yt/frontends/artio/artio_headers/artio.h
@@ -273,7 +273,7 @@
 int artio_grid_write_oct(artio_fileset *handle, float *variables, int *refined);
 
 /*
- * Description:	Read the variables of the root level cell and the index of the Octtree
+ * Description:	Read the variables of the root level cell and the index of the Octree
  *              correlated with this root level cell
  *
  *  handle			The File handle
@@ -406,7 +406,7 @@
 			double* primary_variables, float *secondary_variables);
 
 /*
- * Description:	Read the variables of the root level cell and the index of the Octtree
+ * Description:	Read the variables of the root level cell and the index of the Octree
  *              correlated with this root level cell
  *
  *  handle			The File handle

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/artio/artio_headers/cosmology.c
--- a/yt/frontends/artio/artio_headers/cosmology.c
+++ b/yt/frontends/artio/artio_headers/cosmology.c
@@ -247,7 +247,7 @@
       x = c->aUni[i]/aeq;
 
       c->tPhys[i] = tPhysFac*2*x*x*(2+sqrt(x+1))/(3*pow(1+sqrt(x+1),2.0));
-      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after euality; it is very small for x > 10, I keep ot just for completeness; */
+      c->dPlus[i] = aeq*(x + 2.0/3.0 + (6*sqrt(1+x)+(2+3*x)*log(x)-2*(2+3*x)*log(1+sqrt(1+x)))/(log(64.0)-9));  /* long last term is the decaying mode generated after equality; it is very small for x > 10, I keep it just for completeness; */
       c->qPlus[i] = c->aUni[i]*cosmology_mu(c,c->aUni[i])*(1 + ((2+6*x)/(x*sqrt(1+x))+3*log(x)-6*log(1+sqrt(1+x)))/(log(64)-9)); /* this is a^2*dDPlus/dt/H0 */
 
       c->aBox[i] = c->aUni[i]*cosmology_dc_factor(c,c->dPlus[i]);

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -477,7 +477,7 @@
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may have been overriden
+            # We set these to cgs for now, but they may have been overridden
             if getattr(self, unit+'_unit', None) is not None:
                 continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/athena_pp/data_structures.py
--- a/yt/frontends/athena_pp/data_structures.py
+++ b/yt/frontends/athena_pp/data_structures.py
@@ -272,7 +272,7 @@
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
                           ("temperature", "K")]:
-            # We set these to cgs for now, but they may have been overriden
+            # We set these to cgs for now, but they may have been overridden
             if getattr(self, unit+'_unit', None) is not None:
                 continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -779,7 +779,7 @@
             # We use a default of two, as Nyx doesn't always output this value
             ref_factors = [2] * (self._max_level + 1)
         # We can't vary refinement factors based on dimension, or whatever else
-        # they are vaied on.  In one curious thing, I found that some Castro 3D
+        # they are varied on.  In one curious thing, I found that some Castro 3D
         # data has only two refinement factors, which I don't know how to
         # understand.
         self.ref_factors = ref_factors

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -668,7 +668,7 @@
         to the index to pre-determine the style of data-output.  However,
         it is not strictly necessary.  Optionally you may specify a
         *parameter_override* dictionary that will override anything in the
-        paarmeter file and a *conversion_override* dictionary that consists
+        parameter file and a *conversion_override* dictionary that consists
         of {fieldname : conversion_to_cgs} that will override the #DataCGS.
         """
         self.fluid_types += ("enzo",)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/enzo_p/data_structures.py
--- a/yt/frontends/enzo_p/data_structures.py
+++ b/yt/frontends/enzo_p/data_structures.py
@@ -267,7 +267,7 @@
         to the index to pre-determine the style of data-output.  However,
         it is not strictly necessary.  Optionally you may specify a
         *parameter_override* dictionary that will override anything in the
-        paarmeter file and a *conversion_override* dictionary that consists
+        parameter file and a *conversion_override* dictionary that consists
         of {fieldname : conversion_to_cgs} that will override the #DataCGS.
         """
         self.fluid_types += ("enzop",)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/exodus_ii/simulation_handling.py
--- a/yt/frontends/exodus_ii/simulation_handling.py
+++ b/yt/frontends/exodus_ii/simulation_handling.py
@@ -21,7 +21,7 @@
     r"""
     Initialize an ExodusII Simulation object.
 
-    Upon creation, the input directoy is searched for valid ExodusIIDatasets.
+    Upon creation, the input directory is searched for valid ExodusIIDatasets.
     The get_time_series can be used to generate a DatasetSeries object.
 
     simulation_directory : str

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -290,7 +290,7 @@
         # First we load all of the parameters
         hns = ["simulation parameters"]
         # note the ordering here is important: runtime parameters should
-        # ovewrite scalars with the same name.
+        # overwrite scalars with the same name.
         for ptype in ['scalars', 'runtime parameters']:
             for vtype in ['integer', 'real', 'logical', 'string']:
                 hns.append("%s %s" % (vtype, ptype))

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -363,7 +363,7 @@
         except struct.error:
             f.close()
             return False, 1
-        # Use value to check endianess
+        # Use value to check endianness
         if rhead == 256:
             endianswap = '<'
         elif rhead == 65536:

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -1,5 +1,5 @@
 """
-Gadget-specfic fields
+Gadget-specific fields
 
 
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -141,10 +141,10 @@
 
         # validate the parent-children relationship in the debug mode
         if self.dataset._debug:
-            self._validate_parent_children_relasionship()
+            self._validate_parent_children_relationship()
 
     # for _debug mode only
-    def _validate_parent_children_relasionship(self):
+    def _validate_parent_children_relationship(self):
         mylog.info('Validating the parent-children relationship ...')
 
         father_list = self._handle["Tree/Father"].value

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -1283,9 +1283,9 @@
 
     def _initialize_mesh(self):
         coords = self.stream_handler.fields.pop('coordinates')
-        connec = self.stream_handler.fields.pop('connectivity')
+        connect = self.stream_handler.fields.pop('connectivity')
         self.meshes = [StreamHexahedralMesh(0,
-          self.index_filename, connec, coords, self)]
+          self.index_filename, connect, coords, self)]
 
     def _setup_data_io(self):
         if self.stream_handler.io is not None:
@@ -1683,10 +1683,10 @@
 
     def _initialize_mesh(self):
         coords = ensure_list(self.stream_handler.fields.pop("coordinates"))
-        connec = ensure_list(self.stream_handler.fields.pop("connectivity"))
+        connect = ensure_list(self.stream_handler.fields.pop("connectivity"))
         self.meshes = [StreamUnstructuredMesh(
                        i, self.index_filename, c1, c2, self)
-                       for i, (c1, c2) in enumerate(zip(connec, repeat(coords[0])))]
+                       for i, (c1, c2) in enumerate(zip(connect, repeat(coords[0])))]
         self.mesh_union = MeshUnion("mesh_union", self.meshes)
 
     def _setup_data_io(self):

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/frontends/ytdata/data_structures.py
--- a/yt/frontends/ytdata/data_structures.py
+++ b/yt/frontends/ytdata/data_structures.py
@@ -271,7 +271,7 @@
         """
 
         if self._data_obj is None:
-            # Some data containers can't be recontructed in the same way
+            # Some data containers can't be reconstructed in the same way
             # since this is now particle-like data.
             data_type = self.parameters.get("data_type")
             container_type = self.parameters.get("container_type")
@@ -308,7 +308,7 @@
 
     def _restore_light_ray_solution(self):
         """
-        Restore all information asssociate with the light ray solution
+        Restore all information associated with the light ray solution
         to its original form.
         """
         key = "light_ray_solution"

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -48,7 +48,7 @@
 
 def iterable(obj):
     """
-    Grabbed from Python Cookbook / matploblib.cbook.  Returns true/false for
+    Grabbed from Python Cookbook / matplotlib.cbook.  Returns true/false for
     *obj* iterable.
     """
     try: len(obj)
@@ -289,7 +289,7 @@
     """
     Placed inside a function, this will insert an IPython interpreter at that
     current location.  This will enabled detailed inspection of the current
-    exeuction environment, as well as (optional) modification of that environment.
+    execution environment, as well as (optional) modification of that environment.
     *num_up* refers to how many frames of the stack get stripped off, and
     defaults to 1 so that this function itself is stripped off.
     """

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/geometry/tests/test_grid_container.py
--- a/yt/geometry/tests/test_grid_container.py
+++ b/yt/geometry/tests/test_grid_container.py
@@ -100,7 +100,7 @@
 
     assert_equal(point_grid_inds, grid_inds)
 
-    # Test wheter find_points works for lists
+    # Test whether find_points works for lists
     point_grids, point_grid_inds = test_ds.index._find_points(randx.tolist(),
                                                               randy.tolist(),
                                                               randz.tolist())

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -847,7 +847,7 @@
     ... def field_checker(dd, field_name):
     ...     return dd[field_name]
 
-    >>> field_cheker(ds.all_data(), 'density', result_basename='density')
+    >>> field_checker(ds.all_data(), 'density', result_basename='density')
 
     """
     def compute_results(func):
@@ -973,7 +973,7 @@
         Array obtained (possibly with attached units)
     desired : array-like
         Array to compare with (possibly with attached units)
-    rtol : float, oprtional
+    rtol : float, optional
         Relative tolerance, defaults to 1e-7
     atol : float or quantity, optional
         Absolute tolerance. If units are attached, they must be consistent

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -658,7 +658,7 @@
 
 def test_iteration():
     """
-    Test that iterating over a YTArray returns a sequence of YTQuantity insances
+    Test that iterating over a YTArray returns a sequence of YTQuantity instances
     """
     a = np.arange(3)
     b = YTArray(np.arange(3), 'cm')

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -122,7 +122,7 @@
             invert_symbols[value].append(key)
 
     # if there are any units with identical latex representations, substitute
-    # units to avoid  uncanceled terms in the final latex expresion.
+    # units to avoid  uncanceled terms in the final latex expression.
     for val in invert_symbols:
         symbols = invert_symbols[val]
         for i in range(1, len(symbols)):

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/units/unit_systems.py
--- a/yt/units/unit_systems.py
+++ b/yt/units/unit_systems.py
@@ -18,7 +18,7 @@
 
 class UnitSystemConstants(object):
     """
-    A class to faciliate conversions of physical constants into a given unit
+    A class to facilitate conversions of physical constants into a given unit
     system specified by *name*.
     """
     def __init__(self, name):
@@ -52,7 +52,7 @@
         The base temperature unit of this unit system. Defaults to "K".
     angle_unit : string, optional
         The base angle unit of this unit system. Defaults to "rad".
-    curent_mks_unit : string, optional
+    current_mks_unit : string, optional
         The base current unit of this unit system. Only used in MKS 
         or MKS-based unit systems.
     registry : :class:`yt.units.unit_registry.UnitRegistry` object

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -1627,7 +1627,7 @@
     return YTArray(norm, data.units)
 
 def udot(op1, op2):
-    """Matrix or vector dot product that preservs units
+    """Matrix or vector dot product that preserves units
 
     This is a wrapper around np.dot that preserves units.
     """

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -366,7 +366,7 @@
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
-        center cell (i,j,k) is ommitted.
+        center cell (i,j,k) is omitted.
 
         """
         ci = np.array(ci)
@@ -422,7 +422,7 @@
 
         That is they start from the lower left and proceed to upper
         right varying the third index most frequently. Note that the
-        center cell (i,j,k) is ommitted.
+        center cell (i,j,k) is omitted.
 
         """
         position = np.array(position)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -779,7 +779,7 @@
                   help="Use syntax highlighter for the file in language"),
              dict(short="-L", longname="--languages", action="store_true",
                   default = False, dest="languages",
-                  help="Retrive a list of supported languages"),
+                  help="Retrieve a list of supported languages"),
              dict(short="-e", longname="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -64,7 +64,7 @@
 
 
 def factorize_number(pieces):
-    """ Return array consiting of prime, its power and number of different
+    """ Return array consisting of prime, its power and number of different
         decompositions in three dimensions for this prime
     """
     factors = [factor for factor in decompose_to_primes(pieces)]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -127,7 +127,7 @@
                       (np.prod(grid['dimensions']), grid['ncells']))
                 raise TypeError
 
-            # Append all hierachy info before reading this grid's data
+            # Append all hierarchy info before reading this grid's data
             grid_dims[i]=grid['dimensions']
             grid_left_edges[i]=grid['left_edge']
             grid_dds[i]=grid['dds']

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/grid_data_format/docs/gdf_specification.txt
--- a/yt/utilities/grid_data_format/docs/gdf_specification.txt
+++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt
@@ -28,7 +28,7 @@
    this format.)
 #. All fluid fields in this version of the format are assumed to have the
    dimensionality of the grid they reside in plus any ghost zones, plus any
-   additionaly dimensionality required by the staggering property.
+   additional dimensionality required by the staggering property.
 #. Particles may have dataspaces affiliated with them.  (See Enzo's
    OutputParticleTypeGrouping for more information.)  This enables a light
    wrapper around data formats with interspersed particle types.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/hierarchy_inspection.py
--- a/yt/utilities/hierarchy_inspection.py
+++ b/yt/utilities/hierarchy_inspection.py
@@ -13,7 +13,7 @@
     Parameters
     ----------
     candidates : Iterable
-        An interable object that is a collection of classes to find the lowest
+        An iterable object that is a collection of classes to find the lowest
         subclass of.
 
     Returns

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -90,7 +90,7 @@
     s : 1d float ndarray
         ray parametric distance on range [0,len(ray)]
     rztheta : 2d float ndarray
-        ray grid cell intersections in cylidrical coordinates
+        ray grid cell intersections in cylindrical coordinates
     inds : 1d int ndarray
         indexes into the grid cells which the ray crosses in order.
 
@@ -145,7 +145,7 @@
     # compute first cut of indexes and thetas, which 
     # have been filtered by those values for which intersection
     # times are impossible (see above masks). Note that this is
-    # still independnent of z.
+    # still independent of z.
     inds = np.unique(np.concatenate([np.argwhere(tmmleft).flat, 
                                      np.argwhere(tpmleft).flat, 
                                      np.argwhere(tmmright).flat, 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lib/bounding_volume_hierarchy.pyx
--- a/yt/utilities/lib/bounding_volume_hierarchy.pyx
+++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx
@@ -69,7 +69,7 @@
 
     Currently, 2nd-order meshes are only supported for 20-node hexahedral elements.
     There, the primitive type is a bi-quadratic patch instead of a triangle, and
-    each intersection involves computing a Netwon-Raphson solve.
+    each intersection involves computing a Newton-Raphson solve.
 
     See yt/utilities/lib/primitives.pyx for the definitions of both of these primitive
     types.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -308,7 +308,7 @@
                     np.ndarray[np.uint8_t, ndim=4] art_child_masks,
                     np.ndarray[np.uint8_t, ndim=3] child_mask):
 
-    #loop over file_locations, for each row exracting the index & LE
+    #loop over file_locations, for each row extracting the index & LE
    #of the oct we will pull from art_child_masks
     #then use the art_child_masks info to fill in child_mask
     cdef int i,ioct,x,y,z

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lib/pixelization_routines.pyx
--- a/yt/utilities/lib/pixelization_routines.pyx
+++ b/yt/utilities/lib/pixelization_routines.pyx
@@ -794,7 +794,7 @@
     vertices = <np.float64_t *> malloc(ndim * sizeof(np.float64_t) * nvertices)
     field_vals = <np.float64_t *> malloc(sizeof(np.float64_t) * num_field_vals)
 
-    # fill the image bounds and pixel size informaton here
+    # fill the image bounds and pixel size information here
     for i in range(ndim):
         pLE[i] = extents[i][0]
         pRE[i] = extents[i][1]

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lib/primitives.pyx
--- a/yt/utilities/lib/primitives.pyx
+++ b/yt/utilities/lib/primitives.pyx
@@ -267,7 +267,7 @@
     cdef cython.floating fv = dot(N2, S) + d2
     cdef cython.floating err = fmax(fabs(fu), fabs(fv))
 
-    # begin Newton interation
+    # begin Newton iteration
     cdef cython.floating tol = 1.0e-5
     cdef int iterations = 0
     cdef int max_iter = 10
@@ -483,7 +483,7 @@
     cdef cython.floating fv = dot(N2, S) + d2
     cdef cython.floating err = fmax(fabs(fu), fabs(fv))
 
-    # begin Newton interation
+    # begin Newton iteration
     cdef cython.floating tol = 1.0e-5
     cdef int iterations = 0
     cdef int max_iter = 10

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/lodgeit.py
--- a/yt/utilities/lodgeit.py
+++ b/yt/utilities/lodgeit.py
@@ -5,7 +5,7 @@
     ~~~~~~~~
 
     A script that pastes stuff into the enzotools pastebin on
-    paste.enztools.org.
+    paste.enzotools.org.
 
     Modified (very, very slightly) from the original script by the authors
     below.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -262,7 +262,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -376,7 +376,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -431,7 +431,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -477,7 +477,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -531,7 +531,7 @@
 
     P : array
         The positions of the data to be modified (i.e. particle or grid cell
-        postions). The array should be Nx3.
+        positions). The array should be Nx3.
 
     V : array
         The velocities of the data to be modified (i.e. particle or grid cell
@@ -809,7 +809,7 @@
     -----
     The usage of 4D homogeneous coordinates is for OpenGL and GPU
     hardware that automatically performs the divide by w operation.
-    See the following for more details about the OpenGL perpective matrices.
+    See the following for more details about the OpenGL perspective matrices.
 
     http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/
     http://www.songho.ca/opengl/gl_projectionmatrix.html
@@ -880,7 +880,7 @@
     -----
     The usage of 4D homogeneous coordinates is for OpenGL and GPU
     hardware that automatically performs the divide by w operation.
-    See the following for more details about the OpenGL perpective matrices.
+    See the following for more details about the OpenGL perspective matrices.
 
     http://www.scratchapixel.com/lessons/3d-basic-rendering/perspective-and-orthographic-projection-matrix/orthographic-projection-matrix
     http://www.tomdalling.com/blog/modern-opengl/explaining-homogenous-coordinates-and-projective-geometry/

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -203,7 +203,7 @@
 class ParallelObjectIterator(ObjectIterator):
     """
     This takes an object, *pobj*, that implements ParallelAnalysisInterface,
-    and then does its thing, calling initliaze and finalize on the object.
+    and then does its thing, calling initialize and finalize on the object.
     """
     def __init__(self, pobj, just_list = False, attr='_grids',
                  round_robin=False):
@@ -1059,7 +1059,7 @@
         tmp_send = send.view(self.__tocast)
         recv = np.empty(total_size, dtype=send.dtype)
         if isinstance(send, YTArray):
-            # We assume send.units is consitent with the units
+            # We assume send.units is consistent with the units
             # on the receiving end.
             if isinstance(send, ImageArray):
                 recv = ImageArray(recv, input_units=send.units)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/poster/encode.py
--- a/yt/utilities/poster/encode.py
+++ b/yt/utilities/poster/encode.py
@@ -1,6 +1,6 @@
 """multipart/form-data encoding module
 
-This module provides functions that faciliate encoding name/value pairs
+This module provides functions that facilitate encoding name/value pairs
 as multipart/form-data suitable for a HTTP POST or PUT request.
 
 multipart/form-data is the standard way to upload files over HTTP"""
@@ -385,7 +385,7 @@
 
     Returns a tuple of `datagen`, `headers`, where `datagen` is a
     generator that will yield blocks of data that make up the encoded
-    parameters, and `headers` is a dictionary with the assoicated
+    parameters, and `headers` is a dictionary with the associated
     Content-Type and Content-Length headers.
 
     Examples:

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/poster/streaminghttp.py
--- a/yt/utilities/poster/streaminghttp.py
+++ b/yt/utilities/poster/streaminghttp.py
@@ -10,7 +10,7 @@
 
 **N.B.** You must specify a Content-Length header if using an iterable object
 since there is no way to determine in advance the total size that will be
-yielded, and there is no way to reset an interator.
+yielded, and there is no way to reset an iterator.
 
 Example usage:
 
@@ -159,7 +159,7 @@
 
     def http_request(self, req):
         """Handle a HTTP request.  Make sure that Content-Length is specified
-        if we're using an interable value"""
+        if we're using an iterable value"""
         # Make sure that if we're using an iterable object as the request
         # body, that we've also specified Content-Length
         if request_has_data(req):

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/tests/test_coordinate_conversions.py
--- a/yt/utilities/tests/test_coordinate_conversions.py
+++ b/yt/utilities/tests/test_coordinate_conversions.py
@@ -51,7 +51,7 @@
     assert_array_almost_equal(calc_theta, real_theta)
     assert_array_almost_equal(calc_phi, real_phi)
 
-def test_cylindrical_coordiante_conversion():
+def test_cylindrical_coordinate_conversion():
     normal = [0, 0, 1]
     real_r =     [ 0.47021498,  0.75970506,  0.94676179,  0.96327853,  
                    0.79516968,  0.96904193,  1.00437346,  1.3344104 ]    

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/utilities/tests/test_periodicity.py
--- a/yt/utilities/tests/test_periodicity.py
+++ b/yt/utilities/tests/test_periodicity.py
@@ -31,7 +31,7 @@
     dist = euclidean_dist(a,b)
     assert_almost_equal(dist, 1.3856406460551021)
 
-    # Now test the more complicated cases where we're calculaing radii based 
+    # Now test the more complicated cases where we're calculating radii based 
     # on data objects
     ds = fake_random_ds(64)
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -422,7 +422,7 @@
     Notes
     -----
     This is a simple implementation for a common use case.  Viewing the source
-    can be instructure, and is encouraged to see how to generate more
+    can be instructive, and is encouraged to see how to generate more
     complicated or more specific sets of multiplots for your own purposes.
     """
     import matplotlib.figure

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -301,7 +301,7 @@
 
         Parameters
         ----------
-        plot : `yt.visalization.plot_window.PlotWindow`
+        plot : `yt.visualization.plot_window.PlotWindow`
             yt plot on which the axes are based.
         units : string
             Unit description that overrides yt's unit description.  Only
@@ -490,7 +490,7 @@
 
         Parameters
         ----------
-        plot : `yt.visalization.plot_window.PlotWindow`
+        plot : `yt.visualization.plot_window.PlotWindow`
             yt plot that provides the image
         pos : tuple of floats
             Position of the origin of the image in centimeters.
@@ -627,7 +627,7 @@
 
         # Scale the colorbar
         shift = (0.5*(1.0-shrink[0])*size[0], 0.5*(1.0-shrink[1])*size[1])
-        # To facilitate strething rather than shrinking
+        # To facilitate stretching rather than shrinking
         # If stretched in both directions (makes no sense?) then y dominates. 
         if(shrink[0] > 1.0):
             shift = (0.05*self.figsize[0], 0.5*(1.0-shrink[1])*size[1])
@@ -1314,12 +1314,12 @@
 def single_plot(plot, field=None, figsize=(12,12), cb_orient="right", 
                 bare_axes=False, savefig=None, colorbar=True, 
                 file_format='eps', **kwargs):
-    r"""Wrapper for DualEPS routines to create a figure directy from a yt
+    r"""Wrapper for DualEPS routines to create a figure directly from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
     Parameters
     ----------
-    plot : `yt.visalization.plot_window.PlotWindow`
+    plot : `yt.visualization.plot_window.PlotWindow`
         yt plot that provides the image and metadata
     figsize : tuple of floats
         Size of the figure in centimeters.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -204,7 +204,7 @@
 
     def convert_distance_x(self, distance):
         r"""This function converts code-space distance into pixel-space
-        distance in the x-coordiante.
+        distance in the x-coordinate.
 
         Parameters
         ----------
@@ -222,7 +222,7 @@
 
     def convert_distance_y(self, distance):
         r"""This function converts code-space distance into pixel-space
-        distance in the y-coordiante.
+        distance in the y-coordinate.
 
         Parameters
         ----------
@@ -252,7 +252,7 @@
         equivalency : string, optional
            If set, the equivalency to use to convert the current units to
            the new requested unit. If None, the unit conversion will be done
-           without an equivelancy
+           without an equivalency
 
         equivalency_kwargs : string, optional
            Keyword arguments to be passed to the equivalency. Only used if
@@ -268,7 +268,7 @@
                 unit, equivalency, **equivalency_kwargs)
             # equiv_array isn't necessarily an ImageArray. This is an issue
             # inherent to the way the unit system handles YTArray
-            # sublcasses and I don't see how to modify the unit system to
+            # subclasses and I don't see how to modify the unit system to
             # fix this. Instead, we paper over this issue and hard code
             # that equiv_array is an ImageArray
             self[field] = ImageArray(

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/particle_plots.py
--- a/yt/visualization/particle_plots.py
+++ b/yt/visualization/particle_plots.py
@@ -152,7 +152,7 @@
          space, plot 'window' space or 'native' simulation coordinate system
          is given. For example, both 'upper-right-domain' and ['upper',
          'right', 'domain'] both place the origin in the upper right hand
-         corner of domain space. If x or y are not given, a value is inffered.
+         corner of domain space. If x or y are not given, a value is inferred.
          For instance, 'left-domain' corresponds to the lower-left hand corner
          of the simulation domain, 'center-domain' corresponds to the center
          of the simulation domain, or 'center-window' for the center of the
@@ -445,7 +445,7 @@
          space, plot 'window' space or 'native' simulation coordinate system
          is given. For example, both 'upper-right-domain' and ['upper',
          'right', 'domain'] both place the origin in the upper right hand
-         corner of domain space. If x or y are not given, a value is inffered.
+         corner of domain space. If x or y are not given, a value is inferred.
          For instance, 'left-domain' corresponds to the lower-left hand corner
          of the simulation domain, 'center-domain' corresponds to the center
          of the simulation domain, or 'center-window' for the center of the

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -302,7 +302,7 @@
         return self
 
     def _setup_plots(self):
-        # Left blank to be overriden in subclasses
+        # Left blank to be overridden in subclasses
         pass
 
     def _initialize_dataset(self, ts):

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -632,7 +632,7 @@
             if self.edgecolors is not None:
                 edgecolors = colorConverter.to_rgba(
                     self.edgecolors, alpha=self.alpha)
-            else:  # use colormap if not explicity overridden by edgecolors
+            else:  # use colormap if not explicitly overridden by edgecolors
                 if self.cmap is not None:
                     color_bounds = [0,plot.data.ds.index.max_level]
                     edgecolors = apply_colormap(
@@ -1698,7 +1698,7 @@
     Intended for representing a slice of a triangular faceted
     geometry in a slice plot.
 
-    Uses a set of *triangle_vertices* to find all trangles the plane of a
+    Uses a set of *triangle_vertices* to find all triangles the plane of a
     SlicePlot intersects with. The lines between the intersection points
     of the triangles are then added to the plot to create an outline
     of the geometry represented by the triangles.
@@ -2271,7 +2271,7 @@
     """
     Add the line integral convolution to the plot for vector fields
     visualization. Two component of vector fields needed to be provided
-    (i.e., velocity_x and velocity_y, magentic_field_x and magnetic_field_y).
+    (i.e., velocity_x and velocity_y, magnetic_field_x and magnetic_field_y).
 
     Parameters
     ----------

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -141,7 +141,7 @@
 
 class PlotWindow(ImagePlotContainer):
     r"""
-    A ploting mechanism based around the concept of a window into a
+    A plotting mechanism based around the concept of a window into a
     data source. It can have arbitrary fields, each of which will be
     centered on the same viewpoint, but will have individual zlimits.
 
@@ -376,7 +376,7 @@
         equivalency : string, optional
            If set, the equivalency to use to convert the current units to
            the new requested unit. If None, the unit conversion will be done
-           without an equivelancy
+           without an equivalency
 
         equivalency_kwargs : string, optional
            Keyword arguments to be passed to the equivalency. Only used if
@@ -733,7 +733,7 @@
         y_in_bounds = yc >= yllim and yc <= yrlim
 
         if not x_in_bounds and not y_in_bounds:
-            msg = ('orgin inputs not in bounds of specified coordinate sytem' +
+            msg = ('origin inputs not in bounds of specified coordinate sytem' +
                    'domain.')
             msg = msg.format(self.origin)
             raise RuntimeError(msg)
@@ -1800,7 +1800,7 @@
         This specifies the normal vector to the slice.  If given as an integer
         or a coordinate string (0=x, 1=y, 2=z), this function will return an
         :class:`AxisAlignedSlicePlot` object.  If given as a sequence of floats,
-        this is interpretted as an off-axis vector and an
+        this is interpreted as an off-axis vector and an
         :class:`OffAxisSlicePlot` object is returned.
     fields : string
          The name of the field(s) to be plotted.

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/tick_locators.py
--- a/yt/visualization/tick_locators.py
+++ b/yt/visualization/tick_locators.py
@@ -30,7 +30,7 @@
 
     def base(self,base):
         """
-        set the base of the log scaling (major tick every base**i, i interger)
+        set the base of the log scaling (major tick every base**i, i integer)
         """
         self._base=base+0.0
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -44,7 +44,7 @@
         niter : integer, optional
             Maximum number of iterations to find solution. Default: 50000
         init_temp : float, optional
-            Intital temperature for simulated annealing when finding a
+            Initial temperature for simulated annealing when finding a
             solution.  Lower initial temperatures result in an initial solution
             in first several iterations that changes more rapidly. Default: 10.0
         alpha : float, optional
@@ -109,7 +109,7 @@
         niter : integer, optional
             Maximum number of iterations to find solution. Default: 50000
         init_temp : float, optional
-            Intital temperature for simulated annealing when finding a
+            Initial temperature for simulated annealing when finding a
             solution.  Lower initial temperatures result in an initial solution
             in first several iterations that changes more rapidly. Default: 10.0
         alpha : float, optional
@@ -138,7 +138,7 @@
         
     def rand_seq(self):
         r"""
-        Generates values in random order, equivlanet to using shuffle
+        Generates values in random order, equivalent to using shuffle
         in random without generation all values at once.
         """
         values = range(self.nframes)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/glfw_inputhook.py
--- a/yt/visualization/volume_rendering/glfw_inputhook.py
+++ b/yt/visualization/volume_rendering/glfw_inputhook.py
@@ -1,6 +1,6 @@
 # encoding: utf-8
 """
-Enable pyglet to be used interacive by setting PyOS_InputHook.
+Enable pyglet to be used interactive by setting PyOS_InputHook.
 
 Authors
 -------

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/interactive_vr.py
--- a/yt/visualization/volume_rendering/interactive_vr.py
+++ b/yt/visualization/volume_rendering/interactive_vr.py
@@ -183,7 +183,7 @@
 
     This class implements a basic "Trackball" or "Arcball" camera control system
     that allows for unconstrained 3D rotations without suffering from Gimbal lock.
-    Following Ken Shoemake's orginal C implementation (Graphics Gems IV, III.1)
+    Following Ken Shoemake's original C implementation (Graphics Gems IV, III.1)
     we project mouse movements onto the unit sphere and use quaternions to
     represent the corresponding rotation.
 
@@ -346,7 +346,7 @@
         self.set_shader("max_intensity.f")
         self.data_source = None
 
-        self.blocks = {} # A collection of PartionedGrid objects
+        self.blocks = {} # A collection of PartitionedGrid objects
         self.block_order = []
 
         self.gl_texture_names = []
@@ -785,7 +785,7 @@
 
         # --- end texture init
 
-        # Set "fb_texture" as our colour attachement #0
+        # Set "fb_texture" as our colour attachment #0
         GL.glFramebufferTexture2D(
             GL.GL_FRAMEBUFFER, GL.GL_COLOR_ATTACHMENT0, GL.GL_TEXTURE_2D,
             self.fb_texture,

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/lens.py
--- a/yt/visualization/volume_rendering/lens.py
+++ b/yt/visualization/volume_rendering/lens.py
@@ -291,7 +291,7 @@
     """A lens that includes two sources for perspective rays, for 3D viewing
 
     The degree of differences between the left and right images is controlled by 
-    the disparity (the maximum distance between cooresponding points in the left
+    the disparity (the maximum distance between corresponding points in the left
     and right images). By default, the disparity is set to be 3 pixels.
     """
 

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/old_camera.py
--- a/yt/visualization/volume_rendering/old_camera.py
+++ b/yt/visualization/volume_rendering/old_camera.py
@@ -1307,7 +1307,7 @@
             else:
                 # The corner is on the backwards, then put it outside of the
                 # image It can not be simply removed because it may connect to
-                # other corner within the image, which produces visible domian
+                # other corner within the image, which produces visible domain
                 # boundary line
                 sight_length = np.sqrt(self.width[0]**2+self.width[1]**2) / \
                                np.sqrt(1 - sight_angle_cos**2)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/scene.py
--- a/yt/visualization/volume_rendering/scene.py
+++ b/yt/visualization/volume_rendering/scene.py
@@ -332,7 +332,7 @@
             out = np.clip(out[:, :, :3] / max_val, 0.0, 1.0) * 255
             out = np.concatenate(
                 [out.astype('uint8'), alpha[..., None]], axis=-1)
-            # not sure why we need rot90, but this makes the orentation
+            # not sure why we need rot90, but this makes the orientation
             # match the png writer
             ax.imshow(np.rot90(out), origin='lower')
             canvas.print_figure(fname, dpi=100)

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/shader_objects.py
--- a/yt/visualization/volume_rendering/shader_objects.py
+++ b/yt/visualization/volume_rendering/shader_objects.py
@@ -242,7 +242,7 @@
 
 class PassthroughFragmentShader(FragmentShader):
     '''A first pass fragment shader that performs no operation. Used for debug
-    puproses. It's distinct from NoOpFragmentShader, because of the number of
+    purposes. It's distinct from NoOpFragmentShader, because of the number of
     uniforms'''
     _source = "passthrough.fragmentshader"
     _shader_name = "passthrough.f"
@@ -261,7 +261,7 @@
     _shader_name = "transfer_function.f"
 
 class DefaultVertexShader(VertexShader):
-    '''A first pass vertex shader that tranlates the location of vertices from
+    '''A first pass vertex shader that translates the location of vertices from
     the world coordinates to the viewing plane coordinates'''
     _source = "default.vertexshader"
     _shader_name = "default.v"

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -121,7 +121,7 @@
         """
         if self.bounds is None:
             mylog.info('Calculating data bounds. This may take a while.' +
-                       '  Set the TranferFunctionHelper.bounds to avoid this.')
+                       '  Set the TransferFunctionHelper.bounds to avoid this.')
             self.set_bounds()
 
         if self.log:
@@ -138,7 +138,7 @@
         """Setup a default colormap
 
         Creates a ColorTransferFunction including 10 gaussian layers whose
-        colors smaple the 'spectral' colormap. Also attempts to scale the
+        colors sample the 'spectral' colormap. Also attempts to scale the
         transfer function to produce a natural contrast ratio.
 
         """

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -873,7 +873,7 @@
     defines the somewhat arbitrary normalization to the scattering
     approximation: because everything is done largely unit-free, and is
     really not terribly accurate anyway, feel free to adjust this to change
-    the relative amount of reddenning.  Maybe in some future version this
+    the relative amount of reddening.  Maybe in some future version this
     will be unitful.
     """
     def __init__(self, T_bounds, rho_bounds, nbins=256,

diff -r 52dadcaf4324fa06ae70691cfddca24af6b6ce0e -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 yt/visualization/volume_rendering/utils.py
--- a/yt/visualization/volume_rendering/utils.py
+++ b/yt/visualization/volume_rendering/utils.py
@@ -18,7 +18,7 @@
     if not isinstance(data_source, (YTSelectionContainer3D, type(None))):
         raise RuntimeError(
             "The data_source is not a valid 3D data container.\n"
-            "Expected an ojbect of type YTSelectionContainer3D but received "
+            "Expected an object of type YTSelectionContainer3D but received "
             "an object of type %s." % type(data_source))
     return data_source
 


https://bitbucket.org/yt_analysis/yt/commits/559e0f1b3fd3/
Changeset:   559e0f1b3fd3
User:        ngoldbaum
Date:        2017-10-24 02:03:55+00:00
Summary:     add ignore rules for new flake8 rules
Affected #:  1 file

diff -r 89cf5553a4699f5583704674b5bb578b9a1766b4 -r 559e0f1b3fd3a6fd6661bfdceb92d9566fb03eaf setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,4 +15,4 @@
 #      vendored libraries
 exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E722,E741,E731,W291,W292,W293,W391,W503
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/532cc67f458d/
Changeset:   532cc67f458d
User:        ngoldbaum
Date:        2017-10-24 03:22:56+00:00
Summary:     Merge pull request #1592 from ngoldbaum/flake8-fix

Add ignore rules for new flake8 rules
Affected #:  1 file

diff -r 5b1e0e11e01b7078bcf1eb2c666a51b768379d56 -r 532cc67f458d76196441c6772091a710ebe6adcf setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,4 +15,4 @@
 #      vendored libraries
 exclude = doc,benchmarks,*/api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/lru_cache.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py,yt/utilities/fits_image.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E731,W291,W292,W293,W391,W503
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E305,E306,E402,E502,E701,E703,E722,E741,E731,W291,W292,W293,W391,W503
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/57e395fe22e8/
Changeset:   57e395fe22e8
User:        Josh Borrow
Date:        2017-10-25 18:42:09+00:00
Summary:     Added a hack for _switch_ds to work with ParticleProjectionPlots and a test to check that it is working.
Affected #:  2 files

diff -r 532cc67f458d76196441c6772091a710ebe6adcf -r 57e395fe22e844a2532228234f124335af3edaa4 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -48,6 +48,7 @@
 from yt.visualization.color_maps import \
     yt_colormaps
 
+
 def invalidate_data(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
@@ -322,10 +323,19 @@
                 raise RuntimeError("The data_source keyword argument "
                                    "is only defined for projections.")
             kwargs['data_source'] = data_source
-        new_object = getattr(new_ds, name)(**kwargs)
+
         self.ds = new_ds
+
+        # A _hack_ for ParticleProjectionPlots
+        if name == 'Particle':
+            from yt.visualization.particle_plots import ParticleAxisAlignedDummyDataSource
+            new_object = ParticleAxisAlignedDummyDataSource(ds=self.ds, **kwargs)
+        else:
+            new_object = getattr(new_ds, name)(**kwargs)
+
         self.data_source = new_object
         self._data_valid = self._plot_valid = False
+
         for d in 'xyz':
             lim_name = d+'lim'
             if hasattr(self, lim_name):

diff -r 532cc67f458d76196441c6772091a710ebe6adcf -r 57e395fe22e844a2532228234f124335af3edaa4 yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -41,7 +41,6 @@
     ParticlePhasePlot
 from yt.units.yt_array import YTArray
 
-
 def setup():
     """Test specific setup."""
     from yt.config import ytcfg
@@ -272,6 +271,26 @@
     # make sure we can set the units using the tuple without erroring out
     pp.set_unit(("Gas", "particle_mass"), "Msun")
 
+ at requires_file(tgal)
+def test_switch_ds():
+    """
+    Tests the _switch_ds() method for ParticleProjectionPlots that as of
+    25th October 2017 requires a specific hack in plot_container.py
+    """
+    ds = load(tgal)
+    ds2 = load(tgal)
+
+    plot = ParticlePlot(
+        ds,
+        ("Gas", "particle_position_x"),
+        ("Gas", "particle_position_y"),
+        ("Gas", "density"),
+    )
+
+    plot._switch_ds(ds2)
+
+    return
+
 class TestParticleProjectionPlotSave(unittest.TestCase):
 
     def setUp(self):


https://bitbucket.org/yt_analysis/yt/commits/b0d2b5cffd2e/
Changeset:   b0d2b5cffd2e
User:        Josh Borrow
Date:        2017-10-25 18:50:03+00:00
Summary:     pep8 line length
Affected #:  1 file

diff -r 57e395fe22e844a2532228234f124335af3edaa4 -r b0d2b5cffd2e4a90c9ada11d905d70e3543f198a yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -328,7 +328,8 @@
 
         # A _hack_ for ParticleProjectionPlots
         if name == 'Particle':
-            from yt.visualization.particle_plots import ParticleAxisAlignedDummyDataSource
+            from yt.visualization.particle_plots import \
+            ParticleAxisAlignedDummyDataSource
             new_object = ParticleAxisAlignedDummyDataSource(ds=self.ds, **kwargs)
         else:
             new_object = getattr(new_ds, name)(**kwargs)


https://bitbucket.org/yt_analysis/yt/commits/7da7f5e19cdd/
Changeset:   7da7f5e19cdd
User:        ngoldbaum
Date:        2017-10-26 13:23:45+00:00
Summary:     Merge pull request #1594 from JBorrow/fix-_switch_ds-particle-projection-plot

Fix  _switch_ds() use with ParticleProjectionPlots
Affected #:  2 files

diff -r 532cc67f458d76196441c6772091a710ebe6adcf -r 7da7f5e19cdd525f64c9d3c1fa5e46a59d80f59d yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -48,6 +48,7 @@
 from yt.visualization.color_maps import \
     yt_colormaps
 
+
 def invalidate_data(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
@@ -322,10 +323,20 @@
                 raise RuntimeError("The data_source keyword argument "
                                    "is only defined for projections.")
             kwargs['data_source'] = data_source
-        new_object = getattr(new_ds, name)(**kwargs)
+
         self.ds = new_ds
+
+        # A _hack_ for ParticleProjectionPlots
+        if name == 'Particle':
+            from yt.visualization.particle_plots import \
+            ParticleAxisAlignedDummyDataSource
+            new_object = ParticleAxisAlignedDummyDataSource(ds=self.ds, **kwargs)
+        else:
+            new_object = getattr(new_ds, name)(**kwargs)
+
         self.data_source = new_object
         self._data_valid = self._plot_valid = False
+
         for d in 'xyz':
             lim_name = d+'lim'
             if hasattr(self, lim_name):

diff -r 532cc67f458d76196441c6772091a710ebe6adcf -r 7da7f5e19cdd525f64c9d3c1fa5e46a59d80f59d yt/visualization/tests/test_particle_plot.py
--- a/yt/visualization/tests/test_particle_plot.py
+++ b/yt/visualization/tests/test_particle_plot.py
@@ -41,7 +41,6 @@
     ParticlePhasePlot
 from yt.units.yt_array import YTArray
 
-
 def setup():
     """Test specific setup."""
     from yt.config import ytcfg
@@ -272,6 +271,26 @@
     # make sure we can set the units using the tuple without erroring out
     pp.set_unit(("Gas", "particle_mass"), "Msun")
 
+ at requires_file(tgal)
+def test_switch_ds():
+    """
+    Tests the _switch_ds() method for ParticleProjectionPlots that as of
+    25th October 2017 requires a specific hack in plot_container.py
+    """
+    ds = load(tgal)
+    ds2 = load(tgal)
+
+    plot = ParticlePlot(
+        ds,
+        ("Gas", "particle_position_x"),
+        ("Gas", "particle_position_y"),
+        ("Gas", "density"),
+    )
+
+    plot._switch_ds(ds2)
+
+    return
+
 class TestParticleProjectionPlotSave(unittest.TestCase):
 
     def setUp(self):


https://bitbucket.org/yt_analysis/yt/commits/54aa71b3e9ba/
Changeset:   54aa71b3e9ba
User:        jisuoqing
Date:        2017-10-28 21:59:02+00:00
Summary:     unable pre-defined display center
Affected #:  1 file

diff -r 7da7f5e19cdd525f64c9d3c1fa5e46a59d80f59d -r 54aa71b3e9ba9738eb6df539c4f6882081ebf88a yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -191,11 +191,7 @@
             display_center[theta_ax] = self.ds.domain_center[theta_ax]
             display_center[z_ax] = self.ds.domain_center[z_ax]
         elif ax_name == "theta":
-            # Note we are using domain_right_edge, not domain_width, so that in
-            # cases where DLE is not zero we go to the inner edge.
-            display_center[r_ax] = self.ds.domain_right_edge[r_ax]/2.0
-            display_center[z_ax] = self.ds.domain_center[z_ax]
-            # zeros for the others
+            display_center = center[:2]
         return center, display_center
 
     def sanitize_width(self, axis, width, depth):


https://bitbucket.org/yt_analysis/yt/commits/532f088baf78/
Changeset:   532f088baf78
User:        jisuoqing
Date:        2017-10-29 21:02:02+00:00
Summary:     resolve flake8 issue
Affected #:  1 file

diff -r 54aa71b3e9ba9738eb6df539c4f6882081ebf88a -r 532f088baf78d14def0e6889bb611918ed67f378 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -183,7 +183,6 @@
                           0.0 * display_center[1],
                           0.0 * display_center[2]]
         ax_name = self.axis_name[axis]
-        r_ax = self.axis_id['r']
         theta_ax = self.axis_id['theta']
         z_ax = self.axis_id['z']
         if ax_name == "r":


https://bitbucket.org/yt_analysis/yt/commits/efd194abd158/
Changeset:   efd194abd158
User:        jisuoqing
Date:        2017-10-29 23:49:30+00:00
Summary:     use existing center values, if possible
Affected #:  1 file

diff -r 532f088baf78d14def0e6889bb611918ed67f378 -r efd194abd1583b00db488e08d47c0f35182e0fcd yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -183,14 +183,22 @@
                           0.0 * display_center[1],
                           0.0 * display_center[2]]
         ax_name = self.axis_name[axis]
+        r_ax = self.axis_id['r']
         theta_ax = self.axis_id['theta']
         z_ax = self.axis_id['z']
         if ax_name == "r":
-            # zeros everywhere
-            display_center[theta_ax] = self.ds.domain_center[theta_ax]
-            display_center[z_ax] = self.ds.domain_center[z_ax]
+            # use existing center value, if available
+            try:
+                display_center = np.take(center, (theta_ax, z_ax))
+            except:
+                display_center[theta_ax] = self.ds.domain_center[theta_ax]
+                display_center[z_ax] = self.ds.domain_center[z_ax]
         elif ax_name == "theta":
-            display_center = center[:2]
+            try:
+                display_center = np.take(center, (r_ax, z_ax))
+            except:
+                display_center[r_ax] = self.ds.domain_right_edge[r_ax]/2.0
+                display_center[z_ax] = self.ds.domain_center[z_ax]
         return center, display_center
 
     def sanitize_width(self, axis, width, depth):


https://bitbucket.org/yt_analysis/yt/commits/5f8170cead92/
Changeset:   5f8170cead92
User:        jisuoqing
Date:        2017-10-30 00:58:39+00:00
Summary:     bug fix
Affected #:  1 file

diff -r efd194abd1583b00db488e08d47c0f35182e0fcd -r 5f8170cead927dcc34a418c51716f629bc97ed44 yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -187,18 +187,11 @@
         theta_ax = self.axis_id['theta']
         z_ax = self.axis_id['z']
         if ax_name == "r":
-            # use existing center value, if available
-            try:
-                display_center = np.take(center, (theta_ax, z_ax))
-            except:
-                display_center[theta_ax] = self.ds.domain_center[theta_ax]
-                display_center[z_ax] = self.ds.domain_center[z_ax]
+            display_center[theta_ax] = self.ds.domain_center[theta_ax]
+            display_center[z_ax] = self.ds.domain_center[z_ax]
         elif ax_name == "theta":
-            try:
-                display_center = np.take(center, (r_ax, z_ax))
-            except:
-                display_center[r_ax] = self.ds.domain_right_edge[r_ax]/2.0
-                display_center[z_ax] = self.ds.domain_center[z_ax]
+            # use existing center value
+            for idx in (r_ax, z_ax): display_center[idx] = center[idx]
         return center, display_center
 
     def sanitize_width(self, axis, width, depth):


https://bitbucket.org/yt_analysis/yt/commits/21be624bbb55/
Changeset:   21be624bbb55
User:        ngoldbaum
Date:        2017-10-30 15:24:13+00:00
Summary:     Merge pull request #1597 from jisuoqing/master

Fix zoom-in of slice plot for 2D cylindrical geometry
Affected #:  1 file

diff -r 7da7f5e19cdd525f64c9d3c1fa5e46a59d80f59d -r 21be624bbb559f9f6ff0c47251e53397d8804f1b yt/geometry/coordinates/cylindrical_coordinates.py
--- a/yt/geometry/coordinates/cylindrical_coordinates.py
+++ b/yt/geometry/coordinates/cylindrical_coordinates.py
@@ -187,15 +187,11 @@
         theta_ax = self.axis_id['theta']
         z_ax = self.axis_id['z']
         if ax_name == "r":
-            # zeros everywhere
             display_center[theta_ax] = self.ds.domain_center[theta_ax]
             display_center[z_ax] = self.ds.domain_center[z_ax]
         elif ax_name == "theta":
-            # Note we are using domain_right_edge, not domain_width, so that in
-            # cases where DLE is not zero we go to the inner edge.
-            display_center[r_ax] = self.ds.domain_right_edge[r_ax]/2.0
-            display_center[z_ax] = self.ds.domain_center[z_ax]
-            # zeros for the others
+            # use existing center value
+            for idx in (r_ax, z_ax): display_center[idx] = center[idx]
         return center, display_center
 
     def sanitize_width(self, axis, width, depth):


https://bitbucket.org/yt_analysis/yt/commits/e86113fd760a/
Changeset:   e86113fd760a
User:        ngoldbaum
Date:        2017-11-03 16:39:36+00:00
Summary:     use newer miniconda base in appveyor config
Affected #:  1 file

diff -r 21be624bbb559f9f6ff0c47251e53397d8804f1b -r e86113fd760a4761891333bcbefda57b53493762 appveyor.yml
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -4,7 +4,7 @@
 environment:
 
   global:
-      PYTHON: "C:\\Miniconda3-x64"
+      PYTHON: "C:\\Miniconda36-x64"
 
   matrix:
       - PYTHON_VERSION: "2.7"


https://bitbucket.org/yt_analysis/yt/commits/eccaa23d408f/
Changeset:   eccaa23d408f
User:        ngoldbaum
Date:        2017-11-03 19:15:26+00:00
Summary:     Merge pull request #1607 from ngoldbaum/appveyor-fix

use newer miniconda base in appveyor config
Affected #:  1 file

diff -r 21be624bbb559f9f6ff0c47251e53397d8804f1b -r eccaa23d408f0027881e21ba82f86d44b32aa013 appveyor.yml
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -4,7 +4,7 @@
 environment:
 
   global:
-      PYTHON: "C:\\Miniconda3-x64"
+      PYTHON: "C:\\Miniconda36-x64"
 
   matrix:
       - PYTHON_VERSION: "2.7"


https://bitbucket.org/yt_analysis/yt/commits/e6c05afa80c7/
Changeset:   e6c05afa80c7
User:        qobilidop
Date:        2017-11-04 20:10:01+00:00
Summary:     Add `code_energy` to unit registry
Affected #:  1 file

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r e6c05afa80c79eb38f4ae8a091cd7ef65cfeae53 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -941,6 +941,8 @@
         self.unit_registry.add("code_length", 1.0, dimensions.length)
         self.unit_registry.add("code_mass", 1.0, dimensions.mass)
         self.unit_registry.add("code_density", 1.0, dimensions.density)
+        # Note that energy is actually energy per unit mass
+        self.unit_registry.add("code_energy", 1.0, dimensions.energy / dimensions.mass)
         self.unit_registry.add("code_time", 1.0, dimensions.time)
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
@@ -1018,10 +1020,12 @@
             self.mass_unit / (self.length_unit * (self.time_unit)**2))
         temperature_unit = getattr(self, "temperature_unit", 1.0)
         density_unit = getattr(self, "density_unit", self.mass_unit / self.length_unit**3)
+        energy_unit = getattr(self, "energy_unit", vel_unit**2)
         self.unit_registry.modify("code_velocity", vel_unit)
         self.unit_registry.modify("code_temperature", temperature_unit)
         self.unit_registry.modify("code_pressure", pressure_unit)
         self.unit_registry.modify("code_density", density_unit)
+        self.unit_registry.modify("code_energy", energy_unit)
         # domain_width does not yet exist
         if (self.domain_left_edge is not None and
             self.domain_right_edge is not None):


https://bitbucket.org/yt_analysis/yt/commits/01b09e1d3641/
Changeset:   01b09e1d3641
User:        qobilidop
Date:        2017-11-04 20:18:24+00:00
Summary:     Set sane default for energy unit in Gadget frontend
Affected #:  1 file

diff -r e6c05afa80c79eb38f4ae8a091cd7ef65cfeae53 -r 01b09e1d3641f382f6cf14bc7ba7feb67d57855d yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -337,6 +337,15 @@
             vel_unit = self.velocity_unit
         self.time_unit = self.length_unit / vel_unit
 
+        # Note that energy is actually energy per unit mass
+        if "energy" in unit_base:
+            energy_unit = unit_base["energy"]
+        else:
+            # Sane default
+            energy_unit = (1, "(km/s)**2")
+        energy_unit = _fix_unit_ordering(energy_unit)
+        self.energy_unit = self.quan(energy_unit[0], energy_unit[1])
+
     @staticmethod
     def _validate_header(filename):
         '''


https://bitbucket.org/yt_analysis/yt/commits/56291fce8077/
Changeset:   56291fce8077
User:        qobilidop
Date:        2017-11-04 20:22:22+00:00
Summary:     Use `code_energy` as internal energy unit for Gadget and Gizmo

These are the only two places I see the internal energy unit set explicitly. The change will propagate to other frontends inherited from Gadget. I don't know if that is the expected setting for other frontends. The frontend authors need to be notified to check this.
Affected #:  2 files

diff -r 01b09e1d3641f382f6cf14bc7ba7feb67d57855d -r 56291fce8077a7d8d4658666baf7c98b77b5526d yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -37,7 +37,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

diff -r 01b09e1d3641f382f6cf14bc7ba7feb67d57855d -r 56291fce8077a7d8d4658666baf7c98b77b5526d yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -30,7 +30,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/52d3fb9e5130/
Changeset:   52d3fb9e5130
User:        qobilidop
Date:        2017-11-04 21:58:07+00:00
Summary:     Renaming and some formating changes

Prefer the more accurate name.
Affected #:  4 files

diff -r 56291fce8077a7d8d4658666baf7c98b77b5526d -r 52d3fb9e5130c541838688b71eaa39d31aeb2177 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -941,8 +941,8 @@
         self.unit_registry.add("code_length", 1.0, dimensions.length)
         self.unit_registry.add("code_mass", 1.0, dimensions.mass)
         self.unit_registry.add("code_density", 1.0, dimensions.density)
-        # Note that energy is actually energy per unit mass
-        self.unit_registry.add("code_energy", 1.0, dimensions.energy / dimensions.mass)
+        self.unit_registry.add("code_specific_energy", 1.0,
+                               dimensions.energy / dimensions.mass)
         self.unit_registry.add("code_time", 1.0, dimensions.time)
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
@@ -1020,12 +1020,12 @@
             self.mass_unit / (self.length_unit * (self.time_unit)**2))
         temperature_unit = getattr(self, "temperature_unit", 1.0)
         density_unit = getattr(self, "density_unit", self.mass_unit / self.length_unit**3)
-        energy_unit = getattr(self, "energy_unit", vel_unit**2)
+        specific_energy_unit = getattr(self, "specific_energy_unit", vel_unit**2)
         self.unit_registry.modify("code_velocity", vel_unit)
         self.unit_registry.modify("code_temperature", temperature_unit)
         self.unit_registry.modify("code_pressure", pressure_unit)
         self.unit_registry.modify("code_density", density_unit)
-        self.unit_registry.modify("code_energy", energy_unit)
+        self.unit_registry.modify("code_specific_energy", specific_energy_unit)
         # domain_width does not yet exist
         if (self.domain_left_edge is not None and
             self.domain_right_edge is not None):

diff -r 56291fce8077a7d8d4658666baf7c98b77b5526d -r 52d3fb9e5130c541838688b71eaa39d31aeb2177 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -337,14 +337,13 @@
             vel_unit = self.velocity_unit
         self.time_unit = self.length_unit / vel_unit
 
-        # Note that energy is actually energy per unit mass
-        if "energy" in unit_base:
-            energy_unit = unit_base["energy"]
+        if "specific_energy" in unit_base:
+            specific_energy_unit = unit_base["specific_energy"]
         else:
             # Sane default
-            energy_unit = (1, "(km/s)**2")
-        energy_unit = _fix_unit_ordering(energy_unit)
-        self.energy_unit = self.quan(energy_unit[0], energy_unit[1])
+            specific_energy_unit = (1, "(km/s) ** 2")
+        specific_energy_unit = _fix_unit_ordering(specific_energy_unit)
+        self.specific_energy_unit = self.quan(*specific_energy_unit)
 
     @staticmethod
     def _validate_header(filename):

diff -r 56291fce8077a7d8d4658666baf7c98b77b5526d -r 52d3fb9e5130c541838688b71eaa39d31aeb2177 yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -37,7 +37,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_energy", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

diff -r 56291fce8077a7d8d4658666baf7c98b77b5526d -r 52d3fb9e5130c541838688b71eaa39d31aeb2177 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -30,7 +30,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_energy", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),


https://bitbucket.org/yt_analysis/yt/commits/6d8b9586b975/
Changeset:   6d8b9586b975
User:        qobilidop
Date:        2017-11-07 17:30:55+00:00
Summary:     Update answer tests' answers
Affected #:  1 file

diff -r 52d3fb9e5130c541838688b71eaa39d31aeb2177 -r 6d8b9586b975e06721975ef38aca7574ef4daf81 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -104,7 +104,7 @@
   local_ytdata_003:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_005:
+  local_absorption_spectrum_006:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo


https://bitbucket.org/yt_analysis/yt/commits/76d8412d2fed/
Changeset:   76d8412d2fed
User:        ngoldbaum
Date:        2017-11-09 18:35:09+00:00
Summary:     Merge pull request #1611 from qobilidop/code_energy

Register code_specific_energy and correct GadgetDataset's internal energy unit
Affected #:  5 files

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r 76d8412d2fed58cb0a7ce232849519fe3177c7bb tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -104,7 +104,7 @@
   local_ytdata_003:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_005:
+  local_absorption_spectrum_006:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r 76d8412d2fed58cb0a7ce232849519fe3177c7bb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -941,6 +941,8 @@
         self.unit_registry.add("code_length", 1.0, dimensions.length)
         self.unit_registry.add("code_mass", 1.0, dimensions.mass)
         self.unit_registry.add("code_density", 1.0, dimensions.density)
+        self.unit_registry.add("code_specific_energy", 1.0,
+                               dimensions.energy / dimensions.mass)
         self.unit_registry.add("code_time", 1.0, dimensions.time)
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
@@ -1018,10 +1020,12 @@
             self.mass_unit / (self.length_unit * (self.time_unit)**2))
         temperature_unit = getattr(self, "temperature_unit", 1.0)
         density_unit = getattr(self, "density_unit", self.mass_unit / self.length_unit**3)
+        specific_energy_unit = getattr(self, "specific_energy_unit", vel_unit**2)
         self.unit_registry.modify("code_velocity", vel_unit)
         self.unit_registry.modify("code_temperature", temperature_unit)
         self.unit_registry.modify("code_pressure", pressure_unit)
         self.unit_registry.modify("code_density", density_unit)
+        self.unit_registry.modify("code_specific_energy", specific_energy_unit)
         # domain_width does not yet exist
         if (self.domain_left_edge is not None and
             self.domain_right_edge is not None):

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r 76d8412d2fed58cb0a7ce232849519fe3177c7bb yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -337,6 +337,14 @@
             vel_unit = self.velocity_unit
         self.time_unit = self.length_unit / vel_unit
 
+        if "specific_energy" in unit_base:
+            specific_energy_unit = unit_base["specific_energy"]
+        else:
+            # Sane default
+            specific_energy_unit = (1, "(km/s) ** 2")
+        specific_energy_unit = _fix_unit_ordering(specific_energy_unit)
+        self.specific_energy_unit = self.quan(*specific_energy_unit)
+
     @staticmethod
     def _validate_header(filename):
         '''

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r 76d8412d2fed58cb0a7ce232849519fe3177c7bb yt/frontends/gizmo/fields.py
--- a/yt/frontends/gizmo/fields.py
+++ b/yt/frontends/gizmo/fields.py
@@ -37,7 +37,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

diff -r eccaa23d408f0027881e21ba82f86d44b32aa013 -r 76d8412d2fed58cb0a7ce232849519fe3177c7bb yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -30,7 +30,7 @@
         ("Velocity", ("code_velocity", ["particle_velocity"], None)),
         ("Velocities", ("code_velocity", ["particle_velocity"], None)),
         ("ParticleIDs", ("", ["particle_index"], None)),
-        ("InternalEnergy", ("code_velocity ** 2", ["thermal_energy"], None)),
+        ("InternalEnergy", ("code_specific_energy", ["thermal_energy"], None)),
         ("SmoothingLength", ("code_length", ["smoothing_length"], None)),
         ("Density", ("code_mass / code_length**3", ["density"], None)),
         ("MaximumTemperature", ("K", [], None)),

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this message because the commit notification service is enabled for
the recipient of this email.



More information about the yt-svn mailing list