[yt-svn] commit/yt: 11 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 30 16:41:56 PST 2015


11 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/4ad268b6374b/
Changeset:   4ad268b6374b
Branch:      yt
User:        chummels
Date:        2015-11-11 06:45:32+00:00
Summary:     Adding transverse velocity calculation to LightRay analysis module
Affected #:  1 file

diff -r c2ce64029b634223fc234dd68037d07a19c02687 -r 4ad268b6374b1e4e456ea40a88b3de6980fd5f06 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -369,8 +369,8 @@
         all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
-            all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los', 'redshift_eff'])
+            all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
+                               'velocity_los', 'velocity_trans', 'redshift_eff'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -451,9 +451,11 @@
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
                     # line of sight velocity is reversed relative to ray
-                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
-                                                     line_of_sight).sum(axis=1)[asort])
-                    del sub_vel
+                    sub_vel_los = -1*(np.rollaxis(sub_vel, 1) * line_of_sight).sum(axis=1)
+                    sub_vel_trans = (sub_ray['velocity_magnitude']**2 - sub_vel_los**2)**0.5
+                    sub_data['velocity_los'].extend(sub_vel_los[asort])
+                    sub_data['velocity_trans'].extend(sub_vel_trans[asort])
+                    del sub_vel, sub_vel_los, sub_vel_trans
 
                 sub_ray.clear_data()
                 del sub_ray, asort
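
For reference, the velocity_trans field added here is just the Pythagorean complement of the line-of-sight component: v_trans = sqrt(|v|**2 - v_los**2). A minimal standalone numpy sketch of the same decomposition, using illustrative arrays rather than the yt LightRay machinery:

    import numpy as np

    # Hypothetical per-cell velocity vectors (cm/s) and a unit ray direction.
    velocities = np.array([[1.0e7, 2.0e7, 0.0],
                           [0.0,   5.0e6, 5.0e6]])
    line_of_sight = np.array([1.0, 0.0, 0.0])
    line_of_sight /= ((line_of_sight**2).sum())**0.5

    # Line-of-sight component; the sign is flipped to match the commit's
    # convention that velocity is reversed relative to the ray.
    vel_los = -1 * (velocities * line_of_sight).sum(axis=1)

    # Transverse component from the velocity magnitude and the LOS component.
    vel_mag = np.sqrt((velocities**2).sum(axis=1))
    vel_trans = np.sqrt(vel_mag**2 - vel_los**2)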


https://bitbucket.org/yt_analysis/yt/commits/6516e3a905ab/
Changeset:   6516e3a905ab
Branch:      yt
User:        chummels
Date:        2015-11-11 07:02:08+00:00
Summary:     Adding effect of transverse doppler redshift to LightRay analysis module.
Affected #:  1 file

diff -r 4ad268b6374b1e4e456ea40a88b3de6980fd5f06 -r 6516e3a905ab8c6e5b52972f6d88c385d76f8c88 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -475,22 +475,32 @@
             # doppler redshift.
             
             # first convert los velocities to comoving frame (ie mult. by (1+z)), 
-            # then calculate doppler redshift:
-            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+            # then calculate los doppler redshift:
+            # 1 + redshift_dopp_los = sqrt((1+v/c) / (1-v/c))
 
-            # then to add cosmological redshift and doppler redshift, follow
+            # next, convert transverse velocities to comoving frame
+            # then calculate transverse doppler redshift (from time dilation):
+            # 1 + redshift_dopp_trans = 1 / sqrt(1+v**2/c**2)
+
+            # then to add cosmological redshift and doppler redshifts, follow
             # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
+            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler_los) * (1 + z_doppler_trans)
             # Alternatively, see eqn 5.49 in Peebles for a similar result.
             if get_los_velocity:
 
                 velocity_los_cm = (1 + sub_data['redshift']) * \
                                   sub_data['velocity_los']
-                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
-                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
-                                           (1 + sub_data['redshift'])) - 1
-                del velocity_los_cm, redshift_dopp
+                velocity_trans_cm = (1 + sub_data['redshift']) * \
+                                    sub_data['velocity_trans']
+                redshift_dopp_los = ((1 + velocity_los_cm / speed_of_light_cgs) /
+                                     (1 - velocity_los_cm / speed_of_light_cgs))**0.5 - 1
+                redshift_dopp_trans = (1 / (1 - (velocity_trans_cm**2 / 
+                                                 speed_of_light_cgs**2))**0.5) - 1
+                sub_data['redshift_eff'] = ((1 + redshift_dopp_los) * \
+                                            (1 + redshift_dopp_trans) * \
+                                            (1 + sub_data['redshift'])) - 1
+                del velocity_los_cm, velocity_trans_cm, redshift_dopp_los, \
+                    redshift_dopp_trans
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
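
The comments above combine three pieces: a longitudinal Doppler shift, a transverse Doppler shift from time dilation, and the cosmological redshift (Peacock eqn 3.75). A standalone numpy sketch of the same combination, following the form actually implemented in the diff (1 - v**2/c**2 under the square root for the transverse term) with illustrative values:

    import numpy as np

    c = 2.99792458e10            # speed of light, cm/s
    z_cosmo = np.array([0.5])    # cosmological redshift of each lixel
    v_los = np.array([3.0e7])    # line-of-sight peculiar velocity, cm/s
    v_trans = np.array([1.0e7])  # transverse peculiar velocity, cm/s

    # Convert velocities to the comoving frame, as in the diff.
    v_los_cm = (1 + z_cosmo) * v_los
    v_trans_cm = (1 + z_cosmo) * v_trans

    # Longitudinal relativistic Doppler shift.
    z_dopp_los = np.sqrt((1 + v_los_cm / c) / (1 - v_los_cm / c)) - 1
    # Transverse Doppler shift from time dilation.
    z_dopp_trans = 1 / np.sqrt(1 - (v_trans_cm / c)**2) - 1

    # Combine multiplicatively with the cosmological redshift.
    z_eff = (1 + z_dopp_los) * (1 + z_dopp_trans) * (1 + z_cosmo) - 1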


https://bitbucket.org/yt_analysis/yt/commits/c2bc2d1afdfa/
Changeset:   c2bc2d1afdfa
Branch:      yt
User:        chummels
Date:        2015-11-17 02:40:26+00:00
Summary:     Making doppler velocity a product of line of sight and transverse velocities in LightRay
Affected #:  1 file

diff -r 6516e3a905ab8c6e5b52972f6d88c385d76f8c88 -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -370,7 +370,8 @@
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if get_los_velocity:
             all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
-                               'velocity_los', 'velocity_trans', 'redshift_eff'])
+                               'velocity_los', 'velocity_mag', 'theta',
+                               'redshift_eff'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -445,17 +446,22 @@
                     sub_data[field].extend(sub_ray[field][asort])
 
                 if get_los_velocity:
-                    line_of_sight = sub_segment[1] - sub_segment[0]
+                    line_of_sight = sub_segment[0] - sub_segment[1]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    # line of sight velocity is reversed relative to ray
-                    sub_vel_los = -1*(np.rollaxis(sub_vel, 1) * line_of_sight).sum(axis=1)
-                    sub_vel_trans = (sub_ray['velocity_magnitude']**2 - sub_vel_los**2)**0.5
+                    sub_vel_los = (np.rollaxis(sub_vel, 1) * \
+                                   line_of_sight).sum(axis=1)
                     sub_data['velocity_los'].extend(sub_vel_los[asort])
-                    sub_data['velocity_trans'].extend(sub_vel_trans[asort])
-                    del sub_vel, sub_vel_los, sub_vel_trans
+                    # theta is the angle between the ray vector (i.e. line of 
+                    # sight) and the velocity vectors:
+                    # a dot b = ab cos(theta)
+                    theta = np.arccos(np.dot(line_of_sight, sub_vel) / \
+                                      sub_ray['velocity_magnitude'])
+                    sub_data['theta'].extend(theta[asort])
+                    sub_data['velocity_mag'].extend(sub_ray['velocity_magnitude'][asort])
+                    del sub_vel, sub_vel_los, theta
 
                 sub_ray.clear_data()
                 del sub_ray, asort
@@ -463,7 +469,8 @@
             for key in sub_data:
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
-            # Get redshift for each lixel.  Assume linear relation between l and z.
+            # Get redshift for each lixel.  Assume linear relation between l 
+            # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                 (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
@@ -474,33 +481,30 @@
             # (redshift_eff) field by combining cosmological redshift and 
             # doppler redshift.
             
-            # first convert los velocities to comoving frame (ie mult. by (1+z)), 
-            # then calculate los doppler redshift:
-            # 1 + redshift_dopp_los = sqrt((1+v/c) / (1-v/c))
+            # first convert the velocity magnitudes to comoving frame 
+            # (ie mult. by (1+z)), then calculate the doppler redshift:
+            # 1 + redshift_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)
 
-            # next, convert transverse velocities to comoving frame
-            # then calculate transverse doppler redshift (from time dilation):
-            # 1 + redshift_dopp_trans = 1 / sqrt(1+v**2/c**2)
+            # the bulk of the doppler redshift is from line of sight motion,
+            # but there is a small amount from time dilation of transverse
+            # motion, hence the inclusion of theta (the angle between line of
+            # sight and the velocity). See:
+            # https://en.wikipedia.org/wiki/Redshift
 
             # then to add cosmological redshift and doppler redshifts, follow
             # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler_los) * (1 + z_doppler_trans)
+            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
             # Alternatively, see eqn 5.49 in Peebles for a similar result.
+
             if get_los_velocity:
-
-                velocity_los_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_los']
-                velocity_trans_cm = (1 + sub_data['redshift']) * \
-                                    sub_data['velocity_trans']
-                redshift_dopp_los = ((1 + velocity_los_cm / speed_of_light_cgs) /
-                                     (1 - velocity_los_cm / speed_of_light_cgs))**0.5 - 1
-                redshift_dopp_trans = (1 / (1 - (velocity_trans_cm**2 / 
-                                                 speed_of_light_cgs**2))**0.5) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp_los) * \
-                                            (1 + redshift_dopp_trans) * \
+                velocity_mag_cm = (1 + sub_data['redshift']) * \
+                                  sub_data['velocity_mag']
+                redshift_dopp = (1 + velocity_mag_cm * \
+                                 np.cos(sub_data['theta']) / speed_of_light_cgs) / \
+                                 np.sqrt(1 - velocity_mag_cm**2 / speed_of_light_cgs**2) - 1
+                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
                                             (1 + sub_data['redshift'])) - 1
-                del velocity_los_cm, velocity_trans_cm, redshift_dopp_los, \
-                    redshift_dopp_trans
+                del velocity_mag_cm, redshift_dopp
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()
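
This commit replaces the separate longitudinal and transverse terms with the general relativistic Doppler formula, which reduces to the longitudinal case when theta = 0 and to pure time dilation when theta = pi/2. A short numpy sketch of the rewritten redshift_eff calculation, with illustrative values rather than yt data containers:

    import numpy as np

    c = 2.99792458e10              # speed of light, cm/s
    z_cosmo = np.array([0.5])      # cosmological redshift
    v_mag = np.array([3.0e7])      # velocity magnitude, cm/s
    theta = np.array([np.pi / 4])  # angle between ray and velocity vector

    # Comoving-frame velocity magnitude, then the general Doppler shift:
    # 1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)
    v_mag_cm = (1 + z_cosmo) * v_mag
    z_dopp = (1 + v_mag_cm * np.cos(theta) / c) / \
             np.sqrt(1 - v_mag_cm**2 / c**2) - 1

    # Combine with the cosmological redshift (Peacock eqn 3.75).
    z_eff = (1 + z_dopp) * (1 + z_cosmo) - 1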


https://bitbucket.org/yt_analysis/yt/commits/07c35755da9f/
Changeset:   07c35755da9f
Branch:      yt
User:        chummels
Date:        2015-11-17 02:45:47+00:00
Summary:     Merging tip.
Affected #:  175 files

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed coding_styleguide.txt
--- /dev/null
+++ b/coding_styleguide.txt
@@ -0,0 +1,101 @@
+Style Guide for Coding in yt
+============================
+
+Coding Style Guide
+------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
+API Guide
+---------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options makes for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ds.refine_by .
+
+    - ``RefineBy `` => `` refine_by``
+    - ``TopGridRank `` => `` dimensionality``
+    - ``TopGridDimensions `` => `` domain_dimensions``
+    - ``InitialTime `` => `` current_time``
+    - ``DomainLeftEdge `` => `` domain_left_edge``
+    - ``DomainRightEdge `` => `` domain_right_edge``
+    - ``CurrentTimeIdentifier `` => `` unique_identifier``
+    - ``CosmologyCurrentRedshift `` => `` current_redshift``
+    - ``ComovingCoordinates `` => `` cosmological_simulation``
+    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
+    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
+    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!
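
To make the memory-copying rule in the new style guide concrete, here is a tiny illustrative snippet (not part of the committed file) showing the in-place forms it prefers:

    import numpy as np

    a = np.arange(12, dtype="float64")

    # Change the shape attribute in place...
    a.shape = (3, 4)       # ...rather than a = a.reshape(3, 4)

    # Multiply in place, writing the result back into a...
    np.multiply(a, 3, a)   # ...rather than a = a * 3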

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ConjoinedCapitals, methods and functions are
-   lowercase_with_underscores.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  __metaclass__ is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  if((something) and
-   (something_else)) should be rewritten as if something and something_else.
-   Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do 
-   "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "np.multiply(a, 3, a)".
- * In general, avoid all double-underscore method names: __something is usually
-   unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
-   "SpecialGrid.__init__()".
- * Doc strings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file `doc/docstring_example.txt` for a
-   fiducial example of a docstring.
-
-API Guide
----------
-
- * Do not import "*" from anything other than "yt.funcs".
- * Internally, only import from source files directly -- instead of:
-
-   from yt.visualization.api import ProjectionPlot
-
-   do:
-
-   from yt.visualization.plot_window import ProjectionPlot
-
- * Numpy is to be imported as "np", after a long time of using "na".
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in __init__ and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  def something(a,b,c)
-   should be def something(a, b, c).
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-
- * Avoid Enzo-isms.  This includes but is not limited to:
-   * Hard-coding parameter names that are the same as those in Enzo.  The
-     following translation table should be of some help.  Note that the
-     parameters are now properties on a Dataset subclass: you access them
-     like ds.refine_by .
-     * RefineBy => refine_by
-     * TopGridRank => dimensionality
-     * TopGridDimensions => domain_dimensions
-     * InitialTime => current_time
-     * DomainLeftEdge => domain_left_edge
-     * DomainRightEdge => domain_right_edge
-     * CurrentTimeIdentifier => unique_identifier
-     * CosmologyCurrentRedshift => current_redshift
-     * ComovingCoordinates => cosmological_simulation
-     * CosmologyOmegaMatterNow => omega_matter
-     * CosmologyOmegaLambdaNow => omega_lambda
-     * CosmologyHubbleConstantNow => hubble_constant
-   * Do not assume that the domain runs from 0 .. 1.  This is not true
-     everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -314,12 +314,12 @@
 echo
 echo "    export PATH=$DEST_DIR/bin:\$PATH"
 echo
-echo "and on csh-style shells"
+echo "and on csh-style shells:"
 echo
 echo "    setenv PATH $DEST_DIR/bin:\$PATH"
 echo
-echo "You can also the init file appropriate for your shell to include the same"
-echo "command."
+echo "You can also update the init file appropriate for your shell to include"
+echo "the same command."
 echo
 echo "To get started with yt, check out the orientation:"
 echo

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -59,7 +59,7 @@
   from yt.analysis_modules.halo_finding.api import *
 
   ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(ds)
+  halo_list = HaloFinder(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -501,11 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative.  It has been found that :func:`parallelHF` needs
-roughly 1 MB of memory per 5,000 particles, although recent work has improved
-this and the memory requirement is now smaller than this. But this is a good
-starting point for beginning to calculate the memory required for halo-finding.
-For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo_finding`.
 
 **Volume Rendering**
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -67,7 +67,7 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3'
+version = '3.3-dev'
 # The full version, including alpha/beta/rc tags.
 release = '3.3-dev'
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -494,80 +494,4 @@
 
 .. _code-style-guide:
 
-Code Style Guide
-----------------
-
-To keep things tidy, we try to stick with a couple simple guidelines.
-
-General Guidelines
-++++++++++++++++++
-
-* In general, follow `PEP-8 <http://www.python.org/dev/peps/pep-0008/>`_ guidelines.
-* Classes are ConjoinedCapitals, methods and functions are
-  ``lowercase_with_underscores.``
-* Use 4 spaces, not tabs, to represent indentation.
-* Line widths should not be more than 80 characters.
-* Do not use nested classes unless you have a very good reason to, such as
-  requiring a namespace or class-definition modification.  Classes should live
-  at the top level.  ``__metaclass__`` is exempt from this.
-* Do not use unnecessary parentheses in conditionals.  ``if((something) and
-  (something_else))`` should be rewritten as ``if something and
-  something_else``.  Python is more forgiving than C.
-* Avoid copying memory when possible. For example, don't do ``a =
-  a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-  ``np.multiply(a, 3, a)``.
-* In general, avoid all double-underscore method names: ``__something`` is
-  usually unnecessary.
-* Doc strings should describe input, output, behavior, and any state changes
-  that occur on an object.  See the file `doc/docstring_example.txt` for a
-  fiducial example of a docstring.
-
-API Guide
-+++++++++
-
-* Do not import "*" from anything other than ``yt.funcs``.
-* Internally, only import from source files directly; instead of: ``from
-  yt.visualization.api import SlicePlot`` do
-  ``from yt.visualization.plot_window import SlicePlot``.
-* Numpy is to be imported as ``np``.
-* Do not use too many keyword arguments.  If you have a lot of keyword
-  arguments, then you are doing too much in ``__init__`` and not enough via
-  parameter setting.
-* In function arguments, place spaces before commas.  ``def something(a,b,c)``
-  should be ``def something(a, b, c)``.
-* Don't create a new class to replicate the functionality of an old class --
-  replace the old class.  Too many options makes for a confusing user
-  experience.
-* Parameter files external to yt are a last resort.
-* The usage of the ``**kwargs`` construction should be avoided.  If they
-  cannot be avoided, they must be explained, even if they are only to be
-  passed on to a nested function.
-* Constructor APIs should be kept as *simple* as possible.
-* Variable names should be short but descriptive.
-* No global variables!
-
-Variable Names and Enzo-isms
-++++++++++++++++++++++++++++
-
-* Avoid Enzo-isms.  This includes but is not limited to:
-
-  + Hard-coding parameter names that are the same as those in Enzo.  The
-    following translation table should be of some help.  Note that the
-    parameters are now properties on a Dataset subclass: you access them
-    like ``ds.refine_by`` .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
-
-  + Do not assume that the domain runs from 0 to 1.  This is not true
-    for many codes and datasets.
+.. include:: ../../../coding_styleguide.txt

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed doc/source/visualizing/unstructured_mesh_rendering.rst
--- a/doc/source/visualizing/unstructured_mesh_rendering.rst
+++ b/doc/source/visualizing/unstructured_mesh_rendering.rst
@@ -6,10 +6,10 @@
 Beginning with version 3.3, yt has the ability to volume render unstructured
 meshes from, for example, finite element calculations. In order to use this
 capability, a few additional dependencies are required beyond those you get
-when you run the install script. First, `embree <https://embree.github.io>`
+when you run the install script. First, `embree <https://embree.github.io>`_
 (a fast software ray-tracing library from Intel) must be installed, either
 by compiling from source or by using one of the pre-built binaries available
-at Embree's `downloads <https://embree.github.io/downloads.html>` page. Once
+at Embree's `downloads <https://embree.github.io/downloads.html>`_ page. Once
 Embree is installed, you must also create a symlink next to the library. For
 example, if the libraries were installed at /usr/local/lib/, you must do
 
@@ -18,7 +18,7 @@
     sudo ln -s /usr/local/lib/libembree.2.6.1.dylib /usr/local/lib/libembree.so
 
 Second, the python bindings for embree (called 
-`pyembree <https://github.com/scopatz/pyembree>`) must also be installed. To
+`pyembree <https://github.com/scopatz/pyembree>`_) must also be installed. To
 do so, first obtain a copy, by .e.g. cloning the repo:
 
 .. code-block:: bash

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed scripts/pr_backport.py
--- a/scripts/pr_backport.py
+++ b/scripts/pr_backport.py
@@ -23,7 +23,12 @@
     with hglib.open(dest_repo_path) as client:
         # Changesets that are on the yt branch but aren't topological ancestors
         # of whichever changeset the experimental bookmark is pointing at
-        client.update('heads(branch(yt) - ::bookmark(experimental))')
+        bookmarks, _ = client.bookmarks()
+        bookmark_names = [b[0] for b in bookmarks]
+        if 'experimental' in bookmark_names:
+            client.update('heads(branch(yt) - ::bookmark(experimental))')
+        else:
+            client.update('heads(branch(yt))')
     return dest_repo_path
 
 
@@ -51,9 +56,13 @@
 def get_branch_tip(repo_path, branch, exclude=None):
     """Returns the SHA1 hash of the most recent commit on the given branch"""
     revset = "head() and branch(%s)" % branch
-    if exclude is not None:
-        revset += "and not %s" % exclude
     with hglib.open(repo_path) as client:
+        if exclude is not None:
+            try:
+                client.log(exclude)
+                revset += "and not %s" % exclude
+            except hglib.error.CommandError:
+                pass
         change = client.log(revset)[0][1][:12]
     return change
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,7 +9,11 @@
 with-xunit=1
 
 [flake8]
-# if we include api.py files, we get tons of spurious "imported but unused" errors
-exclude = */api.py,*/__config__.py,yt/visualization/_mpl_imports.py
+# we exclude:
+#      api.py and __init__.py files to avoid spurious unused import errors
+#      _mpl_imports.py for the same reason
+#      autogenerated __config__.py files
+#      vendored libraries
+exclude = */api.py,*/__init__.py,*/__config__.py,yt/visualization/_mpl_imports.py,yt/utilities/lodgeit.py,yt/utilities/poster/*,yt/extern/*,yt/mods.py
 max-line-length=999
-ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E302,E303,E401,E502,E701,E703,W291,W293,W391
\ No newline at end of file
+ignore = E111,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E211,E221,E222,E227,E228,E241,E301,E203,E225,E226,E231,E251,E261,E262,E265,E266,E302,E303,E402,E502,E701,E703,E731,W291,W293,W391,W503
\ No newline at end of file

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/README
--- /dev/null
+++ b/tests/README
@@ -0,0 +1,3 @@
+This directory contains two tiny enzo cosmological datasets. 
+
+They were added a long time ago and are provided for testing purposes.
\ No newline at end of file

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.boolean_region_tests import \
-    TestBooleanANDGridQuantity, TestBooleanORGridQuantity, \
-    TestBooleanNOTGridQuantity, TestBooleanANDParticleQuantity, \
-    TestBooleanORParticleQuantity, TestBooleanNOTParticleQuantity
-
-create_test(TestBooleanANDGridQuantity, "BooleanANDGrid")
-
-create_test(TestBooleanORGridQuantity, "BooleanORGrid")
-
-create_test(TestBooleanNOTGridQuantity, "BooleanNOTGrid")
-
-create_test(TestBooleanANDParticleQuantity, "BooleanANDParticle")
-
-create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
-
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# We want to test several things.  We need to be able to run the
-
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
-    "z-velocity",
-    # Now some derived fields
-    "Pressure", "SoundSpeed", "particle_density", "Entropy",
-    # Ghost zones
-    "AveragedDensity", "DivV"]
-
-particle_field_list = ["particle_position_x", "ParticleMassMsun"]

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/halos.py
--- a/tests/halos.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
-
-create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
-
-create_test(TestHaloCountFOF, "halo_count_FOF", link=0.2, padding=0.02)
-
-create_test(TestHaloCountPHOP, "halo_count_PHOP", threshold=80.0)

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class HierarchyInconsistent(RegressionTestException):
-    pass
-
-
-class HierarchyConsistency(YTDatasetTest):
-    name = "index_consistency"
-
-    def run(self):
-        self.result = \
-            all(g in ensure_list(c.Parent) for g in self.ds.index.grids
-                                            for c in g.Children)
-
-    def compare(self, old_result):
-        if not(old_result and self.result): raise HierarchyInconsistent()
-
-
-class GridLocationsProperties(YTDatasetTest):
-    name = "level_consistency"
-
-    def run(self):
-        self.result = dict(grid_left_edge=self.ds.grid_left_edge,
-                           grid_right_edge=self.ds.grid_right_edge,
-                           grid_levels=self.ds.grid_levels,
-                           grid_particle_count=self.ds.grid_particle_count,
-                           grid_dimensions=self.ds.grid_dimensions)
-
-    def compare(self, old_result):
-        # We allow now difference between these values
-        self.compare_data_arrays(self.result, old_result, 0.0)
-
-
-class GridRelationshipsChanged(RegressionTestException):
-    pass
-
-
-class GridRelationships(YTDatasetTest):
-
-    name = "grid_relationships"
-
-    def run(self):
-        self.result = [[p.id for p in ensure_list(g.Parent) \
-            if g.Parent is not None]
-            for g in self.ds.index.grids]
-
-    def compare(self, old_result):
-        if len(old_result) != len(self.result):
-            raise GridRelationshipsChanged()
-        for plist1, plist2 in zip(old_result, self.result):
-            if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
-                raise GridRelationshipsChanged()
-
-
-class GridGlobalIndices(YTDatasetTest):
-    name = "global_startindex"
-
-    def run(self):
-        self.result = na.array([g.get_global_startindex()
-                                for g in self.ds.index.grids])
-
-    def compare(self, old_result):
-        self.compare_array_delta(old_result, self.result, 0.0)

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/object_field_values.py
--- a/tests/object_field_values.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import hashlib
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException, create_test
-from yt.funcs import ensure_list, iterable
-from fields_to_test import field_list, particle_field_list
-
-
-class FieldHashesDontMatch(RegressionTestException):
-    pass
-
-known_objects = {}
-
-
-def register_object(func):
-    known_objects[func.func_name] = func
-    return func
-
-
-@register_object
-def centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center, width / 0.25)
-
-
-@register_object
-def off_centered_sphere(tobj):
-    center = 0.5 * (tobj.ds.domain_right_edge + tobj.ds.domain_left_edge)
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(center - 0.25 * width, width / 0.25)
-
-
-@register_object
-def corner_sphere(tobj):
-    width = (tobj.ds.domain_right_edge - tobj.ds.domain_left_edge).max()
-    tobj.data_object = tobj.ds.sphere(tobj.ds.domain_left_edge, width / 0.25)
-
-
-@register_object
-def disk(self):
-    center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-    radius = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    height = (self.ds.domain_right_edge - self.ds.domain_left_edge).max() / 10.
-    normal = na.array([1.] * 3)
-    self.data_object = self.ds.disk(center, normal, radius, height)
-
-
-@register_object
-def all_data(self):
-    self.data_object = self.ds.all_data()
-
-_new_known_objects = {}
-for field in ["Density"]:  # field_list:
-    for object_name in known_objects:
-
-        def _rfunc(oname, fname):
-
-            def func(tobj):
-                known_objects[oname](tobj)
-                tobj.orig_data_object = tobj.data_object
-                avg_value = tobj.orig_data_object.quantities[
-                        "WeightedAverageQuantity"](fname, "Density")
-                tobj.data_object = tobj.orig_data_object.cut_region(
-                        ["grid['%s'] > %s" % (fname, avg_value)])
-            return func
-        _new_known_objects["%s_cut_region_%s" % (object_name, field)] = \
-                _rfunc(object_name, field)
-known_objects.update(_new_known_objects)
-
-
-class YTFieldValuesTest(YTDatasetTest):
-
-    def run(self):
-        vals = self.data_object[self.field].copy()
-        vals.sort()
-        self.result = hashlib.sha256(vals.tostring()).hexdigest()
-
-    def compare(self, old_result):
-        if self.result != old_result: raise FieldHashesDontMatch
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-
-class YTExtractIsocontoursTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        rset = self.data_object.extract_isocontours("Density",
-            val, rescale=False, sample_values="Temperature")
-        self.result = rset
-
-    def compare(self, old_result):
-        if self.result[0].size == 0 and old_result[0].size == 0:
-            return True
-        self.compare_array_delta(self.result[0].ravel(),
-                                 old_result[0].ravel(), 1e-7)
-        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
-
-
-class YTIsocontourFluxTest(YTFieldValuesTest):
-
-    def run(self):
-        val = self.data_object.quantities["WeightedAverageQuantity"](
-            "Density", "Density")
-        flux = self.data_object.calculate_isocontour_flux(
-           "Density", val, "x-velocity", "y-velocity", "z-velocity")
-        self.result = flux
-
-    def compare(self, old_result):
-        self.compare_value_delta(self.result, old_result, 1e-7)
-
-for object_name in known_objects:
-    for field in field_list + particle_field_list:
-        if "cut_region" in object_name and field in particle_field_list:
-            continue
-        create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field=field, object_name=object_name)
-    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name=object_name)
-    create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name=object_name)
-
-
-class YTDerivedQuantityTest(YTDatasetTest):
-
-    def setup(self):
-        YTDatasetTest.setup(self)
-        known_objects[self.object_name](self)
-
-    def compare(self, old_result):
-        if hasattr(self.result, 'tostring'):
-            self.compare_array_delta(self.result, old_result, 1e-7)
-            return
-        elif iterable(self.result):
-            a1 = na.array(self.result)
-            a2 = na.array(old_result)
-            self.compare_array_delta(a1, a2, 1e-7)
-        else:
-            if self.result != old_result: raise FieldHashesDontMatch
-
-    def run(self):
-        # This only works if it takes no arguments
-        self.result = self.data_object.quantities[self.dq_name]()
-
-dq_names = ["TotalMass", "AngularMomentumVector", "CenterOfMass",
-            "BulkVelocity", "BaryonSpinParameter", "ParticleSpinParameter"]
-
-# Extrema, WeightedAverageQuantity, TotalQuantity, MaxLocation,
-# MinLocation
-
-for object_name in known_objects:
-    for dq in dq_names:
-        # Some special exceptions
-        if "cut_region" in object_name and (
-            "SpinParameter" in dq or
-            "TotalMass" in dq):
-            continue
-        create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name=dq, object_name=object_name)
-
-
-class YTDerivedQuantityTestField(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities[self.dq_name](
-            self.field_name)
-
-for object_name in known_objects:
-    for field in field_list:
-        for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
-            create_test(YTDerivedQuantityTestField,
-                        "%s_%s" % (object_name, field),
-                        field_name=field, dq_name=dq,
-                        object_name=object_name)
-
-
-class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
-
-    def run(self):
-        self.result = self.data_object.quantities["WeightedAverageQuantity"](
-            self.field_name, weight="CellMassMsun")
-
-for object_name in known_objects:
-    for field in field_list:
-        create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
-                    "%s_%s" % (object_name, field),
-                    field_name=field,
-                    object_name=object_name)

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/projections.py
--- a/tests/projections.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from yt.utilities.answer_testing.output_tests import \
-    SingleOutputTest, create_test
-from yt.utilities.answer_testing.hydro_tests import \
-    TestProjection, TestOffAxisProjection, TestSlice, \
-    TestRay, TestGasDistribution, Test2DGasDistribution
-
-from fields_to_test import field_list
-
-for field in field_list:
-    create_test(TestRay, "%s" % field, field=field)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestSlice, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-
-for axis in range(3):
-    for field in field_list:
-        create_test(TestProjection, "%s_%s" % (axis, field),
-                    field=field, axis=axis)
-        create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field=field, axis=axis)
-    create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field=field, axis=axis, weight_field="Density")
-
-for field in field_list:
-    if field != "Density":
-        create_test(TestGasDistribution, "density_%s" % field,
-                    field_x="Density", field_y=field)
-    if field not in ("x-velocity", "Density"):
-        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                    field_x="Density", field_y="x-velocity", field_z=field,
-                    weight="CellMassMsun")

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/runall.py
--- a/tests/runall.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import matplotlib
-matplotlib.use('Agg')
-from yt.config import ytcfg
-ytcfg["yt", "loglevel"] = "50"
-ytcfg["yt", "serialize"] = "False"
-
-from yt.utilities.answer_testing.api import \
-    RegressionTestRunner, clear_registry, create_test, \
-    TestFieldStatistics, TestAllProjections, registry_entries, \
-    Xunit
-from yt.utilities.command_line import get_yt_version
-
-from yt.mods import *
-import fnmatch
-import imp
-import optparse
-import itertools
-import time
-
-#
-# We assume all tests are to be run, unless explicitly given the name of a
-# single test or something that can be run through fnmatch.
-#
-# Keep in mind that we use a different nomenclature here than is used in the
-# Enzo testing system.  Our 'tests' are actually tests that are small and that
-# run relatively quickly on a single dataset; in Enzo's system, a 'test'
-# encompasses both the creation and the examination of data.  Here we assume
-# the data is kept constant.
-#
-
-cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
-
-
-def load_tests(iname, idir):
-    f, filename, desc = imp.find_module(iname, [idir])
-    tmod = imp.load_module(iname, f, filename, desc)
-    return tmod
-
-
-def find_and_initialize_tests():
-    mapping = {}
-    for f in glob.glob(os.path.join(cwd, "*.py")):
-        clear_registry()
-        iname = os.path.basename(f[:-3])
-        try:
-            load_tests(iname, cwd)
-            mapping[iname] = registry_entries()
-            #print "Associating %s with" % (iname)
-            #print "\n    ".join(registry_entries())
-        except ImportError:
-            pass
-    return mapping
-
-if __name__ == "__main__":
-    clear_registry()
-    mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
-    try:
-        my_hash = get_yt_version()
-    except:
-        my_hash = "UNKNOWN%s" % (time.time())
-    parser = optparse.OptionParser()
-    parser.add_option("-f", "--parameter-file", dest="parameter_file",
-        default=os.path.join(cwd, "DD0010/moving7_0010"),
-        help="The parameter file value to feed to 'load' to test against")
-    parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-        default=False, help="List all tests and then exit")
-    parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-        help="The test name pattern to match.  Can include wildcards.")
-    parser.add_option("-o", "--output", dest="storage_dir",
-        default=test_storage_directory,
-        help="Base directory for storing test output.")
-    parser.add_option("-c", "--compare", dest="compare_name",
-        default=None,
-        help="The name against which we will compare")
-    parser.add_option("-n", "--name", dest="this_name",
-        default=my_hash,
-        help="The name we'll call this set of tests")
-    opts, args = parser.parse_args()
-
-    if opts.list_tests:
-        tests_to_run = []
-        for m, vals in mapping.items():
-            new_tests = fnmatch.filter(vals, opts.test_pattern)
-            if len(new_tests) == 0: continue
-            load_tests(m, cwd)
-            keys = set(registry_entries())
-            tests_to_run += [t for t in new_tests if t in keys]
-        tests = list(set(tests_to_run))
-        print ("\n    ".join(tests))
-        sys.exit(0)
-
-    # Load the test ds and make sure it's good.
-    ds = load(opts.parameter_file)
-    if ds is None:
-        print "Couldn't load the specified parameter file."
-        sys.exit(1)
-
-    # Now we modify our compare name and self name to include the ds.
-    compare_id = opts.compare_name
-    watcher = None
-    if compare_id is not None:
-        compare_id += "_%s_%s" % (ds, ds._hash())
-        watcher = Xunit()
-    this_id = opts.this_name + "_%s_%s" % (ds, ds._hash())
-
-    rtr = RegressionTestRunner(this_id, compare_id,
-                               results_path=opts.storage_dir,
-                               compare_results_path=opts.storage_dir,
-                               io_log=[opts.parameter_file])
-
-    rtr.watcher = watcher
-    tests_to_run = []
-    for m, vals in mapping.items():
-        new_tests = fnmatch.filter(vals, opts.test_pattern)
-
-        if len(new_tests) == 0: continue
-        load_tests(m, cwd)
-        keys = set(registry_entries())
-        tests_to_run += [t for t in new_tests if t in keys]
-    for test_name in sorted(tests_to_run):
-        print "RUNNING TEST", test_name
-        rtr.run_test(test_name)
-    if watcher is not None:
-        rtr.watcher.report()
-    failures = 0
-    passes = 1
-    for test_name, result in sorted(rtr.passed_tests.items()):
-        if not result:
-            print "TEST %s: %s" % (test_name, result)
-            print "    %s" % rtr.test_messages[test_name]
-        if result: passes += 1
-        else: failures += 1
-    print "Number of passes  : %s" % passes
-    print "Number of failures: %s" % failures

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from yt.mods import *
-import numpy as na
-
-from yt.utilities.answer_testing.output_tests import \
-    YTDatasetTest, RegressionTestException
-from yt.funcs import ensure_list
-
-
-class VolumeRenderingInconsistent(RegressionTestException):
-    pass
-
-
-class VolumeRenderingConsistency(YTDatasetTest):
-    name = "volume_rendering_consistency"
-
-    def run(self):
-        c = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.
-        W = na.sqrt(3.) * (self.ds.domain_right_edge - \
-            self.ds.domain_left_edge)
-        N = 512
-        n_contours = 5
-        cmap = 'algae'
-        field = 'Density'
-        mi, ma = self.ds.all_data().quantities['Extrema'](field)[0]
-        mi, ma = na.log10(mi), na.log10(ma)
-        contour_width = (ma - mi) / 100.
-        L = na.array([1.] * 3)
-        tf = ColorTransferFunction((mi - 2, ma + 2))
-        tf.add_layers(n_contours, w=contour_width,
-                      col_bounds=(mi * 1.001, ma * 0.999),
-                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
-        cam = self.ds.camera(c, L, W, (N, N), transfer_function=tf,
-            no_ghost=True)
-        image = cam.snapshot()
-        # image = cam.snapshot('test_rendering_%s.png'%field)
-        self.result = image
-
-    def compare(self, old_result):
-        # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax(na.abs(self.result - old_result) /
-                                 (self.result + old_result))
-        if delta > 1e-9: raise VolumeRenderingInconsistent()

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -121,7 +121,6 @@
     derived_field
 
 from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     DatasetSeries, ImageArray, \
     particle_filter, add_particle_filter, \
     create_profile, Profile1D, Profile2D, Profile3D, \

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -116,7 +116,8 @@
 
     def make_spectrum(self, input_file, output_file="spectrum.h5",
                       line_list_file="lines.txt",
-                      use_peculiar_velocity=True, njobs="auto"):
+                      use_peculiar_velocity=True, 
+                      subgrid_resolution=10, njobs="auto"):
         """
         Make spectrum from ray data using the line list.
 
@@ -141,6 +142,17 @@
         use_peculiar_velocity : optional, bool
            if True, include line of sight velocity for shifting lines.
            Default: True
+        subgrid_resolution : optional, int
+           When a line is being added that is unresolved (ie its thermal
+           width is less than the spectral bin width), the voigt profile of
+           the line is deposited into an array of virtual bins at higher
+           resolution.  The optical depth from these virtual bins is integrated
+           and then added to the coarser spectral bin.  The subgrid_resolution
+           value determines the ratio between the thermal width and the 
+           bin width of the virtual bins.  Increasing this value yields smaller
+           virtual bins, which increases accuracy, but is more expensive.
+           A value of 10 yields accuracy to the 4th significant digit.
+           Default: 10
         njobs : optional, int or "auto"
            the number of process groups into which the loop over
            absorption lines will be divided.  If set to -1, each
@@ -182,7 +194,9 @@
             njobs = min(comm.size, len(self.line_list))
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
-                                    line_list_file is not None, njobs=njobs)
+                                    line_list_file is not None, 
+                                    subgrid_resolution=subgrid_resolution,
+                                    njobs=njobs)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
         self.flux_field = np.exp(-self.tau_field)
@@ -204,7 +218,7 @@
         Add continuum features to the spectrum.
         """
         # Only add continuum features down to tau of 1.e-4.
-        tau_min = 1.e-4
+        min_tau = 1.e-3
 
         for continuum in self.continuum_list:
             column_density = field_data[continuum['field_name']] * field_data['dl']
@@ -216,12 +230,12 @@
             this_wavelength = delta_lambda + continuum['wavelength']
             right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
             left_index = np.digitize((this_wavelength *
-                                     np.power((tau_min * continuum['normalization'] /
+                                     np.power((min_tau * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
             valid_continuua = np.where(((column_density /
-                                         continuum['normalization']) > tau_min) &
+                                         continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
@@ -235,98 +249,155 @@
             pbar.finish()
 
     def _add_lines_to_spectrum(self, field_data, use_peculiar_velocity,
-                               save_line_list, njobs=-1):
+                               save_line_list, subgrid_resolution=10, njobs=-1):
         """
         Add the absorption lines to the spectrum.
         """
-        # Only make voigt profile for slice of spectrum that is 10 times the line width.
-        spectrum_bin_ratio = 5
-        # Widen wavelength window until optical depth reaches a max value at the ends.
-        max_tau = 0.001
+        # Widen wavelength window until optical depth falls below this tau 
+        # value at the ends to assure that the wings of a line have been 
+        # fully resolved.
+        min_tau = 1e-3
 
+        # step through each ionic transition (e.g. HI, HII, MgII) specified
+        # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+
             # redshift_eff field combines cosmological and velocity redshifts
+            # so delta_lambda gives the offset in angstroms from the rest frame
+            # wavelength to the observed wavelength of the transition 
             if use_peculiar_velocity:
                 delta_lambda = line['wavelength'] * field_data['redshift_eff']
             else:
                 delta_lambda = line['wavelength'] * field_data['redshift']
+            # lambda_obs is central wavelength of line after redshift
+            lambda_obs = line['wavelength'] + delta_lambda
+            # bin index in lambda_bins of central wavelength of line after z
+            center_index = np.digitize(lambda_obs, self.lambda_bins)
+
+            # thermal broadening b parameter
             thermal_b =  np.sqrt((2 * boltzmann_constant_cgs *
                                   field_data['temperature']) /
                                   line['atomic_mass'])
-            center_bins = np.digitize((delta_lambda + line['wavelength']),
-                                      self.lambda_bins)
 
-            # ratio of line width to bin width
-            width_ratio = ((line['wavelength'] + delta_lambda) * \
-                           thermal_b / speed_of_light_cgs / self.bin_width).in_units("").d
+            # the actual thermal width of the lines
+            thermal_width = (lambda_obs * thermal_b / 
+                             speed_of_light_cgs).convert_to_units("angstrom")
 
-            if (width_ratio < 1.0).any():
-                mylog.warn(("%d out of %d line components are unresolved, " +
-                            "consider increasing spectral resolution.") %
-                           ((width_ratio < 1.0).sum(), width_ratio.size))
+            # Sanitize units for faster runtime of the tau_profile machinery.
+            lambda_0 = line['wavelength'].d  # line's rest frame; angstroms
+            lambda_1 = lambda_obs.d # line's observed frame; angstroms
+            cdens = column_density.in_units("cm**-2").d # cm**-2
+            thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
+            dlambda = delta_lambda.d  # lambda offset; angstroms
+            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
 
-            # do voigt profiles for a subset of the full spectrum
-            left_index  = (center_bins -
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
-            right_index = (center_bins +
-                           spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
+            # When we actually deposit the voigt profile, sometimes we will
+            # have underresolved lines (ie lines with smaller widths than
+            # the spectral bin size).  Here, we create virtual bins small
+            # enough in width to well resolve each line, deposit the voigt 
+            # profile into them, then numerically integrate their tau values
+            # and sum them to redeposit them into the actual spectral bins.
 
-            # loop over all lines wider than the bin width
-            valid_lines = np.where((width_ratio >= 1.0) &
-                                   (right_index - left_index > 1))[0]
-            pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
-                            valid_lines.size)
+            # virtual bins (vbins) will be:
+            # 1) <= the bin_width; assures at least as good as spectral bins
+            # 2) <= 1/10th the thermal width; assures resolving voigt profiles
+            #   (actually 1/subgrid_resolution value, default is 1/10)
+            # 3) the bin width will be an integer power of 10 times the
+            #    vbin_width; this will assure we don't get spikes in the
+            #    deposited spectra from uneven numbers of vbins per bin
+            resolution = thermal_width / self.bin_width 
+            vbin_width = self.bin_width / \
+                         10**(np.ceil(np.log10(subgrid_resolution/resolution)).clip(0, np.inf))
+            vbin_width = vbin_width.in_units('angstrom').d
 
-            # Sanitize units here
-            column_density.convert_to_units("cm ** -2")
-            lbins = self.lambda_bins.d  # Angstroms
-            lambda_0 = line['wavelength'].d  # Angstroms
-            v_doppler = thermal_b.in_cgs().d  # cm / s
-            cdens = column_density.d
-            dlambda = delta_lambda.d  # Angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d
+            # the virtual window into which the line is deposited initially 
+            # spans a region of 5 thermal_widths, but this may expand
+            n_vbins = np.ceil(5*thermal_width.d/vbin_width)
+            vbin_window_width = n_vbins*vbin_width
 
-            for i, lixel in parallel_objects(enumerate(valid_lines), njobs=-1):
-                my_bin_ratio = spectrum_bin_ratio
+            if (thermal_width < self.bin_width).any():
+                mylog.info(("%d out of %d line components will be " + \
+                            "deposited as unresolved lines.") %
+                           ((thermal_width < self.bin_width).sum(), 
+                            thermal_width.size))
+
+            valid_lines = np.arange(len(thermal_width))
+            pbar = get_pbar("Adding line - %s [%f A]: " % \
+                            (line['label'], line['wavelength']),
+                            thermal_width.size)
+
+            # for a given transition, step through each location in the 
+            # observed spectrum where it occurs and deposit a voigt profile
+            for i in parallel_objects(valid_lines, njobs=-1):
+                my_vbin_window_width = vbin_window_width[i]
+                my_n_vbins = n_vbins[i]
+                my_vbin_width = vbin_width[i]
 
                 while True:
-                    lambda_bins, line_tau = \
+                    vbins = \
+                        np.linspace(lambda_1[i]-my_vbin_window_width/2.,
+                                    lambda_1[i]+my_vbin_window_width/2., 
+                                    my_n_vbins, endpoint=False)
+
+                    vbins, vtau = \
                         tau_profile(
-                            lambda_0, line['f_value'], line['gamma'], v_doppler[lixel],
-                            cdens[lixel], delta_lambda=dlambda[lixel],
-                            lambda_bins=lbins[left_index[lixel]:right_index[lixel]])
+                            lambda_0, line['f_value'], line['gamma'], thermb[i],
+                            cdens[i], delta_lambda=dlambda[i],
+                            lambda_bins=vbins)
 
-                    # Widen wavelength window until optical depth reaches a max value at the ends.
-                    if (line_tau[0] < max_tau and line_tau[-1] < max_tau) or \
-                      (left_index[lixel] <= 0 and right_index[lixel] >= self.n_lambda):
+                    # If tau has not dropped below min tau threshold by the
+                    # edges (ie the wings), then widen the wavelength 
+                    # window and repeat process. 
+                    if (vtau[0] < min_tau and vtau[-1] < min_tau):
                         break
-                    my_bin_ratio *= 2
-                    left_index[lixel]  = (center_bins[lixel] -
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
-                    right_index[lixel] = (center_bins[lixel] +
-                                          my_bin_ratio *
-                                          width_ratio[lixel]).astype(int).clip(0, self.n_lambda)
+                    my_vbin_window_width *= 2
+                    my_n_vbins *= 2
 
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                # identify the extrema of the vbin_window so as to speed
+                # up searching over the entire lambda_bins array
+                bins_from_center = np.ceil((my_vbin_window_width/2.) / \
+                                           self.bin_width.d) + 1
+                left_index = (center_index[i] - bins_from_center).clip(0, self.n_lambda)
+                right_index = (center_index[i] + bins_from_center).clip(0, self.n_lambda)
+                window_width = right_index - left_index
+
+                # run digitize to identify which vbins are deposited into which
+                # global lambda bins.
+                # shift global lambda bins over by half a bin width; 
+                # this has the effect of assuring np.digitize will place 
+                # the vbins in the closest bin center.
+                binned = np.digitize(vbins, 
+                                     self.lambda_bins[left_index:right_index] \
+                                     + (0.5 * self.bin_width))
+
+                # numerically integrate the virtual bins to calculate a
+                # virtual equivalent width; then sum the virtual equivalent
+                # widths and deposit into each spectral bin
+                vEW = vtau * my_vbin_width
+                EW = [vEW[binned == j].sum() for j in np.arange(window_width)]
+                EW = np.array(EW)/self.bin_width.d
+                self.tau_field[left_index:right_index] += EW
+
                 if save_line_list and line['label_threshold'] is not None and \
-                        cdens[lixel] >= line['label_threshold']:
+                        cdens[i] >= line['label_threshold']:
                     if use_peculiar_velocity:
-                        peculiar_velocity = vlos[lixel]
+                        peculiar_velocity = vlos[i]
                     else:
                         peculiar_velocity = 0.0
                     self.spectrum_line_list.append({'label': line['label'],
-                                                    'wavelength': (lambda_0 + dlambda[lixel]),
-                                                    'column_density': column_density[lixel],
-                                                    'b_thermal': thermal_b[lixel],
-                                                    'redshift': field_data['redshift'][lixel],
+                                                    'wavelength': (lambda_0 + dlambda[i]),
+                                                    'column_density': column_density[i],
+                                                    'b_thermal': thermal_b[i],
+                                                    'redshift': field_data['redshift'][i],
                                                     'v_pec': peculiar_velocity})
                 pbar.update(i)
             pbar.finish()
 
-            del column_density, delta_lambda, thermal_b, \
-                center_bins, width_ratio, left_index, right_index
+            del column_density, delta_lambda, lambda_obs, center_index, \
+                thermal_b, thermal_width, lambda_1, cdens, thermb, dlambda, \
+                vlos, resolution, vbin_width, n_vbins, vbin_window_width, \
+                valid_lines, vbins, vtau, vEW
 
         comm = _get_comm(())
         self.tau_field = comm.mpi_allreduce(self.tau_field, op="sum")

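The comments in the hunk above describe the new deposition scheme: each line's optical depth is computed on fine "virtual" bins and then redeposited onto the coarse spectral bins so that equivalent width is conserved even for unresolved lines. A minimal numpy-only sketch of that idea follows; the Gaussian profile, grid limits, and constants are illustrative stand-ins, not the yt implementation.

import numpy as np

kboltz = 1.380649e-16        # erg / K
c_cgs = 2.99792458e10        # cm / s
m_H = 1.6735575e-24          # g

lambda_0 = 1215.67           # rest wavelength, angstrom
temperature = 1.0e4          # K
thermal_b = np.sqrt(2 * kboltz * temperature / m_H)      # cm / s
thermal_width = lambda_0 * thermal_b / c_cgs             # angstrom (~0.05 A)

# coarse spectral grid with bins wider than the line
bin_width = 0.1
lambda_bins = np.arange(1210.0, 1220.0, bin_width)
tau_field = np.zeros_like(lambda_bins)

# virtual bin width: bin_width divided by a power of 10, chosen so the
# thermal width spans at least `subgrid_resolution` virtual bins
subgrid_resolution = 10
resolution = thermal_width / bin_width
vbin_width = bin_width / 10**np.ceil(np.log10(subgrid_resolution / resolution))

# deposit a toy (Gaussian) tau profile onto the virtual bins
n_vbins = int(np.ceil(10 * thermal_width / vbin_width))
vbins = lambda_0 + (np.arange(n_vbins) - n_vbins // 2) * vbin_width
vtau = 5.0 * np.exp(-0.5 * ((vbins - lambda_0) / thermal_width)**2)

# integrate over virtual bins and redeposit into the nearest coarse bin,
# conserving the equivalent width (sum of tau * d_lambda)
binned = np.digitize(vbins, lambda_bins + 0.5 * bin_width)
for j in range(len(lambda_bins)):
    tau_field[j] += vtau[binned == j].sum() * vbin_width / bin_width

print((vtau * vbin_width).sum(), (tau_field * bin_width).sum())  # equal

Because the redeposit divides by bin_width, the summed tau times bin_width on the coarse grid matches the integral over the virtual bins, which is what the new equivalent-width test below checks.
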
diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -281,8 +281,6 @@
         errSq=sum(dif**2)
 
         if any(linesP[:,1]==speciesDict['init_b']):
-         #   linesP = prevLinesP
-
             flag = True
             break
             

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -12,24 +12,33 @@
 
 import numpy as np
 from yt.testing import \
-    assert_allclose_units, requires_file, requires_module
+    assert_allclose_units, requires_file, requires_module, \
+    assert_almost_equal, assert_array_almost_equal
 from yt.analysis_modules.absorption_spectrum.absorption_line import \
     voigt_old, voigt_scipy
 from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 from yt.analysis_modules.cosmological_observation.api import LightRay
+from yt.config import ytcfg
 import tempfile
 import os
 import shutil
+from yt.utilities.on_demand_imports import \
+    _h5py as h5
+
+test_dir = ytcfg.get("yt", "test_data_dir")
 
 COSMO_PLUS = "enzo_cosmology_plus/AMRCosmology.enzo"
-
+COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
+HI_SPECTRUM_COSMO = "absorption_spectrum_data/enzo_lyman_alpha_cosmo_spec.h5"
+HI_SPECTRUM_COSMO_FILE = os.path.join(test_dir, HI_SPECTRUM_COSMO)
+HI_SPECTRUM = "absorption_spectrum_data/enzo_lyman_alpha_spec.h5"
+HI_SPECTRUM_FILE = os.path.join(test_dir, HI_SPECTRUM)
 
 @requires_file(COSMO_PLUS)
-def test_absorption_spectrum():
+@requires_file(HI_SPECTRUM_COSMO)
+def test_absorption_spectrum_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets
+    This test generates an absorption spectrum from a cosmological light ray
     """
 
     # Set up in a temp dir
@@ -37,7 +46,7 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.03)
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
@@ -65,22 +74,30 @@
     sp.add_continuum(my_label, field, wavelength, normalization, index)
 
     wavelength, flux = sp.make_spectrum('lightray.h5',
-                                        output_file='spectrum.txt',
+                                        output_file='spectrum.h5',
                                         line_list_file='lines.txt',
                                         use_peculiar_velocity=True)
 
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_COSMO_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
 
-
-@requires_file(COSMO_PLUS)
-@requires_module("astropy")
-def test_absorption_spectrum_fits():
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_file(HI_SPECTRUM)
+def test_absorption_spectrum_non_cosmo():
     """
-    This test is simply following the description in the docs for how to
-    generate an absorption spectrum from a cosmological light ray for one
-    of the sample datasets.  Outputs to fits file if astropy is installed.
+    This test generates an absorption spectrum from a non-cosmological light ray
     """
 
     # Set up in a temp dir
@@ -88,11 +105,114 @@
     curdir = os.getcwd()
     os.chdir(tmpdir)
 
-    lr = LightRay(COSMO_PLUS, 'Enzo', 0.0, 0.1)
+    lr = LightRay(COSMO_PLUS_SINGLE)
 
-    lr.make_light_ray(seed=1234567,
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    f_new = h5.File('spectrum.h5', 'r')
+
+    # load standard data for comparison
+    f_old = h5.File(HI_SPECTRUM_FILE, 'r')
+
+    # compare between standard data and current data for each array saved 
+    # (wavelength, flux, tau)
+    for key in f_old.keys():
+        assert_array_almost_equal(f_new[key].value, f_old[key].value, 10)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
+def test_equivalent_width_conserved():
+    """
+    This tests that the equivalent width of the optical depth is conserved 
+    regardless of the bin width employed in wavelength space.
+    Unresolved lines should still deposit optical depth into the spectrum.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wave = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    lambda_min= 1200
+    lambda_max= 1300
+    lambda_bin_widths = [1e-3, 1e-2, 1e-1, 1e0, 1e1]
+    total_tau = []
+
+    for lambda_bin_width in lambda_bin_widths:
+        n_lambda = ((lambda_max - lambda_min)/ lambda_bin_width) + 1
+        sp = AbsorptionSpectrum(lambda_min=lambda_min, lambda_max=lambda_max, 
+                                n_lambda=n_lambda)
+        sp.add_line(my_label, field, wave, f_value, gamma, mass)
+        wavelength, flux = sp.make_spectrum('lightray.h5')
+        total_tau.append((lambda_bin_width * sp.tau_field).sum())
+        
+    # assure that the total tau values are all within 1e-5 of each other
+    for tau in total_tau:
+        assert_almost_equal(tau, total_tau[0], 5)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+
+@requires_file(COSMO_PLUS_SINGLE)
+@requires_module("astropy")
+def test_absorption_spectrum_fits():
+    """
+    This test generates an absorption spectrum and saves it as a fits file.
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

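The new test_equivalent_width_conserved above asserts that this integrated optical depth is independent of the chosen bin width. A condensed sketch of the same check, assuming the enzo_cosmology_plus data and the 'lightray.h5' ray generated in the tests above are available in the working directory:

import numpy as np
from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

total_tau = []
for bin_width in [1e-2, 1e-1, 1e0]:
    n_lambda = int((1300 - 1200) / bin_width) + 1
    sp = AbsorptionSpectrum(lambda_min=1200, lambda_max=1300, n_lambda=n_lambda)
    sp.add_line('HI Lya', 'H_number_density', 1215.6700,
                4.164e-01, 6.265e+08, 1.00794)
    wavelength, flux = sp.make_spectrum('lightray.h5')
    # integrated optical depth; should not depend on the bin width
    total_tau.append((bin_width * sp.tau_field).sum())

assert np.allclose(total_tau, total_tau[0], rtol=1e-4)
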
diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -64,12 +64,12 @@
         Default: None
     near_redshift : optional, float
         The near (lowest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     far_redshift : optional, float
         The far (highest) redshift for a light ray containing multiple
-        datasets.  Do not use is making a light ray from a single
+        datasets.  Do not use if making a light ray from a single
         dataset.
         Default: None
     use_minimum_datasets : optional, bool
@@ -168,9 +168,9 @@
 
         # If using only one dataset, set start and stop manually.
         if start_position is not None:
-            if len(self.light_ray_solution) > 1:
-                raise RuntimeError("LightRay Error: cannot specify start_position " + \
-                                   "if light ray uses more than one dataset.")
+            if self.near_redshift is not None or self.far_redshift is not None:
+                raise RuntimeError("LightRay Error: cannot specify both " + \
+                                   "start_position and a redshift range.")
             if not ((end_position is None) ^ (trajectory is None)):
                 raise RuntimeError("LightRay Error: must specify either end_position " + \
                                    "or trajectory, but not both.")

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -30,10 +30,10 @@
     get_rotation_matrix, \
     periodic_dist
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2, \
     TINY
-from yt.utilities.physical_ratios import \
-    rho_crit_g_cm3_h2
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/photon_simulator/tests/test_beta_model.py
--- a/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_beta_model.py
@@ -14,9 +14,7 @@
     XSpecThermalModel, XSpecAbsorbModel, \
     ThermalPhotonModel, PhotonList
 from yt.config import ytcfg
-from yt.utilities.answer_testing.framework import \
-    requires_module
-from yt.testing import requires_file
+from yt.testing import requires_file, requires_module
 import numpy as np
 from yt.utilities.physical_ratios import \
     K_per_keV, mass_hydrogen_grams
@@ -43,7 +41,7 @@
 @requires_file(rmf)
 def test_beta_model():
     import xspec
-    
+
     xspec.Fit.statMethod = "cstat"
     xspec.Xset.addModelString("APECTHERMAL","yes")
     xspec.Fit.query = "yes"
@@ -119,7 +117,7 @@
     norm_sim = float(norm_sim.in_cgs())
 
     events = photons.project_photons("z", responses=[arf,rmf],
-                                     absorb_model=abs_model, 
+                                     absorb_model=abs_model,
                                      convolve_energies=True, prng=my_prng)
     events.write_spectrum("beta_model_evt.pi", clobber=True)
 
@@ -143,7 +141,7 @@
     xspec.Fit.renorm()
     xspec.Fit.nIterations = 100
     xspec.Fit.perform()
-    
+
     kT  = m.bapec.kT.values[0]
     mu = (m.bapec.Redshift.values[0]-redshift)*ckms
     Z = m.bapec.Abundanc.values[0]
@@ -156,10 +154,8 @@
     dsigma = m.bapec.Velocity.sigma
     dnorm = m.bapec.norm.sigma
 
-    print kT, kT_sim, dkT
-
     assert np.abs(mu-mu_sim) < dmu
-    assert np.abs(kT-kT_sim) < dkT    
+    assert np.abs(kT-kT_sim) < dkT
     assert np.abs(Z-Z_sim) < dZ
     assert np.abs(sigma-sigma_sim) < dsigma
     assert np.abs(norm-norm_sim) < dnorm

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -17,7 +17,6 @@
 from yt.testing import requires_file
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load
-import numpy as np
 from numpy.random import RandomState
 import os
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/photon_simulator/tests/test_spectra.py
--- a/yt/analysis_modules/photon_simulator/tests/test_spectra.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_spectra.py
@@ -1,9 +1,8 @@
 from yt.analysis_modules.photon_simulator.api import \
     TableApecModel, XSpecThermalModel
-import numpy as np
 from yt.testing import requires_module, fake_random_ds
 from yt.utilities.answer_testing.framework import \
-    GenericArrayTest, data_dir_load
+    GenericArrayTest
 from yt.config import ytcfg
 
 def setup():

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
 #-----------------------------------------------------------------------------
 
 from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
-from yt.units.yt_array import YTQuantity
-from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.visualization.volume_rendering.api import off_axis_projection
+from yt.funcs import fix_axis, mylog, get_pbar
+from yt.visualization.volume_rendering.off_axis_projection import \
+    off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     communication_system, parallel_root_only
+    communication_system, parallel_root_only
 from yt import units
 from yt.utilities.on_demand_imports import _astropy
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -12,8 +12,17 @@
 
 from yt.frontends.stream.api import load_uniform_grid
 from yt.funcs import get_pbar
-from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
-    mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.utilities.physical_ratios import \
+    cm_per_kpc, \
+    K_per_keV, \
+    cm_per_km
+from yt.utilities.physical_constants import \
+    mh, \
+    kboltz, \
+    Tcmb, \
+    hcgs, \
+    clight, \
+    sigma_thompson
 from yt.testing import requires_module, assert_almost_equal
 from yt.utilities.answer_testing.framework import requires_ds, \
     GenericArrayTest, data_dir_load, GenericImageTest

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -26,7 +26,9 @@
 except ImportError:
     mylog.debug("The Fortran kD-Tree did not import correctly.")
 
-import math, inspect, time
+import math
+import inspect
+import time
 from collections import defaultdict
 
 sep = 12

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -16,7 +16,6 @@
 #-----------------------------------------------------------------------------
 
 import os
-import types
 from yt.extern.six.moves import configparser
 
 ytcfg_defaults = dict(

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -13,16 +13,19 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import os, os.path, types
+import os
 
 # Named imports
 from yt.extern.six import string_types
-from yt.funcs import *
 from yt.config import ytcfg
+from yt.funcs import mylog
 from yt.utilities.parameter_file_storage import \
     output_type_registry, \
     simulation_time_series_registry, \
     EnzoRunDatabase
+from yt.utilities.exceptions import \
+    YTOutputNotIdentified, \
+    YTSimulationNotIdentified
 from yt.utilities.hierarchy_inspection import find_lowest_subclasses
 
 def load(*args ,**kwargs):

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -15,7 +15,6 @@
 
 import inspect
 
-from yt.funcs import *
 from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
@@ -23,7 +22,7 @@
 class RegisteredTask(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
-        if hasattr(cls, "skip") and cls.skip == False:
+        if hasattr(cls, "skip") and cls.skip is False:
             return
         analysis_task_registry[cls.__name__] = cls
 

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,6 @@
     particle_handler_registry
 
 from .profiles import \
-    YTEmptyProfileData, \
-    BinnedProfile, \
-    BinnedProfile1D, \
-    BinnedProfile2D, \
-    BinnedProfile3D, \
     create_profile, \
     Profile1D, \
     Profile2D, \

diff -r c2bc2d1afdfad8155106a2a540a11397cf2f09e6 -r 07c35755da9f2f684d52698d7711f99e6f1f13ed yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -15,21 +15,29 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-import math
-import weakref
-import itertools
-import shelve
 from functools import wraps
 import fileinput
 from re import finditer
+from tempfile import TemporaryFile
 import os
+import zipfile
 
 from yt.config import ytcfg
-from yt.funcs import *
-from yt.utilities.logger import ytLogger
-from .data_containers import \
-    YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, \
-    restore_field_information_state, YTFieldData
+from yt.data_objects.data_containers import \
+    YTSelectionContainer1D, \
+    YTSelectionContainer2D, \
+    YTSelectionContainer3D, \
+    YTFieldData
+from yt.funcs import \
+    ensure_list, \
+    mylog, \
+    get_memory_usage, \
+    iterable, \
+    only_on_root
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented, \
+    YTNoAPIKey, \
+    YTTooManyVertices
 from yt.utilities.lib.QuadTree import \
     QuadTree
 from yt.utilities.lib.Interpolators import \
@@ -38,8 +46,6 @@
     fill_region
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.data_point_utilities import CombineGrids,\
-    DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -47,16 +53,10 @@
 from yt.units.unit_object import Unit
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.grid_data_format.writer import write_to_gdf
+from yt.fields.field_exceptions import \
+    NeedsOriginalGrid
 from yt.frontends.stream.api import load_uniform_grid
 
-from yt.fields.field_exceptions import \
-    NeedsGridType,\
-    NeedsOriginalGrid,\
-    NeedsDataField,\
-    NeedsProperty,\
-    NeedsParameter
-from yt.fields.derived_field import \
-    TranslationFunc
 
 class YTStreamline(YTSelectionContainer1D):
     """
@@ -369,14 +369,13 @@
         data['pdy'] = self.ds.arr(pdy, code_length)
         data['fields'] = nvals
         # Now we run the finalizer, which is ignored if we don't need it
-        fd = data['fields']
         field_data = np.hsplit(data.pop('fields'), len(fields))
         for fi, field in enumerate(fields):
-            finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             input_units = self._projected_units[field]
             self[field] = self.ds.arr(field_data[fi].ravel(), input_units)
-        for i in list(data.keys()): self[i] = data.pop(i)
+        for i in list(data.keys()):
+            self[i] = data.pop(i)
         mylog.info("Projection completed")
         self.tree = tree
 
@@ -939,7 +938,6 @@
         ls.current_level += 1
         ls.current_dx = ls.base_dx / \
             self.ds.relative_refinement(0, ls.current_level)
-        LL = ls.left_edge - ls.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex, end_index, ls.current_dims = \
             self._minimal_box(ls.current_dx)
@@ -1509,11 +1507,8 @@
                     color_log = True, emit_log = True, plot_index = None,
                     color_field_max = None, color_field_min = None,
                     emit_field_max = None, emit_field_min = None):
-        import io
-        from sys import version
         if plot_index is None:
             plot_index = 0
-            vmax=0
         ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         #(0) formulate vertices
@@ -1552,7 +1547,7 @@
                 tmp = self.vertices[i,:]
                 np.divide(tmp, dist_fac, tmp)
                 v[ax][:] = tmp
-        return  v, lut, transparency, emiss, f['cind']
+        return v, lut, transparency, emiss, f['cind']
 
 
     def export_ply(self, filename, bounds = None, color_field = None,
@@ -1734,8 +1729,6 @@
         api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
         if api_key in (None, "None"):
             raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
-        import zipfile, json
-        from tempfile import TemporaryFile
 
         ply_file = TemporaryFile()
         self.export_ply(ply_file, bounds, color_field, color_map, color_log,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/75529b9523e6/
Changeset:   75529b9523e6
Branch:      yt
User:        chummels
Date:        2015-11-17 18:41:55+00:00
Summary:     Updating light_ray's kwarg "get_los_velocity" to "use_peculiar_velocity".
Affected #:  3 files

diff -r 07c35755da9f2f684d52698d7711f99e6f1f13ed -r 75529b9523e67d8c812826edefe50ec9acd040ac yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -140,7 +140,9 @@
            want them.
            Default: "lines.txt"
         use_peculiar_velocity : optional, bool
-           if True, include line of sight velocity for shifting lines.
+           if True, include peculiar velocity for calculating doppler redshift
+           to shift lines.  Requires a similar flag to be set in LightRay 
+           generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal

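For completeness, a hedged usage sketch of make_spectrum() with the reworded use_peculiar_velocity flag and the subgrid_resolution keyword added earlier in this series; it assumes a 'lightray.h5' ray file already exists, and the keyword values simply restate the documented defaults:

from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
sp.add_line('HI Lya', 'H_number_density', 1215.6700,
            4.164e-01, 6.265e+08, 1.00794)
# use_peculiar_velocity requires the same flag in LightRay.make_light_ray;
# subgrid_resolution=10 resolves each line with >= 10 virtual bins
wavelength, flux = sp.make_spectrum('lightray.h5',
                                    output_file='spectrum.h5',
                                    use_peculiar_velocity=True,
                                    subgrid_resolution=10)
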
diff -r 07c35755da9f2f684d52698d7711f99e6f1f13ed -r 75529b9523e67d8c812826edefe50ec9acd040ac yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -50,7 +50,6 @@
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

diff -r 07c35755da9f2f684d52698d7711f99e6f1f13ed -r 75529b9523e67d8c812826edefe50ec9acd040ac yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -258,13 +258,13 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
+                       use_peculiar_velocity=True, redshift=None,
                        njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
+                       use_peculiar_velocity=True, redshift=None,
                        njobs=-1)
 
         Create a light ray and get field values for each lixel.  A light
@@ -305,9 +305,10 @@
         data_filename : optional, string
             Path to output file for ray data.
             Default: None.
-        get_los_velocity : optional, bool
-            If True, the line of sight velocity is calculated for
-            each point in the ray.
+        use_peculiar_velocity : optional, bool
+            If True, the peculiar velocity along the ray will be sampled and
+            used to calculate the effective redshift, which combines the
+            cosmological redshift and the doppler redshift.
             Default: True.
         redshift : optional, float
             Used with light rays made from single datasets to specify a
@@ -335,7 +336,7 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         Make a light ray from a single dataset:
 
@@ -349,7 +350,7 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         """
 
@@ -368,7 +369,7 @@
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
-        if get_los_velocity:
+        if use_peculiar_velocity:
             all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
                                'velocity_los', 'velocity_mag', 'theta',
                                'redshift_eff'])
@@ -445,12 +446,13 @@
                 for field in data_fields:
                     sub_data[field].extend(sub_ray[field][asort])
 
-                if get_los_velocity:
+                if use_peculiar_velocity:
                     line_of_sight = sub_segment[0] - sub_segment[1]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
+                    # Line of sight velocity = vel_los
                     sub_vel_los = (np.rollaxis(sub_vel, 1) * \
                                    line_of_sight).sum(axis=1)
                     sub_data['velocity_los'].extend(sub_vel_los[asort])
@@ -477,7 +479,7 @@
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
-            # When velocity_los is present, add effective redshift 
+            # When using the peculiar velocity, add effective redshift 
             # (redshift_eff) field by combining cosmological redshift and 
             # doppler redshift.
             
@@ -496,7 +498,7 @@
             # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
             # Alternatively, see eqn 5.49 in Peebles for a similar result.
 
-            if get_los_velocity:
+            if use_peculiar_velocity:
                 velocity_mag_cm = (1 + sub_data['redshift']) * \
                                   sub_data['velocity_mag']
                 redshift_dopp = (1 + velocity_mag_cm * \


https://bitbucket.org/yt_analysis/yt/commits/7a5d1ccfb286/
Changeset:   7a5d1ccfb286
Branch:      yt
User:        chummels
Date:        2015-11-17 18:48:15+00:00
Summary:     Updating the docs and cookbooks to reflect the change in kwarg.
Affected #:  5 files

diff -r 75529b9523e67d8c812826edefe50ec9acd040ac -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -11,8 +11,8 @@
 with the path length of the ray through the cell.  Line profiles are 
 generated using a voigt profile based on the temperature field.  The lines 
 are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the line of sight peculiar velocity.  Inclusion of the 
-peculiar velocity requires setting ``get_los_velocity`` to True in the call to 
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
 The spectrum generator will output a file containing the wavelength and 

diff -r 75529b9523e67d8c812826edefe50ec9acd040ac -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -79,7 +79,7 @@
 
   lr.make_light_ray(seed=8675309,
                     fields=['temperature', 'density'],
-                    get_los_velocity=True)
+                    use_peculiar_velocity=True)
 
 The keyword arguments are:
 
@@ -107,8 +107,10 @@
 * ``data_filename`` (*string*): Path to output file for ray data.  
   Default: None.
 
-* ``get_los_velocity`` (*bool*): If True, the line of sight velocity is 
-  calculated for each point in the ray.  Default: True.
+* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from
+  the peculiar velocity of gas along the ray is calculated and combined with
+  the cosmological redshift to form the "effective" redshift.
+  Default: True.
 
 * ``redshift`` (*float*): Used with light rays made from single datasets to 
   specify a starting redshift for the ray.  If not used, the starting 

diff -r 75529b9523e67d8c812826edefe50ec9acd040ac -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -71,7 +71,6 @@
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=fields, setup_function=setup_ds,
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Create an AbsorptionSpectrum object extending from

diff -r 75529b9523e67d8c812826edefe50ec9acd040ac -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce doc/source/cookbook/light_ray.py
--- a/doc/source/cookbook/light_ray.py
+++ b/doc/source/cookbook/light_ray.py
@@ -20,7 +20,6 @@
                   solution_filename='LR/lightraysolution.txt',
                   data_filename='LR/lightray.h5',
                   fields=['temperature', 'density'],
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Optionally, we can now overplot the part of this ray that intersects 

diff -r 75529b9523e67d8c812826edefe50ec9acd040ac -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -13,8 +13,7 @@
                   end_position=[1., 1., 1.],
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_los_velocity=True)
+                  fields=['temperature', 'density'])
 
 # Optionally, we can now overplot this ray on a projection of the source 
 # dataset


https://bitbucket.org/yt_analysis/yt/commits/e9971dce61f3/
Changeset:   e9971dce61f3
Branch:      yt
User:        chummels
Date:        2015-11-17 19:09:22+00:00
Summary:     Creating deprecation warning for get_los_velocity kwarg
Affected #:  1 file

diff -r 7a5d1ccfb28669607e956ce6aed04b4ff7b0a2ce -r e9971dce61f34f824a148fc84043367e13a6f7f3 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -258,8 +258,8 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       use_peculiar_velocity=True, redshift=None,
-                       njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True, 
+                       redshift=None, njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
@@ -353,6 +353,9 @@
         ...                       use_peculiar_velocity=True)
 
         """
+        if get_los_velocity is not None:
+            use_peculiar_velocity = get_los_velocity
+            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,

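A short sketch of how the deprecation shim behaves from the caller's side; the dataset path is the single-dataset path used in the tests (COSMO_PLUS_SINGLE), and the old keyword is simply remapped with a warning:

from yt.analysis_modules.cosmological_observation.api import LightRay

lr = LightRay("enzo_cosmology_plus/RD0009/RD0009")
lr.make_light_ray(start_position=[0., 0., 0.],
                  end_position=[1., 1., 1.],
                  fields=['temperature', 'density', 'H_number_density'],
                  # deprecated kwarg; becomes use_peculiar_velocity=False
                  # and logs a warning
                  get_los_velocity=False,
                  data_filename='lightray.h5')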

https://bitbucket.org/yt_analysis/yt/commits/fb9ab33efa56/
Changeset:   fb9ab33efa56
Branch:      yt
User:        chummels
Date:        2015-11-18 17:27:52+00:00
Summary:     Changing velocity_mag to velocity_magnitude in output from LightRay
Affected #:  1 file

diff -r e9971dce61f34f824a148fc84043367e13a6f7f3 -r fb9ab33efa5639219ab97acbd3e6d44d4a0a974f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -374,7 +374,7 @@
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if use_peculiar_velocity:
             all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
-                               'velocity_los', 'velocity_mag', 'theta',
+                               'velocity_los', 'velocity_magnitude', 'theta',
                                'redshift_eff'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
@@ -465,7 +465,7 @@
                     theta = np.arccos(np.dot(line_of_sight, sub_vel) / \
                                       sub_ray['velocity_magnitude'])
                     sub_data['theta'].extend(theta[asort])
-                    sub_data['velocity_mag'].extend(sub_ray['velocity_magnitude'][asort])
+                    sub_data['velocity_magnitude'].extend(sub_ray['velocity_magnitude'][asort])
                     del sub_vel, sub_vel_los, theta
 
                 sub_ray.clear_data()
@@ -503,7 +503,7 @@
 
             if use_peculiar_velocity:
                 velocity_mag_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_mag']
+                                  sub_data['velocity_magnitude']
                 redshift_dopp = (1 + velocity_mag_cm * \
                                  np.cos(sub_data['theta']) / speed_of_light_cgs) / \
                                  np.sqrt(1 - velocity_mag_cm**2 / speed_of_light_cgs**2) - 1


https://bitbucket.org/yt_analysis/yt/commits/fcd197314a19/
Changeset:   fcd197314a19
Branch:      yt
User:        chummels
Date:        2015-11-30 06:09:07+00:00
Summary:     Removing 1+z normalization from doppler redshift.  Creating redshift_dopp field in LightRay
Affected #:  1 file

diff -r fb9ab33efa5639219ab97acbd3e6d44d4a0a974f -r fcd197314a190c75c6a6c283f6ce24dce5d37033 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -374,8 +374,8 @@
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         if use_peculiar_velocity:
             all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
-                               'velocity_los', 'velocity_magnitude', 'theta',
-                               'redshift_eff'])
+                               'velocity_los', 'redshift_eff', 
+                               'redshift_dopp'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -459,14 +459,33 @@
                     sub_vel_los = (np.rollaxis(sub_vel, 1) * \
                                    line_of_sight).sum(axis=1)
                     sub_data['velocity_los'].extend(sub_vel_los[asort])
+
+                    # doppler redshift:
+                    # See https://en.wikipedia.org/wiki/Redshift and 
+                    # Peebles eqns: 5.48, 5.49
+
+                    # 1 + redshift_dopp = (1 + v*cos(theta)/c) / 
+                    # sqrt(1 - v**2/c**2)
+
+                    # where v is the peculiar velocity (ie the physical velocity
+                    # minus the hubble flow; there is no hubble flow in the
+                    # sim, so this is just the physical velocity).
+
+                    # the bulk of the doppler redshift is from line of sight 
+                    # motion, but there is a small amount from time dilation 
+                    # of transverse motion, hence the inclusion of theta (the 
+                    # angle between line of sight and the velocity). 
                     # theta is the angle between the ray vector (i.e. line of 
-                    # sight) and the velocity vectors:
-                    # a dot b = ab cos(theta)
-                    theta = np.arccos(np.dot(line_of_sight, sub_vel) / \
-                                      sub_ray['velocity_magnitude'])
-                    sub_data['theta'].extend(theta[asort])
-                    sub_data['velocity_magnitude'].extend(sub_ray['velocity_magnitude'][asort])
-                    del sub_vel, sub_vel_los, theta
+                    # sight) and the velocity vectors: a dot b = ab cos(theta)
+
+                    sub_vel_mag = sub_ray['velocity_magnitude']
+                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    redshift_dopp = \
+                        (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
+                         np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
+                    sub_data['redshift_dopp'].extend(redshift_dopp[asort])
+                    del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
+                        redshift_dopp
 
                 sub_ray.clear_data()
                 del sub_ray, asort
@@ -482,34 +501,17 @@
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
-            # When using the peculiar velocity, add effective redshift 
-            # (redshift_eff) field by combining cosmological redshift and 
+            # When using the peculiar velocity, create effective redshift 
+            # (redshift_eff) field combining cosmological redshift and 
             # doppler redshift.
             
-            # first convert the velocity magnitudes to comoving frame 
-            # (ie mult. by (1+z)), then calculate the doppler redshift:
-            # 1 + redshift_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)
-
-            # the bulk of the doppler redshift is from line of sight motion,
-            # but there is a small amount from time dilation of transverse
-            # motion, hence the inclusion of theta (the angle between line of
-            # sight and the velocity). See:
-            # https://en.wikipedia.org/wiki/Redshift
-
             # then to add cosmological redshift and doppler redshifts, follow
             # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
-            # Alternatively, see eqn 5.49 in Peebles for a similar result.
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
 
             if use_peculiar_velocity:
-                velocity_mag_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_magnitude']
-                redshift_dopp = (1 + velocity_mag_cm * \
-                                 np.cos(sub_data['theta']) / speed_of_light_cgs) / \
-                                 np.sqrt(1 - velocity_mag_cm**2 / speed_of_light_cgs**2) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
+               sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
                                             (1 + sub_data['redshift'])) - 1
-                del velocity_mag_cm, redshift_dopp
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()

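The redshift expressions in the comments above can be checked with a quick numeric sketch using plain numpy; the peculiar speed, angle, and cosmological redshift below are arbitrary illustrative values, not yt fields:

import numpy as np

c = 2.99792458e10                 # speed of light, cm/s
v = 2.0e7                         # peculiar speed, cm/s (200 km/s)
theta = np.deg2rad(30.0)          # angle between line of sight and velocity

# 1 + z_dopp = (1 + v*cos(theta)/c) / sqrt(1 - v**2/c**2)
z_dopp = (1 + v * np.cos(theta) / c) / np.sqrt(1 - v**2 / c**2) - 1

# 1 + z_eff = (1 + z_cosmo) * (1 + z_dopp)   (Peacock eqn 3.75)
z_cosmo = 0.05
z_eff = (1 + z_cosmo) * (1 + z_dopp) - 1

print(z_dopp, z_eff)   # ~5.8e-4 and ~0.0506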

https://bitbucket.org/yt_analysis/yt/commits/fe75b5a192df/
Changeset:   fe75b5a192df
Branch:      yt
User:        chummels
Date:        2015-11-30 06:10:55+00:00
Summary:     Merging with tip.
Affected #:  43 files

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 CONTRIBUTING.rst
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,970 @@
+.. This document is rendered in HTML with cross-reference links filled in at
+   http://yt-project.org/doc/developing/
+
+.. _getting-involved:
+
+Getting Involved
+================
+
+There are *lots* of ways to get involved with yt, as a community and as a
+technical system -- not all of them just contributing code, but also
+participating in the community, helping us with designing the websites, adding
+documentation, and sharing your scripts with others.
+
+Coding is only one way to be involved!
+
+Communication Channels
+----------------------
+
+There are five main communication channels for yt:
+
+ * We have an IRC channel, on ``irc.freenode.net`` in ``#yt``.
+   You can connect through our web
+   gateway without any special client, at http://yt-project.org/irc.html .
+   *IRC is the first stop for conversation!*
+ * Many yt developers participate in the yt Slack community. Slack is a free 
+   chat service that many teams use to organize their work. You can get an
+   invite to yt's Slack organization by clicking the "Join us @ Slack" button
+   on this page: http://yt-project.org/community.html
+ * `yt-users <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_
+   is a relatively high-traffic mailing list where people are encouraged to ask
+   questions about the code, figure things out and so on.
+ * `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_ is
+   a much lower-traffic mailing list designed to focus on discussions of
+   improvements to the code, ideas about planning, development issues, and so
+   on.
+ * `yt-svn <http://lists.spacepope.org/listinfo.cgi/yt-svn-spacepope.org>`_ is
+   the (now-inaccurately titled) mailing list where all pushes to the primary
+   repository are sent.
+
+The easiest way to get involved with yt is to read the mailing lists, hang out
+in IRC or slack chat, and participate.  If someone asks a question you know the
+answer to (or have your own question about!) write back and answer it.
+
+If you have an idea about something, suggest it!  We not only welcome
+participation, we encourage it.
+
+Documentation
+-------------
+
+The yt documentation is constantly being updated, and it is a task we would very
+much appreciate assistance with.  Whether that is adding a section, updating an
+outdated section, contributing typo or grammatical fixes, adding a FAQ, or
+increasing coverage of functionality, it would be very helpful if you wanted to
+help out.
+
+The easiest way to help out is to fork the main yt repository (where the
+documentation lives in the ``doc`` directory in the root of the yt mercurial
+repository) and then make your changes in your own fork.  When you are done,
+issue a pull request through the website for your new fork, and we can comment
+back and forth and eventually accept your changes. See :ref:`sharing-changes` for
+more information about contributing your changes to yt on bitbucket.
+
+Gallery Images and Videos
+-------------------------
+
+If you have an image or video you'd like to display in the image or video
+galleries, getting it included is easy!  You can either fork the `yt homepage
+repository <http://bitbucket.org/yt_analysis/website>`_ and add it there, or
+email it to us and we'll add it to the `Gallery
+<http://yt-project.org/gallery.html>`_.
+
+We're eager to show off the images and movies you make with yt, so please feel
+free to drop `us <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_
+a line and let us know if you've got something great!
+
+Technical Contributions
+-----------------------
+
+Contributing code is another excellent way to participate -- whether it's
+bug fixes, new features, analysis modules, or a new code frontend.  See
+:ref:`creating_frontend` for more details.
+
+The process is pretty simple: fork on BitBucket, make changes, issue a pull
+request.  We can then go back and forth with comments in the pull request, but
+usually we end up accepting.
+
+For more information, see :ref:`contributing-code`, where we spell out how to
+get up and running with a development environment, how to commit, and how to
+use BitBucket.
+
+Online Presence
+---------------
+
+Some of these fall under the other items, but if you'd like to help out with
+the website or any of the other ways yt is presented online, please feel free!
+Almost everything is kept in hg repositories on BitBucket, and it is very easy
+to fork and contribute back changes.
+
+Please feel free to dig in and contribute changes.
+
+Word of Mouth
+-------------
+
+If you're using yt and it has increased your productivity, please feel
+encouraged to share that information.  Cite our `paper
+<http://adsabs.harvard.edu/abs/2011ApJS..192....9T>`_, tell your colleagues,
+and just spread word of mouth.  By telling people about your successes, you'll
+help bring more eyes and hands to the table -- in this manner, by increasing
+participation, collaboration, and simply spreading the limits of what the code
+is asked to do, we hope to help scale the utility and capability of yt with the
+community size.
+
+Feel free to `blog <http://blog.yt-project.org/>`_ about, `tweet
+<http://twitter.com/yt_astro>`_ about and talk about what you are up to!
+
+Long-Term Projects
+------------------
+
+There are some wild-eyed, out-there ideas that have been bandied about for the
+future directions of yt -- some of them even written into the mission
+statement.  The ultimate goal is to move past simple analysis and visualization
+of data and begin to approach it from the other side: generating data and
+running solvers.  We also hope to increase its ability to act as an in situ
+analysis code, by presenting a unified protocol.  Other projects include
+interfacing with ParaView and VisIt, creating a web GUI for running
+simulations, creating a run-tracker that follows simulations in progress, a
+federated database for simulation outputs, and so on and so forth.
+
+yt is an ambitious project.  Let's be ambitious together.
+
+yt Community Code of Conduct
+----------------------------
+
+The community of participants in open source
+Scientific projects is made up of members from around the
+globe with a diverse set of skills, personalities, and
+experiences. It is through these differences that our
+community experiences success and continued growth. We
+expect everyone in our community to follow these guidelines
+when interacting with others both inside and outside of our
+community. Our goal is to keep ours a positive, inclusive,
+successful, and growing community.
+
+As members of the community,
+
+- We pledge to treat all people with respect and
+  provide a harassment- and bullying-free environment,
+  regardless of sex, sexual orientation and/or gender
+  identity, disability, physical appearance, body size,
+  race, nationality, ethnicity, and religion. In
+  particular, sexual language and imagery, sexist,
+  racist, or otherwise exclusionary jokes are not
+  appropriate.
+
+- We pledge to respect the work of others by
+  recognizing acknowledgment/citation requests of
+  original authors. As authors, we pledge to be explicit
+  about how we want our own work to be cited or
+  acknowledged.
+
+- We pledge to welcome those interested in joining the
+  community, and realize that including people with a
+  variety of opinions and backgrounds will only serve to
+  enrich our community. In particular, discussions
+  relating to pros/cons of various technologies,
+  programming languages, and so on are welcome, but
+  these should be done with respect, taking proactive
+  measures to ensure that all participants are heard and
+  feel confident that they can freely express their
+  opinions.
+
+- We pledge to welcome questions and answer them
+  respectfully, paying particular attention to those new
+  to the community. We pledge to provide respectful
+  criticisms and feedback in forums, especially in
+  discussion threads resulting from code
+  contributions.
+
+- We pledge to be conscientious of the perceptions of
+  the wider community and to respond to criticism
+  respectfully. We will strive to model behaviors that
+  encourage productive debate and disagreement, both
+  within our community and where we are criticized. We
+  will treat those outside our community with the same
+  respect as people within our community.
+
+- We pledge to help the entire community follow the
+  code of conduct, and to not remain silent when we see
+  violations of the code of conduct. We will take action
+  when members of our community violate this code, such as by
+  contacting confidential at yt-project.org (all emails sent to
+  this address will be treated with the strictest
+  confidence) or talking privately with the person.
+
+This code of conduct applies to all
+community situations online and offline, including mailing
+lists, forums, social media, conferences, meetings,
+associated social events, and one-to-one interactions.
+
+The yt Community Code of Conduct was adapted from the
+`Astropy Community Code of Conduct
+<http://www.astropy.org/about.html#codeofconduct>`_,
+which was partially inspired by the PSF code of conduct.
+
+.. _contributing-code:
+
+How to Develop yt
+=================
+
+yt is a community project!
+
+We are very happy to accept patches, features, and bugfixes from any member of
+the community!  yt is developed using mercurial, primarily because it enables
+very easy and straightforward submission of changesets.  We're eager to hear
+from you, and if you are developing yt, we encourage you to subscribe to the
+`developer mailing list
+<http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_. Please feel
+free to hack around, commit changes, and send them upstream.
+
+.. note:: If you already know how to use the `mercurial version control system
+   <http://mercurial-scm.org>`_ and are comfortable with handling it yourself,
+   the quickest way to contribute to yt is to `fork us on BitBucket
+   <http://bitbucket.org/yt_analysis/yt/fork>`_, make your changes, push the
+   changes to your fork and issue a `pull request
+   <http://bitbucket.org/yt_analysis/yt/pull-requests>`_.  The rest of this
+   document is just an explanation of how to do that.
+
+See :ref:`code-style-guide` for more information about coding style in yt and
+:ref:`docstrings` for an example docstring.  Please read them before hacking on
+the codebase, and feel free to email any of the mailing lists for help with the
+codebase.
+
+Keep in touch, and happy hacking!
+
+.. _open-issues:
+
+Open Issues
+-----------
+
+If you're interested in participating in yt development, take a look at the
+`issue tracker on bitbucket
+<https://bitbucket.org/yt_analysis/yt/issues?milestone=easy?status=new>`_.
+Issues are marked with a milestone of "easy", "moderate", or "difficult"
+depending on the estimated level of difficulty for fixing the issue. While we
+try to triage the issue tracker regularly, it may be the case that issues marked
+"moderate" are actually easier than their milestone label indicates since that
+is the default value.
+
+Here are some predefined issue searches that might be useful:
+
+* Unresolved issues `marked "easy" <https://bitbucket.org/yt_analysis/yt/issues?milestone=easy&status=open&status=new>`_.
+* Unresolved issues `marked "easy" or "moderate" <https://bitbucket.org/yt_analysis/yt/issues?milestone=easy&milestone=moderate&status=open&status=new>`_
+* `All unresolved issues <https://bitbucket.org/yt_analysis/yt/issues?status=open&status=new>`_
+
+Submitting Changes
+------------------
+
+We provide a brief introduction to submitting changes here.  yt thrives on the
+strength of its communities (http://arxiv.org/abs/1301.7064 has further
+discussion) and we encourage contributions from any user.  While we do not
+discuss version control, mercurial or the advanced usage of BitBucket in detail
+here, we do provide an outline of how to submit changes and we are happy to
+provide further assistance or guidance.
+
+Licensing
++++++++++
+
+yt is `licensed <http://blog.yt-project.org/post/Relicensing.html>`_ under the
+BSD 3-clause license.  Versions previous to yt-2.6 were released under the GPLv3.
+
+All contributed code must be BSD-compatible.  If you'd rather not license in
+this manner, but still want to contribute, please consider creating an external
+package, which we'll happily link to.
+
+How To Get The Source Code For Editing
+++++++++++++++++++++++++++++++++++++++
+
+yt is hosted on BitBucket, and you can see all of the yt repositories at
+http://bitbucket.org/yt_analysis/.  With the yt installation script you should have a
+copy of Mercurial for checking out pieces of code.  Make sure you have followed
+the steps above for bootstrapping your development (to ensure you have a
+bitbucket account, etc.).
+
+In order to modify the source code for yt, we ask that you make a "fork" of the
+main yt repository on bitbucket.  A fork is simply an exact copy of the main
+repository (along with its history) that you will now own and to which you can
+make modifications as you please.  You can create a personal fork by visiting the yt
+bitbucket webpage at https://bitbucket.org/yt_analysis/yt/ .  After logging in,
+you should see an option near the top right labeled "fork".  Click this option,
+and then click the fork repository button on the subsequent page.  You now have
+a forked copy of the yt repository for your own personal modification.
+
+This forked copy exists on the bitbucket repository, so in order to access
+it locally, follow the instructions at the top of that webpage for that
+forked repository, namely run at a local command line:
+
+.. code-block:: bash
+
+   $ hg clone http://bitbucket.org/<USER>/<REPOSITORY_NAME>
+
+This downloads that new forked repository to your local machine, so that you
+can access it, read it, make modifications, etc.  It will put the repository in
+a local directory of the same name as the repository in the current working
+directory.  You can see any past state of the code by using the ``hg log``
+command.  For example, the following command would show you the last 5
+changesets (modifications to the code) that were submitted to that repository.
+
+.. code-block:: bash
+
+   $ cd <REPOSITORY_NAME>
+   $ hg log -l 5
+
+Using the revision specifier (the number or hash identifier next to each
+changeset), you can update the local repository to any past state of the
+code (a previous changeset or version) by executing the command:
+
+.. code-block:: bash
+
+   $ hg up revision_specifier
+
+Lastly, if you want to use this new downloaded version of your yt repository as
+the *active* version of yt on your computer (i.e. the one which is executed when
+you run yt from the command line or the one that is loaded when you do ``import
+yt``), then you must "activate" it using the following commands from within the
+repository directory.
+
+.. code-block:: bash
+
+   $ cd <REPOSITORY_NAME>
+   $ python2.7 setup.py develop
+
+This will rebuild all C modules as well.
+
+.. _reading-source:
+
+How To Read The Source Code
++++++++++++++++++++++++++++
+
+If you just want to *look* at the source code, you may already have it on your
+computer.  If you build yt using the install script, the source is available at
+``$YT_DEST/src/yt-hg``.  See :ref:`source-installation` for more details about
+to obtain the yt source code if you did not build yt using the install
+script.
+
+The root directory of the yt mercurial repository contains a number of
+subdirectories with different components of the code.  Most of the yt source
+code is contained in the yt subdirectory.  This directory itself contains
+the following subdirectories:
+
+``frontends``
+   This is where interfaces to codes are created.  Within each subdirectory of
+   yt/frontends/ there must exist the following files, even if empty (a
+   hypothetical example layout is sketched after this list):
+
+   * ``data_structures.py``, where subclasses of AMRGridPatch, Dataset
+     and AMRHierarchy are defined.
+   * ``io.py``, where a subclass of IOHandler is defined.
+   * ``fields.py``, where fields we expect to find in datasets are defined
+   * ``misc.py``, where any miscellaneous functions or classes are defined.
+   * ``definitions.py``, where any definitions specific to the frontend are
+     defined.  (i.e., header formats, etc.)
+
+``fields``
+   This is where all of the derived fields that ship with yt are defined.
+
+``geometry``
+   This is where geometric helper routines are defined. Handlers
+   for grid and oct data, as well as helpers for coordinate transformations
+   can be found here.
+
+``visualization``
+   This is where all visualization modules are stored.  This includes plot
+   collections, the volume rendering interface, and pixelization frontends.
+
+``data_objects``
+   All objects that handle data, processed or unprocessed, not explicitly
+   defined as visualization are located in here.  This includes the base
+   classes for data regions, covering grids, time series, and so on.  This
+   also includes derived fields and derived quantities.
+
+``analysis_modules``
+   This is where all mechanisms for processing data live.  This includes
+   things like clump finding, halo profiling, halo finding, and so on.  This
+   is something of a catchall, but it serves as a level of greater
+   abstraction than simply data selection and modification.
+
+``gui``
+   This is where all GUI components go.  Typically this will be some small
+   tool used for one or two things, which contains a launching mechanism on
+   the command line.
+
+``utilities``
+   All broadly useful code that doesn't clearly fit in one of the other
+   categories goes here.
+
+``extern``
+   Bundled external modules (i.e. code that was not written by one of
+   the yt authors but that yt depends on) lives here.
+
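+As a concrete illustration of the ``frontends`` layout described above (the
+frontend name ``mycode`` here is hypothetical), a new frontend would start out
+with the following files:
+
+.. code-block:: bash
+
+   $ ls yt/frontends/mycode/
+   __init__.py  data_structures.py  definitions.py  fields.py  io.py  misc.py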
+
+If you're looking for a specific file or function in the yt source code, use
+the unix find command:
+
+.. code-block:: bash
+
+   $ find <DIRECTORY_TREE_TO_SEARCH> -name '<FILENAME>'
+
+The above command will find the FILENAME in any subdirectory in the
+DIRECTORY_TREE_TO_SEARCH.  Alternatively, if you're looking for a function
+call or a keyword in an unknown file in a directory tree, try:
+
+.. code-block:: bash
+
+   $ grep -R <KEYWORD_TO_FIND> <DIRECTORY_TREE_TO_SEARCH>
+
+This can be very useful for tracking down functions in the yt source.
+
+.. _building-yt:
+
+Building yt
++++++++++++
+
+If you have made changes to any C or Cython (``.pyx``) modules, you have to
+rebuild yt.  If your changes have exclusively been to Python modules, you will
+not need to re-build, but (see below) you may need to re-install.
+
+If you are running from a clone that is executable in-place (i.e., has been
+installed via the installation script or you have run ``setup.py develop``) you
+can rebuild these modules by executing:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py develop
+
+If you have previously "installed" via ``setup.py install`` you have to
+re-install:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py install
+
+Only one of these two options is needed.
+
+.. _windows-developing:
+
+Developing yt on Windows
+------------------------
+
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
+<http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 develop
+
+or
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 install
+
+.. _requirements-for-code-submission:
+
+Requirements for Code Submission
+--------------------------------
+
+Modifications to the code typically fall into one of three categories, each of
+which has different requirements for acceptance into the code base.  These
+requirements are in place for a few reasons -- to make sure that the code is
+maintainable, testable, and that we can easily include information about
+changes in changelogs during the release procedure.  (See `YTEP-0008
+<https://ytep.readthedocs.org/en/latest/YTEPs/YTEP-0008.html>`_ for more
+detail.)
+
+* New Features
+
+  * New unit tests (possibly new answer tests) (See :ref:`testing`)
+  * Docstrings in the source code for the public API
+  * Addition of new feature to the narrative documentation (See :ref:`writing_documentation`)
+  * Addition of cookbook recipe (See :ref:`writing_documentation`)
+  * Issue created on issue tracker, to ensure this is added to the changelog
+
+* Extension or Breakage of API in Existing Features
+
+  * Update existing narrative docs and docstrings (See :ref:`writing_documentation`)
+  * Update existing cookbook recipes (See :ref:`writing_documentation`)
+  * Modify or create new unit tests (See :ref:`testing`)
+  * Issue created on issue tracker, to ensure this is added to the changelog
+
+* Bug fixes
+
+  * Unit test is encouraged, to ensure breakage does not happen again in the
+    future. (See :ref:`testing`)
+  * Issue created on issue tracker, to ensure this is added to the changelog
+
+When submitting, you will be asked to make sure that your changes meet all of
+these requirements.  They are pretty easy to meet, and we're also happy to help
+out with them.  In :ref:`code-style-guide` there is a list of handy tips for
+how to structure and write your code.
+
+.. _mercurial-with-yt:
+
+How to Use Mercurial with yt
+----------------------------
+
+If you're new to Mercurial, these resources are pretty great for learning
+the ins and outs:
+
+* http://hginit.com/
+* http://hgbook.red-bean.com/read/
+* http://mercurial-scm.org/
+* http://mercurial-scm.org/wiki
+
+The commands that are essential for using mercurial include (a short example
+session is sketched after this list):
+
+* ``hg help`` which provides help for any mercurial command. For example, you
+  can learn more about the ``log`` command by doing ``hg help log``. Other useful
+  topics to use with ``hg help`` are ``hg help glossary``, ``hg help config``,
+  ``hg help extensions``, and ``hg help revsets``.
+* ``hg commit`` which commits changes in the working directory to the
+  repository, creating a new "changeset object."
+* ``hg add`` which adds a new file to be tracked by mercurial.  This does
+  not change the working directory.
+* ``hg pull`` which pulls (from an optional path specifier) changeset
+  objects from a remote source.  The working directory is not modified.
+* ``hg push`` which sends (to an optional path specifier) changeset objects
+  to a remote source.  The working directory is not modified.
+* ``hg log`` which shows a log of all changeset objects in the current
+  repository.  Use ``-G`` to show a graph of changeset objects and their
+  relationship.
+* ``hg update`` which (with an optional "revision" specifier) updates the
+  state of the working directory to match a changeset object in the
+  repository.
+* ``hg merge`` which combines two changesets to make a union of their lines
+  of development.  This updates the working directory.
+
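+As a rough sketch (the file name and commit message here are purely
+illustrative), a small editing session built from the commands above might
+look like:
+
+.. code-block:: bash
+
+   $ hg pull                            # fetch new changesets from the remote
+   $ hg update                          # update the working directory to the new tip
+   $ hg add yt/my_new_helper.py         # start tracking a newly created file
+   $ hg commit -m "Add a small helper"  # record the change as a new changeset
+   $ hg log -G -l 3                     # inspect recent history as a graph
+   $ hg push                            # send the new changeset to the remote
+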
+We are happy to answer questions about mercurial use on our IRC, slack
+chat, or mailing list, and to walk you through any troubles you might have.
+Here are some general suggestions for using mercurial with yt:
+
+* Named branches are to be avoided.  Try using bookmarks (see ``hg help
+  bookmark``) to track work.  (`More info about bookmarks is available on the
+  mercurial wiki <http://mercurial-scm.org/wiki/Bookmarks>`_)
+* Make sure you set a username in your ``~/.hgrc`` before you commit any
+  changes!  All of the tutorials above will describe how to do this as one of
+  the very first steps.
+* When contributing changes, you might be asked to make a handful of
+  modifications to your source code.  We'll work through how to do this with
+  you, and try to make it as painless as possible.
+* Your changes may fail automated style checks. See :ref:`code-style-guide` for
+  more information about automatically verifying your code style.
+* Please avoid deleting your yt forks, as that deletes the pull request
+  discussion from BitBucket's website, even if your pull request
+  is merged.
+* You should only need one fork.  To keep it in sync, you can sync from the
+  website. See Bitbucket's `Blog Post
+  <https://blog.bitbucket.org/2013/02/04/syncing-and-merging-come-to-bitbucket/>`_
+  about this. See :ref:`sharing-changes` for a description of the basic workflow
+  and :ref:`multiple-PRs` for a discussion about what to do when you want to
+  have multiple open pull requests at the same time.
+* If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
+  list.
+
+.. _sharing-changes:
+
+Making and Sharing Changes
+--------------------------
+
+The simplest way to submit changes to yt is to do the following:
+
+* Build yt from the mercurial repository
+* Navigate to the root of the yt repository
+* Make some changes and commit them
+* Fork the `yt repository on BitBucket <https://bitbucket.org/yt_analysis/yt>`_
+* Push the changesets to your fork
+* Issue a pull request.
+
+Here's a more detailed flowchart of how to submit changes.
+
+#. If you have used the installation script, the source code for yt can be
+   found in ``$YT_DEST/src/yt-hg``.  Alternatively see
+   :ref:`source-installation` for instructions on how to build yt from the
+   mercurial repository. (Below, in :ref:`reading-source`, we describe how to
+   find items of interest.)
+#. Edit the source file you are interested in and
+   test your changes.  (See :ref:`testing` for more information.)
+#. Fork yt on BitBucket.  (This step only has to be done once.)  You can do
+   this at: https://bitbucket.org/yt_analysis/yt/fork.  Call this repository
+   yt.
+#. Create a bookmark to track your work. For example: ``hg bookmark
+   my-first-pull-request``
+#. Commit these changes, using ``hg commit``.  This can take an argument
+   which is a series of filenames, if you have some changes you do not want
+   to commit.
+#. Remember that this is a large development effort and to keep the code
+   accessible to everyone, good documentation is a must.  Add in source code
+   comments for what you are doing.  Add in docstrings
+   if you are adding a new function or class or keyword to a function.
+   Add documentation to the appropriate section of the online docs so that
+   people other than yourself know how to use your new code.
+#. If your changes include new functionality or cover an untested area of the
+   code, add a test.  (See :ref:`testing` for more information.)  Commit
+   these changes as well.
+#. Push your changes to your new fork using the command::
+
+      hg push -B my-first-pull-request https://bitbucket.org/YourUsername/yt/
+
+   Where you should substitute the name of the bookmark you are working on for
+   ``my-first-pull-request``. If you end up doing considerable development, you
+   can set an alias in the file ``.hg/hgrc`` to point to this path.
+
+   .. note::
+     Note that the above approach uses HTTPS as the transfer protocol
+     between your machine and BitBucket.  If you prefer to use SSH - or
+     perhaps you're behind a proxy that doesn't play well with SSL via
+     HTTPS - you may want to set up an `SSH key`_ on BitBucket.  Then, you use
+     the syntax ``ssh://hg@bitbucket.org/YourUsername/yt``, or equivalent, in
+     place of ``https://bitbucket.org/YourUsername/yt`` in Mercurial commands.
+     For consistency, all commands we list in this document will use the HTTPS
+     protocol.
+
+     .. _SSH key: https://confluence.atlassian.com/display/BITBUCKET/Set+up+SSH+for+Mercurial
+
+#. Issue a pull request at
+   https://bitbucket.org/YourUsername/yt/pull-request/new .
+   A pull request is essentially just asking people to review and accept the
+   modifications you have made to your personal version of the code.
+
+
+During the course of your pull request you may be asked to make changes.  These
+changes may be related to style issues, correctness issues, or even requesting
+tests.  The process for responding to pull request code review is relatively
+straightforward.
+
+#. Make requested changes, or leave a comment indicating why you don't think
+   they should be made.
+#. Commit those changes to your local repository.
+#. Push the changes to your fork::
+
+      hg push https://bitbucket.org/YourUsername/yt/
+
+#. Your pull request will be automatically updated.
+
+.. _multiple-PRs:
+
+Working with Multiple BitBucket Pull Requests
++++++++++++++++++++++++++++++++++++++++++++++
+
+Once you become active developing for yt, you may be working on
+various aspects of the code or bugfixes at the same time.  Currently,
+BitBucket's *modus operandi* for pull requests automatically updates
+your active pull request with every ``hg push`` of commits that are a
+descendant of the head of your pull request.  In a normal workflow,
+this means that if you have an active pull request, make some changes
+locally for, say, an unrelated bugfix, then push those changes back to
+your fork in the hopes of creating a *new* pull request, you'll
+actually end up updating your current pull request!
+
+There are a few ways around this feature of BitBucket that will allow
+for multiple pull requests to coexist; we outline one such method
+below.  We assume that you have a fork of yt at
+``http://bitbucket.org/YourUsername/Your_yt`` (see
+:ref:`sharing-changes` for instructions on creating a fork) and that
+you have an active pull request to the main repository.
+
+The main issue with starting another pull request is making sure that
+your push to BitBucket doesn't go to the same head as your
+existing pull request and trigger BitBucket's auto-update feature.
+Here's how to get your local repository away from your current pull
+request head using `revsets <http://www.selenic.com/hg/help/revsets>`_
+and your ``hgrc`` file:
+
+#. Set up a Mercurial path for the main yt repository (note this is a convenience
+   step and only needs to be done once).  Add the following to your
+   ``Your_yt/.hg/hgrc``::
+
+     [paths]
+     upstream = https://bitbucket.org/yt_analysis/yt
+
+   This will create a path called ``upstream`` that is aliased to the URL of the
+   main yt repository.
+#. Now we'll use revsets_ to update your local repository to the tip of the
+   ``upstream`` path:
+
+   .. code-block:: bash
+
+      $ hg pull upstream
+      $ hg update -r "remote(yt, 'upstream')"
+
+After the above steps, your local repository should be at the current head of
+the ``yt`` branch in the main yt repository.  If you find yourself doing this a
+lot, it may be worth aliasing this task in your ``hgrc`` file by adding
+something like::
+
+  [alias]
+  ytupdate = update -r "remote(yt, 'upstream')"
+
+And then you can just issue ``hg ytupdate`` to get at the current head of the
+``yt`` branch in the main yt repository.
+
+Make sure you are on the branch you want to be on, and then you can make changes
+and ``hg commit`` them.  If you prefer working with `bookmarks
+<http://mercurial-scm.org/wiki/Bookmarks>`_, you may want to make a bookmark
+before committing your changes, such as ``hg bookmark mybookmark``.
+
+To push your changes on a bookmark to bitbucket, you can issue the following
+command:
+
+.. code-block:: bash
+
+    $ hg push -B mybookmark https://bitbucket.org/YourUsername/Your_yt
+
+The ``-B`` means "publish my bookmark, the changeset the bookmark is pointing
+at, and any ancestors of that changeset that aren't already on the remote
+server".
+
+To push to your fork on BitBucket if you didn't use a bookmark, you issue the
+following:
+
+.. code-block:: bash
+
+  $ hg push -r . -f https://bitbucket.org/YourUsername/Your_yt
+
+The ``-r .`` means "push only the commit I'm standing on and any ancestors."
+The ``-f`` is to force Mercurial to do the push since we are creating a new
+remote head without a bookmark.
+
+You can then go to the BitBucket interface and issue a new pull request based on
+your last changes, as usual.
+
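+Putting these pieces together, a minimal sketch of starting a second pull
+request (assuming the ``upstream`` path defined above and a hypothetical
+bookmark name) might look like:
+
+.. code-block:: bash
+
+    $ hg pull upstream                        # bring in the latest upstream changesets
+    $ hg update -r "remote(yt, 'upstream')"   # move off your current pull request head
+    $ hg bookmark my-second-fix               # track the new line of work
+    # ...edit files, then commit...
+    $ hg commit -m "Fix an unrelated bug"
+    $ hg push -B my-second-fix https://bitbucket.org/YourUsername/Your_yt
+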
+.. _code-style-guide:
+
+Coding Style Guide
+==================
+
+Automatically checking code style
+---------------------------------
+
+Below is a list of rules for coding style in yt. Some of these rules are
+suggestions and are not explicitly enforced, while others are enforced via
+automated testing. The yt project uses a subset of the rules checked by
+``flake8`` to
+verify our code. The ``flake8`` tool is a combination of the ``pyflakes`` and
+``pep8`` tools. To check the coding style of your contributions locally you will
+need to install the ``flake8`` tool from ``pip``:
+
+.. code-block:: bash
+
+    $ pip install flake8
+
+And then navigate to the root of the yt repository and run ``flake8`` on the
+``yt`` folder:
+
+.. code-block:: bash
+
+    $ cd $YT_HG
+    $ flake8 ./yt
+
+This will print out any ``flake8`` errors or warnings that your newly added code
+triggers. Any errors will be in your newly added code, because the rest of
+the yt codebase has already been cleaned of the errors and warnings detected
+by the ``flake8`` tool. Note that this will only trigger a subset of the
+`full flake8 error and warning list
+<http://flake8.readthedocs.org/en/latest/warnings.html>`_, since we explicitly
+blacklist a large number of the rules that ``flake8`` checks by default.
+
+Source code style guide
+-----------------------
+
+ * In general, follow PEP-8 guidelines.
+   http://www.python.org/dev/peps/pep-0008/
+ * Classes are ``ConjoinedCapitals``, methods and functions are
+   ``lowercase_with_underscores``.
+ * Use 4 spaces, not tabs, to represent indentation.
+ * Line widths should not be more than 80 characters.
+ * Do not use nested classes unless you have a very good reason to, such as
+   requiring a namespace or class-definition modification.  Classes should live
+   at the top level.  ``__metaclass__`` is exempt from this.
+ * Do not use unnecessary parentheses in conditionals.  ``if((something) and
+   (something_else))`` should be rewritten as
+   ``if something and something_else``. Python is more forgiving than C.
+ * Avoid copying memory when possible. For example, don't do
+   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
+   should be ``np.multiply(a, 3, a)``.
+ * In general, avoid all double-underscore method names: ``__something`` is
+   usually unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
+   rather than ``SpecialGrid.__init__()``.
+ * Docstrings should describe input, output, behavior, and any state changes
+   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
+   fiducial example of a docstring.
+ * Use only one top-level import per line. Unless there is a good reason not to,
+   imports should happen at the top of the file, after the copyright blurb.
+ * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
+   ``is`` or ``is not``.
+ * If you are comparing with a numpy boolean array, just refer to the array.
+   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
+ * Never compare with None using ``==`` or ``!=``, use ``is None`` or
+   ``is not None``.
+ * Use ``statement is not True`` instead of ``not statement is True``.
+ * Only one statement per line, do not use semicolons to put two or more
+   statements on a single line.
+ * Only declare local variables if they will be used later. If you do not use the
+   return value of a function, do not store it in a variable.
+ * Add tests for new functionality. When fixing a bug, consider adding a test to
+   prevent the bug from recurring.
+
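+Taken together, a short hypothetical snippet following several of these rules
+(the class and method names are made up purely for illustration) might look
+like:
+
+.. code-block:: python
+
+    import numpy as np
+
+    class MaskedRegionHelper(object):
+        """A hypothetical helper illustrating several style rules."""
+
+        def __init__(self, dimensions, mask=None):
+            # Compare with None using "is", never "==".
+            if mask is None:
+                mask = np.ones(dimensions, dtype=bool)
+            self.dimensions = dimensions
+            self.mask = mask
+
+        def rescale_in_place(self, data, factor):
+            # Multiply in place to avoid copying memory.
+            np.multiply(data, factor, data)
+            return data
+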
+API Style Guide
+---------------
+
+ * Do not use ``from some_module import *``
+ * Internally, only import from source files directly -- instead of:
+
+     ``from yt.visualization.api import ProjectionPlot``
+
+   do:
+
+     ``from yt.visualization.plot_window import ProjectionPlot``
+
+ * Import symbols from the module where they are defined, avoid transitive
+   imports.
+ * Import standard library modules, functions, and classes from builtins, do not
+   import them from other yt files.
+ * Numpy is to be imported as ``np``.
+ * Do not use too many keyword arguments.  If you have a lot of keyword
+   arguments, then you are doing too much in ``__init__`` and not enough via
+   parameter setting.
+ * In function arguments, place spaces after commas.  ``def something(a,b,c)``
+   should be ``def something(a, b, c)``.
+ * Don't create a new class to replicate the functionality of an old class --
+   replace the old class.  Too many options make for a confusing user
+   experience.
+ * Parameter files external to yt are a last resort.
+ * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
+   be avoided, they must be explained, even if they are only to be passed on to
+   a nested function.
+
+.. _docstrings:
+
+Docstrings
+----------
+
+The following is an example docstring. You can use it as a template for
+docstrings in your code and as a guide for how we expect docstrings to look and
+the level of detail we are looking for. Note that we use NumPy style docstrings
+written in `Sphinx restructured text format <http://sphinx-doc.org/rest.html>`_.
+
+.. code-block:: rest
+
+    r"""A one-line summary that does not use variable names or the
+    function name.
+
+    Several sentences providing an extended description. Refer to
+    variables using back-ticks, e.g. ``var``.
+
+    Parameters
+    ----------
+    var1 : array_like
+        Array_like means all those objects -- lists, nested lists, etc. --
+        that can be converted to an array.  We can also refer to
+        variables like ``var1``.
+    var2 : int
+        The type above can either refer to an actual Python type
+        (e.g. ``int``), or describe the type of the variable in more
+        detail, e.g. ``(N,) ndarray`` or ``array_like``.
+    Long_variable_name : {'hi', 'ho'}, optional
+        Choices in brackets, default first when optional.
+
+    Returns
+    -------
+    describe : type
+        Explanation
+    output : type
+        Explanation
+    tuple : type
+        Explanation
+    items : type
+        even more explaining
+
+    Other Parameters
+    ----------------
+    only_seldom_used_keywords : type
+        Explanation
+    common_parameters_listed_above : type
+        Explanation
+
+    Raises
+    ------
+    BadException
+        Because you shouldn't have done that.
+
+    See Also
+    --------
+    otherfunc : relationship (optional)
+    newfunc : Relationship (optional), which could be fairly long, in which
+              case the line wraps here.
+    thirdfunc, fourthfunc, fifthfunc
+
+    Notes
+    -----
+    Notes about the implementation algorithm (if needed).
+
+    This can have multiple paragraphs.
+
+    You may include some math:
+
+    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
+
+    And even use a greek symbol like :math:`omega` inline.
+
+    References
+    ----------
+    Cite the relevant literature, e.g. [1]_.  You may also cite these
+    references in the notes section above.
+
+    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
+       expert systems and adaptive co-kriging for environmental habitat
+       modelling of the Highland Haggis using object-oriented, fuzzy-logic
+       and neural-network techniques," Computers & Geosciences, vol. 22,
+       pp. 585-588, 1996.
+
+    Examples
+    --------
+    These are written in doctest format, and should illustrate how to
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
+    a plot collection, 'c' for a center, and 'L' for a vector.
+
+    >>> a=[1,2,3]
+    >>> print [x + 3 for x in a]
+    [4, 5, 6]
+    >>> print "a\n\nb"
+    a
+    b
+
+    """
+
+Variable Names and Enzo-isms
+----------------------------
+Avoid Enzo-isms.  This includes but is not limited to:
+
+ * Hard-coding parameter names that are the same as those in Enzo.  The
+   following translation table should be of some help.  Note that the
+   parameters are now properties on a ``Dataset`` subclass: you access them
+   like ds.refine_by .
+
+    - ``RefineBy `` => `` refine_by``
+    - ``TopGridRank `` => `` dimensionality``
+    - ``TopGridDimensions `` => `` domain_dimensions``
+    - ``InitialTime `` => `` current_time``
+    - ``DomainLeftEdge `` => `` domain_left_edge``
+    - ``DomainRightEdge `` => `` domain_right_edge``
+    - ``CurrentTimeIdentifier `` => `` unique_identifier``
+    - ``CosmologyCurrentRedshift `` => `` current_redshift``
+    - ``ComovingCoordinates `` => `` cosmological_simulation``
+    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
+    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
+    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
+
+ * Do not assume that the domain runs from 0 .. 1.  This is not true
+   everywhere.
+ * Variable names should be short but descriptive.
+ * No globals!

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 coding_styleguide.txt
--- a/coding_styleguide.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-Style Guide for Coding in yt
-============================
-
-Coding Style Guide
-------------------
-
- * In general, follow PEP-8 guidelines.
-   http://www.python.org/dev/peps/pep-0008/
- * Classes are ``ConjoinedCapitals``, methods and functions are
-   ``lowercase_with_underscores``.
- * Use 4 spaces, not tabs, to represent indentation.
- * Line widths should not be more than 80 characters.
- * Do not use nested classes unless you have a very good reason to, such as
-   requiring a namespace or class-definition modification.  Classes should live
-   at the top level.  ``__metaclass__`` is exempt from this.
- * Do not use unnecessary parenthesis in conditionals.  ``if((something) and
-   (something_else))`` should be rewritten as
-   ``if something and something_else``. Python is more forgiving than C.
- * Avoid copying memory when possible. For example, don't do
-   ``a = a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3``
-   should be ``np.multiply(a, 3, a)``.
- * In general, avoid all double-underscore method names: ``__something`` is
-   usually unnecessary.
- * When writing a subclass, use the super built-in to access the super class,
-   rather than explicitly. Ex: ``super(SpecialGridSubclass, self).__init__()``
-   rather than ``SpecialGrid.__init__()``.
- * Docstrings should describe input, output, behavior, and any state changes
-   that occur on an object.  See the file ``doc/docstring_example.txt`` for a
-   fiducial example of a docstring.
- * Use only one top-level import per line. Unless there is a good reason not to,
-   imports should happen at the top of the file, after the copyright blurb.
- * Never compare with ``True`` or ``False`` using ``==`` or ``!=``, always use
-   ``is`` or ``is not``.
- * If you are comparing with a numpy boolean array, just refer to the array.
-   Ex: do ``np.all(array)`` instead of ``np.all(array == True)``.
- * Never comapre with None using ``==`` or ``!=``, use ``is None`` or
-   ``is not None``.
- * Use ``statement is not True`` instead of ``not statement is True``
- * Only one statement per line, do not use semicolons to put two or more
-   statements on a single line.
- * Only declare local variables if they will be used later. If you do not use the
-   return value of a function, do not store it in a variable.
- * Add tests for new functionality. When fixing a bug, consider adding a test to
-   prevent the bug from recurring.
-
-API Guide
----------
-
- * Do not use ``from some_module import *``
- * Internally, only import from source files directly -- instead of:
-
-     ``from yt.visualization.api import ProjectionPlot``
-
-   do:
-
-     ``from yt.visualization.plot_window import ProjectionPlot``
-
- * Import symbols from the module where they are defined, avoid transitive
-   imports.
- * Import standard library modules, functions, and classes from builtins, do not
-   import them from other yt files.
- * Numpy is to be imported as ``np``.
- * Do not use too many keyword arguments.  If you have a lot of keyword
-   arguments, then you are doing too much in ``__init__`` and not enough via
-   parameter setting.
- * In function arguments, place spaces before commas.  ``def something(a,b,c)``
-   should be ``def something(a, b, c)``.
- * Don't create a new class to replicate the functionality of an old class --
-   replace the old class.  Too many options makes for a confusing user
-   experience.
- * Parameter files external to yt are a last resort.
- * The usage of the ``**kwargs`` construction should be avoided.  If they cannot
-   be avoided, they must be explained, even if they are only to be passed on to
-   a nested function.
-
-Variable Names and Enzo-isms
-----------------------------
-Avoid Enzo-isms.  This includes but is not limited to:
-
- * Hard-coding parameter names that are the same as those in Enzo.  The
-   following translation table should be of some help.  Note that the
-   parameters are now properties on a ``Dataset`` subclass: you access them
-   like ds.refine_by .
-
-    - ``RefineBy `` => `` refine_by``
-    - ``TopGridRank `` => `` dimensionality``
-    - ``TopGridDimensions `` => `` domain_dimensions``
-    - ``InitialTime `` => `` current_time``
-    - ``DomainLeftEdge `` => `` domain_left_edge``
-    - ``DomainRightEdge `` => `` domain_right_edge``
-    - ``CurrentTimeIdentifier `` => `` unique_identifier``
-    - ``CosmologyCurrentRedshift `` => `` current_redshift``
-    - ``ComovingCoordinates `` => `` cosmological_simulation``
-    - ``CosmologyOmegaMatterNow `` => `` omega_matter``
-    - ``CosmologyOmegaLambdaNow `` => `` omega_lambda``
-    - ``CosmologyHubbleConstantNow `` => `` hubble_constant``
-
- * Do not assume that the domain runs from 0 .. 1.  This is not true
-   everywhere.
- * Variable names should be short but descriptive.
- * No globals!

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -1,86 +0,0 @@
-    r"""A one-line summary that does not use variable names or the
-    function name.
-
-    Several sentences providing an extended description. Refer to
-    variables using back-ticks, e.g. `var`.
-
-    Parameters
-    ----------
-    var1 : array_like
-        Array_like means all those objects -- lists, nested lists, etc. --
-        that can be converted to an array.  We can also refer to
-        variables like `var1`.
-    var2 : int
-        The type above can either refer to an actual Python type
-        (e.g. ``int``), or describe the type of the variable in more
-        detail, e.g. ``(N,) ndarray`` or ``array_like``.
-    Long_variable_name : {'hi', 'ho'}, optional
-        Choices in brackets, default first when optional.
-
-    Returns
-    -------
-    describe : type
-        Explanation
-    output : type
-        Explanation
-    tuple : type
-        Explanation
-    items : type
-        even more explaining
-
-    Other Parameters
-    ----------------
-    only_seldom_used_keywords : type
-        Explanation
-    common_parameters_listed_above : type
-        Explanation
-
-    Raises
-    ------
-    BadException
-        Because you shouldn't have done that.
-
-    See Also
-    --------
-    otherfunc : relationship (optional)
-    newfunc : Relationship (optional), which could be fairly long, in which
-              case the line wraps here.
-    thirdfunc, fourthfunc, fifthfunc
-
-    Notes
-    -----
-    Notes about the implementation algorithm (if needed).
-
-    This can have multiple paragraphs.
-
-    You may include some math:
-
-    .. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
-
-    And even use a greek symbol like :math:`omega` inline.
-
-    References
-    ----------
-    Cite the relevant literature, e.g. [1]_.  You may also cite these
-    references in the notes section above.
-
-    .. [1] O. McNoleg, "The integration of GIS, remote sensing,
-       expert systems and adaptive co-kriging for environmental habitat
-       modelling of the Highland Haggis using object-oriented, fuzzy-logic
-       and neural-network techniques," Computers & Geosciences, vol. 22,
-       pp. 585-588, 1996.
-
-    Examples
-    --------
-    These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'ds' for the dataset, 'pc' for
-    a plot collection, 'c' for a center, and 'L' for a vector. 
-
-    >>> a=[1,2,3]
-    >>> print [x + 3 for x in a]
-    [4, 5, 6]
-    >>> print "a\n\nb"
-    a
-    b
-
-    """

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -627,3 +627,31 @@
 .. image:: _images/ds9_sloshing.png
 
 .. image:: _images/ds9_bubbles.png
+
+In November 2015, the structure of the photon and event HDF5 files changed. To 
+convert an old-format file to the new format, use the ``convert_old_file`` utility:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import convert_old_file
+   convert_old_file("old_photons.h5", "new_photons.h5", clobber=True)
+   convert_old_file("old_events.h5", "new_events.h5", clobber=True)
+
+This utility will auto-detect the kind of file (photons or events) and will write 
+the correct replacement for the new version.
+
+At times it may be convenient to write several ``EventLists`` to disk to be merged 
+together later. This can be achieved with the ``merge_files`` utility. It takes a
+list of event files to be merged and the name of an output file:
+
+.. code:: python
+
+   from yt.analysis_modules.photon_simulator.api import merge_files
+   merge_files(["events_0.h5", "events_1.h5", "events_2.h5"], "merged_events.h5",
+                add_exposure_times=True, clobber=False)
+
+At the current time this utility is very limited, as it only allows merging of 
+``EventLists`` which have the same parameters, with the exception of the exposure
+time. If the ``add_exposure_times`` argument to ``merge_files`` is set to ``True``, 
+the lists will be merged together with the exposure times added. Otherwise, the 
+exposure times of the different files must be equal. 

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -195,10 +195,32 @@
 :ref:`field-list`.  If you want to create additional custom derived fields, 
 see :ref:`creating-derived-fields`.
 
-The full list of fields available for a dataset can be found as 
-the attribute ``field_list`` for native, on-disk fields and ``derived_field_list``
-for derived fields (``derived_field_list`` is a superset of ``field_list``).
-You can view these lists by examining a dataset like this:
+Every dataset has an attribute, ``ds.fields``.  This attribute possesses
+attributes itself, each of which is a "field type," and each field type has as
+its attributes the fields themselves.  When one of these is printed, it returns
+information about the field and things like units and so on.  You can use this
+for tab-completing as well as easier access to information.
+
+As an example, you might browse the available fields like so::
+
+  print(dir(ds.fields))
+  print(dir(ds.fields.gas))
+  print(ds.fields.gas.density)
+
+On an Enzo dataset, the result from the final command would look something like
+this::
+
+  Alias Field for "('enzo', 'Density')" (gas, density): (units: g/cm**3)
+
+You can use this to easily explore available fields, particularly through
+tab-completion in Jupyter/IPython.
+
+For a more programmatic method of accessing fields, you can utilize the
+``ds.field_list``, ``ds.derived_field_list`` and some accessor methods to gain
+information about fields.  The full list of fields available for a dataset can
+be found as the attribute ``field_list`` for native, on-disk fields and
+``derived_field_list`` for derived fields (``derived_field_list`` is a superset
+of ``field_list``).  You can view these lists by examining a dataset like this:
 
 .. code-block:: python
 

diff -r fcd197314a190c75c6a6c283f6ce24dce5d37033 -r fe75b5a192df183a8009864c4a2789aba86fa478 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -63,6 +63,117 @@
    for i in range(sp["temperature"].size):
        print "(%f,  %f,  %f)    %f" % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i])
 
+.. _quickly-selecting-data:
+
+Slicing Syntax for Selecting Data
+---------------------------------
+
+yt provides a mechanism for easily selecting data while doing interactive work
+on the command line.  This allows for region selection based on the full domain
+of the object.  Selecting in this manner is exposed through a slice-like
+syntax.  All of these attributes are exposed through the ``RegionExpression``
+object, which is an attribute of a ``Dataset`` object, called ``r``.
+
+Getting All The Data
+^^^^^^^^^^^^^^^^^^^^
+
+The ``.r`` attribute serves as a persistent means of accessing the full data
+from a dataset.  You can access this shorthand operation by querying any field
+on the ``.r`` object, like so:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   rho = ds.r["density"]
+
+This will return a *flattened* array of data.  The region expression object
+(``r``) doesn't have any derived quantities on it.  This is completely
+equivalent to this set of statements:
+
+.. code-block:: python
+
+   ds = yt.load("RedshiftOutput0005")
+   dd = ds.all_data()
+   rho = dd["density"]
+
+.. warning::
+
+   One thing to keep in mind with accessing data in this way is that it is
+   *persistent*.  It is loaded into memory, and then retained until the dataset
+   is deleted or garbage collected.
+
+Selecting Multiresolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To select rectilinear regions, where the data is selected the same way that it
+is selected in a :ref:`region-reference`, you can utilize slice-like syntax,
+supplying start and stop, but not supplying a step argument.  This requires
+that three components of the slice must be specified.  These take a start and a
+stop, and are for the three axes in simulation order (if your data is ordered
+z, y, x for instance, this would be in z, y, x order).
+
+The slices can have both position and, optionally, unit values.  These define
+the value with respect to the ``domain_left_edge`` of the dataset.  So for
+instance, you could specify it like so::
+
+   ds.r[(100, 'kpc'):(200,'kpc'),:,:]
+
+This would return a region that included everything between 100 kpc from the
+left edge of the dataset to 200 kpc from the left edge of the dataset in the
+first dimension, and which spans the entire dataset in the second and third
+dimensions.  By default, if the units are unspecified, they are in the "native"
+code units of the dataset.
+
+This works in all types of datasets, as well.  For instance, if you have a
+geographic dataset (which is usually ordered latitude, longitude, altitude) you
+can easily select one hemisphere with a region selection::
+
+   ds.r[:,-180:0,:]
+
+Selecting Fixed Resolution Regions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+yt also provides functionality for selecting regions that have been turned into
+voxels.  This returns an :ref:`arbitrary-grid` object.  It can be created by
+specifying a complex slice "step", where the start and stop follow the same
+rules as above.  This is similar to how the numpy ``mgrid`` operation works.
+For instance, this code block will generate a grid covering the full domain,
+but converted to 21x35x100 dimensions::
+
+  region = ds.r[::21j, ::35j, ::100j]
+
+The left and right edges, as above, can be specified to provide bounds as well.
+For instance, to select a 10 meter cube, with 24 cells in each dimension, we
+could supply::
+
+  region = ds.r[(20,'m'):(30,'m'):24j, (30,'m'):(40,'m'):24j,
+                (7,'m'):(17,'m'):24j]
+
+This can select both particles and mesh fields.  Mesh fields will be 3D arrays,
+and generated through volume-weighted overlap calculations.
+
+Selecting Slices
+^^^^^^^^^^^^^^^^
+
+If one dimension is specified as a single value, that will be the dimension
+along which a slice is made.  This provides a simple means of generating a
+slice from a subset of the data.  For instance, to create a slice of a dataset,
+you can very simply specify the full domain along two axes::
+
+   sl = ds.r[:,:,0.25]
+
+This can also be very easily plotted::
+
+   sl = ds.r[:,:,0.25]
+   sl.plot()
+
+This accepts arguments the same way::
+
+   sl = ds.r[(20.1, 'km'):(31.0, 'km'), (504.143,'m'):(1000.0,'m'),
+             (900.1, 'm')]
+   sl.plot()
+
 .. _available-objects:
 
 Available Objects
@@ -144,6 +255,8 @@
       creating a Region covering the entire dataset domain.  It is effectively 
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
+.. _region-reference:
+
 **Box Region** 
     | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
@@ -227,15 +340,15 @@
       interpolates as necessary from coarse regions to fine.  See 
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-**Fixed-Resolution Region for Particle Deposition** 
+**Fixed-Resolution Region**
     | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
       relative to the particle locations (or it may not exist at all!).  An
       `arbitrary_grid` provides a means for generating a new independent mesh 
-      structure for particle deposition.  See :ref:`arbitrary-grid` for more 
-      information.
+      structure for particle deposition and simple mesh field interpolation.
+      See :ref:`arbitrary-grid` for more information.
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
@@ -279,6 +392,82 @@
    sp = ds.sphere('c', (10, 'kpc'))
    print sp.quantities.angular_momentum_vector()
 
+Quickly Processing Data
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Most data objects now have multiple numpy-like methods that allow you to
+quickly process data.  More of these methods will be added over time and added
+to this list.  Most, if not all, of these map to other yt operations and are
+designed as syntactic sugar to slightly simplify otherwise somewhat obtuse
+pipelines.
+
+These operations are parallelized.
+
+You can compute the extrema of a field by using the ``max`` or ``min``
+functions.  This will cache the extrema between calls, so calling ``min`` right
+after ``max`` will be considerably faster.  Here is an example::
+
+  ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+  reg = ds.r[0.3:0.6, 0.2:0.4, 0.9:0.95]
+  min_rho = reg.min("density")
+  max_rho = reg.max("density")
+
+This is equivalent to::
+
+  min_rho, max_rho = reg.quantities.extrema("density")
+
+The ``max`` operation can also compute the maximum intensity projection::
+
+  proj = reg.max("density", axis="x")
+  proj.plot()
+
+This is equivalent to::
+
+  proj = ds.proj("density", "x", data_source=reg, method="mip")
+  proj.plot()
+
+The ``min`` operator does not do this, however, as a minimum intensity
+projection is not currently implemented.
+
+You can also compute the ``mean`` value, which accepts a field, an axis, and a
+weight function.  If the axis is not specified, it will return the average
+value of the specified field, weighted by the weight argument.  The weight
+argument defaults to ``ones``, which performs an arithmetic average.  For
+instance::
+
+  mean_rho = reg.mean("density")
+  rho_by_vol = reg.mean("density", weight="cell_volume")
+
+This is equivalent to::
+
+  mean_rho = reg.quantities.weighted_average("density", weight_field="ones")
+  rho_by_vol = reg.quantities.weighted_average("density",
+                    weight_field="cell_volume")
+
+If an axis is provided, it will project along that axis and return the
+projection to you::
+
+  temp_proj = reg.mean("temperature", axis="y", weight="density")
+  temp_proj.plot()
+
+The ``sum`` function will add all the values in the data object.  It accepts a
+field and, optionally, an axis.  If the axis is left unspecified, it will sum
+the values in the object::
+
+  vol = reg.sum("cell_volume")
+
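+Following the pattern above, this should be roughly equivalent to the
+``total_quantity`` derived quantity::
+
+  # assumed mapping: total_quantity sums the field over the data object
+  vol = reg.quantities.total_quantity("cell_volume")
+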
+If the axis is specified, it will compute a projection using the method ``sum``
+(which does *not* take into account varying path length!) and return that to
+you::
+
+  cell_count = reg.sum("ones", axis="z")
+  cell_count.plot()
+
+To compute a projection where the path length *is* taken into account, you can
+use the ``integrate`` function::
+
+  proj = reg.integrate("density", "x")
+
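+Assuming ``ds.proj`` defaults to the ``integrate`` method, this is roughly
+equivalent to::
+
+  # "integrate" assumed to be the default projection method
+  proj = ds.proj("density", "x", data_source=reg)
+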
+All of these projections use the originating data object as their data source.
+
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -350,8 +539,8 @@
 
 .. _arbitrary-grid:
 
-Arbitrary Grids Objects for Particle Deposition
------------------------------------------------
+Arbitrary Grid Objects
+----------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -379,6 +568,13 @@
 While these cannot yet be used as input to projections or slices, slices and
 projections can be taken of the data in them and visualized by hand.
 
+These objects, as of yt 3.3, are now also able to "voxelize" mesh fields.  This
+means that you can query the "density" field and it will return the density
+field as deposited, identically to how it would be deposited in a fixed
+resolution buffer.  Note that this means that contributions from misaligned or
+partially-overlapping cells are added in a volume-weighted way, which makes it
+inappropriate for some types of analysis.
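+
+As a minimal sketch (assuming a dataset loaded as ``ds`` with a "density"
+field and a unit-cube domain in code units), querying a voxelized mesh field
+might look like::
+
+  ag = ds.arbitrary_grid([0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [128, 128, 128])
+  rho = ag["density"]   # 128x128x128 array of volume-weighted values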
+
 .. _boolean_data_objects:
 
 Combining Objects: Boolean Data Objects

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/315acc8b8296/
Changeset:   315acc8b8296
Branch:      yt
User:        ngoldbaum
Date:        2015-12-01 00:41:47+00:00
Summary:     Merged in chummels/yt (pull request #1856)

Adds effects of transverse doppler redshift to LightRay
Affected #:  8 files

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -11,8 +11,8 @@
 with the path length of the ray through the cell.  Line profiles are 
 generated using a voigt profile based on the temperature field.  The lines 
 are then shifted according to the redshift recorded by the light ray tool 
-and (optionally) the line of sight peculiar velocity.  Inclusion of the 
-peculiar velocity requires setting ``get_los_velocity`` to True in the call to 
+and (optionally) the peculiar velocity of gas along the ray.  Inclusion of the 
+peculiar velocity requires setting ``use_peculiar_velocity`` to True in the call to 
 :meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
 The spectrum generator will output a file containing the wavelength and 

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 doc/source/analyzing/analysis_modules/light_ray_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_ray_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_ray_generator.rst
@@ -79,7 +79,7 @@
 
   lr.make_light_ray(seed=8675309,
                     fields=['temperature', 'density'],
-                    get_los_velocity=True)
+                    use_peculiar_velocity=True)
 
 The keyword arguments are:
 
@@ -107,8 +107,10 @@
 * ``data_filename`` (*string*): Path to output file for ray data.  
   Default: None.
 
-* ``get_los_velocity`` (*bool*): If True, the line of sight velocity is 
-  calculated for each point in the ray.  Default: True.
+* ``use_peculiar_velocity`` (*bool*): If True, the doppler redshift from
+  the peculiar velocity of gas along the ray is calculated and added to the
+  cosmological redshift as the "effective" redshift.
+  Default: True.
 
 * ``redshift`` (*float*): Used with light rays made from single datasets to 
   specify a starting redshift for the ray.  If not used, the starting 

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 doc/source/cookbook/fit_spectrum.py
--- a/doc/source/cookbook/fit_spectrum.py
+++ b/doc/source/cookbook/fit_spectrum.py
@@ -71,7 +71,6 @@
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=fields, setup_function=setup_ds,
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Create an AbsorptionSpectrum object extending from

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 doc/source/cookbook/light_ray.py
--- a/doc/source/cookbook/light_ray.py
+++ b/doc/source/cookbook/light_ray.py
@@ -20,7 +20,6 @@
                   solution_filename='LR/lightraysolution.txt',
                   data_filename='LR/lightray.h5',
                   fields=['temperature', 'density'],
-                  get_los_velocity=True,
                   njobs=-1)
 
 # Optionally, we can now overplot the part of this ray that intersects 

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -13,8 +13,7 @@
                   end_position=[1., 1., 1.],
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
-                  fields=['temperature', 'density'],
-                  get_los_velocity=True)
+                  fields=['temperature', 'density'])
 
 # Optionally, we can now overplot this ray on a projection of the source 
 # dataset

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -139,7 +139,9 @@
            is recommended to set to None in such circumstances.
            Default: None
         use_peculiar_velocity : optional, bool
-           if True, include line of sight velocity for shifting lines.
+           if True, include peculiar velocity for calculating doppler redshift
+           to shift lines.  Requires similar flag to be set in LightRay 
+           generation.
            Default: True
         subgrid_resolution : optional, int
            When a line is being added that is unresolved (ie its thermal

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -50,7 +50,6 @@
 
     lr.make_light_ray(seed=1234567,
                       fields=['temperature', 'density', 'H_number_density'],
-                      get_los_velocity=True,
                       data_filename='lightray.h5')
 
     sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

diff -r 64675f402d50709159e5400b0a64873f2fc7400a -r 315acc8b8296a1655efbc5fa6dfc9c88fab44b62 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -258,13 +258,13 @@
                        trajectory=None,
                        fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
-                       njobs=-1):
+                       get_los_velocity=None, use_peculiar_velocity=True, 
+                       redshift=None, njobs=-1):
         """
         make_light_ray(seed=None, start_position=None, end_position=None,
                        trajectory=None, fields=None, setup_function=None,
                        solution_filename=None, data_filename=None,
-                       get_los_velocity=True, redshift=None,
+                       use_peculiar_velocity=True, redshift=None,
                        njobs=-1)
 
         Create a light ray and get field values for each lixel.  A light
@@ -305,9 +305,10 @@
         data_filename : optional, string
             Path to output file for ray data.
             Default: None.
-        get_los_velocity : optional, bool
-            If True, the line of sight velocity is calculated for
-            each point in the ray.
+        use_peculiar_velocity : optional, bool
+            If True, the peculiar velocity along the ray will be sampled for
+            calculating the effective redshift combining the cosmological
+            redshift and the doppler redshift.
             Default: True.
         redshift : optional, float
             Used with light rays made from single datasets to specify a
@@ -335,7 +336,7 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         Make a light ray from a single dataset:
 
@@ -349,9 +350,12 @@
         ...                       solution_filename="solution.txt",
         ...                       data_filename="my_ray.h5",
         ...                       fields=["temperature", "density"],
-        ...                       get_los_velocity=True)
+        ...                       use_peculiar_velocity=True)
 
         """
+        if get_los_velocity is not None:
+            use_peculiar_velocity = get_los_velocity
+            mylog.warn("'get_los_velocity' kwarg is deprecated. Use 'use_peculiar_velocity' instead.")
 
         # Calculate solution.
         self._calculate_light_ray_solution(seed=seed,
@@ -368,9 +372,10 @@
         all_fields.extend(['dl', 'dredshift', 'redshift'])
         all_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
         data_fields.extend(['x', 'y', 'z', 'dx', 'dy', 'dz'])
-        if get_los_velocity:
-            all_fields.extend(['velocity_x', 'velocity_y',
-                               'velocity_z', 'velocity_los', 'redshift_eff'])
+        if use_peculiar_velocity:
+            all_fields.extend(['velocity_x', 'velocity_y', 'velocity_z', 
+                               'velocity_los', 'redshift_eff', 
+                               'redshift_dopp'])
             data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
 
         all_ray_storage = {}
@@ -444,16 +449,43 @@
                 for field in data_fields:
                     sub_data[field].extend(sub_ray[field][asort])
 
-                if get_los_velocity:
-                    line_of_sight = sub_segment[1] - sub_segment[0]
+                if use_peculiar_velocity:
+                    line_of_sight = sub_segment[0] - sub_segment[1]
                     line_of_sight /= ((line_of_sight**2).sum())**0.5
                     sub_vel = ds.arr([sub_ray['velocity_x'],
                                       sub_ray['velocity_y'],
                                       sub_ray['velocity_z']])
-                    # line of sight velocity is reversed relative to ray
-                    sub_data['velocity_los'].extend(-1*(np.rollaxis(sub_vel, 1) *
-                                                     line_of_sight).sum(axis=1)[asort])
-                    del sub_vel
+                    # Line of sight velocity = vel_los
+                    sub_vel_los = (np.rollaxis(sub_vel, 1) * \
+                                   line_of_sight).sum(axis=1)
+                    sub_data['velocity_los'].extend(sub_vel_los[asort])
+
+                    # doppler redshift:
+                    # See https://en.wikipedia.org/wiki/Redshift and 
+                    # Peebles eqns: 5.48, 5.49
+
+                    # 1 + redshift_dopp = (1 + v*cos(theta)/c) / 
+                    # sqrt(1 - v**2/c**2)
+
+                    # where v is the peculiar velocity (ie physical velocity
+                    # without the hubble flow, but no hubble flow in sim, so
+                    # just the physical velocity).
+
+                    # the bulk of the doppler redshift is from line of sight 
+                    # motion, but there is a small amount from time dilation 
+                    # of transverse motion, hence the inclusion of theta (the 
+                    # angle between line of sight and the velocity). 
+                    # theta is the angle between the ray vector (i.e. line of 
+                    # sight) and the velocity vectors: a dot b = ab cos(theta)
+
+                    sub_vel_mag = sub_ray['velocity_magnitude']
+                    cos_theta = np.dot(line_of_sight, sub_vel) / sub_vel_mag
+                    redshift_dopp = \
+                        (1 + sub_vel_mag * cos_theta / speed_of_light_cgs) / \
+                         np.sqrt(1 - sub_vel_mag**2 / speed_of_light_cgs**2) - 1
+                    sub_data['redshift_dopp'].extend(redshift_dopp[asort])
+                    del sub_vel, sub_vel_los, sub_vel_mag, cos_theta, \
+                        redshift_dopp
 
                 sub_ray.clear_data()
                 del sub_ray, asort
@@ -461,34 +493,25 @@
             for key in sub_data:
                 sub_data[key] = ds.arr(sub_data[key]).in_cgs()
 
-            # Get redshift for each lixel.  Assume linear relation between l and z.
+            # Get redshift for each lixel.  Assume linear relation between l 
+            # and z.
             sub_data['dredshift'] = (my_segment['redshift'] - next_redshift) * \
                 (sub_data['dl'] / vector_length(my_segment['start'],
                                                 my_segment['end']).in_cgs())
             sub_data['redshift'] = my_segment['redshift'] - \
               sub_data['dredshift'].cumsum() + sub_data['dredshift']
 
-            # When velocity_los is present, add effective redshift 
-            # (redshift_eff) field by combining cosmological redshift and 
+            # When using the peculiar velocity, create effective redshift 
+            # (redshift_eff) field combining cosmological redshift and 
             # doppler redshift.
             
-            # first convert los velocities to comoving frame (ie mult. by (1+z)), 
-            # then calculate doppler redshift:
-            # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+            # to add cosmological redshift and doppler redshifts, follow
+            # eqn 3.75 in Peacock's Cosmological Physics:
+            # 1 + z_eff = (1 + z_cosmo) * (1 + z_doppler)
 
-            # then to add cosmological redshift and doppler redshift, follow
-            # eqn 3.75 in Peacock's Cosmological Physics:
-            # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
-            # Alternatively, see eqn 5.49 in Peebles for a similar result.
-            if get_los_velocity:
-
-                velocity_los_cm = (1 + sub_data['redshift']) * \
-                                  sub_data['velocity_los']
-                redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
-                                (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
-                sub_data['redshift_eff'] = ((1 + redshift_dopp) * \
-                                           (1 + sub_data['redshift'])) - 1
-                del velocity_los_cm, redshift_dopp
+            if use_peculiar_velocity:
+               sub_data['redshift_eff'] = ((1 + sub_data['redshift_dopp']) * \
+                                            (1 + sub_data['redshift'])) - 1
 
             # Remove empty lixels.
             sub_dl_nonzero = sub_data['dl'].nonzero()

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


