[yt-svn] commit/yt: 7 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Thu Aug 25 07:17:25 PDT 2016


7 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/af48d08f22e2/
Changeset:   af48d08f22e2
Branch:      yt
User:        MatthewTurk
Date:        2016-07-14 23:02:48+00:00
Summary:     Implement .plot() for profile objects.
Affected #:  2 files

diff -r 1c339ea7619997cba6842e973cf5f0e13b2f30ee -r af48d08f22e29bd832aec8d9ff5be9237e084b63 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -813,8 +813,9 @@
         ex = self._compute_extrema(field)
         return ex[1] - ex[0]
 
-    def hist(self, field, weight = None, bins = None):
+    def hist(self, fields, weight = None, bins = None, range = None):
         raise NotImplementedError
+        
 
     def mean(self, field, axis=None, weight='ones'):
         r"""Compute the mean of a field, optionally along an axis, with a

diff -r 1c339ea7619997cba6842e973cf5f0e13b2f30ee -r af48d08f22e29bd832aec8d9ff5be9237e084b63 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -452,6 +452,14 @@
     def bounds(self):
         return ((self.x_bins[0], self.x_bins[-1]),)
 
+    def plot(self):
+        r"""
+        This returns a :class:`~yt.visualization.profile_plotter.ProfilePlot`
+        with the fields that have been added to this object.
+        """
+        from yt.visualization.profile_plotter import ProfilePlot
+        return ProfilePlot.from_profiles(self)
+
 class Profile1DFromDataset(ProfileNDFromDataset, Profile1D):
     """
     A 1D profile object loaded from a ytdata dataset.
@@ -569,6 +577,14 @@
         return ((self.x_bins[0], self.x_bins[-1]),
                 (self.y_bins[0], self.y_bins[-1]))
 
+    def plot(self):
+        r"""
+        This returns a :class:`~yt.visualization.profile_plotter.PhasePlot` with
+        the fields that have been added to this object.
+        """
+        from yt.visualization.profile_plotter import PhasePlot
+        return PhasePlot.from_profile(self)
+
 class Profile2DFromDataset(ProfileNDFromDataset, Profile2D):
     """
     A 2D profile object loaded from a ytdata dataset.
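
As a quick illustration of the new methods, a minimal usage sketch follows; the dataset name and field choices are illustrative assumptions, not part of the changeset:

    import yt

    # Hypothetical sample dataset; substitute any dataset you have locally.
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # 1D profile: the new Profile1D.plot() returns a ProfilePlot.
    prof1d = yt.create_profile(ad, [("gas", "density")],
                               [("gas", "temperature")])
    prof1d.plot().save()

    # 2D profile: the new Profile2D.plot() returns a PhasePlot.
    prof2d = yt.create_profile(ad, [("gas", "density"),
                                    ("gas", "temperature")],
                               [("gas", "cell_mass")], weight_field=None)
    prof2d.plot().save()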


https://bitbucket.org/yt_analysis/yt/commits/a7b699a776c4/
Changeset:   a7b699a776c4
Branch:      yt
User:        MatthewTurk
Date:        2016-07-15 23:52:04+00:00
Summary:     Add a wrapped .profile function and split out YTFieldData.
Affected #:  11 files

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -11,7 +11,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.field_data import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.particle_mesh_operations import CICSample_3
 from yt.utilities.parallel_tools.parallel_analysis_interface import \

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -28,7 +28,8 @@
 from yt.data_objects.data_containers import \
     YTSelectionContainer1D, \
     YTSelectionContainer2D, \
-    YTSelectionContainer3D, \
+    YTSelectionContainer3D
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.funcs import \
     ensure_list, \

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -22,6 +22,7 @@
 
 from collections import defaultdict
 from contextlib import contextmanager
+from functools import wraps
 
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.fields.derived_field import \
@@ -65,6 +66,8 @@
 from yt.geometry.selection_routines import \
     compose_selector
 from yt.extern.six import add_metaclass, string_types
+from yt.data_objects.field_data import YTFieldData
+from yt.data_objects.profiles import create_profile
 
 data_object_registry = {}
 
@@ -91,12 +94,6 @@
         return tr
     return save_state
 
-class YTFieldData(dict):
-    """
-    A Container object for field data, instead of just having it be a dict.
-    """
-    pass
-
 class RegisteredDataContainer(type):
     def __init__(cls, name, b, d):
         type.__init__(cls, name, b, d)
@@ -813,9 +810,79 @@
         ex = self._compute_extrema(field)
         return ex[1] - ex[0]
 
-    def hist(self, fields, weight = None, bins = None, range = None):
-        raise NotImplementedError
-        
+    def profile(self, bin_fields, fields, n_bins=64,
+                   extrema=None, logs=None, units=None,
+                   weight_field="cell_mass",
+                   accumulation=False, fractional=False,
+                   deposition='ngp'):
+        r"""
+        Create a 1, 2, or 3D profile object from this data_source.
+
+        The dimensionality of the profile object is chosen by the number of
+        fields given in the bin_fields argument.  This simply calls
+        :func:`yt.data_objects.profiles.create_profile`.
+
+        Parameters
+        ----------
+        bin_fields : list of strings
+            List of the binning fields for profiling.
+        fields : list of strings
+            The fields to be profiled.
+        n_bins : int or list of ints
+            The number of bins in each dimension.  If None, 64 bins are
+            used for each bin field.
+            Default: 64.
+        extrema : dict of min, max tuples
+            Minimum and maximum values of the bin_fields for the profiles.
+            The keys correspond to the field names. Defaults to the extrema
+            of the bin_fields of the dataset. If a units dict is provided, extrema
+            are understood to be in the units specified in the dictionary.
+        logs : dict of boolean values
+            Whether or not to log the bin_fields for the profiles.
+            The keys correspond to the field names. Defaults to the take_log
+            attribute of the field.
+        units : dict of strings
+            The units of the fields in the profiles, including the bin_fields.
+        weight_field : str or tuple field identifier
+            The weight field for computing weighted average for the profile
+            values.  If None, the profile values are sums of the data in
+            each bin.
+        accumulation : bool or list of bools
+            If True, the profile values for a bin n are the cumulative sum of
+            all the values from bin 0 to n.  If -True, the sum is reversed so
+            that the value for bin n is the cumulative sum from bin N (total bins)
+            to n.  If the profile is 2D or 3D, a list of values can be given to
+            control the summation in each dimension independently.
+            Default: False.
+        fractional : bool
+            If True, the profile values are divided by the sum of all
+            the profile data such that the profile represents a probability
+            distribution function.
+        deposition : str
+            Controls the type of deposition used for ParticlePhasePlots.
+            Valid choices are 'ngp' and 'cic'. Default: 'ngp'. This
+            parameter is ignored if the input fields are not of particle type.
+
+
+        Examples
+        --------
+
+        Create a 1d profile.  Access bin field from profile.x and field
+        data from profile[<field_name>].
+
+        >>> ds = load("DD0046/DD0046")
+        >>> ad = ds.all_data()
+        >>> profile = ad.profile([("gas", "density")],
+        ...                      [("gas", "temperature"),
+        ...                       ("gas", "velocity_x")])
+        >>> print (profile.x)
+        >>> print (profile["gas", "temperature"])
+        >>> plot = profile.plot()
+        """
+        p = create_profile(self, bin_fields, fields, n_bins=n_bins,
+                   extrema=extrema, logs=logs, units=units,
+                   weight_field=weight_field,
+                   accumulation=accumulation, fractional=fractional,
+                   deposition=deposition)
+        return p
 
     def mean(self, field, axis=None, weight='ones'):
         r"""Compute the mean of a field, optionally along an axis, with a

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/field_data.py
--- /dev/null
+++ b/yt/data_objects/field_data.py
@@ -0,0 +1,20 @@
+"""
+The YTFieldData object.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2016, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class YTFieldData(dict):
+    """
+    A Container object for field data, instead of just having it be a dict.
+    """
+    pass
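
With YTFieldData split out into its own module, downstream code should import it from the new location; a minimal sketch (the field key is illustrative):

    from yt.data_objects.field_data import YTFieldData

    fd = YTFieldData()                  # behaves exactly like a dict
    fd["gas", "density"] = [1.0, 2.0]   # typically keyed by (ftype, fname) tuples
    print(list(fd.keys()))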

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -17,8 +17,9 @@
 import numpy as np
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 from yt.geometry.selection_routines import convert_mask_to_indices
 import yt.geometry.particle_deposit as particle_deposit
 from yt.utilities.exceptions import \

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -17,8 +17,9 @@
 import numpy as np
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 import yt.geometry.particle_deposit as particle_deposit
 import yt.geometry.particle_smooth as particle_smooth
 

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -21,11 +21,12 @@
     ensure_list, \
     mylog
 from yt.extern.six import add_metaclass
+from yt.data_objects.field_data import \
+    YTFieldData
 
 particle_handler_registry = defaultdict()
 
 def particle_converter(func):
-    from .data_containers import YTFieldData
     def save_state(grid):
         old_params = grid.field_parameters
         old_keys = grid.field_data.keys()

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -26,7 +26,7 @@
     array_like_field, \
     YTQuantity
 from yt.units.unit_object import Unit
-from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.field_data import YTFieldData
 from yt.utilities.lib.misc_utilities import \
     new_bin_profile1d, \
     new_bin_profile2d, \
@@ -938,7 +938,7 @@
     data from profile[<field_name>].
 
     >>> ds = load("DD0046/DD0046")
-    >>> ad = ds.h.all_data()
+    >>> ad = ds.all_data()
     >>> profile = create_profile(ad, [("gas", "density")],
     ...                              [("gas", "temperature"),
     ...                               ("gas", "velocity_x")])

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -22,8 +22,9 @@
     fill_fcoords, fill_fwidths
 
 from yt.data_objects.data_containers import \
-    YTFieldData, \
     YTSelectionContainer
+from yt.data_objects.field_data import \
+    YTFieldData
 import yt.geometry.particle_deposit as particle_deposit
 
 class UnstructuredMesh(YTSelectionContainer):

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -39,7 +39,7 @@
     Dataset
 from yt.data_objects.octree_subset import \
     OctreeSubset
-from yt.data_objects.data_containers import \
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.utilities.exceptions import \
     YTParticleDepositionNotImplemented

diff -r af48d08f22e29bd832aec8d9ff5be9237e084b63 -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -26,7 +26,7 @@
     iterable, \
     ensure_list
 from yt.utilities.io_handler import io_registry
-from yt.data_objects.data_containers import \
+from yt.data_objects.field_data import \
     YTFieldData
 from yt.data_objects.particle_unions import \
     ParticleUnion


https://bitbucket.org/yt_analysis/yt/commits/9a2c5f76d099/
Changeset:   9a2c5f76d099
Branch:      yt
User:        MatthewTurk
Date:        2016-08-10 23:48:07+00:00
Summary:     Merging with upstream
Affected #:  182 files

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,9 +37,11 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
@@ -49,6 +51,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
 yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
@@ -62,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,4 +5160,38 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
+882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
+a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1
+41bd8aacfbc81fa66d7a3f2cd2880f10c3e237a4 yt-2.2
+3836676ee6307f9caf5ccdb0f0dd373676a68535 yt-2.3
+076cec2c57d2e4b508babbfd661f5daa1e34ec80 yt-2.4
+bd285a9a8a643ebb7b47b543e9343da84cd294c5 yt-2.5
+34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
+2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
+59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4
+21c0314cee16242b6685e42a74d16f7a993c9a88 yt-2.5.5
+053487f48672b8fd5c43af992e92bc2f2499f31f yt-2.6
+d43ff9d8e20f2d2b8f31f4189141d2521deb341b yt-2.6.1
+f1e22ef9f3a225f818c43262e6ce9644e05ffa21 yt-2.6.2
+816186f16396a16853810ac9ebcde5057d8d5b1a yt-2.6.3
 f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4
+73a9f749157260c8949f05c07715305aafa06408 yt-3.0.0
+0cf350f11a551f5a5b4039a70e9ff6d98342d1da yt-3.0.1
+511887af4c995a78fe606e58ce8162c88380ecdc yt-3.0.2
+fd7cdc4836188a3badf81adb477bcc1b9632e485 yt-3.1.0
+28733726b2a751e774c8b7ae46121aa57fd1060f yt-3.2
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+425ff6dc64a8eb92354d7e6091653a397c068167 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+0000000000000000000000000000000000000000 yt-3.2.1
+f7ca21c7b3fdf25d2ccab139849ae457597cfd5c yt-3.2.1
+a7896583c06585be66de8404d76ad5bc3d2caa9a yt-3.2.2
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+80aff0c49f40e04f00d7b39149c7fc297b8ed311 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+0000000000000000000000000000000000000000 yt-3.2.3
+83d2c1e9313e7d83eb5b96888451ff2646fd8ff3 yt-3.2.3
+7edbfde96c3d55b227194394f46c0b2e6ed2b961 yt-3.3.0
+9bc3d0e9b750c923d44d73c447df64fc431f5838 yt-3.3.1

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc CONTRIBUTING.rst
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -301,7 +301,15 @@
 This downloads that new forked repository to your local machine, so that you
 can access it, read it, make modifications, etc.  It will put the repository in
 a local directory of the same name as the repository in the current working
-directory.  You can see any past state of the code by using the hg log command.
+directory. You should also run the following command, to make sure you are on
+the "yt" branch, and not others like "stable" (this will be important
+later when you want to submit your pull requests):
+
+.. code-block:: bash
+
+   $ hg update yt
+
+You can see any past state of the code by using the hg log command.
 For example, the following command would show you the last 5 changesets
 (modifications to the code) that were submitted to that repository.
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,7 @@
                 Daniel Fenn (df11c at my.fsu.edu)
                 John Forces (jforbes at ucolick.org)
                 Adam Ginsburg (keflavich at gmail.com)
+                Austin Gilbert (augilbert4 at gmail.com)
                 Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 William Gray (graywilliamj at gmail.com)

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt setupext.py CONTRIBUTING.rst
+include README* CREDITS COPYING.txt CITATION  setupext.py CONTRIBUTING.rst
 include yt/visualization/mapserver/html/map_index.html
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/extensions/config_help.py
--- /dev/null
+++ b/doc/extensions/config_help.py
@@ -0,0 +1,34 @@
+import re
+import subprocess
+from docutils import statemachine
+from sphinx.util.compat import Directive
+
+def setup(app):
+    app.add_directive('config_help', GetConfigHelp)
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    retdict = dict(
+        version='1.0',
+        parallel_read_safe=True,
+        parallel_write_safe=True
+    )
+
+    return retdict
+
+class GetConfigHelp(Directive):
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = True
+
+    def run(self):
+        rst_file = self.state_machine.document.attributes['source']
+        data = subprocess.check_output(
+            self.arguments[0].split(" ") + ['-h']).decode('utf8').split('\n')
+        ind = next((i for i, val in enumerate(data)
+                    if re.match('\s{0,3}\{.*\}\s*$', val)))
+        lines = ['.. code-block:: none', ''] + data[ind + 1:]
+        self.state_machine.insert_input(
+            statemachine.string2lines("\n".join(lines)), rst_file)
+        return []
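
The directive shells out to the named command with -h, finds the argparse choices line ({...}) that ends the usage header, and inserts everything after it as a literal block. Below is a stand-alone sketch of that extraction logic; invoking it on "yt config" is an assumption and requires the yt command on your PATH:

    import re
    import subprocess

    data = subprocess.check_output(
        "yt config -h".split(" ")).decode("utf8").split("\n")
    # The first line with 0-3 leading spaces containing only an argparse
    # choices block like {get,set,...} marks the end of the usage header.
    ind = next(i for i, val in enumerate(data)
               if re.match(r"\s{0,3}\{.*\}\s*$", val))
    print("\n".join([".. code-block:: none", ""] + data[ind + 1:]))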

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -49,8 +49,8 @@
                     # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1           # Install Mercurial or not?  If hg is not already
                     # installed, yt cannot be installed from source.
-INST_UNSTRUCTURED=0 # Install dependencies needed for unstructured mesh 
-                    # rendering?
+INST_EMBREE=0       # Install dependencies needed for Embree-accelerated 
+                    # ray tracing
 
 # These options control whether low-level system libraries are installed
 # they are necessary for building yt's dependencies from source and are 
@@ -75,6 +75,7 @@
 INST_H5PY=1     # Install h5py?
 INST_ASTROPY=0  # Install astropy?
 INST_NOSE=1     # Install nose?
+INST_NETCDF4=0  # Install netcdf4 and its python bindings?
 
 # These options allow you to customize the builds of yt dependencies.
 # They are only used if INST_CONDA=0.
@@ -115,7 +116,10 @@
         echo
         echo "    $ source deactivate"
         echo
-        echo "or install yt into your current environment"
+        echo "or install yt into your current environment with:"
+        echo
+        echo "    $ conda install -c conda-forge yt"
+        echo
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
@@ -484,21 +488,19 @@
     ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
-# set paths needed for unstructured mesh rendering support
+# set paths needed for Embree
 
-if [ $INST_UNSTRUCTURED -ne 0 ]
+if [ $INST_EMBREE -ne 0 ]
 then
     if [ $INST_YT_SOURCE -eq 0 ]
     then
-        echo "yt must be compiled from source to install support for"
-        echo "unstructured mesh rendering. Please set INST_YT_SOURCE to 1"
-        echo "and re-run the install script."
+        echo "yt must be compiled from source to install Embree support."
+        echo "Please set INST_YT_SOURCE to 1 and re-run the install script."
         exit 1
     fi
     if [ $INST_CONDA -eq 0 ]
     then
-        echo "unstructured mesh rendering support has not yet been implemented"
-        echo "for INST_CONDA=0."
+        echo "Embree support has not yet been implemented for INST_CONDA=0."
         exit 1
     fi
     if [ `uname` = "Darwin" ]
@@ -510,8 +512,8 @@
         EMBREE="embree-2.8.0.x86_64.linux"
         EMBREE_URL="https://github.com/embree/embree/releases/download/v2.8.0/$EMBREE.tar.gz"
     else
-        echo "Unstructured mesh rendering is not supported on this platform."
-        echo "Set INST_UNSTRUCTURED=0 and re-run the install script."
+        echo "Embree is not supported on this platform."
+        echo "Set INST_EMBREE=0 and re-run the install script."
         exit 1
     fi
     PYEMBREE_URL="https://github.com/scopatz/pyembree/archive/master.zip"
@@ -528,6 +530,17 @@
     fi
 fi
 
+if [ $INST_NETCDF4 -ne 0 ]
+then
+    if [ $INST_CONDA -eq 0 ]
+    then
+        echo "This script can only install netcdf4 through conda."
+        echo "Please set INST_CONDA to 1"
+        echo "and re-run the install script"
+        exit 1
+    fi
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -557,9 +570,9 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-18s = %s so I " "INST_UNSTRUCTURED" "${INST_UNSTRUCTURED}"
-get_willwont ${INST_UNSTRUCTURED}
-echo "be installing unstructured mesh rendering"
+printf "%-18s = %s so I " "INST_EMBREE" "${INST_EMBREE}"
+get_willwont ${INST_EMBREE}
+echo "be installing Embree"
 
 if [ $INST_CONDA -eq 0 ]
 then
@@ -1411,7 +1424,7 @@
     fi
     YT_DEPS+=('sympy')
 
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_NETCDF4 -eq 1 ]
     then
         YT_DEPS+=('netcdf4')   
     fi
@@ -1425,14 +1438,21 @@
         log_cmd conda install --yes ${YT_DEP}
     done
 
+    if [ $INST_PY3 -eq 1 ]
+    then
+        echo "Installing mercurial"
+        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
+    fi
+
     log_cmd pip install python-hglib
 
     log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
-    if [ $INST_UNSTRUCTURED -eq 1 ]
+    if [ $INST_EMBREE -eq 1 ]
     then
         
-        echo "Installing embree"
+        echo "Installing Embree"
         if [ ! -d ${DEST_DIR}/src ]
         then
             mkdir ${DEST_DIR}/src
@@ -1479,22 +1499,15 @@
         fi
     fi
 
-    if [ $INST_PY3 -eq 1 ]
-    then
-        echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
-        log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
-    fi
-
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install --yes yt
+        log_cmd conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
         log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
-        if [ $INST_UNSTRUCTURED -eq 1 ]
+        if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
         fi

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/_static/apiKey01.jpg
Binary file doc/source/_static/apiKey01.jpg has changed

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/_static/apiKey02.jpg
Binary file doc/source/_static/apiKey02.jpg has changed

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/_static/apiKey03.jpg
Binary file doc/source/_static/apiKey03.jpg has changed

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/_static/apiKey04.jpg
Binary file doc/source/_static/apiKey04.jpg has changed

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
+   halo_catalogs
+   halo_mass_function
    halo_transition
-   halo_catalogs
-   halo_finders
-   halo_mass_function
    halo_merger_tree
+   ellipsoid_analysis

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
+Halo Finding and Analysis
+=========================
 
-Creating Halo Catalogs
-----------------------
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.x, see :ref:`halo-transition`.
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+.. _halo_catalog_finding:
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+Halo Finding
+------------
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has their own set of keyword
+arguments to control functionality.  These can specified in the form
+of a dictinoary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied overdensity
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and
+noise-resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e. zoom-in grid-based simulations), one can set up a particle
+filter to select the lowest mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+When run this way, yt configures the halo finder, launches a server process
+which disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, This is where the out*list files that Rockstar makes should be
+  placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time.) Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded in to yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel with separate processors or groups of processors
+being allocated to perform the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.
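
Pulling the reorganized documentation together, a hedged end-to-end sketch of the pipeline it describes; the dataset paths and the mass threshold are illustrative:

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_ds = yt.load('rockstar_halos/halos_0.0.bin')  # existing Rockstar output

    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
    # Keep only halos above an (illustrative) mass threshold, then run the
    # pipeline over every remaining halo.
    hc.add_filter('quantity_value', 'particle_mass', '>', 1e13, 'Msun')
    hc.create()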

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally Rockstar (`Behroozi et a.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D-phase space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substrcture, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied over density
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (ie: zoom-in grid based simulations), one can set up a particle
-filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run the Rockstar Halo finding, you must launch python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-The script above configures the Halo finder, launches a server process which
-disseminates run information and coordinates writer-reader processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument:
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, This is where the out*list files that Rockstar makes should be
-  placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time.) Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar, however
-  this option can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below in the first plot (i) is a simplified
-depiction of three haloes labeled 1,2 and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the finder_kwargs argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter is how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

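A minimal sketch of the single-slice behavior documented above; the dataset
name is only an example and not part of the changeset:

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # A bare colon is repeated along all three dimensions,
    # so this selects the entire domain.
    all_data = ds.r[:]

    # Likewise, a single slice selects the box running from
    # 0.4 to 0.6 (in code units) along each axis.
    box = ds.r[0.4:0.6]

    print(box["gas", "density"].shape)
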
diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), with Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -31,7 +31,8 @@
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
               'sphinx.ext.pngmath', 'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps']
+              'sphinx.ext.napoleon', 'yt_cookbook', 'yt_colormaps',
+              'config_help']
 
 if not on_rtd:
     extensions.append('sphinx.ext.autosummary')
@@ -67,9 +68,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '3.3-dev'
+version = '3.4-dev'
 # The full version, including alpha/beta/rc tags.
-release = '3.3-dev'
+release = '3.4-dev'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -56,6 +56,16 @@
 
 .. yt_cookbook:: simulation_analysis.py
 
+Smoothed Fields
+~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a smoothed field,
+corresponding to a user-created derived field, using the
+:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
+See :ref:`gadget-notebook` for how to work with Gadget data.
+
+.. yt_cookbook:: smoothed_field.py
+
 
 .. _cookbook-time-series-analysis:
 
@@ -93,16 +103,6 @@
 
 .. yt_cookbook:: hse_field.py
 
-Smoothed Fields
-~~~~~~~~~~~~~~~
-
-This recipe demonstrates how to create a smoothed field,
-corresponding to a user-created derived field, using the
-:meth:`~yt.fields.particle_fields.add_volume_weighted_smoothed_field` method.
-See :ref:`gadget-notebook` for how to work with Gadget data.
-
-.. yt_cookbook:: smoothed_field.py
-
 Using Particle Filters to Calculate Star Formation Rates
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/single_dataset_light_ray.py
--- a/doc/source/cookbook/single_dataset_light_ray.py
+++ b/doc/source/cookbook/single_dataset_light_ray.py
@@ -8,9 +8,12 @@
 
 # With a single dataset, a start_position and
 # end_position or trajectory must be given.
-# Trajectory should be given as (r, theta, phi)
-lr.make_light_ray(start_position=[0., 0., 0.],
-                  end_position=[1., 1., 1.],
+# These positions can be defined as xyz coordinates,
+# but here we just use the two opposite corners of the 
+# simulation box.  Alternatively, a trajectory can
+# be given as (r, theta, phi).
+lr.make_light_ray(start_position=ds.domain_left_edge,
+                  end_position=ds.domain_right_edge,
                   solution_filename='lightraysolution.txt',
                   data_filename='lightray.h5',
                   fields=['temperature', 'density'])

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/cookbook/yt_gadget_owls_analysis.ipynb
--- a/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
+++ b/doc/source/cookbook/yt_gadget_owls_analysis.ipynb
@@ -20,7 +20,7 @@
    "source": [
     "The first thing you will need to run these examples is a working installation of yt.  The author or these examples followed the instructions under \"Get yt: from source\" at http://yt-project.org/ to install an up to date development version of yt.\n",
     "\n",
-    "Next you should set the default ``test_data_dir`` in the ``.yt/config`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
+    "Next you should set the default ``test_data_dir`` in the ``~/.config/yt/ytrc`` file in your home directory.  Note that you may have to create the directory and file if it doesn't exist already.\n",
     "\n",
     "> [yt]\n",
     "\n",

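For reference, the file the notebook points at is a plain ini-style
configuration file; a minimal sketch of the setting being described (the
path is only an example):

.. code-block:: none

    [yt]
    test_data_dir = /path/to/yt_test_data

The same value can also be set from the command line with
``yt config set yt test_data_dir /path/to/yt_test_data``, as in the testing
docs below.
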
diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/developing/extensions.rst
--- a/doc/source/developing/extensions.rst
+++ b/doc/source/developing/extensions.rst
@@ -3,7 +3,7 @@
 Extension Packages
 ==================
 
-.. note:: For some additional discussion, see :ref:`YTEP-0029
+.. note:: For some additional discussion, see `YTEP-0029
           <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
           this plan was designed.
 

diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
@@ -285,15 +285,12 @@
 
 These datasets are available at http://yt-project.org/data/.
 
-Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
-with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to test with.  Here is an example
-config file:
+Next, add the config parameter ``test_data_dir`` pointing to the
+directory with the test data you want to use, e.g.:
 
 .. code-block:: none
 
-   [yt]
-   test_data_dir = /Users/tomservo/src/yt-data
+   $ yt config set yt test_data_dir /Users/tomservo/src/yt-data
 
 More data will be added over time.  To run the answer tests, you must first
 generate a set of test answers locally on a "known good" revision, then update
@@ -313,7 +310,7 @@
 This command will create a set of local answers from the tipsy frontend tests
 and store them in ``$HOME/Documents/test`` (this can but does not have to be the
 same directory as the ``test_data_dir`` configuration variable defined in your
-``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+``~/.config/yt/ytrc`` file) in a file named ``local-tipsy``. To run the tipsy
 frontend's answer tests using a different yt changeset, update to that
 changeset, recompile if necessary, and run the tests using the following
 command:
@@ -487,7 +484,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require definition of which set of tests constitutes a given
 answer. Configuration for the integration server is stored in

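To make the yield-style convention above concrete, here is a minimal sketch
of a nose test module; the test names and the field checked are illustrative,
not taken from the yt test suite:

.. code-block:: python

    from yt.testing import assert_equal, fake_random_ds

    def test_trivial_equality():
        # nose collects each yielded (function, args...) tuple as its own test
        yield assert_equal, 1.0, 1.0

    def test_extrema_ordering():
        # fake_random_ds provides a small randomized dataset for unit tests
        ds = fake_random_ds(16)
        dd = ds.all_data()
        mi, ma = dd.quantities.extrema("density")
        yield assert_equal, bool(mi <= ma), True
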
diff -r a7b699a776c488ed7c9f8a57e8e8498ed71c2931 -r 9a2c5f76d099c12560161637bca3ccd3db1087fc doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,7 +41,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import yt\n",
@@ -58,7 +60,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -74,7 +78,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -118,7 +124,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -140,7 +148,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -167,7 +177,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -193,7 +205,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -213,7 +227,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -229,7 +245,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -246,7 +264,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -256,7 +276,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -273,7 +295,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -299,7 +323,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -313,29 +339,15 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Create a Transfer Function that goes from the minimum to the maximum of the data:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
     "Define the properties and size of the `camera` viewport:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -358,24 +370,41 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
-    "                north_vector=[0,0,1], steady_north=True, \n",
-    "                sub_samples=5, log_fields=[False])\n",
+    "sc = yt.create_scene(ds, 'Temperature')\n",
+    "dd = ds.all_data()\n",
     "\n",
-    "cam.transfer_function.map_to_colormap(mi,ma, \n",
-    "                                      scale=15.0, colormap='algae')"
+    "source = sc[0]\n",
+    "\n",
+    "source.log_field = False\n",
+    "\n",
+    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)\n",
+    "tf.map_to_colormap(mi, ma, scale=15.0, colormap='algae')\n",
+    "\n",
+    "source.set_transfer_function(tf)\n",
+    "\n",
+    "sc.add_source(source)\n",
+    "\n",
+    "cam = sc.add_camera()\n",
+    "cam.width = W\n",
+    "cam.center = c\n",
+    "cam.normal_vector = L\n",
+    "cam.north_vector = [0, 0, 1]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam.show()"
+    "sc.show(sigma_clip=4)"
    ]
   },
   {
@@ -395,7 +424,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -412,7 +443,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -429,7 +462,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -449,7 +484,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -467,7 +504,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -495,7 +534,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -520,7 +561,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -538,7 +581,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -561,7 +606,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -577,7 +624,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -613,7 +662,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3.0
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -625,4 +674,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
\ No newline at end of file
+}

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0b4a08a4a7e8/
Changeset:   0b4a08a4a7e8
Branch:      yt
User:        MatthewTurk
Date:        2016-08-10 23:49:25+00:00
Summary:     Whoops, gotta fix these arguments.
Affected #:  1 file

diff -r 9a2c5f76d099c12560161637bca3ccd3db1087fc -r 0b4a08a4a7e827b94e762b77d6e0d5d04b82ea93 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -877,11 +877,9 @@
         >>> print (profile["gas", "temperature"])
         >>> plot = profile.plot()
         """
-        p = create_profile(self, bin_fields, fields, n_bins=64,
-                   extrema=None, logs=None, units=None,
-                   weight_field="cell_mass",
-                   accumulation=False, fractional=False,
-                   deposition='ngp')
+        p = create_profile(self, bin_fields, fields, n_bins,
+                   extrema, logs, units, weight_field, accumulation,
+                   fractional, deposition)
         return p
 
     def mean(self, field, axis=None, weight='ones'):

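The upshot of this fix is that arguments given to the wrapped ``profile``
method are now forwarded to ``create_profile`` instead of being overwritten
by the defaults. A minimal usage sketch, assuming a dataset loadable with
``yt.load`` (the dataset name is only an example):

.. code-block:: python

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()

    # n_bins and weight_field now actually reach create_profile.
    profile = ad.profile(("gas", "density"), ("gas", "temperature"),
                         n_bins=32, weight_field="cell_mass")
    print(profile["gas", "temperature"])
    plot = profile.plot()
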

https://bitbucket.org/yt_analysis/yt/commits/15e452467720/
Changeset:   15e452467720
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 17:47:30+00:00
Summary:     Fixing indentation
Affected #:  1 file

diff -r 0b4a08a4a7e827b94e762b77d6e0d5d04b82ea93 -r 15e452467720cf3c5eddd9e3a4101fb3a15724ce yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -811,10 +811,10 @@
         return ex[1] - ex[0]
 
     def profile(self, bin_fields, fields, n_bins=64,
-                   extrema=None, logs=None, units=None,
-                   weight_field="cell_mass",
-                   accumulation=False, fractional=False,
-                   deposition='ngp'):
+                extrema=None, logs=None, units=None,
+                weight_field="cell_mass",
+                accumulation=False, fractional=False,
+                deposition='ngp'):
         r"""
         Create a 1, 2, or 3D profile object from this data_source.
 


https://bitbucket.org/yt_analysis/yt/commits/1aa4c05b23da/
Changeset:   1aa4c05b23da
Branch:      yt
User:        MatthewTurk
Date:        2016-08-12 17:48:43+00:00
Summary:     Removing unused import
Affected #:  1 file

diff -r 15e452467720cf3c5eddd9e3a4101fb3a15724ce -r 1aa4c05b23da4b035effa8689ec99099ccd34707 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -22,7 +22,6 @@
 
 from collections import defaultdict
 from contextlib import contextmanager
-from functools import wraps
 
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.fields.derived_field import \


https://bitbucket.org/yt_analysis/yt/commits/f6198bb1cb2c/
Changeset:   f6198bb1cb2c
Branch:      yt
User:        MatthewTurk
Date:        2016-08-24 18:18:04+00:00
Summary:     Merging from upstream
Affected #:  52 files

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -23,6 +23,7 @@
 yt/geometry/particle_smooth.c
 yt/geometry/selection_routines.c
 yt/utilities/amr_utils.c
+yt/utilities/lib/autogenerated_element_samplers.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -3,6 +3,7 @@
 include yt/visualization/mapserver/html/leaflet/*.css
 include yt/visualization/mapserver/html/leaflet/*.js
 include yt/visualization/mapserver/html/leaflet/images/*.png
+include yt/utilities/mesh_types.yaml
 exclude scripts/pr_backport.py
 recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
 recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1429,25 +1429,24 @@
         YT_DEPS+=('netcdf4')   
     fi
     
-    # Here is our dependency list for yt
-    log_cmd conda update --yes conda
+    log_cmd ${DEST_DIR}/bin/conda update --yes conda
     
     log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
     for YT_DEP in "${YT_DEPS[@]}"; do
         echo "Installing $YT_DEP"
-        log_cmd conda install --yes ${YT_DEP}
+        log_cmd ${DEST_DIR}/bin/conda install --yes ${YT_DEP}
     done
 
     if [ $INST_PY3 -eq 1 ]
     then
         echo "Installing mercurial"
-        log_cmd conda create -y -n py27 python=2.7 mercurial
+        log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial
         log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin
     fi
 
-    log_cmd pip install python-hglib
+    log_cmd ${DEST_DIR}/bin/pip install python-hglib
 
-    log_cmd hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
+    log_cmd ${DEST_DIR}/bin/hg clone https://bitbucket.org/yt_analysis/yt_conda ${DEST_DIR}/src/yt_conda
     
     if [ $INST_EMBREE -eq 1 ]
     then
@@ -1474,17 +1473,17 @@
         ( ${GETFILE} "$PYEMBREE_URL" 2>&1 ) 1>> ${LOG_FILE} || do_exit
         log_cmd unzip ${DEST_DIR}/src/master.zip
         pushd ${DEST_DIR}/src/pyembree-master &> /dev/null
-        log_cmd python setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
+        log_cmd ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py install build_ext -I${DEST_DIR}/include -L${DEST_DIR}/lib
         popd &> /dev/null
     fi
 
     if [ $INST_ROCKSTAR -eq 1 ]
     then
         echo "Building Rockstar"
-        ( hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
-        ROCKSTAR_PACKAGE=$(conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
-        log_cmd conda build ${DEST_DIR}/src/yt_conda/rockstar
-        log_cmd conda install $ROCKSTAR_PACKAGE
+        ( ${DEST_DIR}/bin/hg clone http://bitbucket.org/MatthewTurk/rockstar ${DEST_DIR}/src/rockstar/ 2>&1 ) 1>> ${LOG_FILE}
+        ROCKSTAR_PACKAGE=$(${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar --output)
+        log_cmd ${DEST_DIR}/bin/conda build ${DEST_DIR}/src/yt_conda/rockstar
+        log_cmd ${DEST_DIR}/bin/conda install $ROCKSTAR_PACKAGE
         ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
     fi
 
@@ -1493,20 +1492,20 @@
     then
         if [ $INST_PY3 -eq 1 ]
         then
-            log_cmd pip install pyx
+            log_cmd ${DEST_DIR}/bin/pip install pyx
         else
-            log_cmd pip install pyx==0.12.1
+            log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1
         fi
     fi
 
     if [ $INST_YT_SOURCE -eq 0 ]
     then
         echo "Installing yt"
-        log_cmd conda install -c conda-forge --yes yt
+        log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes yt
     else
         echo "Building yt from source"
         YT_DIR="${DEST_DIR}/src/yt-hg"
-        log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+        log_cmd ${DEST_DIR}/bin/hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
         if [ $INST_EMBREE -eq 1 ]
         then
             echo $DEST_DIR > ${YT_DIR}/embree.cfg
@@ -1517,7 +1516,7 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -116,7 +116,12 @@
 Continuum features with optical depths that follow a power law can also be
 added.  Like adding lines, you must specify details like the wavelength
 and the field in the dataset and LightRay that is tied to this feature.
-Below, we will add H Lyman continuum.
+The wavelength refers to the location at which the continuum begins to be 
+applied to the dataset, and as it moves to lower wavelength values, the 
+optical depth value decreases according to the defined power law.  The 
+normalization value is the column density of the linked field which results
+in an optical depth of 1 at the defined wavelength.  Below, we add the hydrogen 
+Lyman continuum.
 
 .. code-block:: python
 
@@ -131,7 +136,7 @@
 Making the Spectrum
 ^^^^^^^^^^^^^^^^^^^
 
-Once all the lines and continuum are added, it is time to make a spectrum out
+Once all the lines and continua are added, it is time to make a spectrum out
 of some light ray data.
 
 .. code-block:: python

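Written out as a formula, the continuum model described above gives each
absorber of column density :math:`N` an optical depth, at and below the
continuum wavelength :math:`\lambda_0`,

.. math::

   \tau(\lambda) = \frac{N}{N_0}
       \left(\frac{\lambda}{\lambda_0}\right)^{i},
   \qquad \lambda \le \lambda_0,

where :math:`N_0` is the normalization and :math:`i` the power-law index, so
that :math:`\tau(\lambda_0) = 1` when :math:`N = N_0`, and the transmitted
flux follows as :math:`F = e^{-\tau}`.
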
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ /dev/null
@@ -1,137 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This example shows how to embed an animation produced by `matplotlib` into an IPython notebook.  This example makes use of `matplotlib`'s [animation toolkit](http://matplotlib.org/api/animation_api.html) to transform individual frames into a final rendered movie.  \n",
-    "\n",
-    "Matplotlib uses [`ffmpeg`](http://www.ffmpeg.org/) to generate the movie, so you must install `ffmpeg` for this example to work correctly.  Usually the best way to install `ffmpeg` is using your system's package manager."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import yt\n",
-    "from matplotlib import animation"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "First, we need to construct a function that will embed the video produced by ffmpeg directly into the notebook document. This makes use of the [HTML5 video tag](http://www.w3schools.com/html/html5_video.asp) and the WebM video format.  WebM is supported by Chrome, Firefox, and Opera, but not Safari and Internet Explorer.  If you have trouble viewing the video you may need to use a different video format.  Since this uses `libvpx` to construct the frames, you will need to ensure that ffmpeg has been compiled with `libvpx` support."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from tempfile import NamedTemporaryFile\n",
-    "import base64\n",
-    "\n",
-    "VIDEO_TAG = \"\"\"<video controls>\n",
-    " <source src=\"data:video/x-webm;base64,{0}\" type=\"video/webm\">\n",
-    " Your browser does not support the video tag.\n",
-    "</video>\"\"\"\n",
-    "\n",
-    "def anim_to_html(anim):\n",
-    "    if not hasattr(anim, '_encoded_video'):\n",
-    "        with NamedTemporaryFile(suffix='.webm') as f:\n",
-    "            anim.save(f.name, fps=6, extra_args=['-vcodec', 'libvpx'])\n",
-    "            video = open(f.name, \"rb\").read()\n",
-    "        anim._encoded_video = base64.b64encode(video)\n",
-    "    \n",
-    "    return VIDEO_TAG.format(anim._encoded_video.decode('ascii'))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we define a function to actually display the video inline in the notebook."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "from IPython.display import HTML\n",
-    "\n",
-    "def display_animation(anim):\n",
-    "    plt.close(anim._fig)\n",
-    "    return HTML(anim_to_html(anim))"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Finally, we set up the animation itsself.  We use yt to load the data and create each frame and use matplotlib to stitch the frames together.  Note that we customize the plot a bit by calling the `set_zlim` function.  Customizations only need to be applied to the first frame - they will carry through to the rest.\n",
-    "\n",
-    "This may take a while to run, be patient."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": false
-   },
-   "outputs": [],
-   "source": [
-    "import matplotlib.pyplot as plt\n",
-    "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
-    "\n",
-    "prj = yt.ProjectionPlot(yt.load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
-    "prj.set_zlim('density',1e-32,1e-26)\n",
-    "fig = prj.plots['density'].figure\n",
-    "\n",
-    "# animation function.  This is called sequentially\n",
-    "def animate(i):\n",
-    "    ds = yt.load('Enzo_64/DD%04i/data%04i' % (i,i))\n",
-    "    prj._switch_ds(ds)\n",
-    "\n",
-    "# call the animator.  blit=True means only re-draw the parts that have changed.\n",
-    "anim = animation.FuncAnimation(fig, animate, frames=44, interval=200, blit=False)\n",
-    "\n",
-    "# call our new function to display the animation\n",
-    "display_animation(anim)"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 2",
-   "language": "python",
-   "name": "python2"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 2
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 doc/source/cookbook/embedded_webm_animation.rst
--- a/doc/source/cookbook/embedded_webm_animation.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Making animations using matplotlib and ffmpeg
----------------------------------------------
-
-.. notebook:: embedded_webm_animation.ipynb

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -41,7 +41,6 @@
 
    notebook_tutorial
    custom_colorbar_tickmarks
-   embedded_webm_animation
    gadget_notebook
    owls_notebook
    ../visualizing/transfer_function_helper

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 setup.py
--- a/setup.py
+++ b/setup.py
@@ -114,6 +114,9 @@
               ["yt/utilities/spatial/ckdtree.pyx"],
               include_dirs=["yt/utilities/lib/"],
               libraries=std_libs),
+    Extension("yt.utilities.lib.autogenerated_element_samplers",
+              ["yt/utilities/lib/autogenerated_element_samplers.pyx"],
+              include_dirs=["yt/utilities/lib/"]),
     Extension("yt.utilities.lib.bitarray",
               ["yt/utilities/lib/bitarray.pyx"],
               libraries=std_libs),
@@ -193,7 +196,7 @@
     "particle_mesh_operations", "depth_first_octree", "fortran_reader",
     "interpolators", "misc_utilities", "basic_octree", "image_utilities",
     "points_in_volume", "quad_tree", "ray_integrators", "mesh_utilities",
-    "amr_kdtools", "lenses",
+    "amr_kdtools", "lenses", "distance_queue"
 ]
 for ext_name in lib_exts:
     cython_extensions.append(

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -67,11 +67,13 @@
   local_ytdata_000:
     - yt/frontends/ytdata
 
-  local_absorption_spectrum_001:
+  local_absorption_spectrum_004:
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_novpec
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_non_cosmo_sph
     - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_cosmo_sph
+    - yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py:test_absorption_spectrum_with_continuum
 
   local_axialpix_001:
     - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -203,6 +203,13 @@
             input_ds = input_file
         field_data = input_ds.all_data()
 
+        # temperature field required to calculate voigt profile widths
+        if ('temperature' not in input_ds.derived_field_list) and \
+           (('gas', 'temperature') not in input_ds.derived_field_list):
+            raise RuntimeError(
+                "('gas', 'temperature') field required to be present in %s "
+                "for AbsorptionSpectrum to function." % input_file)
+
         self.tau_field = np.zeros(self.lambda_field.size)
         self.absorbers_list = []
 
@@ -210,6 +217,7 @@
             comm = _get_comm(())
             njobs = min(comm.size, len(self.line_list))
 
+        mylog.info("Creating spectrum")
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity,
                                     output_absorbers_file,
                                     subgrid_resolution=subgrid_resolution,
@@ -268,47 +276,96 @@
                 redshift_eff = ((1 + redshift) * \
                                 (1 + field_data['redshift_dopp'])) - 1.
 
+        if not use_peculiar_velocity:
+            redshift_eff = redshift
+
         return redshift, redshift_eff
 
     def _add_continua_to_spectrum(self, field_data, use_peculiar_velocity,
                                   observing_redshift=0.):
         """
-        Add continuum features to the spectrum.
+        Add continuum features to the spectrum.  Continua are recorded as
+        a name, associated field, wavelength, normalization value, and index.
+        Continua are applied at and below the denoted wavelength, where the
+        optical depth decreases as a power law of desired index.  For positive 
+        index values, this means optical depth is highest at the denoted 
+        wavelength, and it drops with shorter and shorter wavelengths.  
+        Consequently, transmitted flux undergoes a discontinuous cutoff at the 
+        denoted wavelength, and then slowly increases with decreasing wavelength 
+        according to the power law.
         """
         # Change the redshifts of continuum sources to account for the
         # redshift at which the observer sits
         redshift, redshift_eff = self._apply_observing_redshift(field_data,
                                  use_peculiar_velocity, observing_redshift)
 
-        # Only add continuum features down to tau of 1.e-4.
-        min_tau = 1.e-3
+        # min_tau is the minimum optical depth value that warrants 
+        # accounting for an absorber.  for a single absorber, noticeable 
+        # continuum effects begin for tau = 1e-3 (leading to transmitted 
+        # flux of e^-tau ~ 0.999).  but we apply a cutoff to remove
+        # absorbers with insufficient column_density to contribute 
+        # significantly to a continuum (see below).  because lots of 
+        # low column density absorbers can add up to a significant
+        # continuum effect, we normalize min_tau by the n_absorbers.
+        n_absorbers = field_data['dl'].size
+        min_tau = 1.e-3/n_absorbers
 
         for continuum in self.continuum_list:
-            column_density = field_data[continuum['field_name']] * field_data['dl']
+
+            # Normalization is in cm**-2, so column density must be as well
+            column_density = (field_data[continuum['field_name']] * 
+                              field_data['dl']).in_units('cm**-2')
+            if (column_density == 0).all():
+                mylog.info("Not adding continuum %s: insufficient column density" % continuum['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             if use_peculiar_velocity:
                 delta_lambda = continuum['wavelength'] * redshift_eff
             else:
                 delta_lambda = continuum['wavelength'] * redshift
+
+            # right index of continuum affected area is wavelength itself
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = np.digitize(this_wavelength, self.lambda_field).clip(0, self.n_lambda)
+            right_index = np.digitize(this_wavelength, 
+                                      self.lambda_field).clip(0, self.n_lambda)
+            # left index of continuum affected area is the wavelength at
+            # which optical depth reaches min_tau
             left_index = np.digitize((this_wavelength *
-                                     np.power((min_tau * continuum['normalization'] /
-                                               column_density), (1. / continuum['index']))),
-                                    self.lambda_field).clip(0, self.n_lambda)
+                              np.power((min_tau * continuum['normalization'] /
+                                        column_density),
+                                       (1. / continuum['index']))),
+                              self.lambda_field).clip(0, self.n_lambda)
 
+            # Only calculate the effects of continua where normalized
+            # column_density is greater than min_tau
+            # because lower column will not have significant contribution
             valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > min_tau) &
                                        (right_index - left_index > 1))[0]
+            if valid_continuua.size == 0:
+                mylog.info("Not adding continuum %s: insufficient column density or out of range" %
+                    continuum['label'])
+                continue
+
             pbar = get_pbar("Adding continuum - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
+
+            # Tau value is (wavelength / continuum_wavelength)**index *
+            #              (column_dens / norm)
+            # i.e. a power law decreasing as wavelength decreases
+
+            # Step through the absorber list and add continuum tau for each to
+            # the total optical depth for all wavelengths
             for i, lixel in enumerate(valid_continuua):
-                line_tau = np.power((self.lambda_field[left_index[lixel]:right_index[lixel]] /
-                                     this_wavelength[lixel]), continuum['index']) * \
-                                     column_density[lixel] / continuum['normalization']
-                self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
+                cont_tau = \
+                    np.power((self.lambda_field[left_index[lixel] :
+                                                right_index[lixel]] /
+                                   this_wavelength[lixel]), \
+                              continuum['index']) * \
+                    (column_density[lixel] / continuum['normalization'])
+                self.tau_field[left_index[lixel]:right_index[lixel]] += cont_tau
                 pbar.update(i)
             pbar.finish()
 
@@ -333,6 +390,9 @@
         # and deposit the lines into the spectrum
         for line in parallel_objects(self.line_list, njobs=njobs):
             column_density = field_data[line['field_name']] * field_data['dl']
+            if (column_density == 0).all():
+                mylog.info("Not adding line %s: insufficient column density" % line['label'])
+                continue
 
             # redshift_eff field combines cosmological and velocity redshifts
             # so delta_lambda gives the offset in angstroms from the rest frame
@@ -376,7 +436,10 @@
             cdens = column_density.in_units("cm**-2").d # cm**-2
             thermb = thermal_b.in_cgs().d  # thermal b coefficient; cm / s
             dlambda = delta_lambda.d  # lambda offset; angstroms
-            vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            if use_peculiar_velocity:
+                vlos = field_data['velocity_los'].in_units("km/s").d # km/s
+            else:
+                vlos = np.zeros(field_data['temperature'].size)
 
             # When we actually deposit the voigt profile, sometimes we will
             # have underresolved lines (ie lines with smaller widths than
@@ -413,6 +476,12 @@
             # observed spectrum where it occurs and deposit a voigt profile
             for i in parallel_objects(np.arange(n_absorbers), njobs=-1):
 
+                # if there is a ray element with temperature = 0 or column
+                # density = 0, skip it
+                if (thermal_b[i] == 0.) or (cdens[i] == 0.):
+                    pbar.update(i)
+                    continue
+
                 # the virtual window into which the line is deposited initially
                 # spans a region of 2 coarse spectral bins
                 # (one on each side of the center_index) but the window

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/tests/test_absorption_spectrum.py
@@ -33,7 +33,8 @@
 COSMO_PLUS_SINGLE = "enzo_cosmology_plus/RD0009/RD0009"
 GIZMO_PLUS = "gizmo_cosmology_plus/N128L16.param"
 GIZMO_PLUS_SINGLE = "gizmo_cosmology_plus/snap_N128L16_151.hdf5"
-
+ISO_GALAXY = "IsolatedGalaxy/galaxy0030/galaxy0030"
+FIRE = "FIRE_M12i_ref11/snapshot_600.hdf5"
 
 @requires_file(COSMO_PLUS)
 @requires_answer_testing()
@@ -145,6 +146,58 @@
     shutil.rmtree(tmpdir)
 
 @requires_file(COSMO_PLUS_SINGLE)
+@requires_answer_testing()
+def test_absorption_spectrum_non_cosmo_novpec():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    lr = LightRay(COSMO_PLUS_SINGLE)
+
+    ray_start = [0,0,0]
+    ray_end = [1,1,1]
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5', use_peculiar_velocity=False)
+
+    sp = AbsorptionSpectrum(1200.0, 1300.0, 10001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=False)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_non_cosmo_novpec".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_non_cosmo_novpec.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(COSMO_PLUS_SINGLE)
 def test_equivalent_width_conserved():
     """
     This tests that the equivalent width of the optical depth is conserved 
@@ -360,3 +413,146 @@
     # clean up
     os.chdir(curdir)
     shutil.rmtree(tmpdir)
+
+@requires_file(ISO_GALAXY)
+@requires_answer_testing()
+def test_absorption_spectrum_with_continuum():
+    """
+    This test generates an absorption spectrum from a simple light ray on a
+    grid dataset and adds Lyman alpha and Lyman continuum to it
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(ISO_GALAXY)
+    lr = LightRay(ds)
+
+    ray_start = ds.domain_left_edge
+    ray_end = ds.domain_right_edge
+    lr.make_light_ray(start_position=ray_start, end_position=ray_end,
+                      fields=['temperature', 'density', 'H_number_density'],
+                      data_filename='lightray.h5')
+
+    sp = AbsorptionSpectrum(800.0, 1300.0, 5001)
+
+    my_label = 'HI Lya'
+    field = 'H_number_density'
+    wavelength = 1215.6700  # Angstroms
+    f_value = 4.164E-01
+    gamma = 6.265e+08
+    mass = 1.00794
+
+    sp.add_line(my_label, field, wavelength, f_value,
+                gamma, mass, label_threshold=1.e10)
+
+    my_label = 'Ly C'
+    field = 'H_number_density'
+    wavelength = 912.323660  # Angstroms
+    normalization = 1.6e17
+    index = 3.0
+
+    sp.add_continuum(my_label, field, wavelength, normalization, index)
+
+    wavelength, flux = sp.make_spectrum('lightray.h5',
+                                        output_file='spectrum.h5',
+                                        line_list_file='lines.txt',
+                                        use_peculiar_velocity=True)
+
+    # load just-generated hdf5 file of spectral data (for consistency)
+    data = h5.File('spectrum.h5', 'r')
+    
+    for key in data.keys():
+        func = lambda x=key: data[x][:]
+        func.__name__ = "{}_continuum".format(key)
+        test = GenericArrayTest(None, func)
+        test_absorption_spectrum_with_continuum.__name__ = test.description
+        yield test
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)
+
+@requires_file(FIRE)
+def test_absorption_spectrum_with_zero_field():
+    """
+    This test generates an absorption spectrum from a particle dataset
+    that contains ray elements with zero field values
+    """
+
+    # Set up in a temp dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    ds = load(FIRE)
+    lr = LightRay(ds)
+
+    # Define the species and associated parameters to add to the spectrum.
+    # These parameters are used both for adding the transition to the
+    # spectrum and for fitting.
+    # Note that for a single species that produces multiple lines
+    # (as in the OVI doublet), 'numLines' will equal the number of lines,
+    # and f, gamma, and wavelength will each have multiple values.
+
+    HI_parameters = {
+        'name': 'HI',
+        'field': 'H_number_density',
+        'f': [.4164],
+        'Gamma': [6.265E8],
+        'wavelength': [1215.67],
+        'mass': 1.00794,
+        'numLines': 1,
+        'maxN': 1E22, 'minN': 1E11,
+        'maxb': 300, 'minb': 1,
+        'maxz': 6, 'minz': 0,
+        'init_b': 30,
+        'init_N': 1E14
+    }
+
+    species_dicts = {'HI': HI_parameters}
+
+
+    # Get all fields that need to be added to the light ray
+    fields = [('gas','temperature')]
+    for s, params in species_dicts.items():
+        fields.append(params['field'])
+
+    # With a single dataset, a start_position and
+    # end_position or trajectory must be given.
+    # Trajectory should be given as (r, theta, phi)
+    lr.make_light_ray(
+        start_position=ds.arr([0., 0., 0.], 'unitary'),
+        end_position=ds.arr([1., 1., 1.], 'unitary'),
+        solution_filename='test_lightraysolution.txt',
+        data_filename='test_lightray.h5',
+        fields=fields)
+    
+    # Create an AbsorptionSpectrum object extending from
+    # lambda = 900 to lambda = 1400, with 50000 pixels
+    sp = AbsorptionSpectrum(900.0, 1400.0, 50000)
+    
+    # Iterate over species
+    for s, params in species_dicts.items():
+        # Iterate over transitions for a single species
+        for i in range(params['numLines']):
+            # Add the lines to the spectrum
+            sp.add_line(
+                s, params['field'],
+                params['wavelength'][i], params['f'][i],
+                params['Gamma'][i], params['mass'],
+                label_threshold=1.e10)
+    
+    
+    # Make and save spectrum
+    wavelength, flux = sp.make_spectrum(
+        'test_lightray.h5',
+        output_file='test_spectrum.h5',
+        line_list_file='test_lines.txt',
+        use_peculiar_velocity=True)
+
+    # clean up
+    os.chdir(curdir)
+    shutil.rmtree(tmpdir)

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -21,8 +21,6 @@
     load
 from yt.frontends.ytdata.utilities import \
     save_as_dataset
-from yt.units.unit_object import \
-    Unit
 from yt.units.yt_array import \
     YTArray
 from yt.utilities.cosmology import \
@@ -391,7 +389,11 @@
 
         # Initialize data structures.
         self._data = {}
+        # temperature field is automatically added to fields
         if fields is None: fields = []
+        if (('gas', 'temperature') not in fields) and \
+           ('temperature' not in fields):
+            fields.append(('gas', 'temperature'))
         data_fields = fields[:]
         all_fields = fields[:]
         all_fields.extend(['dl', 'dredshift', 'redshift'])
@@ -604,19 +606,18 @@
               self.cosmology.t_from_z(ds["current_redshift"])
         extra_attrs = {"data_type": "yt_light_ray"}
         field_types = dict([(field, "grid") for field in data.keys()])
+
         # Only return LightRay elements with non-zero temperature
-        mask_field_units = ['K', 'cm**-3', 'g/cm**3']
-        mask_field_units = [Unit(u) for u in mask_field_units]
-        for f in data:
-            for u in mask_field_units:
-                if data[f].units.same_dimensions_as(u):
-                    mask = data[f] > 0
-                    if not np.any(mask):
-                        raise RuntimeError(
-                            "No zones along light ray with nonzero %s. "
-                            "Please modify your light ray trajectory." % (f,))
-                    for key in data.keys():
-                        data[key] = data[key][mask]
+        if 'temperature' in data: f = 'temperature'
+        if ('gas', 'temperature') in data: f = ('gas', 'temperature')
+        if 'temperature' in data or ('gas', 'temperature') in data:
+            mask = data[f] > 0
+            if not np.any(mask):
+                raise RuntimeError(
+                    "No zones along light ray with nonzero %s. "
+                    "Please modify your light ray trajectory." % (f,))
+            for key in data.keys():
+                data[key] = data[key][mask]
         save_as_dataset(ds, filename, data, field_types=field_types,
                         extra_attrs=extra_attrs)
 

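The light_ray.py changes above do two things: ('gas', 'temperature') is now
appended automatically to the requested fields, and the nonzero-element mask
is built from the temperature field alone rather than from every field with
K, cm**-3, or g/cm**3 dimensions. A hedged usage sketch (import path as of
this changeset; the dataset name is a placeholder):

    from yt.analysis_modules.cosmological_observation.api import LightRay

    lr = LightRay("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder
    # 'temperature' need not be listed; it is added if absent, and ray
    # elements with temperature == 0 are masked out on save.
    lr.make_light_ray(start_position=[0., 0., 0.],
                      end_position=[1., 1., 1.],
                      fields=['density', 'H_number_density'],
                      data_filename='ray.h5')
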
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -27,6 +27,8 @@
     YTQuantity
 from yt.units.unit_object import Unit
 from yt.data_objects.field_data import YTFieldData
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
 from yt.utilities.lib.misc_utilities import \
     new_bin_profile1d, \
     new_bin_profile2d, \
@@ -956,10 +958,18 @@
     fields = ensure_list(fields)
     is_pfield = [data_source.ds._get_field_info(f).particle_type
                  for f in bin_fields + fields]
+    wf = None
+    if weight_field is not None:
+        wf = data_source.ds._get_field_info(weight_field)
+        is_pfield.append(wf.particle_type)
+        wf = wf.name
 
-    if len(bin_fields) == 1:
+    if any(is_pfield) and not all(is_pfield):
+        raise YTIllDefinedProfile(
+            bin_fields, data_source._determine_fields(fields), wf, is_pfield)
+    elif len(bin_fields) == 1:
         cls = Profile1D
-    elif len(bin_fields) == 2 and np.all(is_pfield):
+    elif len(bin_fields) == 2 and all(is_pfield):
         # log bin_fields set to False for Particle Profiles.
         # doesn't make much sense for CIC deposition.
         # accumulation and fractional set to False as well.

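The profiles.py hunk adds a guard in create_profile: profiles that mix
particle fields with mesh fields are now rejected with YTIllDefinedProfile,
since particle profiles use CIC deposition while mesh profiles bin cells.
A short sketch of how that surfaces, using the same testing helpers as the
new tests below:

    from yt.data_objects.profiles import create_profile
    from yt.testing import fake_random_ds
    from yt.utilities.exceptions import YTIllDefinedProfile

    ds = fake_random_ds(16, particles=100)
    ad = ds.all_data()
    try:
        # a mesh bin field ('radius') with a particle field -> ill-defined
        create_profile(ad, 'radius', 'particle_mass')
    except YTIllDefinedProfile:
        print("mixed particle/mesh profile rejected, as expected")
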
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -889,10 +889,12 @@
         return new_unit
 
     def set_code_units(self):
-        self._set_code_unit_attributes()
         # here we override units, if overrides have been provided.
         self._override_code_units()
 
+        # set attributes like ds.length_unit
+        self._set_code_unit_attributes()
+
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
@@ -932,19 +934,22 @@
     def _override_code_units(self):
         if len(self.units_override) == 0:
             return
-        mylog.warning("Overriding code units. This is an experimental and potentially "+
-                      "dangerous option that may yield inconsistent results, and must be used "+
-                      "very carefully, and only if you know what you want from it.")
+        mylog.warning(
+            "Overriding code units. This is an experimental and potentially "
+            "dangerous option that may yield inconsistent results, and must be "
+            "used very carefully, and only if you know what you want from it.")
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g"),
-                          ("velocity","cm/s"), ("magnetic","gauss"), ("temperature","K")]:
+                          ("velocity","cm/s"), ("magnetic","gauss"), 
+                          ("temperature","K")]:
             val = self.units_override.get("%s_unit" % unit, None)
             if val is not None:
                 if isinstance(val, YTQuantity):
                     val = (val.v, str(val.units))
                 elif not isinstance(val, tuple):
                     val = (val, cgs)
-                u = getattr(self, "%s_unit" % unit)
-                mylog.info("Overriding %s_unit: %g %s -> %g %s.", unit, u.v, u.units, val[0], val[1])
+                u = getattr(self, "%s_unit" % unit, None)
+                mylog.info("Overriding %s_unit: %g -> %g %s.",
+                           unit, u, val[0], val[1])
                 setattr(self, "%s_unit" % unit, self.quan(val[0], val[1]))
 
     _arr = None

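The static_output.py reordering is the heart of this changeset:
_override_code_units now runs before _set_code_unit_attributes, and (with the
setdefaultattr changes in the frontends below) frontend defaults no longer
clobber user overrides, so derived units are computed from the overridden
values. A hedged example of the intended behavior (placeholder dataset path;
the override values mirror uo_blast in the Athena test below):

    import yt

    ds = yt.load("MyAthenaDataset/Blast.0100.vtk",  # placeholder path
                 units_override={'length_unit': (1.0, 'pc'),
                                 'time_unit': (1.0, 's*pc/km'),
                                 'mass_unit': (2.38858753789e-24,
                                               'g/cm**3*pc**3')})
    # magnetic_unit is now derived from the overridden l, m, t units
    print(ds.length_unit, ds.magnetic_unit.in_units('gauss'))
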
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -8,7 +8,13 @@
 from yt.testing import \
     fake_random_ds, \
     assert_equal, \
+    assert_raises, \
     assert_rel_equal
+from yt.utilities.exceptions import \
+    YTIllDefinedProfile
+from yt.visualization.profile_plotter import \
+    ProfilePlot, \
+    PhasePlot
 
 _fields = ("density", "temperature", "dinosaurs", "tribbles")
 _units = ("g/cm**3", "K", "dyne", "erg")
@@ -158,3 +164,34 @@
                         weight_field = None)
         p3d.add_fields(["particle_ones"])
         yield assert_equal, p3d["particle_ones"].sum(), 32**3
+
+def test_mixed_particle_mesh_profiles():
+    ds = fake_random_ds(32, particles=10)
+    ad = ds.all_data()
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'particle_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'particle_ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius',
+        ['particle_mass', 'ones'])
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'particle_mass',
+        'velocity_x')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'cell_mass')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'radius', 'cell_mass',
+        'particle_ones')
+    assert_raises(
+        YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
+        'particle_ones')

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -25,7 +25,8 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_container import \
     ARTOctreeContainer
 from yt.frontends.art.definitions import \
@@ -243,10 +244,10 @@
         mass = aM0 * 1.98892e33
 
         self.cosmological_simulation = True
-        self.mass_unit = self.quan(mass, "g*%s" % ng**3)
-        self.length_unit = self.quan(box_proper, "Mpc")
-        self.velocity_unit = self.quan(velocity, "cm/s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'mass_unit', self.quan(mass, "g*%s" % ng**3))
+        setdefaultattr(self, 'length_unit', self.quan(box_proper, "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.quan(velocity, "cm/s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     def _parse_parameter_file(self):
         """

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -30,7 +30,8 @@
     ARTIOFieldInfo
 
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.geometry_handler import \
     Index, \
     YTDataChunk
@@ -354,10 +355,13 @@
         self.storage_filename = storage_filename
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(self.parameters["unit_m"], "g")
-        self.length_unit = self.quan(self.parameters["unit_l"], "cm")
-        self.time_unit = self.quan(self.parameters["unit_t"], "s")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(
+            self, 'mass_unit', self.quan(self.parameters["unit_m"], "g"))
+        setdefaultattr(
+            self, 'length_unit', self.quan(self.parameters["unit_l"], "cm"))
+        setdefaultattr(
+            self, 'time_unit', self.quan(self.parameters["unit_t"], "s"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
     def _parse_parameter_file(self):
         # hard-coded -- not provided by headers

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -471,12 +471,15 @@
 
     def _set_code_unit_attributes(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical _units based on the
+        parameter file
         """
         if "length_unit" not in self.units_override:
             self.no_cgs_equiv_length = True
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            # We set these to cgs for now, but they may be overridden later.
+            # We set these to cgs for now, but they may have been overridden
+            if getattr(self, unit+'_unit', None) is not None:
+                continue
             mylog.warning("Assuming 1.0 = 1.0 %s", cgs)
             setattr(self, "%s_unit" % unit, self.quan(1.0, cgs))
         self.magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /

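For reference, the magnetic_unit expression here (and in the Enzo frontend
below) is the Gaussian-units identity B_code = sqrt(4*pi * m / (t**2 * l)),
which follows from the magnetic energy density B**2 / (8*pi) having
dimensions of erg/cm**3. A standalone check with yt quantities:

    import numpy as np
    from yt.units.yt_array import YTQuantity

    mass_unit = YTQuantity(1.0, 'g')
    time_unit = YTQuantity(1.0, 's')
    length_unit = YTQuantity(1.0, 'cm')

    magnetic_unit = np.sqrt(4 * np.pi * mass_unit /
                            (time_unit**2 * length_unit))
    # sqrt(g / (s**2 * cm)) is gauss in the CGS-Gaussian convention;
    # with unit factors of 1 this prints sqrt(4*pi) ~ 3.5449
    print(magnetic_unit.in_cgs())
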
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -34,6 +34,7 @@
         ("cell_centered_B_x", (b_units, [], None)),
         ("cell_centered_B_y", (b_units, [], None)),
         ("cell_centered_B_z", (b_units, [], None)),
+        ("gravitational_potential", ("code_velocity**2", ["gravitational_potential"], None)),
     )
 
 # In Athena, conservative or primitive variables may be written out.

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -49,6 +49,20 @@
         test_blast.__name__ = test.description
         yield test
 
+uo_blast = {
+    'length_unit': (1.0, 'pc'),
+    'mass_unit': (2.38858753789e-24, 'g/cm**3*pc**3'),
+    'time_unit': (1.0, 's*pc/km'),
+}
+
+@requires_file(blast)
+def test_blast_override():
+    # verify that overriding units causes derived unit values to be updated.
+    # see issue #1259
+    ds = load(blast, units_override=uo_blast)
+    assert_equal(float(ds.magnetic_unit.in_units('gauss')),
+                 5.478674679698131e-07)
+
 uo_stripping = {"time_unit":3.086e14,
                 "length_unit":8.0236e22,
                 "mass_unit":9.999e-30*8.0236e22**3}

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -22,8 +22,9 @@
 import numpy as np
 
 from yt.funcs import \
+    ensure_tuple, \
     mylog, \
-    ensure_tuple
+    setdefaultattr
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.extern.six.moves import zip as izip
 from yt.geometry.grid_geometry_handler import GridIndex
@@ -608,10 +609,10 @@
             self._setup2d()
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
 
     def _setup1d(self):
         # self._index_class = BoxlibHierarchy1D
@@ -1016,10 +1017,11 @@
             self.particle_types_raw = self.particle_types
 
     def _set_code_unit_attributes(self):
-        self.mass_unit = self.quan(1.0, "Msun")
-        self.time_unit = self.quan(1.0 / 3.08568025e19, "s")
-        self.length_unit = self.quan(1.0 / (1 + self.current_redshift), "Mpc")
-        self.velocity_unit = self.length_unit / self.time_unit
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0 / 3.08568025e19, "s"))
+        setdefaultattr(self, 'length_unit',
+                       self.quan(1.0 / (1 + self.current_redshift), "Mpc"))
+        setdefaultattr(self, 'velocity_unit', self.length_unit / self.time_unit)
 
 def _guess_pcast(vals):
     # Now we guess some things about the parameter and its type

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -23,7 +23,9 @@
 from stat import \
     ST_CTIME
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.extern import six
@@ -275,14 +277,19 @@
         self.parameters["EOSType"] = -1 # default
 
     def _set_code_unit_attributes(self):
-        mylog.warning("Setting code length to be 1.0 cm")
-        mylog.warning("Setting code mass to be 1.0 g")
-        mylog.warning("Setting code time to be 1.0 s")
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.magnetic_unit = self.quan(np.sqrt(4.*np.pi), "gauss")
-        self.velocity_unit = self.length_unit / self.time_unit
+        if not hasattr(self, 'length_unit'):
+            mylog.warning("Setting code length unit to be 1.0 cm")
+        if not hasattr(self, 'mass_unit'):
+            mylog.warning("Setting code mass unit to be 1.0 g")
+        if not hasattr(self, 'time_unit'):
+            mylog.warning("Setting code time unit to be 1.0 s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'magnetic_unit',
+                       self.quan(np.sqrt(4.*np.pi), "gauss"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.length_unit / self.time_unit)
 
     def _localize(self, f, default):
         if f is None:

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -28,7 +28,8 @@
 from yt.funcs import \
     ensure_list, \
     ensure_tuple, \
-    get_pbar
+    get_pbar, \
+    setdefaultattr
 from yt.config import ytcfg
 from yt.data_objects.grid_patch import \
     AMRGridPatch
@@ -917,11 +918,12 @@
             if box_size is None:
                 box_size = self.parameters["Physics"]["Cosmology"]\
                     ["CosmologyComovingBoxSize"]
-            self.length_unit = self.quan(box_size, "Mpccm/h")
-            self.mass_unit = \
-                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3
-            self.time_unit = self.quan(k['utim'], 's')
-            self.velocity_unit = self.quan(k['uvel'], 'cm/s')
+            setdefaultattr(self, 'length_unit', self.quan(box_size, "Mpccm/h"))
+            setdefaultattr(
+                self, 'mass_unit',
+                self.quan(k['urho'], 'g/cm**3') * (self.length_unit.in_cgs())**3)
+            setdefaultattr(self, 'time_unit', self.quan(k['utim'], 's'))
+            setdefaultattr(self, 'velocity_unit', self.quan(k['uvel'], 'cm/s'))
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
@@ -937,15 +939,16 @@
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
                 length_unit = mass_unit = time_unit = 1.0
 
-            self.length_unit = self.quan(length_unit, "cm")
-            self.mass_unit = self.quan(mass_unit, "g")
-            self.time_unit = self.quan(time_unit, "s")
-            self.velocity_unit = self.length_unit / self.time_unit
+            setdefaultattr(self, 'length_unit', self.quan(length_unit, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+            setdefaultattr(
+                self, 'velocity_unit', self.length_unit / self.time_unit)
 
         magnetic_unit = np.sqrt(4*np.pi * self.mass_unit /
                                 (self.time_unit**2 * self.length_unit))
         magnetic_unit = np.float64(magnetic_unit.in_cgs())
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
 
     def cosmology_get_units(self):
         """

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/exodus_ii/data_structures.py
--- a/yt/frontends/exodus_ii/data_structures.py
+++ b/yt/frontends/exodus_ii/data_structures.py
@@ -14,6 +14,8 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.unstructured_mesh import \
@@ -163,9 +165,9 @@
         # should be set, along with examples of how to set them to standard
         # values.
         #
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
         #
         # These can also be set:
         # self.velocity_unit = self.quan(1.0, "cm/s")

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -25,8 +25,9 @@
 
 from yt.config import ytcfg
 from yt.funcs import \
+    ensure_list, \
     mylog, \
-    ensure_list
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -447,10 +448,10 @@
             mylog.warning("No length conversion provided. Assuming 1 = 1 cm.")
             length_factor = 1.0
             length_unit = "cm"
-        self.length_unit = self.quan(length_factor,length_unit)
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
+        setdefaultattr(self, 'length_unit', self.quan(length_factor,length_unit))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
         if "beam_size" in self.specified_parameters:
             beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -22,7 +22,9 @@
     AMRGridPatch
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.geometry.particle_geometry_handler import \
@@ -246,13 +248,14 @@
         else:
             length_factor = 1.0
             temperature_factor = 1.0
-        self.magnetic_unit = self.quan(b_factor, "gauss")
 
-        self.length_unit = self.quan(length_factor, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")
-        self.temperature_unit = self.quan(temperature_factor, "K")
+        setdefaultattr(self, 'magnetic_unit', self.quan(b_factor, "gauss"))
+        setdefaultattr(self, 'length_unit', self.quan(length_factor, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm/s"))
+        setdefaultattr(
+            self, 'temperature_unit', self.quan(temperature_factor, "K"))
 
     def set_code_units(self):
         super(FLASHDataset, self).set_code_units()

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/gadget_fof/data_structures.py
--- a/yt/frontends/gadget_fof/data_structures.py
+++ b/yt/frontends/gadget_fof/data_structures.py
@@ -33,7 +33,8 @@
     GadgetFOFFieldInfo, \
     GadgetFOFHaloFieldInfo
 from yt.funcs import \
-    only_on_root
+    only_on_root, \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.utilities.cosmology import \
@@ -232,7 +233,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
         
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -244,7 +246,8 @@
             else:
                 velocity_unit = (1e5, "cmcm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -259,7 +262,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -267,7 +270,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     def __repr__(self):
         return self.basename.split(".", 1)[0]

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/gamer/data_structures.py
--- a/yt/frontends/gamer/data_structures.py
+++ b/yt/frontends/gamer/data_structures.py
@@ -18,7 +18,9 @@
 import numpy as np
 import weakref
 
-from yt.funcs import mylog
+from yt.funcs import \
+    mylog, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -205,7 +207,7 @@
                           "Use units_override to specify the units")
 
         for unit, cgs in [("length", "cm"), ("time", "s"), ("mass", "g")]:
-            setattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
+            setdefaultattr(self, "%s_unit"%unit, self.quan(1.0, cgs))
 
             if len(self.units_override) == 0:
                 mylog.warning("Assuming 1.0 = 1.0 %s", cgs)

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -19,7 +19,9 @@
 import os
 from yt.extern.six import string_types
 from yt.funcs import \
-    just_one, ensure_tuple
+    ensure_tuple, \
+    just_one, \
+    setdefaultattr
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
@@ -223,17 +225,17 @@
                     un = unit_name[:-5]
                     un = un.replace('magnetic', 'magnetic_field', 1)
                     unit = self.unit_system[un]
-                    setattr(self, unit_name, self.quan(value, unit))
-                setattr(self, unit_name, self.quan(value, unit))
+                    setdefaultattr(self, unit_name, self.quan(value, unit))
+                setdefaultattr(self, unit_name, self.quan(value, unit))
                 if unit_name in h5f["/field_types"]:
                     if unit_name in self.field_units:
                         mylog.warning("'field_units' was overridden by 'dataset_units/%s'"
                                       % (unit_name))
                     self.field_units[unit_name] = str(unit)
         else:
-            self.length_unit = self.quan(1.0, "cm")
-            self.mass_unit = self.quan(1.0, "g")
-            self.time_unit = self.quan(1.0, "s")
+            setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+            setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+            setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
         h5f.close()
 

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/halo_catalog/data_structures.py
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -23,6 +23,8 @@
 from .fields import \
     HaloCatalogFieldInfo
 
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.particle_geometry_handler import \
     ParticleIndex
 from yt.data_objects.static_output import \
@@ -76,10 +78,10 @@
         self.parameters.update(hvals)
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, "cm")
-        self.mass_unit = self.quan(1.0, "g")
-        self.velocity_unit = self.quan(1.0, "cm / s")
-        self.time_unit = self.quan(1.0, "s")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "cm / s"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -19,6 +19,8 @@
 import weakref
 from yt.data_objects.unstructured_mesh import \
     SemiStructuredMesh
+from yt.funcs import \
+    setdefaultattr
 from yt.geometry.unstructured_mesh_handler import \
     UnstructuredIndex
 from yt.data_objects.static_output import \
@@ -78,9 +80,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
@@ -161,9 +163,9 @@
     def _set_code_unit_attributes(self):
         # Almost everything is regarded as dimensionless in MOAB, so these will
         # not be used very much or at all.
-        self.length_unit = self.quan(1.0, "cm")
-        self.time_unit = self.quan(1.0, "s")
-        self.mass_unit = self.quan(1.0, "g")
+        setdefaultattr(self, 'length_unit', self.quan(1.0, "cm"))
+        setdefaultattr(self, 'time_unit', self.quan(1.0, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "g"))
 
     def _parse_parameter_file(self):
         #  not sure if this import has side-effects so I'm not deleting it

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/owls_subfind/data_structures.py
--- a/yt/frontends/owls_subfind/data_structures.py
+++ b/yt/frontends/owls_subfind/data_structures.py
@@ -24,7 +24,9 @@
 from .fields import \
     OWLSSubfindFieldInfo
 
-from yt.funcs import only_on_root
+from yt.funcs import \
+    only_on_root, \
+    setdefaultattr
 from yt.utilities.exceptions import \
     YTException
 from yt.utilities.logger import ytLogger as \
@@ -176,7 +178,8 @@
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
-        self.length_unit = self.quan(length_unit[0], length_unit[1])
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit[0], length_unit[1]))
 
         if "velocity" in unit_base:
             velocity_unit = unit_base["velocity"]
@@ -185,7 +188,8 @@
         else:
             velocity_unit = (1e5, "cm/s")
         velocity_unit = _fix_unit_ordering(velocity_unit)
-        self.velocity_unit = self.quan(velocity_unit[0], velocity_unit[1])
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(velocity_unit[0], velocity_unit[1]))
 
         # We set hubble_constant = 1.0 for non-cosmology, so this is safe.
         # Default to 1e10 Msun/h if mass is not specified.
@@ -200,7 +204,7 @@
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
         mass_unit = _fix_unit_ordering(mass_unit)
-        self.mass_unit = self.quan(mass_unit[0], mass_unit[1])
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit[0], mass_unit[1]))
 
         if "time" in unit_base:
             time_unit = unit_base["time"]
@@ -208,7 +212,7 @@
             time_unit = (unit_base["UnitTime_in_s"], "s")
         else:
             time_unit = (1., "s")        
-        self.time_unit = self.quan(time_unit[0], time_unit[1])
+        setdefaultattr(self, 'time_unit', self.quan(time_unit[0], time_unit[1]))
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -23,7 +23,8 @@
 
 from yt.extern.six import string_types
 from yt.funcs import \
-    mylog
+    mylog, \
+    setdefaultattr
 from yt.geometry.oct_geometry_handler import \
     OctreeIndex
 from yt.geometry.geometry_handler import \
@@ -565,17 +566,21 @@
         # For now assume an atomic ideal gas with cosmic abundances (x_H = 0.76)
         mean_molecular_weight_factor = _X**-1
 
-        self.density_unit = self.quan(density_unit, 'g/cm**3')
-        self.magnetic_unit = self.quan(magnetic_unit, "gauss")
-        self.pressure_unit = self.quan(pressure_unit, 'dyne/cm**2')
-        self.time_unit = self.quan(time_unit, "s")
-        self.mass_unit = self.quan(mass_unit, "g")
-        self.velocity_unit = self.quan(length_unit, 'cm') / self.time_unit
-        self.temperature_unit = (self.velocity_unit**2*mp* 
-                                 mean_molecular_weight_factor/kb).in_units('K')
+        setdefaultattr(self, 'density_unit', self.quan(density_unit, 'g/cm**3'))
+        setdefaultattr(self, 'magnetic_unit', self.quan(magnetic_unit, "gauss"))
+        setdefaultattr(self, 'pressure_unit',
+                       self.quan(pressure_unit, 'dyne/cm**2'))
+        setdefaultattr(self, 'time_unit', self.quan(time_unit, "s"))
+        setdefaultattr(self, 'mass_unit', self.quan(mass_unit, "g"))
+        setdefaultattr(self, 'velocity_unit',
+                       self.quan(length_unit, 'cm') / self.time_unit)
+        temperature_unit = (
+            self.velocity_unit**2*mp*mean_molecular_weight_factor/kb)
+        setdefaultattr(self, 'temperature_unit', temperature_unit.in_units('K'))
 
         # Only the length unit gets scaled by a factor of boxlen
-        self.length_unit = self.quan(length_unit * boxlen, "cm")
+        setdefaultattr(self, 'length_unit',
+                       self.quan(length_unit * boxlen, "cm"))
 
     def _parse_parameter_file(self):
         # hardcoded for now

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/rockstar/data_structures.py
--- a/yt/frontends/rockstar/data_structures.py
+++ b/yt/frontends/rockstar/data_structures.py
@@ -22,12 +22,14 @@
 from .fields import \
     RockstarFieldInfo
 
-from yt.utilities.cosmology import Cosmology
-from yt.geometry.particle_geometry_handler import \
-    ParticleIndex
 from yt.data_objects.static_output import \
     Dataset, \
     ParticleFile
+from yt.funcs import \
+    setdefaultattr
+from yt.geometry.particle_geometry_handler import \
+    ParticleIndex
+from yt.utilities.cosmology import Cosmology
 import yt.utilities.fortran_utils as fpu
 
 from .definitions import \
@@ -92,10 +94,10 @@
 
     def _set_code_unit_attributes(self):
         z = self.current_redshift
-        self.length_unit = self.quan(1.0 / (1.0+z), "Mpc / h")
-        self.mass_unit = self.quan(1.0, "Msun / h")
-        self.velocity_unit = self.quan(1.0, "km / s")
-        self.time_unit = self.length_unit / self.velocity_unit
+        setdefaultattr(self, 'length_unit', self.quan(1.0 / (1.0+z), "Mpc / h"))
+        setdefaultattr(self, 'mass_unit', self.quan(1.0, "Msun / h"))
+        setdefaultattr(self, 'velocity_unit', self.quan(1.0, "km / s"))
+        setdefaultattr(self, 'time_unit', self.length_unit / self.velocity_unit)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/sdf/data_structures.py
--- a/yt/frontends/sdf/data_structures.py
+++ b/yt/frontends/sdf/data_structures.py
@@ -28,7 +28,8 @@
 from yt.data_objects.static_output import \
     Dataset, ParticleFile
 from yt.funcs import \
-    get_requests
+    get_requests, \
+    setdefaultattr
 from .fields import \
     SDFFieldInfo
 from yt.utilities.sdf import \
@@ -177,16 +178,22 @@
         return self._midx
 
     def _set_code_unit_attributes(self):
-        self.length_unit = self.quan(1.0, self.parameters.get("length_unit", 'kpc'))
-        self.velocity_unit = self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr'))
-        self.time_unit = self.quan(1.0, self.parameters.get("time_unit", 'Gyr'))
+        setdefaultattr(
+            self, 'length_unit',
+            self.quan(1.0, self.parameters.get("length_unit", 'kpc')))
+        setdefaultattr(
+            self, 'velocity_unit',
+            self.quan(1.0, self.parameters.get("velocity_unit", 'kpc/Gyr')))
+        setdefaultattr(
+            self, 'time_unit',
+            self.quan(1.0, self.parameters.get("time_unit", 'Gyr')))
         mass_unit = self.parameters.get("mass_unit", '1e10 Msun')
         if ' ' in mass_unit:
             factor, unit = mass_unit.split(' ')
         else:
             factor = 1.0
             unit = mass_unit
-        self.mass_unit = self.quan(float(factor), unit)
+        setdefaultattr(self, 'mass_unit', self.quan(float(factor), unit))
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):

diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,7 @@
         ("radiation_acceleration_x", ("code_length/code_time**2", ["radiation_acceleration_x"], None)),
         ("radiation_acceleration_y", ("code_length/code_time**2", ["radiation_acceleration_y"], None)),
         ("radiation_acceleration_z", ("code_length/code_time**2", ["radiation_acceleration_z"], None)),
+        ("metallicity", ("Zsun", ["metallicity"], None)),
 
         # We need to have a bunch of species fields here, too
         ("metal_density",   ("code_mass/code_length**3", ["metal_density"], None)),

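With ('metallicity', 'Zsun') registered for the stream frontend, in-memory
datasets can now supply metallicity directly in solar units. A hedged sketch
using load_uniform_grid (random data, purely illustrative):

    import numpy as np
    import yt

    shape = (16, 16, 16)
    data = {'density': (np.random.random(shape), 'g/cm**3'),
            'metallicity': (0.02 * np.ones(shape), 'Zsun')}
    ds = yt.load_uniform_grid(data, shape, length_unit='Mpc')
    print(ds.all_data()['metallicity'].units)  # Zsun
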
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -1004,3 +1004,12 @@
 
 def get_interactivity():
     return interactivity
+
+def setdefaultattr(obj, name, value):
+    """Set attribute with *name* on *obj* with *value* if it doesn't exist yet
+
+    Analogous to dict.setdefault
+    """
+    if not hasattr(obj, name):
+        setattr(obj, name, value)
+    return getattr(obj, name)

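setdefaultattr is the helper that makes all of the frontend edits above safe:
each _set_code_unit_attributes now fills in a default only when
units_override has not already set the attribute, analogous to
dict.setdefault. Its semantics in isolation:

    from yt.funcs import setdefaultattr

    class Holder(object):
        pass

    obj = Holder()
    setdefaultattr(obj, 'length_unit', 'cm')  # attribute missing -> set
    setdefaultattr(obj, 'length_unit', 'pc')  # attribute present -> no-op
    print(obj.length_unit)                    # prints 'cm'
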
diff -r 1aa4c05b23da4b035effa8689ec99099ccd34707 -r f6198bb1cb2c1beabcbda68f20a77409659eedb3 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -23,15 +23,12 @@
 from yt.utilities.lib.fp_utils cimport *
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport kernel_func, get_kernel_func, gind
+from yt.utilities.lib.distance_queue cimport NeighborList, Neighbor_compare, \
+    r2dist, DistanceQueue
 
 cdef extern from "platform_dep.h":
     void *alloca(int)
 
-cdef struct NeighborList
-cdef struct NeighborList:
-    np.int64_t pn       # Particle number
-    np.float64_t r2     # radius**2
-
 cdef class ParticleSmoothOperation:
     # We assume each will allocate and define their own temporary storage
     cdef kernel_func sph_kernel
@@ -39,10 +36,8 @@
     cdef np.float64_t DW[3]
     cdef int nfields
     cdef int maxn
-    cdef int curn
     cdef bint periodicity[3]
     # Note that we are preallocating here, so this is *not* threadsafe.
-    cdef NeighborList *neighbors
     cdef void (*pos_setup)(np.float64_t ipos[3], np.float64_t opos[3])
     cdef void neighbor_process(self, int dim[3], np.float64_t left_edge[3],
                                np.float64_t dds[3], np.float64_t[:,:] ppos,
@@ -52,7 +47,7 @@
                                np.int64_t offset, np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
                                int *nsize, np.float64_t[:,:] oct_left_edges,
-                               np.float64_t[:,:] oct_dds)
+                               np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef int neighbor_search(self, np.float64_t pos[3], OctreeContainer octree,
                              np.int64_t **nind, int *nsize, 
                              np.int64_t nneighbors, np.int64_t domain_id, 
@@ -65,10 +60,7 @@
                                np.int64_t offset,
                                np.float64_t **index_fields,
                                OctreeContainer octree, np.int64_t domain_id,
-                               int *nsize)
-    cdef void neighbor_eval(self, np.int64_t pn, np.float64_t ppos[3],
-                            np.float64_t cpos[3])
-    cdef void neighbor_reset(self)
+                               int *nsize, DistanceQueue dq)
     cdef void neighbor_find(self,
                             np.int64_t nneighbors,
                             np.int64_t *nind,
@@ -78,7 +70,7 @@
                             np.float64_t[:,:] ppos,
                             np.float64_t cpos[3],
                             np.float64_t[:,:] oct_left_edges,
-                            np.float64_t[:,:] oct_dds)
+                            np.float64_t[:,:] oct_dds, DistanceQueue dq)
     cdef void process(self, np.int64_t offset, int i, int j, int k,
                       int dim[3], np.float64_t cpos[3], np.float64_t **fields,
-                      np.float64_t **index_fields)
+                      np.float64_t **index_fields, DistanceQueue dq)

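The particle_smooth.pxd changes replace the hand-rolled NeighborList array
(and the neighbor_eval / neighbor_reset methods) with a DistanceQueue object
from yt.utilities.lib.distance_queue, threaded through the neighbor-processing
call signatures. Conceptually it keeps the maxn nearest particles seen so
far, keyed on squared distance; a pure-Python sketch of that behavior (not
the Cython implementation):

    import heapq

    class DistanceQueueSketch(object):
        """Track the maxn smallest r2 values seen, via a max-heap."""
        def __init__(self, maxn):
            self.maxn = maxn
            self._heap = []  # entries are (-r2, particle_number)

        def neighbor_eval(self, pn, r2):
            if len(self._heap) < self.maxn:
                heapq.heappush(self._heap, (-r2, pn))
            elif r2 < -self._heap[0][0]:
                # closer than the current farthest kept neighbor
                heapq.heapreplace(self._heap, (-r2, pn))

        def neighbors(self):
            return sorted((-nr2, pn) for nr2, pn in self._heap)

    dq = DistanceQueueSketch(maxn=3)
    for pn, r2 in enumerate([4.0, 1.0, 9.0, 0.25, 2.5]):
        dq.neighbor_eval(pn, r2)
    print(dq.neighbors())  # [(0.25, 3), (1.0, 1), (2.5, 4)]
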
This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/
