[yt-svn] commit/yt: 8 new changesets

commits-noreply at bitbucket.org
Fri Jul 22 07:48:58 PDT 2016


8 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/49a9ecfd4fc4/
Changeset:   49a9ecfd4fc4
Branch:      yt
User:        jzuhone
Date:        2016-07-07 19:41:17+00:00
Summary:     For some reason "field_info" can't be accessed in here for some datasets
Affected #:  1 file

diff -r d7f213e1752e7db1bc4ae5b37c1b9acdaa080a48 -r 49a9ecfd4fc47bce8501e44b811ea99b3cdf5f13 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -206,7 +206,7 @@
             dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
-                if self.data_series[0].field_info[fd].particle_type:
+                if self.data_series[0]._get_field_info(*fd).particle_type:
                     self.particle_fields.append(field)
             particles = np.empty((self.num_indices,self.num_steps))
             particles[:] = np.nan

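For context, a minimal stand-alone sketch of the lookup pattern this changeset switches to; the dataset path and field name below are illustrative, not taken from the commit:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
    dd = ds.all_data()

    # _determine_fields resolves a bare field name into a (ftype, fname) tuple
    fd = dd._determine_fields("particle_mass")[0]

    # Looking the field up via the dataset works even when ds.field_info[fd]
    # cannot be accessed directly, which is the workaround applied above.
    finfo = ds._get_field_info(*fd)
    print(fd, finfo.particle_type)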

https://bitbucket.org/yt_analysis/yt/commits/64d6055280ca/
Changeset:   64d6055280ca
Branch:      yt
User:        jzuhone
Date:        2016-07-07 19:42:15+00:00
Summary:     Fixing a bug here that is derivative of a bug in XSPEC
Affected #:  1 file

diff -r 49a9ecfd4fc47bce8501e44b811ea99b3cdf5f13 -r 64d6055280ca07d5f2f45b9d526a84c49ab5e301 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -288,7 +288,7 @@
 
         tmpspec += np.interp(emid, e_pseudo*self.scale_factor, pseudo)*de/self.scale_factor
 
-        return tmpspec
+        return tmpspec*self.scale_factor
 
     def get_spectrum(self, kT):
         """

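The hunk above resamples a tabulated spectrum onto shifted energy bins with np.interp; a rough stand-alone sketch of that resampling step, with all arrays, bin definitions, and the scale factor invented purely for illustration:

    import numpy as np

    scale_factor = 1.0 / (1.0 + 0.05)         # e.g. 1/(1+z); value is made up
    e_pseudo = np.linspace(0.1, 10.0, 200)    # tabulated energies, made up
    pseudo = np.exp(-e_pseudo)                # tabulated emission, made up

    ebins = np.linspace(0.1, 10.0, 101)       # assumed energy bin edges
    emid = 0.5 * (ebins[1:] + ebins[:-1])     # assumed bin centers
    de = np.diff(ebins)                       # assumed bin widths

    # Interpolate the tabulated values at the scaled bin centers, as above
    tmpspec = np.interp(emid, e_pseudo * scale_factor, pseudo) * de / scale_factor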

https://bitbucket.org/yt_analysis/yt/commits/8108253d7f4d/
Changeset:   8108253d7f4d
Branch:      yt
User:        jzuhone
Date:        2016-07-07 19:45:47+00:00
Summary:     No reason why these shouldn't be 64-bits
Affected #:  1 file

diff -r 64d6055280ca07d5f2f45b9d526a84c49ab5e301 -r 8108253d7f4d937d10a8829e9f3672f0107a80e2 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -339,7 +339,7 @@
         """
         fid = h5py.File(filename, "w")
         fields = [field for field in sorted(self.field_data.keys())]
-        fid.create_dataset("particle_indices", dtype=np.int32,
+        fid.create_dataset("particle_indices", dtype=np.int64,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         for field in fields:

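The dtype change matters once particle indices exceed the 32-bit integer range (2**31 - 1, about 2.1 billion). A small sketch of the same h5py call with 64-bit indices, using made-up data in place of self.indices:

    import h5py
    import numpy as np

    indices = np.arange(10, dtype=np.int64)  # stand-in for self.indices

    with h5py.File("trajectories.h5", "w") as fid:
        # int64 avoids silent overflow for indices above 2**31 - 1
        fid.create_dataset("particle_indices", dtype=np.int64, data=indices)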

https://bitbucket.org/yt_analysis/yt/commits/66296ae011d2/
Changeset:   66296ae011d2
Branch:      yt
User:        jzuhone
Date:        2016-07-19 17:23:07+00:00
Summary:     Bump varia answers
Affected #:  1 file

diff -r 8108253d7f4d937d10a8829e9f3672f0107a80e2 -r 66296ae011d25376d2b3f7d9c80730fdabfef2ee tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_001:
+  local_varia_002:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py


https://bitbucket.org/yt_analysis/yt/commits/a2f734ed05e8/
Changeset:   a2f734ed05e8
Branch:      yt
User:        jzuhone
Date:        2016-07-20 17:52:19+00:00
Summary:     Bump varia again
Affected #:  1 file

diff -r 66296ae011d25376d2b3f7d9c80730fdabfef2ee -r a2f734ed05e8a467c39a7c8589170faac49a5c4c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_002:
+  local_varia_003:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py


https://bitbucket.org/yt_analysis/yt/commits/6a81505b217b/
Changeset:   6a81505b217b
Branch:      yt
User:        jzuhone
Date:        2016-07-20 20:53:29+00:00
Summary:     Merge
Affected #:  110 files

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -37,9 +37,11 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_samplers.c
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/lenses.c
 yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_construction.cpp
 yt/utilities/lib/mesh_intersection.cpp
@@ -49,6 +51,7 @@
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/particle_mesh_operations.c
+yt/utilities/lib/partitioned_grid.c
 yt/utilities/lib/primitives.c
 yt/utilities/lib/origami.c
 yt/utilities/lib/particle_mesh_operations.c
@@ -62,6 +65,11 @@
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c
+yt/utilities/lib/perftools_wrap.c
+yt/utilities/lib/partitioned_grid.c
+yt/utilities/lib/volume_container.c
+yt/utilities/lib/lenses.c
+yt/utilities/lib/image_samplers.c
 syntax: glob
 *.pyc
 *.pyd

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -119,6 +119,24 @@
         exit 1
     fi
     DEST_SUFFIX="yt-conda"
+    if [ -n "${PYTHONPATH}" ]
+    then
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        echo
+        echo "The PYTHONPATH environment variable is set to:"
+        echo
+        echo "    $PYTHONPATH"
+        echo
+        echo "If dependencies of yt (numpy, scipy, matplotlib) are installed"
+        echo "to this path, this may cause issues. Exit the install script"
+        echo "with Ctrl-C and unset PYTHONPATH if you are unsure."
+        echo "Hit enter to continue."
+        echo
+        echo "WARNING WARNING WARNING WARNING WARNING WARNING WARNING"
+        echo "*******************************************************"
+        read -p "[hit enter]"
+    fi
 else
     if [ $INST_YT_SOURCE -eq 0 ]
     then
@@ -524,11 +542,11 @@
 echo
 
 printf "%-18s = %s so I " "INST_CONDA" "${INST_CONDA}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_CONDA}
 echo "be installing a conda-based python environment"
 
 printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}"
-get_willwont ${INST_PY3}
+get_willwont ${INST_YT_SOURCE}
 echo "be compiling yt from source"
 
 printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
@@ -744,6 +762,12 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function test_install
+{
+    echo "Testing that yt can be imported"
+    ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import yt" 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -1238,6 +1262,8 @@
     ( cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh 2>&1 ) 1>> ${LOG_FILE}
     sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
 
+    test_install
+
     function print_afterword
     {
         echo
@@ -1478,10 +1504,12 @@
             ROCKSTAR_LIBRARY_PATH=${DEST_DIR}/lib
         fi
         pushd ${YT_DIR} &> /dev/null
-        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE}
+        ( LIBRARY_PATH=$ROCKSTAR_LIBRARY_PATH python setup.py develop 2>&1) 1>> ${LOG_FILE} || do_exit
         popd &> /dev/null
     fi
 
+    test_install
+
     echo
     echo
     echo "========================================================================"

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,10 +4,9 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the yt source code,
-   so the program may be bug ridden.  Please send yt-dev an email and
-   address to Geoffrey So if you discover something wrong with this
-   portion of the code.
+.. warning:: This functionality is currently broken and needs to
+   be updated to make use of the :ref:`halo_catalog` framework.
+   Anyone interested in doing so should contact the yt-dev list.
 
 Purpose
 -------

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -3,14 +3,16 @@
 Halo Analysis
 =============
 
-Using halo catalogs, understanding the different halo finding methods,
-and using the halo mass function.
+This section covers halo finding, performing extra analysis on halos,
+and the halo mass function calculator.  If you already have halo
+catalogs and simply want to load them into yt, see
+:ref:`halo-catalog-data`.
 
 .. toctree::
    :maxdepth: 2
 
+   halo_catalogs
+   halo_mass_function
    halo_transition
-   halo_catalogs
-   halo_finders
-   halo_mass_function
    halo_merger_tree
+   ellipsoid_analysis

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,42 @@
 .. _halo_catalog:
 
-Halo Catalogs
-=============
+Halo Finding and Analysis
+=========================
 
-Creating Halo Catalogs
-----------------------
+In yt-3.x, halo finding and analysis are combined into a single
+framework called the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+This framework is substantially different from the halo analysis
+machinery available in yt-2.x and is entirely backward incompatible.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.x, see :ref:`halo-transition`.
 
-In yt 3.0, operations relating to the analysis of halos (halo finding,
-merger tree creation, and individual halo analysis) are all brought
-together into a single framework. This framework is substantially
-different from the halo analysis machinery available in yt-2.x and is
-entirely backward incompatible.
-For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo-transition`.
+.. _halo_catalog_finding:
 
-A catalog of halos can be created from any initial dataset given to halo
-catalog through data_ds. These halos can be found using friends-of-friends,
-HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`.
-For more details on the relative differences between these halo finders see
-:ref:`halo_finding`.
+Halo Finding
+------------
 
-The class which holds all of the halo information is the
-:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+If you already have a halo catalog, either produced by one of the methods
+below or in a format described in :ref:`halo-catalog-data`, and want to
+perform further analysis, skip to :ref:`halo_catalog_analysis`.
+
+Three halo finding methods exist within yt.  These are:
+
+* :ref:`fof_finding`: a basic friends-of-friends algorithm (e.g. `Efstathiou et al. (1985)
+  <http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_)
+* :ref:`hop_finding`: `Eisenstein and Hut (1998)
+  <http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_.
+* :ref:`rockstar_finding`: a 6D phase-space halo finder developed by Peter Behroozi that
+  scales well and does substructure finding (`Behroozi et al.
+  2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_)
+
+Halo finding is performed through the creation of a
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+object.  The dataset on which halo finding is to be performed should
+be loaded and given to the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+along with the ``finder_method`` keyword to specify the method to be
+used.
 
 .. code-block:: python
 
@@ -31,28 +45,195 @@
 
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
 
-A halo catalog may also be created from already run rockstar outputs.
-This method is not implemented for previously run friends-of-friends or
-HOP finders. Even though rockstar creates one file per processor,
-specifying any one file allows the full catalog to be loaded. Here we
-only specify the file output by the processor with ID 0. Note that the
-argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
+The ``finder_method`` options should be given as "fof", "hop", or
+"rockstar".  Each of these methods has its own set of keyword
+arguments to control functionality.  These can be specified in the form
+of a dictionary using the ``finder_kwargs`` keyword.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_ds=halos_ds)
+   import yt
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-Although supplying only the binary output of the rockstar halo finder
-is sufficient for creating a halo catalog, it is not possible to find
-any new information about the identified halos. To associate the halos
-with the dataset from which they were found, supply arguments to both
-halos_ds and data_ds.
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={"ptype": "stars",
+                                   "padding": 0.02})
+   hc.create()
+
+For a full list of keywords for each halo finder, see
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`,
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`,
+and
+:class:`~yt.analysis_modules.halo_finding.rockstar.rockstar.RockstarHaloFinder`.
+
+.. _fof_finding:
+
+FOF
+^^^
+
+This is a basic friends-of-friends algorithm.  See
+`Efstathiou et al. (1985)
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_ for more
+details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`.
+
+.. _hop_finding:
+
+HOP
+^^^
+
+The version of HOP used in yt is an upgraded version of the
+`publicly available HOP code
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
+for 64-bit floats and integers has been added, as well as
+parallel analysis through spatial decomposition. HOP builds
+groups in this fashion:
+
+#. Estimates the local density at each particle using a
+   smoothing kernel.
+
+#. Builds chains of linked particles by 'hopping' from one
+   particle to its densest neighbor. A particle which is
+   its own densest neighbor is the end of the chain.
+
+#. All chains that share the same densest particle are
+   grouped together.
+
+#. Groups are included, linked together, or discarded
+   depending on the user-supplied over density
+   threshold parameter. The default is 160.0.
+
+See the `HOP method paper
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for
+full details as well as
+:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`.
+
+.. _rockstar_finding:
+
+Rockstar
+^^^^^^^^
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and noise-
+resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in
+`Behroozi et al. 2011 <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_.
+In order to run the Rockstar halo finder in yt, make sure you've
+:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
+
+At the moment, Rockstar does not support multiple particle masses,
+instead using a fixed particle mass. This will not affect most dark matter
+simulations, but does make it less useful for finding halos from the stellar
+mass. In simulations where the highest-resolution particles all have the
+same mass (i.e. zoom-in grid based simulations), one can set up a particle
+filter to select the lowest mass particles and perform the halo finding
+only on those.  See this cookbook recipe for an example:
+:ref:`cookbook-rockstar-nested-grid`.
+
+To run the Rockstar Halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+The script above configures the Halo finder, launches a server process which
+disseminates run information and coordinates writer-reader processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the
+halo catalog through the ``finder_kwargs`` argument:
+
+* ``dm_type``, the index of the dark matter particle. Default is 1.
+* ``outbase``, This is where the out*list files that Rockstar makes should be
+  placed. Default is 'rockstar_halos'.
+* ``num_readers``, the number of reader tasks (which are idle most of the
+  time.) Default is 1.
+* ``num_writers``, the number of writer tasks (which are fed particles and
+  do most of the analysis). Default is MPI_TASKS-num_readers-1.
+  If left undefined, the above options are automatically
+  configured from the number of available MPI tasks.
+* ``force_res``, the resolution that Rockstar uses for various calculations
+  and smoothing lengths. This is in units of Mpc/h.
+  If no value is provided, this parameter is automatically set to
+  the width of the smallest grid element in the simulation from the
+  last data snapshot (i.e. the one where time has evolved the
+  longest) in the time series:
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
+* ``total_particles``, if supplied, this is a pre-calculated
+  total number of dark matter
+  particles present in the simulation. For example, this is useful
+  when analyzing a series of snapshots where the number of dark
+  matter particles should not change and this will save some disk
+  access time. If left unspecified, it will
+  be calculated automatically. Default: ``None``.
+* ``dm_only``, if set to ``True``, it will be assumed that there are
+  only dark matter particles present in the simulation.
+  This option does not modify the halos found by Rockstar, however
+  this option can save disk access time if there are no star particles
+  (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and
+out*list) and binary (halo*bin) files inside the ``outbase`` directory.
+We use the halo list classes to recover the information.
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+.. _rockstar-installation:
+
+Installing Rockstar
+"""""""""""""""""""
+
+Because of changes in the Rockstar API over time, yt only currently works with
+a slightly older version of Rockstar.  This version of Rockstar has been
+slightly patched and modified to run as a library inside of yt. By default it
+is not installed with yt, but installation is very easy.  The
+:ref:`install-script` used to install yt from source has a line:
+``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
+rerun this installer script over the top of an existing installation, and
+it will only install components missing from the existing installation.
+You can do this as follows.  Put your freshly modified install_script in
+the parent directory of the yt installation directory (e.g. the parent of
+``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
+
+.. code-block:: bash
+
+    cd $YT_DEST
+    cd ..
+    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
+    bash < install_script.sh
+
+This will download Rockstar and install it as a library in yt.
+
+.. _halo_catalog_analysis:
+
+Extra Halo Analysis
+-------------------
+
+As a reminder, all halo catalogs created by the methods outlined in
+:ref:`halo_catalog_finding` as well as those in the formats discussed in
+:ref:`halo-catalog-data` can be loaded in to yt as first-class datasets.
+Once a halo catalog has been created, further analysis can be performed
+by providing both the halo catalog and the original simulation dataset to
+the
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 
 .. code-block:: python
 
-   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
@@ -60,24 +241,28 @@
 associated with either dataset, to control the spatial region in
 which halo analysis will be performed.
 
-Analysis Using Halo Catalogs
-----------------------------
-
-Analysis is done by adding actions to the
+The :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
+allows the user to create a pipeline of analysis actions that will be
+performed on all halos in the existing catalog.  The analysis can be
+performed in parallel with separate processors or groups of processors
+being allocated to perform the entire pipeline on individual halos.
+The pipeline is set up by adding actions to the
 :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
 Each action is represented by a callback function that will be run on
 each halo.  There are four types of actions:
 
-* Filters
-* Quantities
-* Callbacks
-* Recipes
+* :ref:`halo_catalog_filters`
+* :ref:`halo_catalog_quantities`
+* :ref:`halo_catalog_callbacks`
+* :ref:`halo_catalog_recipes`
 
 A list of all available filters, quantities, and callbacks can be found in
 :ref:`halo_analysis_ref`.
 All interaction with this analysis can be performed by importing from
 halo_analysis.
 
+.. _halo_catalog_filters:
+
 Filters
 ^^^^^^^
 
@@ -118,6 +303,8 @@
    # ... Later on in your script
    hc.add_filter("my_filter")
 
+.. _halo_catalog_quantities:
+
 Quantities
 ^^^^^^^^^^
 
@@ -176,6 +363,8 @@
    # ... Anywhere after "my_quantity" has been called
    hc.add_callback("print_quantity")
 
+.. _halo_catalog_callbacks:
+
 Callbacks
 ^^^^^^^^^
 
@@ -214,6 +403,8 @@
    # ...  Later on in your script
    hc.add_callback("my_callback")
 
+.. _halo_catalog_recipes:
+
 Recipes
 ^^^^^^^
 
@@ -258,8 +449,8 @@
 object as the first argument, recipe functions should take a ``HaloCatalog``
 object as the first argument.
 
-Running Analysis
-----------------
+Running the Pipeline
+--------------------
 
 After all callbacks, quantities, and filters have been added, the
 analysis begins with a call to HaloCatalog.create.
@@ -290,7 +481,7 @@
 
 A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`
 saved to disk can be reloaded as a yt dataset with the
-standard call to load. Any side data, such as profiles, can be reloaded
+standard call to ``yt.load``. Any side data, such as profiles, can be reloaded
 with a ``load_profiles`` callback and a call to
 :func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
@@ -303,8 +494,8 @@
                    filename="virial_profiles")
    hc.load()
 
-Worked Example of Halo Catalog in Action
-----------------------------------------
+Halo Catalog in Action
+----------------------
 
 For a full example of how to use these methods together see
 :ref:`halo-analysis-example`.

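Taken together, the new halo_catalogs.rst page describes a find-then-analyze workflow; a condensed sketch assembled from the examples on that page (the dataset paths and finder_kwargs values are the illustrative ones used there, and "my_filter"/"my_callback" stand for actions registered earlier in a script, as on the page):

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    # Halo finding on a simulation dataset
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
                     finder_kwargs={"ptype": "stars", "padding": 0.02})
    hc.create()

    # Extra analysis: pair an existing catalog with the original dataset
    halos_ds = yt.load('rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
    hc.add_filter("my_filter")      # a filter registered earlier in the script
    hc.add_callback("my_callback")  # a callback registered earlier in the script
    hc.create()
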
diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ /dev/null
@@ -1,231 +0,0 @@
-.. _halo_finding:
-
-Halo Finding
-============
-
-There are three methods of finding particle haloes in yt. The
-default method is called HOP, a method described
-in `Eisenstein and Hut (1998)
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic
-friends-of-friends (e.g. `Efstathiou et al. (1985)
-<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo
-finder is also implemented. Finally Rockstar (`Behroozi et a.
-(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
-a 6D-phase space halo finder developed by Peter Behroozi that
-excels in finding subhalos and substrcture, but does not allow
-multiple particle masses.
-
-.. _hop:
-
-HOP
----
-
-The version of HOP used in yt is an upgraded version of the
-`publicly available HOP code
-<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support
-for 64-bit floats and integers has been added, as well as
-parallel analysis through spatial decomposition. HOP builds
-groups in this fashion:
-
-#. Estimates the local density at each particle using a
-   smoothing kernel.
-
-#. Builds chains of linked particles by 'hopping' from one
-   particle to its densest neighbor. A particle which is
-   its own densest neighbor is the end of the chain.
-
-#. All chains that share the same densest particle are
-   grouped together.
-
-#. Groups are included, linked together, or discarded
-   depending on the user-supplied over density
-   threshold parameter. The default is 160.0.
-
-Please see the `HOP method paper 
-<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
-full details and the 
-:class:`~yt.analysis_modules.halo_finding.halo_objects.HOPHaloFinder`
-documentation.
-
-.. _fof:
-
-FOF
----
-
-A basic friends-of-friends halo finder is included.  See the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.FOFHaloFinder`
-documentation.
-
-.. _rockstar:
-
-Rockstar Halo Finding
----------------------
-
-Rockstar uses an adaptive hierarchical refinement of friends-of-friends
-groups in six phase-space dimensions and one time dimension, which
-allows for robust (grid-independent, shape-independent, and noise-
-resilient) tracking of substructure. The code is prepackaged with yt,
-but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead
-developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://arxiv.org/abs/1110.4372>`_.
-In order to run the Rockstar halo finder in yt, make sure you've
-:ref:`installed it so that it can integrate with yt <rockstar-installation>`.
-
-At the moment, Rockstar does not support multiple particle masses,
-instead using a fixed particle mass. This will not affect most dark matter
-simulations, but does make it less useful for finding halos from the stellar
-mass. In simulations where the highest-resolution particles all have the
-same mass (ie: zoom-in grid based simulations), one can set up a particle
-filter to select the lowest mass particles and perform the halo finding
-only on those.  See the this cookbook recipe for an example:
-:ref:`cookbook-rockstar-nested-grid`.
-
-To run the Rockstar Halo finding, you must launch python with MPI and
-parallelization enabled. While Rockstar itself does not require MPI to run,
-the MPI libraries allow yt to distribute particle information across multiple
-nodes.
-
-.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
-   connected by an Infiniband network can be problematic. Therefore, for now
-   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
-   using this flag: ``--mca btl ^openib``.
-   For example, here is how Rockstar might be called using 24 cores:
-   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
-
-The script above configures the Halo finder, launches a server process which
-disseminates run information and coordinates writer-reader processes.
-Afterwards, it launches reader and writer tasks, filling the available MPI
-slots, which alternately read particle information and analyze for halo
-content.
-
-The RockstarHaloFinder class has these options that can be supplied to the
-halo catalog through the ``finder_kwargs`` argument:
-
-* ``dm_type``, the index of the dark matter particle. Default is 1.
-* ``outbase``, This is where the out*list files that Rockstar makes should be
-  placed. Default is 'rockstar_halos'.
-* ``num_readers``, the number of reader tasks (which are idle most of the
-  time.) Default is 1.
-* ``num_writers``, the number of writer tasks (which are fed particles and
-  do most of the analysis). Default is MPI_TASKS-num_readers-1.
-  If left undefined, the above options are automatically
-  configured from the number of available MPI tasks.
-* ``force_res``, the resolution that Rockstar uses for various calculations
-  and smoothing lengths. This is in units of Mpc/h.
-  If no value is provided, this parameter is automatically set to
-  the width of the smallest grid element in the simulation from the
-  last data snapshot (i.e. the one where time has evolved the
-  longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
-* ``total_particles``, if supplied, this is a pre-calculated
-  total number of dark matter
-  particles present in the simulation. For example, this is useful
-  when analyzing a series of snapshots where the number of dark
-  matter particles should not change and this will save some disk
-  access time. If left unspecified, it will
-  be calculated automatically. Default: ``None``.
-* ``dm_only``, if set to ``True``, it will be assumed that there are
-  only dark matter particles present in the simulation.
-  This option does not modify the halos found by Rockstar, however
-  this option can save disk access time if there are no star particles
-  (or other non-dark matter particles) in the simulation. Default: ``False``.
-
-Rockstar dumps halo information in a series of text (halo*list and
-out*list) and binary (halo*bin) files inside the ``outbase`` directory.
-We use the halo list classes to recover the information.
-
-Inside the ``outbase`` directory there is a text file named ``datasets.txt``
-that records the connection between ds names and the Rockstar file names.
-
-For more information, see the
-:class:`~yt.analysis_modules.halo_finding.halo_objects.RockstarHalo` and
-:class:`~yt.analysis_modules.halo_finding.halo_objects.Halo` classes.
-
-.. _parallel-hop-and-fof:
-
-Parallel HOP and FOF
---------------------
-
-Both the HOP and FoF halo finders can run in parallel using simple
-spatial decomposition. In order to run them in parallel it is helpful
-to understand how it works. Below in the first plot (i) is a simplified
-depiction of three haloes labeled 1,2 and 3:
-
-.. image:: _images/ParallelHaloFinder.png
-   :width: 500
-
-Halo 3 is twice reflected around the periodic boundary conditions.
-
-In (ii), the volume has been sub-divided into four equal subregions,
-A,B,C and D, shown with dotted lines. Notice that halo 2 is now in
-two different subregions, C and D, and that halo 3 is now in three,
-A, B and D. If the halo finder is run on these four separate subregions,
-halo 1 is be identified as a single halo, but haloes 2 and 3 are split
-up into multiple haloes, which is incorrect. The solution is to give
-each subregion padding to oversample into neighboring regions.
-
-In (iii), subregion C has oversampled into the other three regions,
-with the periodic boundary conditions taken into account, shown by
-dot-dashed lines. The other subregions oversample in a similar way.
-
-The halo finder is then run on each padded subregion independently
-and simultaneously. By oversampling like this, haloes 2 and 3 will
-both be enclosed fully in at least one subregion and identified
-completely.
-
-Haloes identified with centers of mass inside the padded part of a
-subregion are thrown out, eliminating the problem of halo duplication.
-The centers for the three haloes are shown with stars. Halo 1 will
-belong to subregion A, 2 to C and 3 to B.
-
-To run with parallel halo finding, you must supply a value for
-padding in the finder_kwargs argument. The ``padding`` parameter
-is in simulation units and defaults to 0.02. This parameter is how
-much padding is added to each of the six sides of a subregion.
-This value should be 2x-3x larger than the largest expected halo
-in the simulation. It is unlikely, of course, that the largest
-object in the simulation will be on a subregion boundary, but there
-is no way of knowing before the halo finder is run.
-
-.. code-block:: python
-
-  import yt
-  from yt.analysis_modules.halo_analysis.api import *
-  ds = yt.load("data0001")
-
-  hc = HaloCatalog(data_ds = ds, finder_method = 'hop', finder_kwargs={'padding':0.02})
-  # --or--
-  hc = HaloCatalog(data_ds = ds, finder_method = 'fof', finder_kwargs={'padding':0.02})
-
-In general, a little bit of padding goes a long way, and too much
-just slows down the analysis and doesn't improve the answer (but
-doesn't change it).  It may be worth your time to run the parallel
-halo finder at a few paddings to find the right amount, especially
-if you're analyzing many similar datasets.
-
-.. _rockstar-installation:
-
-Rockstar Installation
----------------------
-
-Because of changes in the Rockstar API over time, yt only currently works with
-a slightly older version of Rockstar.  This version of Rockstar has been
-slightly patched and modified to run as a library inside of yt. By default it
-is not installed with yt, but installation is very easy.  The
-:ref:`install-script` used to install yt from source has a line:
-``INST_ROCKSTAR=0`` that must be changed to ``INST_ROCKSTAR=1``.  You can
-rerun this installer script over the top of an existing installation, and
-it will only install components missing from the existing installation.
-You can do this as follows.  Put your freshly modified install_script in
-the parent directory of the yt installation directory (e.g. the parent of
-``$YT_DEST``, ``yt-x86_64``, ``yt-i386``, etc.), and rerun the installer:
-
-.. code-block:: bash
-
-    cd $YT_DEST
-    cd ..
-    vi install_script.sh  // or your favorite editor to change INST_ROCKSTAR=1
-    bash < install_script.sh
-
-This will download Rockstar and install it as a library in yt.  You should now
-be able to use Rockstar and yt together.

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,11 +1,12 @@
 .. _halo-transition:
 
-Getting up to Speed with Halo Analysis in yt-3.0
-================================================
+Transitioning From yt-2 to yt-3
+===============================
 
 If you're used to halo analysis in yt-2.x, here's a guide to
 how to update your analysis pipeline to take advantage of
-the new halo catalog infrastructure.
+the new halo catalog infrastructure.  If you're starting
+from scratch, see :ref:`halo_catalog`.
 
 Finding Halos
 -------------

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/index.rst
--- a/doc/source/analyzing/analysis_modules/index.rst
+++ b/doc/source/analyzing/analysis_modules/index.rst
@@ -19,4 +19,3 @@
    two_point_functions
    clump_finding
    particle_trajectories
-   ellipsoid_analysis

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -99,9 +99,9 @@
    To work out the following examples, you should install
    `AtomDB <http://www.atomdb.org>`_ and get the files from the
    `xray_data <http://yt-project.org/data/xray_data.tar.gz>`_ auxiliary
-   data package (see the ``xray_data`` `README <xray_data_README.html>`_
-   for details on the latter). Make sure that in what follows you
-   specify the full path to the locations of these files.
+   data package (see the :ref:`xray_data_README` for details on the latter). 
+   Make sure that in what follows you specify the full path to the locations 
+   of these files.
 
 To generate photons from this dataset, we have several different things
 we need to set up. The first is a standard yt data object. It could

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ /dev/null
@@ -1,93 +0,0 @@
-.. _radial-column-density:
-
-Radial Column Density
-=====================
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.3
-
-.. note::
-
-    As of :code:`yt-3.0`, the radial column density analysis module is not
-    currently functional.  This functionality is still available in
-    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
-    help is needed to port them over.  Contact the yt-users mailing list if you
-    are interested in doing this.
-
-This module allows the calculation of column densities around a point over a
-field such as ``NumberDensity`` or ``Density``.
-This uses :ref:`healpix_volume_rendering` to interpolate column densities
-on the grid cells.
-
-Details
--------
-
-This module allows the calculation of column densities around a single point.
-For example, this is useful for looking at the gas around a radiating source.
-Briefly summarized, the calculation is performed by first creating a number
-of HEALPix shells around the central point.
-Next, the value of the column density at cell centers is found by
-linearly interpolating the values on the inner and outer shell.
-This is added as derived field, which can be used like any other derived field.
-
-Basic Example
--------------
-
-In this simple example below, the radial column density for the field
-``NumberDensity`` is calculated and added as a derived field named
-``RCDNumberDensity``.
-The calculations will use the starting point of (x, y, z) = (0.5, 0.5, 0.5) and
-go out to a maximum radius of 0.5 in code units.
-Due to the way normalization is handled in HEALPix, the column density
-calculation can extend out only as far as the nearest face of the volume.
-For example, with a center point of (0.2, 0.3, 0.4), the column density
-is calculated out to only a radius of 0.2.
-The column density will be output as zero (0.0) outside the maximum radius.
-Just like a real number column density, when the derived is added using
-``add_field``, we give the units as :math:`1/\rm{cm}^2`.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.radial_column_density.api import *
-  ds = load("data0030")
-
-  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
-    max_radius = 0.5)
-  def _RCDNumberDensity(field, data, rcd = rcdnumdens):
-      return rcd._build_derived_field(data)
-  add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
-
-  dd = ds.all_data()
-  print(dd['RCDNumberDensity'])
-
-The field ``RCDNumberDensity`` can be used just like any other derived field
-in yt.
-
-Additional Parameters
----------------------
-
-Each of these parameters is added to the call to ``RadialColumnDensity()``,
-just like ``max_radius`` is used above.
-
-  * ``steps`` : integer - Because this implementation uses linear
-    interpolation to calculate the column
-    density at each cell, the accuracy of the solution goes up as the number of
-    HEALPix surfaces is increased.
-    The ``steps`` parameter controls the number of HEALPix surfaces, and a larger
-    number is more accurate, but slower. Default = 10.
-
-  * ``base`` : string - This controls where the surfaces are placed, with
-    linear "lin" or logarithmic "log" spacing. The inner-most
-    surface is always set to the size of the smallest cell.
-    Default = "lin".
-
-  * ``Nside`` : int
-    The resolution of column density calculation as performed by
-    HEALPix. Higher numbers mean higher quality. Max = 8192.
-    Default = 32.
-
-  * ``ang_divs`` : imaginary integer
-    This number controls the gridding of the HEALPix projection onto
-    the spherical surfaces. Higher numbers mean higher quality.
-    Default = 800j.
-

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/analysis_modules/xray_data_README.rst
--- a/doc/source/analyzing/analysis_modules/xray_data_README.rst
+++ b/doc/source/analyzing/analysis_modules/xray_data_README.rst
@@ -1,3 +1,5 @@
+.. _xray_data_README:
+
 Auxiliary Data Files for use with yt's Photon Simulator
 =======================================================
 

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -131,6 +131,16 @@
 
    ds.r[:,-180:0,:]
 
+If you specify a single slice, it will be repeated along all three dimensions.
+For instance, this will give all data::
+
+   ds.r[:]
+
+And this will select a box running from 0.4 to 0.6 along all three
+dimensions::
+
+   ds.r[0.4:0.6]
+
 Selecting Fixed Resolution Regions
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 

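A quick sketch of the two selections described in the added paragraph (the dataset path is illustrative):

    import yt

    ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')

    ad = ds.r[:]          # a single slice, repeated along all axes: all data
    box = ds.r[0.4:0.6]   # a box spanning 0.4 to 0.6 along x, y, and z
    print(box['density'].shape)
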
diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -21,7 +21,7 @@
 * Derived Quantities (total mass, angular momentum, etc) (:ref:`creating_derived_quantities`,
   :ref:`derived-quantities`)
 * 1-, 2-, and 3-D profiles (:ref:`generating-profiles-and-histograms`)
-* Halo finding (:ref:`halo_finding`)
+* Halo analysis (:ref:`halo-analysis`)
 * Volume rendering (:ref:`volume_rendering`)
 * Isocontours & flux calculations (:ref:`extracting-isocontour-information`)
 
@@ -194,7 +194,7 @@
 
 The following operations use spatial decomposition:
 
-* :ref:`halo_finding`
+* :ref:`halo-analysis`
 * :ref:`volume_rendering`
 
 Grid Decomposition
@@ -501,7 +501,7 @@
 subtle art in estimating the amount of memory needed for halo finding, but a
 rule of thumb is that the HOP halo finder is the most memory intensive
 (:func:`HaloFinder`), and Friends of Friends (:func:`FOFHaloFinder`) being the
-most memory-conservative. For more information, see :ref:`halo_finding`.
+most memory-conservative. For more information, see :ref:`halo-analysis`.
 
 **Volume Rendering**
 

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/analyzing/saving_data.rst
--- a/doc/source/analyzing/saving_data.rst
+++ b/doc/source/analyzing/saving_data.rst
@@ -1,4 +1,4 @@
-.. _saving_data
+.. _saving_data:
 
 Saving Reloadable Data
 ======================

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -38,7 +38,7 @@
 # again.
 
 render_source.set_volume(kd_low_res)
-render_source.set_fields('density')
+render_source.set_field('density')
 sc.render()
 sc.save("v1.png", sigma_clip=6.0)
 

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/colormaps.py
--- a/doc/source/cookbook/colormaps.py
+++ b/doc/source/cookbook/colormaps.py
@@ -7,11 +7,11 @@
 p = yt.ProjectionPlot(ds, "z", "density", width=(100, 'kpc'))
 p.save()
 
-# Change the colormap to 'jet' and save again.  We must specify
+# Change the colormap to 'dusk' and save again.  We must specify
 # a different filename here or it will save it over the top of
 # our first projection.
-p.set_cmap(field="density", cmap='jet')
-p.save('proj_with_jet_cmap.png')
+p.set_cmap(field="density", cmap='dusk')
+p.save('proj_with_dusk_cmap.png')
 
 # Change the colormap to 'hot' and save again.
 p.set_cmap(field="density", cmap='hot')

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -303,6 +303,26 @@
 
 .. yt_cookbook:: vol-annotated.py
 
+.. _cookbook-vol-points:
+
+Volume Rendering with Points
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with point
+sources. This could represent star or dark matter particles, for example.
+
+.. yt_cookbook:: vol-points.py
+
+.. _cookbook-vol-lines:
+
+Volume Rendering with Lines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to make a volume rendering composited with line
+sources.
+
+.. yt_cookbook:: vol-lines.py
+
 .. _cookbook-opengl_vr:
 
 Advanced Interactive Data Visualization

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -65,10 +65,13 @@
 
 .. yt_cookbook:: light_ray.py
 
+.. _cookbook-single-dataset-light-ray:
+
+Single Dataset Light Ray
+~~~~~~~~~~~~~~~~~~~~~~~~
+
 This script demonstrates how to make a light ray from a single dataset.
 
-.. _cookbook-single-dataset-light-ray:
-
 .. yt_cookbook:: single_dataset_light_ray.py
 
 Creating and Fitting Absorption Spectra

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -1,22 +1,20 @@
 import yt
-import numpy as np
-from yt.visualization.volume_rendering.api import BoxSource, CoordinateVectorSource
 
 # Load the dataset.
 ds = yt.load("Enzo_64/DD0043/data0043")
-sc = yt.create_scene(ds, ('gas','density'))
-sc.get_source(0).transfer_function.grey_opacity=True
+sc = yt.create_scene(ds, ('gas', 'density'))
 
-sc.annotate_domain(ds)
-sc.render()
-sc.save("%s_vr_domain.png" % ds)
+# You may need to adjust the alpha values to get a rendering with good contrast
+# For annotate_domain, the fourth color value is alpha.
 
-sc.annotate_grids(ds)
-sc.render()
-sc.save("%s_vr_grids.png" % ds)
+# Draw the domain boundary
+sc.annotate_domain(ds, color=[1, 1, 1, 0.01])
+sc.save("%s_vr_domain.png" % ds, sigma_clip=4)
 
-# Here we can draw the coordinate vectors on top of the image by processing
-# it through the camera. Then save it out.
-sc.annotate_axes()
-sc.render()
-sc.save("%s_vr_coords.png" % ds)
+# Draw the grid boundaries
+sc.annotate_grids(ds, alpha=0.01)
+sc.save("%s_vr_grids.png" % ds, sigma_clip=4)
+
+# Draw a coordinate axes triad
+sc.annotate_axes(alpha=0.01)
+sc.save("%s_vr_coords.png" % ds, sigma_clip=4)

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/vol-annotated.py
--- a/doc/source/cookbook/vol-annotated.py
+++ b/doc/source/cookbook/vol-annotated.py
@@ -1,75 +1,29 @@
-#!/usr/bin/env python
+import yt
 
-import numpy as np
-import pylab
+ds = yt.load('Enzo_64/DD0043/data0043')
 
-import yt
-import yt.visualization.volume_rendering.old_camera as vr
+sc = yt.create_scene(ds, lens_type='perspective')
 
-ds = yt.load("maestro_subCh_plt00248")
+source = sc[0]
 
-dd = ds.all_data()
+source.set_field('density')
+source.set_log(True)
 
-# field in the dataset we will visualize
-field = ('boxlib', 'radial_velocity')
+# Set up the camera parameters: focus, width, resolution, and image orientation
+sc.camera.focus = ds.domain_center
+sc.camera.resolution = 1024
+sc.camera.north_vector = [0, 0, 1]
+sc.camera.position = [1.7, 1.7, 1.7]
 
-# the values we wish to highlight in the rendering.  We'll put a Gaussian
-# centered on these with width sigma
-vals = [-1.e7, -5.e6, -2.5e6, 2.5e6, 5.e6, 1.e7]
-sigma = 2.e5
+# You may need to adjust the alpha values to get an image with good contrast.
+# For the annotate_domain call, the fourth value in the color tuple is the
+# alpha value.
+sc.annotate_axes(alpha=.02)
+sc.annotate_domain(ds, color=[1, 1, 1, .01])
 
-mi, ma = min(vals), max(vals)
+text_string = "T = {} Gyr".format(float(ds.current_time.to('Gyr')))
 
-# Instantiate the ColorTransferfunction.
-tf =  yt.ColorTransferFunction((mi, ma))
-
-for v in vals:
-    tf.sample_colormap(v, sigma**2, colormap="coolwarm")
-
-
-# volume rendering requires periodic boundaries.  This dataset has
-# solid walls.  We need to hack it for now (this will be fixed in
-# a later yt)
-ds.periodicity = (True, True, True)
-
-
-# Set up the camera parameters: center, looking direction, width, resolution
-c = np.array([0.0, 0.0, 0.0])
-L = np.array([1.0, 1.0, 1.2])
-W = 1.5*ds.domain_width
-N = 720
-
-# +z is "up" for our dataset
-north=[0.0,0.0,1.0]
-
-# Create a camera object
-cam = vr.Camera(c, L, W, N, transfer_function=tf, ds=ds,
-                no_ghost=False, north_vector=north,
-                fields = [field], log_fields = [False])
-
-im = cam.snapshot()
-
-# add an axes triad
-cam.draw_coordinate_vectors(im)
-
-# add the domain box to the image
-nim = cam.draw_domain(im)
-
-# increase the contrast -- for some reason, the enhance default
-# to save_annotated doesn't do the trick
-max_val = im[:,:,:3].std() * 4.0
-nim[:,:,:3] /= max_val
-
-# we want to write the simulation time on the figure, so create a
-# figure and annotate it
-f = pylab.figure()
-
-pylab.text(0.2, 0.85, "{:.3g} s".format(float(ds.current_time.d)),
-           transform=f.transFigure, color="white")
-
-# tell the camera to use our figure
-cam._render_figure = f
-
-# save annotated -- this added the transfer function values,
-# and the clear_fig=False ensures it writes onto our existing figure.
-cam.save_annotated("vol_annotated.png", nim, dpi=145, clear_fig=False)
+# save an annotated version of the volume rendering including a representation
+# of the transfer function and a nice label showing the simulation time.
+sc.save_annotated("vol_annotated.png", sigma_clip=6,
+                  text_annotate=[[(.1, 1.05), text_string]])

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/vol-lines.py
--- /dev/null
+++ b/doc/source/cookbook/vol-lines.py
@@ -0,0 +1,22 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import LineSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+nlines = 50
+vertices = (np.random.random([nlines, 2, 3]) - 0.5) * 200 * kpc
+colors = np.random.random([nlines, 4])
+colors[:, 3] = 0.1
+
+lines = LineSource(vertices, colors)
+sc.add_source(lines)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=4.0)

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/cookbook/vol-points.py
--- /dev/null
+++ b/doc/source/cookbook/vol-points.py
@@ -0,0 +1,29 @@
+import yt
+import numpy as np
+from yt.visualization.volume_rendering.api import PointSource
+from yt.units import kpc
+
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+sc = yt.create_scene(ds)
+
+np.random.seed(1234567)
+
+npoints = 1000
+
+# Random particle positions
+vertices = np.random.random([npoints, 3])*200*kpc
+
+# Random colors
+colors = np.random.random([npoints, 4])
+
+# Set alpha value to something that produces a good contrast with the volume
+# rendering
+colors[:, 3] = 0.1
+
+points = PointSource(vertices, colors=colors)
+sc.add_source(points)
+
+sc.camera.width = 300*kpc
+
+sc.save(sigma_clip=5)

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -176,6 +176,7 @@
 .. _Sphinx: http://sphinx-doc.org/
 .. _pandoc: http://johnmacfarlane.net/pandoc/
 .. _ffmpeg: http://www.ffmpeg.org/
+.. _IPython: https://ipython.org/
 
 You will also need the full yt suite of `yt test data
 <http://yt-project.org/data/>`_, including the larger datasets that are not used

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/developing/extensions.rst
--- /dev/null
+++ b/doc/source/developing/extensions.rst
@@ -0,0 +1,54 @@
+.. _extensions:
+
+Extension Packages
+==================
+
+.. note:: For some additional discussion, see `YTEP-0029
+          <http://ytep.readthedocs.io/en/latest/YTEPs/YTEP-0029.html>`_, where
+          this plan was designed.
+
+As of version 3.3 of yt, we have put into place new methods for easing the
+process of developing "extensions" to yt.  Extensions might be analysis
+packages, visualization tools, or other software projects that use yt as a base
+engine but that are versioned, developed and distributed separately.  This
+brings with it the advantage of retaining control over the versioning,
+contribution guidelines, scope, etc, while also providing a mechanism for
+disseminating information about it, and potentially a method of interacting
+with other extensions.
+
+We have created a few pieces of infrastructure for developing extensions,
+making them discoverable, and distributing them to collaborators.
+
+If you have a module you would like to retain some external control over, or
+that you don't feel would fit into yt, we encourage you to build it as an
+extension module and distribute and version it independently.
+
+Hooks for Extensions
+--------------------
+
+Starting with version 3.3 of yt, any package named with the prefix ``yt_`` is
+importable from the namespace ``yt.extensions``.  For instance, the
+``yt_interaction`` package ( https://bitbucket.org/data-exp-lab/yt_interaction
+) is importable as ``yt.extensions.interaction``.
+
+In subsequent versions, we plan to include in yt a catalog of known extensions
+and where to find them; this will put discoverability directly into the code
+base.
+
+Extension Template
+------------------
+
+A template for starting an extension module (or converting an existing set of
+code to an extension module) can be found at
+https://bitbucket.org/yt_analysis/yt_extension_template .
+
+To get started, download a zipfile of the template (
+https://bitbucket.org/yt_analysis/yt_extension_template/get/tip.zip ) and
+follow the directions in ``README.md`` to modify the metadata.
+
+Distributing Extensions
+-----------------------
+
+We encourage you to version on your choice of hosting platform (Bitbucket,
+GitHub, etc), and to distribute your extension widely.  We are presently
+working on deploying a method for listing extension modules on the yt webpage.

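The import hook described in the new extensions.rst page can be exercised directly; a minimal sketch, assuming the yt_interaction package mentioned there is installed:

    # yt_interaction is installed as a normal package, and the yt.extensions
    # namespace hook (new in yt 3.3) exposes it under a second name:
    import yt_interaction
    import yt.extensions.interaction
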
diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -19,6 +19,7 @@
    developing
    building_the_docs
    testing
+   extensions
    debugdrive
    releasing
    creating_datatypes

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -103,7 +103,7 @@
    accept no arguments. The test function should do some work that tests some
    functionality and should also verify that the results are correct using
    assert statements or functions.  
-# Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
+#. Tests can ``yield`` a tuple of the form ``function``, ``argument_one``,
    ``argument_two``, etc.  For example ``yield assert_equal, 1.0, 1.0`` would be
    captured by nose as a test that asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
@@ -487,7 +487,7 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 Before any code is added to or modified in the yt codebase, each incoming
 changeset is run against all available unit and answer tests on our `continuous
-integration server <http://tests.yt-project.org>`_. While unit tests are
+integration server <https://tests.yt-project.org>`_. While unit tests are
 autodiscovered by `nose <http://nose.readthedocs.org/en/latest/>`_ itself,
 answer tests require a definition of which set of tests constitutes a given
 answer. Configuration for the integration server is stored in
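
For reference, the yield-style test described in the testing.rst hunk above
might look like the following minimal sketch (``fake_random_ds`` and
``assert_equal`` are taken from ``yt.testing``, as the surrounding documentation
suggests; the test itself is illustrative only):

.. code-block:: python

   from yt.testing import fake_random_ds, assert_equal

   def test_density_is_nonnegative():
       # fake_random_ds builds a small in-memory dataset with randomly
       # filled fields, so no external test data is required.
       ds = fake_random_ds(16, fields=("density",), units=("g/cm**3",))
       ad = ds.all_data()
       # Yield-style test: nose captures each yielded (function, *args)
       # tuple as its own test case.
       yield assert_equal, bool((ad["density"] >= 0).all()), True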

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/examining/Loading_Generic_Array_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Array_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Array_Data.ipynb
@@ -41,7 +41,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import yt\n",
@@ -58,7 +60,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "arr = np.random.random(size=(64,64,64))"
@@ -74,7 +78,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = dict(density = (arr, \"g/cm**3\"))\n",
@@ -118,7 +124,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -140,7 +148,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "posx_arr = np.random.uniform(low=-1.5, high=1.5, size=10000)\n",
@@ -167,7 +177,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -193,7 +205,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import h5py\n",
@@ -213,7 +227,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "print (f.keys())"
@@ -229,7 +245,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "units = [\"gauss\",\"gauss\",\"gauss\", \"g/cm**3\", \"erg/cm**3\", \"K\", \n",
@@ -246,7 +264,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data = {k:(v.value,u) for (k,v), u in zip(f.items(),units)}\n",
@@ -256,7 +276,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"Density\"][0].shape, length_unit=250.*cm_per_kpc, bbox=bbox, nprocs=8, \n",
@@ -273,7 +295,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "prj = yt.ProjectionPlot(ds, \"z\", [\"z-velocity\",\"Temperature\",\"Bx\"], weight_field=\"Density\")\n",
@@ -299,7 +323,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "#Find the min and max of the field\n",
@@ -313,29 +339,15 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Create a Transfer Function that goes from the minimum to the maximum of the data:"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
     "Define the properties and size of the `camera` viewport:"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "# Choose a vector representing the viewing direction.\n",
@@ -358,24 +370,41 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam = ds.camera(c, L, W, Npixels, tf, fields=['Temperature'],\n",
-    "                north_vector=[0,0,1], steady_north=True, \n",
-    "                sub_samples=5, log_fields=[False])\n",
+    "sc = yt.create_scene(ds, 'Temperature')\n",
+    "dd = ds.all_data()\n",
     "\n",
-    "cam.transfer_function.map_to_colormap(mi,ma, \n",
-    "                                      scale=15.0, colormap='algae')"
+    "source = sc[0]\n",
+    "\n",
+    "source.log_field = False\n",
+    "\n",
+    "tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)\n",
+    "tf.map_to_colormap(mi, ma, scale=15.0, colormap='algae')\n",
+    "\n",
+    "source.set_transfer_function(tf)\n",
+    "\n",
+    "sc.add_source(source)\n",
+    "\n",
+    "cam = sc.add_camera()\n",
+    "cam.width = W\n",
+    "cam.center = c\n",
+    "cam.normal_vector = L\n",
+    "cam.north_vector = [0, 0, 1]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
-    "cam.show()"
+    "sc.show(sigma_clip=4)"
    ]
   },
   {
@@ -395,7 +424,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "import astropy.io.fits as pyfits\n",
@@ -412,7 +443,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "f = pyfits.open(data_dir+\"/UnigridData/velocity_field_20.fits\")\n",
@@ -429,7 +462,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "data = {}\n",
@@ -449,7 +484,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "data[\"velocity_x\"] = data.pop(\"x-velocity\")\n",
@@ -467,7 +504,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_uniform_grid(data, data[\"velocity_x\"][0].shape, length_unit=(1.0,\"Mpc\"))\n",
@@ -495,7 +534,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data = [\n",
@@ -520,7 +561,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "for g in grid_data: \n",
@@ -538,7 +581,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": true
+   },
    "outputs": [],
    "source": [
     "grid_data[0][\"number_of_particles\"] = 0 # Set no particles in the top-level grid\n",
@@ -561,7 +606,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "ds = yt.load_amr_grids(grid_data, [32, 32, 32])"
@@ -577,7 +624,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "collapsed": false
+   },
    "outputs": [],
    "source": [
     "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
@@ -613,7 +662,7 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 3.0
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
@@ -625,4 +674,4 @@
  },
  "nbformat": 4,
  "nbformat_minor": 0
-}
\ No newline at end of file
+}
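
To make the new volume rendering cells above easier to follow outside the
notebook, here is a condensed sketch of the Scene-based workflow they switch to.
It substitutes a small random uniform-grid dataset for the one built earlier in
the notebook; the field name, bounds, and camera choices are illustrative, not
the notebook's exact values:

.. code-block:: python

   import numpy as np
   import yt

   # A stand-in uniform-grid dataset with a single "Temperature" field.
   arr = np.random.random(size=(64, 64, 64))
   ds = yt.load_uniform_grid(dict(Temperature=(arr, "K")), arr.shape,
                             length_unit=(1.0, "Mpc"))

   ad = ds.all_data()
   mi, ma = float(ad["Temperature"].min()), float(ad["Temperature"].max())

   # New Scene API, mirroring the replacement cells in the diff above.
   sc = yt.create_scene(ds, "Temperature")
   source = sc[0]
   source.log_field = False

   tf = yt.ColorTransferFunction((mi, ma), grey_opacity=False)
   tf.map_to_colormap(mi, ma, scale=15.0, colormap="algae")
   source.set_transfer_function(tf)

   cam = sc.add_camera()
   cam.normal_vector = [0.5, 0.4, 0.7]   # arbitrary viewing direction
   cam.north_vector = [0, 0, 1]

   sc.show(sigma_clip=4)   # from a script, sc.save("rendering.png", sigma_clip=4)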

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -884,6 +884,21 @@
 It's recommended that if you want higher-resolution, try reducing the value of
 ``n_ref`` to 32 or 16.
 
+yt can also be configured to build the global mesh index from a specific
+particle type, rather than from all particles, via the ``index_ptype``
+parameter. For example, to build the octree using only the ``"PartType0"``
+particles, you can do:
+
+.. code-block:: python
+
+   ds = yt.load("snapshot_061.hdf5", index_ptype="PartType0")
+
+By default, ``index_ptype`` is set to ``"all"``, meaning the index is built from
+all particles. Currently this feature only works for Gadget HDF5 and OWLS
+datasets. To bring it to other frontends, refer to this
+`PR <https://bitbucket.org/yt_analysis/yt/pull-requests/1985/add-particle-type-aware-octree/diff>`_
+for implementation details.
+
 .. _gadget-field-spec:
 
 Field Specifications
@@ -1342,24 +1357,91 @@
 a bounding box, field specification, and units as are done for standard 
 Gadget outputs.  See :ref:`loading-gadget-data` for more information.
 
-.. _loading-pyne-data:
+.. _halo-catalog-data:
 
 Halo Catalog Data
 -----------------
 
 yt has support for reading halo catalogs produced by Rockstar and the inline
 FOF/SUBFIND halo finders of Gadget and OWLS.  The halo catalogs are treated as
-particle datasets where each particle represents a single halo.  Member particles
-for individual halos can be accessed through halo data containers.  Further halo
-analysis can be performed using :ref:`halo_catalog`.
+particle datasets where each particle represents a single halo.  For example,
+this means that the ``particle_mass`` field refers to the mass of the halos.  For
+Gadget FOF/SUBFIND catalogs, the member particles for a given halo can be
+accessed by creating ``halo`` data containers.  See :ref:`halo_containers` for
+more information.
 
-In the case where halo catalogs are written to multiple files, one must only
-give the path to one of them.
+If you have access to both the halo catalog and the simulation snapshot from
+the same redshift, additional analysis can be performed for each halo using
+:ref:`halo_catalog`.
+
+.. _rockstar:
+
+Rockstar
+^^^^^^^^
+
+Rockstar halo catalogs are loaded by providing the path to one of the .bin files.
+In the case where multiple files were produced, one need only provide the path
+to a single one of them.  The field type for all fields is "halos".  Some fields
+of note available from Rockstar are:
+
++----------------+---------------------------+
+| Rockstar field | yt field name             |
++================+===========================+
+| halo id        | particle_identifier       |
++----------------+---------------------------+
+| virial mass    | particle_mass             |
++----------------+---------------------------+
+| virial radius  | virial_radius             |
++----------------+---------------------------+
+| halo position  | particle_position_(x,y,z) |
++----------------+---------------------------+
+| halo velocity  | particle_velocity_(x,y,z) |
++----------------+---------------------------+
+
+Numerous other Rockstar fields exist.  To see them, check the field list by
+typing ``ds.field_list`` for a dataset loaded as ``ds``.  Like all other datasets,
+fields must be accessed through :ref:`Data-objects`.
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("rockstar_halos/halos_0.0.bin")
+   ad = ds.all_data()
+   # halo masses
+   print(ad["halos", "particle_mass"])
+   # halo radii
+   print(ad["halos", "virial_radius"])
+
+.. _gadget_fof:
 
 Gadget FOF/SUBFIND
 ^^^^^^^^^^^^^^^^^^
 
-The two field types for GadgetFOF data are "Group" (FOF) and "Subhalo" (SUBFIND).
+Gadget FOF/SUBFIND halo catalogs work in the same way as those created by
+:ref:`rockstar`, except there are two field types: ``FOF`` for friends-of-friends
+groups and ``Subhalo`` for halos found with the SUBFIND substructure finder.
+Also like Rockstar, there are a number of fields specific to these halo
+catalogs.
+
++-------------------+---------------------------+
+| FOF/SUBFIND field | yt field name             |
++===================+===========================+
+| halo id           | particle_identifier       |
++-------------------+---------------------------+
+| halo mass         | particle_mass             |
++-------------------+---------------------------+
+| halo position     | particle_position_(x,y,z) |
++-------------------+---------------------------+
+| halo velocity     | particle_velocity_(x,y,z) |
++-------------------+---------------------------+
+| num. of particles | particle_number           |
++-------------------+---------------------------+
+| num. of subhalos  | subhalo_number (FOF only) |
++-------------------+---------------------------+
+
+Many other fields exist, especially for SUBFIND subhalos.  Check the field
+list by typing ``ds.field_list`` for a dataset loaded as ``ds``.  Like all
+other datasets, fields must be accessed through :ref:`Data-objects`.
 
 .. code-block:: python
 
@@ -1385,6 +1467,11 @@
    # x component of the spin
    print(ad["Subhalo", "SubhaloSpin_0"])
 
+.. _halo_containers:
+
+Halo Data Containers
+^^^^^^^^^^^^^^^^^^^^
+
 Halo member particles are accessed by creating halo data containers with the
 type of halo ("Group" or "Subhalo") and the halo id.  Scalar values for halos
 can be accessed in the same way.  Halos also have mass, position, and velocity
@@ -1416,8 +1503,8 @@
 ^^^^^^^^^^^^^^^^
 
 OWLS halo catalogs have a very similar structure to regular Gadget halo catalogs.
-The two field types are "FOF" and "SUBFIND".  At this time, halo member particles
-cannot be loaded.
+The two field types are ``FOF`` and ``SUBFIND``.  See :ref:`gadget_fof` for more
+information.  At this time, halo member particles cannot be loaded.
 
 .. code-block:: python
 
@@ -1427,19 +1514,7 @@
    # The halo mass
    print(ad["FOF", "particle_mass"])
 
-Rockstar
-^^^^^^^^
-
-Rockstar halo catalogs are loaded by providing the path to one of the .bin files.
-The single field type available is "halos".
-
-.. code-block:: python
-
-   import yt
-   ds = yt.load("rockstar_halos/halos_0.0.bin")
-   ad = ds.all_data()
-   # The halo mass
-   print(ad["halos", "particle_mass"])
+.. _loading-pyne-data:
 
 PyNE Data
 ---------
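
The halo data containers introduced in the new ``halo_containers`` section above
can be exercised with a short sketch like the following.  The catalog path is a
placeholder, and the ``ds.halo`` call and ``member_ids`` field name follow yt's
GadgetFOF frontend as described above; treat both as assumptions to verify
against the full documentation:

.. code-block:: python

   import yt

   # Path to a Gadget FOF/SUBFIND halo catalog (placeholder).
   ds = yt.load("gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5")

   # A data container for FOF group 0; "Subhalo" containers work the same way.
   halo = ds.halo("Group", 0)

   # Scalar attributes of the halo.
   print(halo.mass, halo.position, halo.velocity)

   # IDs of the member particles belonging to this group.
   print(halo["member_ids"])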

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/intro/index.rst
--- a/doc/source/intro/index.rst
+++ b/doc/source/intro/index.rst
@@ -105,7 +105,7 @@
 use external libraries or codes.  While they are installed with yt, they are
 not loaded by default in every session so you have to call them specifically.
 Examples include :ref:`halo analysis <halo-analysis>` (including
-:ref:`halo finding <halo_finding>`, :ref:`merger trees <merger_tree>`,
+:ref:`halo finding <halo-analysis>`, :ref:`merger trees <merger_tree>`,
 :ref:`halo mass functions <halo_mass_function>`), :ref:`synthetic observations
 <synthetic-observations>` (including :ref:`cosmological light cones
 <light-cone-generator>`, :ref:`cosmological light rays <light-ray-generator>`,

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/quickstart/6)_Volume_Rendering.ipynb
--- a/doc/source/quickstart/6)_Volume_Rendering.ipynb
+++ b/doc/source/quickstart/6)_Volume_Rendering.ipynb
@@ -106,21 +106,21 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 2",
+   "display_name": "Python 3",
    "language": "python",
-   "name": "python2"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.10"
+   "pygments_lexer": "ipython3",
+   "version": "3.5.1"
   }
  },
  "nbformat": 4,

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -611,7 +611,7 @@
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
  * Adaptive HEALPix support
- * Column density calculations (see :ref:`radial-column-density`)
+ * Column density calculations
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.
  * Substantial improvements to the documentation, including
@@ -733,9 +733,9 @@
 -----------
 
 Version 1.6 is a point release, primarily notable for the new parallel halo
-finder (see :ref:`halo_finding`)
+finder (see :ref:`halo-analysis`)
 
- * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo_finding` )
+ * (New) Parallel HOP ( http://arxiv.org/abs/1001.3411 , :ref:`halo-analysis` )
  * (Beta) Software ray casting and volume rendering
    (see :ref:`volume_rendering`)
  * Rewritten, faster and better contouring engine for clump identification

diff -r a2f734ed05e8a467c39a7c8589170faac49a5c4c -r 6a81505b217ba53d9290b66dda0a129b162d7120 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -91,6 +91,8 @@
 used internally.
 
 * ``coloredlogs`` (default: ``'False'``): Should logs be colored?
+* ``default_colormap`` (default: ``'arbre'``): What colormap should be used by
+  default for yt-produced images?
 * ``loadfieldplugins`` (default: ``'True'``): Do we want to load the plugin file?
 * ``pluginfilename``  (default ``'my_plugins.py'``) The name of our plugin file.
 * ``logfile`` (default: ``'False'``): Should we output to a log file in the
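
A minimal sketch of using the new ``default_colormap`` option (the session-level
override below assumes the ``ytcfg`` interface and the ``[yt]`` section of the
persistent configuration file, as used elsewhere in yt's configuration
documentation):

.. code-block:: python

   from yt.config import ytcfg

   # Override the default colormap for this session only; the persistent
   # value would live under the [yt] section of the configuration file.
   ytcfg["yt", "default_colormap"] = "algae"

   # Plots created after this point should pick up the new default.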

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/6f7fa3382902/
Changeset:   6f7fa3382902
Branch:      yt
User:        jzuhone
Date:        2016-07-21 15:42:41+00:00
Summary:     Removing comparisons between answers from old format file since the new answers are changing. We still test that we can read the old file without issue.
Affected #:  1 file

diff -r 6a81505b217ba53d9290b66dda0a129b162d7120 -r 6f7fa3382902a9ae3146c8998326aa1edab83100 yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -106,23 +106,19 @@
     convert_old_file(old_photon_file, "converted_photons.h5")
     convert_old_file(old_event_file, "converted_events.h5")
 
-    photons3 = PhotonList.from_file("converted_photons.h5")
-    events3 = EventList.from_h5_file("converted_events.h5")
+    PhotonList.from_file("converted_photons.h5")
+    EventList.from_h5_file("converted_events.h5")
 
     for k in photons1.keys():
         if k == "Energy":
             arr1 = uconcatenate(photons1[k])
             arr2 = uconcatenate(photons2[k])
-            arr3 = uconcatenate(photons3[k])
         else:
             arr1 = photons1[k]
             arr2 = photons2[k]
-            arr3 = photons3[k]
         assert_almost_equal(arr1, arr2)
-        assert_almost_equal(arr1, arr3)
     for k in events1.keys():
         assert_almost_equal(events1[k], events2[k])
-        assert_almost_equal(events1[k], events3[k])
 
     nevents = 0
 


https://bitbucket.org/yt_analysis/yt/commits/67200f8bbde3/
Changeset:   67200f8bbde3
Branch:      yt
User:        ngoldbaum
Date:        2016-07-22 14:48:18+00:00
Summary:     Merged in jzuhone/yt (pull request #2292)

[bugfix] Three analysis module bugfixes
Affected #:  4 files

diff -r dda861e88caf4443bf2713470cbe2964a283e565 -r 67200f8bbde35f9f6d03ba012c76cb3e64aa603c tests/tests.yaml
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -50,7 +50,7 @@
   local_tipsy_001:
     - yt/frontends/tipsy/tests/test_outputs.py
   
-  local_varia_002:
+  local_varia_003:
     - yt/analysis_modules/radmc3d_export
     - yt/frontends/moab/tests/test_c5.py
     - yt/analysis_modules/photon_simulator/tests/test_spectra.py

diff -r dda861e88caf4443bf2713470cbe2964a283e565 -r 67200f8bbde35f9f6d03ba012c76cb3e64aa603c yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -206,7 +206,7 @@
             dd_first = ds_first.all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
-                if self.data_series[0].field_info[fd].particle_type:
+                if self.data_series[0]._get_field_info(*fd).particle_type:
                     self.particle_fields.append(field)
             particles = np.empty((self.num_indices,self.num_steps))
             particles[:] = np.nan
@@ -339,7 +339,7 @@
         """
         fid = h5py.File(filename, "w")
         fields = [field for field in sorted(self.field_data.keys())]
-        fid.create_dataset("particle_indices", dtype=np.int32,
+        fid.create_dataset("particle_indices", dtype=np.int64,
                            data=self.indices)
         fid.create_dataset("particle_time", data=self.times)
         for field in fields:

diff -r dda861e88caf4443bf2713470cbe2964a283e565 -r 67200f8bbde35f9f6d03ba012c76cb3e64aa603c yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -288,7 +288,7 @@
 
         tmpspec += np.interp(emid, e_pseudo*self.scale_factor, pseudo)*de/self.scale_factor
 
-        return tmpspec
+        return tmpspec*self.scale_factor
 
     def get_spectrum(self, kT):
         """

diff -r dda861e88caf4443bf2713470cbe2964a283e565 -r 67200f8bbde35f9f6d03ba012c76cb3e64aa603c yt/analysis_modules/photon_simulator/tests/test_sloshing.py
--- a/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
+++ b/yt/analysis_modules/photon_simulator/tests/test_sloshing.py
@@ -106,23 +106,19 @@
     convert_old_file(old_photon_file, "converted_photons.h5")
     convert_old_file(old_event_file, "converted_events.h5")
 
-    photons3 = PhotonList.from_file("converted_photons.h5")
-    events3 = EventList.from_h5_file("converted_events.h5")
+    PhotonList.from_file("converted_photons.h5")
+    EventList.from_h5_file("converted_events.h5")
 
     for k in photons1.keys():
         if k == "Energy":
             arr1 = uconcatenate(photons1[k])
             arr2 = uconcatenate(photons2[k])
-            arr3 = uconcatenate(photons3[k])
         else:
             arr1 = photons1[k]
             arr2 = photons2[k]
-            arr3 = photons3[k]
         assert_almost_equal(arr1, arr2)
-        assert_almost_equal(arr1, arr3)
     for k in events1.keys():
         assert_almost_equal(events1[k], events2[k])
-        assert_almost_equal(events1[k], events3[k])
 
     nevents = 0

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


