[yt-svn] commit/yt: 15 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Mon Nov 2 11:42:32 PST 2015


15 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/2fbc390103da/
Changeset:   2fbc390103da
Branch:      yt
User:        qobilidop
Date:        2015-08-19 18:31:37+00:00
Summary:     add Wendland kernels
Affected #:  1 file

diff -r 6a02dfe8980faf70b67f647c7c497d126aa75850 -r 2fbc390103da4c25a00bb22a4a9f85ef8eff08ad yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -80,6 +80,36 @@
         kernel = 0.
     return kernel * C
 
+# Wendland C2
+cdef inline np.float64_t sph_kernel_wendland2(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 21./2/np.pi
+    if x < 1:
+        kernel = (1.-x)**4 * (1+4*x)
+    else:
+        kernel = 0.
+    return kernel * C
+
+# Wendland C4
+cdef inline np.float64_t sph_kernel_wendland4(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 495./32/np.pi
+    if x < 1:
+        kernel = (1.-x)**6 * (1+6*x+35./3*x**2)
+    else:
+        kernel = 0.
+    return kernel * C
+
+# Wendland C6
+cdef inline np.float64_t sph_kernel_wendland6(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 1365./64/np.pi
+    if x < 1:
+        kernel = (1.-x)**8 * (1+8*x+25*x**2+32*x**3)
+    else:
+        kernel = 0.
+    return kernel * C
+
 # I don't know the way to use a dict in a cdef class.
 # So in order to mimic a registry functionality,
 # I manually created a function to lookup the kernel functions.
@@ -91,6 +121,12 @@
         return sph_kernel_quartic
     elif kernel_name == 'quintic':
         return sph_kernel_quintic
+    elif kernel_name == 'Wenland2':
+        return sph_kernel_wendland2
+    elif kernel_name == 'Wenland4':
+        return sph_kernel_wendland4
+    elif kernel_name == 'Wenland6':
+        return sph_kernel_wendland6
     else:
         raise NotImplementedError
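
For reference, here is a minimal pure-Python sketch (not part of the changeset)
that mirrors the three new kernels and numerically checks that each
normalization constant makes the 3D integral 4*pi * int_0^1 q**2 W(q) dq come
out to one:

    import numpy as np

    # Pure-Python mirrors of the new Cython kernels (illustration only).
    def wendland2(x):
        return 21. / (2 * np.pi) * (1. - x)**4 * (1 + 4 * x) if x < 1 else 0.

    def wendland4(x):
        return 495. / (32 * np.pi) * (1. - x)**6 * (1 + 6 * x + 35. / 3 * x**2) if x < 1 else 0.

    def wendland6(x):
        return 1365. / (64 * np.pi) * (1. - x)**8 * (1 + 8 * x + 25 * x**2 + 32 * x**3) if x < 1 else 0.

    # Each kernel should integrate to one over its unit sphere of support.
    q = np.linspace(0., 1., 100001)
    for W in (wendland2, wendland4, wendland6):
        total = 4 * np.pi * np.trapz([qi**2 * W(qi) for qi in q], q)
        print(W.__name__, total)  # all three come out ~1.0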
 


https://bitbucket.org/yt_analysis/yt/commits/f24062bb5bcd/
Changeset:   f24062bb5bcd
Branch:      yt
User:        qobilidop
Date:        2015-08-21 02:38:14+00:00
Summary:     make kernel names lower case
Affected #:  1 file

diff -r 2fbc390103da4c25a00bb22a4a9f85ef8eff08ad -r f24062bb5bcdcf7cbda27fadfe0fde2e30d650b7 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -121,11 +121,11 @@
         return sph_kernel_quartic
     elif kernel_name == 'quintic':
         return sph_kernel_quintic
-    elif kernel_name == 'Wenland2':
+    elif kernel_name == 'wenland2':
         return sph_kernel_wendland2
-    elif kernel_name == 'Wenland4':
+    elif kernel_name == 'wenland4':
         return sph_kernel_wendland4
-    elif kernel_name == 'Wenland6':
+    elif kernel_name == 'wenland6':
         return sph_kernel_wendland6
     else:
         raise NotImplementedError


https://bitbucket.org/yt_analysis/yt/commits/311a1c8bb0c9/
Changeset:   311a1c8bb0c9
Branch:      yt
User:        qobilidop
Date:        2015-08-21 02:42:55+00:00
Summary:     correct spelling
Affected #:  1 file

diff -r f24062bb5bcdcf7cbda27fadfe0fde2e30d650b7 -r 311a1c8bb0c9b7b8748619fa8b9b56d597cd916f yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -121,11 +121,11 @@
         return sph_kernel_quartic
     elif kernel_name == 'quintic':
         return sph_kernel_quintic
-    elif kernel_name == 'wenland2':
+    elif kernel_name == 'wendland2':
         return sph_kernel_wendland2
-    elif kernel_name == 'wenland4':
+    elif kernel_name == 'wendland4':
         return sph_kernel_wendland4
-    elif kernel_name == 'wenland6':
+    elif kernel_name == 'wendland6':
         return sph_kernel_wendland6
     else:
         raise NotImplementedError
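
As the comment in particle_deposit.pxd notes, this elif chain mimics a registry
because a dict is awkward to use from a cdef class. In plain Python the same
lookup would just be a dict; the following is a hypothetical sketch only (it
assumes the sph_kernel_* functions from the .pxd file are in scope, including a
sph_kernel_cubic that is not shown in these diffs):

    # Hypothetical pure-Python stand-in for get_kernel_func's lookup chain.
    kernel_registry = {
        'cubic': sph_kernel_cubic,
        'quartic': sph_kernel_quartic,
        'quintic': sph_kernel_quintic,
        'wendland2': sph_kernel_wendland2,
        'wendland4': sph_kernel_wendland4,
        'wendland6': sph_kernel_wendland6,
    }

    def get_kernel_func(kernel_name):
        try:
            return kernel_registry[kernel_name]
        except KeyError:
            raise NotImplementedError(kernel_name)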


https://bitbucket.org/yt_analysis/yt/commits/4a1de57c1bc6/
Changeset:   4a1de57c1bc6
Branch:      yt
User:        qobilidop
Date:        2015-08-21 02:45:48+00:00
Summary:     update docstrings
Affected #:  1 file

diff -r 311a1c8bb0c9b7b8748619fa8b9b56d597cd916f -r 4a1de57c1bc6263cf6352593027d3f935b2517f4 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -165,6 +165,10 @@
             `particle_deposit` namespace as `methodname_deposit`.  Current
             methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
             `weighted_mean`, `mesh_id`, and `nearest`.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -228,6 +232,10 @@
             we are able to find and identify all relevant particles.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -310,6 +318,10 @@
             `particle_smooth` namespace as `methodname_smooth`.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------


https://bitbucket.org/yt_analysis/yt/commits/c903704e3af6/
Changeset:   c903704e3af6
Branch:      yt
User:        qobilidop
Date:        2015-09-20 23:53:55+00:00
Summary:     update Dataset.add_deposited_particle_field
Affected #:  1 file

diff -r 4a1de57c1bc6263cf6352593027d3f935b2517f4 -r c903704e3af6550ee745678c5d07d10d84b2f7e4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -910,7 +910,7 @@
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)
 
-    def add_deposited_particle_field(self, deposit_field, method):
+    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic'):
         """Add a new deposited particle field
 
         Creates a new deposited field based on the particle *deposit_field*.
@@ -924,6 +924,10 @@
            appropriately infer the correct particle type.
         method : one of 'count', 'sum', or 'cic'
            The particle deposition method to use.
+        kernel_name : string, default 'cubic'
+           This is the name of the smoothing kernel to use. Current supported
+           kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+           `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -947,15 +951,17 @@
             if method != 'count':
                 pden = data[ptype, "particle_mass"]
                 top = data.deposit(pos, [data[(ptype, deposit_field)]*pden],
-                                   method=method)
-                bottom = data.deposit(pos, [pden], method=method)
+                                   method=method, kernel_name=kernel_name)
+                bottom = data.deposit(pos, [pden], method=method,
+                                      kernel_name=kernel_name)
                 top[bottom == 0] = 0.0
                 bnz = bottom.nonzero()
                 top[bnz] /= bottom[bnz]
                 d = data.ds.arr(top, input_units=units)
             else:
                 d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field]],
-                                             method=method))
+                                             method=method,
+                                             kernel_name=kernel_name))
             return d
         name_map = {"cic": "cic", "sum": "nn", "count": "count"}
         field_name = "%s_" + name_map[method] + "_%s"


https://bitbucket.org/yt_analysis/yt/commits/176b197e3a50/
Changeset:   176b197e3a50
Branch:      yt
User:        qobilidop
Date:        2015-09-21 17:51:51+00:00
Summary:     update docstring of add_deposited_particle_field
Affected #:  1 file

diff -r c903704e3af6550ee745678c5d07d10d84b2f7e4 -r 176b197e3a500fd4db004bb90d7e5eb84758cd6a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -922,8 +922,11 @@
            The field name tuple of the particle field the deposited field will
            be created from.  This must be a field name tuple so yt can
            appropriately infer the correct particle type.
-        method : one of 'count', 'sum', or 'cic'
-           The particle deposition method to use.
+        method : string
+           This is the "method name" which will be looked up in the
+           `particle_deposit` namespace as `methodname_deposit`.  Current
+           methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
+           `weighted_mean`, `mesh_id`, and `nearest`.
         kernel_name : string, default 'cubic'
            This is the name of the smoothing kernel to use. Current supported
            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,


https://bitbucket.org/yt_analysis/yt/commits/be9fb8fc3cb5/
Changeset:   be9fb8fc3cb5
Branch:      yt
User:        qobilidop
Date:        2015-09-22 00:08:16+00:00
Summary:     remove add_density_kernel
Affected #:  1 file

diff -r 176b197e3a500fd4db004bb90d7e5eb84758cd6a -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -821,34 +821,6 @@
                        units = "code_length")
     return [field_name]
 
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
-                       kernel_name = 'cubic'):
-    if kernel_name == 'cubic':
-        field_name = (ptype, "smoothed_density")
-    else:
-        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-    field_units = registry[ptype, mass_name].units
-    def _nth_neighbor(field, data):
-        pos = data[ptype, coord_name]
-        pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name]
-        mass.convert_to_units("g")
-        densities = mass * 0.0
-        data.particle_operation(pos, [mass, densities],
-                         method="density",
-                         nneighbors = nneighbors,
-                         kernel_name = kernel_name)
-        ones = pos.prod(axis=1) # Get us in code_length**3
-        ones[:] = 1.0
-        densities /= ones
-        # Now some quick unit conversions.
-        return densities
-    registry.add_field(field_name, function = _nth_neighbor,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = True,
-                       units = "g/cm**3")
-    return [field_name]
-
 def add_union_field(registry, ptype, field_name, units):
     """
     Create a field that is the concatenation of multiple particle types.


https://bitbucket.org/yt_analysis/yt/commits/b80648a5b8e7/
Changeset:   b80648a5b8e7
Branch:      yt
User:        qobilidop
Date:        2015-10-23 03:00:16+00:00
Summary:     merge in the tip, kept add_density_kernel in resolving the conflicts
Affected #:  478 files

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -10,6 +10,7 @@
 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/analysis_modules/ppv_cube/ppv_utils.c
+yt/analysis_modules/photon_simulator/utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
 yt/geometry/fake_octree.c
@@ -38,6 +39,7 @@
 yt/utilities/lib/image_utilities.c
 yt/utilities/lib/Interpolators.c
 yt/utilities/lib/kdtree.c
+yt/utilities/lib/line_integral_convolution.c
 yt/utilities/lib/mesh_utilities.c
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,14 +1,15 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION
-recursive-include yt/gui/reason/html *.html *.png *.ico *.js *.gif *.css
-recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE*
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include yt/visualization/mapserver/html/map_index.html
+include yt/visualization/mapserver/html/leaflet/*.css
+include yt/visualization/mapserver/html/leaflet/*.js
+include yt/visualization/mapserver/html/leaflet/images/*.png
+exclude scripts/pr_backport.py
+recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
 recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
 include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
-prune doc/build/
+prune doc/build
 recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
 prune yt/frontends/_skeleton
-prune tests
-graft yt/gui/reason/html/resources
-exclude clean.sh .hgchurn

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -47,7 +47,6 @@
                 # lead to broken mercurial installations.
 INST_PNG=1      # Install a local libpng?  Same things apply as with zlib.
 INST_FTYPE=1    # Install FreeType2 locally?
-INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
@@ -109,7 +108,6 @@
     echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
     echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
     echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
-    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
@@ -426,10 +424,6 @@
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
 
-printf "%-15s = %s so I " "INST_ENZO" "${INST_ENZO}"
-get_willwont ${INST_ENZO}
-echo "be checking out Enzo"
-
 printf "%-15s = %s so I " "INST_PYX" "${INST_PYX}"
 get_willwont ${INST_PYX}
 echo "be installing PyX"
@@ -650,7 +644,6 @@
 echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
-echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '4a83f9ae1855a7fad90133b327d426201c8ccfd2e7fbe9f39b2d61a2eee2f3ebe2ea02cf80f3d4e1ad659f8e790c173df8cc99b87d0b7ce63d34aa88cfdc7939  h5py-2.5.0.tar.gz' > h5py-2.5.0.tar.gz.sha512
 echo '4073fba510ccadaba41db0939f909613c9cb52ba8fb6c1062fc9118edc601394c75e102310be1af4077d07c9b327e6bbb1a6359939a7268dc140382d0c1e0199  hdf5-1.8.14.tar.gz' > hdf5-1.8.14.tar.gz.sha512
@@ -692,7 +685,6 @@
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
-get_ytproject reason-js-20120623.zip
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -1047,14 +1039,6 @@
     fi
 fi
 
-if [ $INST_ENZO -eq 1 ]
-then
-    echo "Cloning a copy of Enzo."
-    cd ${DEST_DIR}/src/
-    ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
-    cd $MY_PWD
-fi
-
 if [ -e $HOME/.matplotlib/fontList.cache ] && \
    ( grep -q python2.6 $HOME/.matplotlib/fontList.cache )
 then
@@ -1106,16 +1090,6 @@
       echo "$DEST_DIR/bin/hg"
       echo
     fi
-    if [ $INST_ENZO -eq 1 ]
-    then
-      echo "Enzo has also been checked out, but not built."
-      echo
-      echo "$DEST_DIR/src/enzo-hg-stable"
-      echo
-      echo "The value of YT_DEST can be used as an HDF5 installation location."
-      echo "Questions about Enzo should be directed to the Enzo User List."
-      echo
-    fi
     echo
     echo "For support, see the website and join the mailing list:"
     echo

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -109,3 +109,8 @@
 .table {
     width: 50%
 }
+
+
+.navbar-form.navbar-right:last-child {
+    margin-right: -20px;
+}

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/_templates/layout.html
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,5 +1,10 @@
 {% extends '!layout.html' %}
 
+{%- block linktags %}
+    <link href="http://yt-project.org/doc/{{ pagename }}.html" rel="canonical" />
+    {{ super() }}
+{%- endblock %}
+
 {%- block extrahead %}
     {{ super() }}
     <script type="text/javascript">

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -109,6 +109,16 @@
 
 .. note:: To write out a fits file, you must install the `astropy <http://www.astropy.org>`_ python library in order to access the astropy.io.fits module.  You can usually do this by simply running `pip install astropy` at the command line.
 
+Generating Spectra in Parallel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The spectrum generator can be run in parallel simply by following the procedures 
+laid out in :ref:`parallel-computation` for running yt scripts in parallel.  
+Spectrum generation is parallelized using a multi-level strategy where each 
+absorption line is deposited by a different processor.  If the number of available 
+processors is greater than the number of lines, then the deposition of 
+individual lines will be divided over multiple processors.
+
 Fitting an Absorption Spectrum
 ------------------------------
 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -132,7 +132,7 @@
 
 .. code:: python
 
-    apec_model = TableApecModel("atomdb_v2.0.2",
+    apec_model = TableApecModel("$SPECTRAL_DATA/spectral",
                                 0.01, 20.0, 20000,
                                 thermal_broad=False,
                                 apec_vers="2.0.2")
@@ -476,6 +476,7 @@
 .. code:: python
 
    import yt
+   import numpy as np
    from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, mp
    from yt.utilities.cosmology import Cosmology
    from yt.analysis_modules.photon_simulator.api import *
@@ -548,14 +549,15 @@
                                0.01, 20.0, 20000)
    abs_model = TableAbsorbModel("tbabs_table.h5", 0.1)
 
-   thermal_model = ThermalPhotonModel(apec_model)
+   thermal_model = ThermalPhotonModel(apec_model, photons_per_chunk=40000000)
    photons = PhotonList.from_scratch(sphere, redshift, A,
                                      exp_time, thermal_model, center="c")
 
 
    events = photons.project_photons([0.0,0.0,1.0], 
                                     responses=["sim_arf.fits","sim_rmf.fits"], 
-                                    absorb_model=abs_model)
+                                    absorb_model=abs_model,
+                                    north_vector=[0.0,1.0,0.0])
 
    events.write_fits_image("img.fits", clobber=True)
 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -23,7 +23,7 @@
 ----------------------
 
 Mesh fields can be filtered by two methods: cut region objects 
-(:class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`) 
+(:class:`~yt.data_objects.selection_data_containers.YTCutRegion`) 
 and NumPy boolean masks.  Boolean masks are simpler, but they only work
 for examining datasets, whereas cut regions objects create wholly new
 data objects suitable for full analysis (data examination, image generation, 
@@ -111,7 +111,7 @@
 
 .. code-block:: python
 
-    @yt.particle_filter(requires=["particle_type], filtered_type='all')
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
     def stars(pfilter, data):
         filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -173,7 +173,7 @@
 ---------------------------------
 
 To calculate the values along a line connecting two points in a simulation, you
-can use the object :class:`~yt.data_objects.selection_data_containers.YTRayBase`,
+can use the object :class:`~yt.data_objects.selection_data_containers.YTRay`,
 accessible as the ``ray`` property on a index.  (See :ref:`data-objects`
 for more information on this.)  To do so, you can supply two points and access
 fields within the returned object.  For instance, this code will generate a ray

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,4 +1,4 @@
-.. _data-objects:
+.. _Data-objects:
 
 Data Objects
 ============
@@ -97,7 +97,7 @@
 """"""""""
 
 **Point** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTPointBase`    
+    | Class :class:`~yt.data_objects.selection_data_containers.YTPoint`    
     | Usage: ``point(coord, ds=None, field_parameters=None, data_source=None)``
     | A point defined by a single cell at specified coordinates.
 
@@ -105,7 +105,7 @@
 """"""""""
 
 **Ray (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`
     | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) stretching through the full domain 
       aligned with one of the x,y,z axes.  Defined by an axis and a point
@@ -113,7 +113,7 @@
       :ref:`note about ray data value ordering <ray-data-ordering>`.
 
 **Ray (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRayBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRay`
     | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None, data_source=None)``
     | A line (of data cells) defined by arbitrary start and end coordinates. 
       Please see this 
@@ -123,13 +123,13 @@
 """"""""""
 
 **Slice (Axis-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSliceBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSlice`
     | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to one of the axes and intersecting a particular 
       coordinate.
 
 **Slice (Arbitrarily-Aligned)** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlaneBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCuttingPlane`
     | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None, data_source=None)``
     | A plane normal to a specified vector and intersecting a particular 
       coordinate.
@@ -145,7 +145,7 @@
       ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
 **Box Region** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTRegion`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A box-like region aligned with the grid axis orientation.  It is 
@@ -156,14 +156,14 @@
       is assumed to be the midpoint between the left and right edges.
 
 **Disk/Cylinder** 
-    | Class: :class:`~yt.data_objects.selection_data_containers.YTDiskBase`
+    | Class: :class:`~yt.data_objects.selection_data_containers.YTDisk`
     | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None, data_source=None)``
     | A cylinder defined by a point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
 **Ellipsoid** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoidBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTEllipsoid`
     | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None, data_source=None)``
     | An ellipsoid with axis magnitudes set by semi_major_axis_length, 
      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector 
@@ -171,7 +171,7 @@
      of the semi-medium and semi_minor axes.
 
 **Sphere** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTSphereBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTSphere`
     | Usage: ``sphere(center, radius, ds=None, field_parameters=None, data_source=None)``
     | A sphere defined by a central coordinate and a radius.
 
@@ -194,7 +194,7 @@
     | See :ref:`boolean_data_objects`.
 
 **Filter** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
     | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
     | A ``cut_region`` is a filter which can be applied to any other data 
       object.  The filter is defined by the conditionals present, which 
@@ -203,7 +203,7 @@
       For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 
-    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollectionBase`
+    | Class :class:`~yt.data_objects.selection_data_containers.YTDataCollection`
     | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
     | A ``data_collection`` is a list of data objects that can be 
       sampled and processed as a whole in a single data object.
@@ -214,13 +214,13 @@
 ^^^^^^^^^^^^^^^^^^^^
 
 **Fixed-Resolution Region** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTCoveringGrid`
     | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted to a single, specified resolution.
       See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region with Smoothing** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid`
     | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted and interpolated to a single, 
       specified resolution.  Identical to covering_grid, except that it 
@@ -228,7 +228,7 @@
       :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region for Particle Deposition** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
       mesh structure, but this may have too much or too little resolution
@@ -238,7 +238,7 @@
       information.
 
 **Projection** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProj`
     | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
@@ -248,14 +248,14 @@
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamlineBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTStreamline`
     | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
     | A ``streamline`` can be traced out by identifying a starting coordinate (or 
       list of coordinates) and allowing it to trace a vector field, like gas
       velocity.  See :ref:`streamlines` for more information.
 
 **Surface** 
-    | Class :class:`~yt.data_objects.construction_data_containers.YTSurfaceBase`
+    | Class :class:`~yt.data_objects.construction_data_containers.YTSurface`
     | Usage: ``surface(data_source, field, field_value)``
     | The surface defined by all an isocontour in any mesh field.  An existing 
       data object must be provided as the source, as well as a mesh field
@@ -358,7 +358,7 @@
 holdover from the time when yt was used exclusively for data that came in
 regularly structured grid patches, and does not necessarily work as well for
 data that is composed of discrete objects like particles.  To augment this, the
-:class:`~yt.data_objects.construction_data_containers.YTArbitraryGridBase` object 
+:class:`~yt.data_objects.construction_data_containers.YTArbitraryGrid` object 
 was created, which enables construction of meshes (onto which particles can be
 deposited or smoothed) in arbitrary regions.  This eliminates any assumptions
 on yt's part about how the data is organized, and will allow for more
@@ -444,7 +444,7 @@
 set of level sets.  The second (``connected_sets``) will be a dict of dicts.
 The key for the first (outer) dict is the level of the contour, corresponding
 to ``contour_values``.  The inner dict returned is keyed by the contour ID.  It
-contains :class:`~yt.data_objects.selection_data_containers.YTCutRegionBase`
+contains :class:`~yt.data_objects.selection_data_containers.YTCutRegion`
 objects.  These can be queried just as any other data object.  The clump finder 
 (:ref:`clump_finding`) differs from the above method in that the contour 
 identification is performed recursively within each individual structure, and 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -42,7 +42,18 @@
 
     $ pip install mpi4py
 
-Once that has been installed, you're all done!  You just need to launch your
+If you have an Anaconda installation of yt and there is no MPI library on the
+system you are using try:
+
+.. code-block:: bash
+
+    $ conda install mpi4py
+
+This will install `MPICH2 <https://www.mpich.org/>`_ and will interfere with
+other MPI libraries that are already installed. Therefore, it is preferable to
+use the ``pip`` installation method.
+
+Once mpi4py has been installed, you're all done!  You just need to launch your
 scripts with ``mpirun`` (or equivalent) and signal to yt that you want to
 run them in parallel by invoking the ``yt.enable_parallelism()`` function in
 your script.  In general, that's all it takes to get a speed benefit on a
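
For context, the workflow the surrounding text describes amounts to the
following minimal sketch (the dataset is illustrative):

    # parallel_script.py -- launch with, e.g.: mpirun -np 4 python parallel_script.py
    import yt
    yt.enable_parallelism()

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative dataset
    ad = ds.all_data()
    # Derived quantities such as extrema are computed in parallel once
    # enable_parallelism() has been called.
    print(ad.quantities.extrema("density"))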

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
+  "signature": "sha256:0dbaef644354e4d0191367f8f90e6dfd0d3d527925ef0331e1ef381c9099a8cd"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -516,9 +516,147 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Saving and Loading `YTArray`s to/from disk"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`YTArray`s can be written to disk, to be loaded again to be used in yt or in a different context later. There are two formats that can be written to/read from: HDF5 and ASCII.  \n",
+      "\n",
+      "To write to HDF5, use `write_hdf5`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_dens = YTArray(np.random.random(10), 'Msun/kpc**3')\n",
+      "my_temp = YTArray(np.random.random(10), 'K')\n",
+      "my_dens.write_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "my_temp.write_hdf5(\"my_data.h5\", dataset_name=\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Where we used the `dataset_name` keyword argument to create a separate dataset for each array in the same file.\n",
+      "\n",
+      "We can use the `from_hdf5` classmethod to read the data back in:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "read_dens = YTArray.from_hdf5(\"my_data.h5\", dataset_name=\"density\")\n",
+      "print read_dens\n",
+      "print my_dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `info` keyword argument to `write_hdf5` to write some additional data to the file, which will be stored as attributes of the dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "my_vels = YTArray(np.random.normal(10), 'km/s')\n",
+      "info = {\"source\":\"galaxy cluster\",\"user\":\"jzuhone\"}\n",
+      "my_vels.write_hdf5(\"my_data.h5\", dataset_name=\"velocity\", info=info)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To write one or more `YTArray`s to an ASCII text file, use `yt.savetxt`, which works a lot like NumPy's `savetxt`, except with units:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "a = YTArray(np.random.random(size=10), \"cm\")\n",
+      "b = YTArray(np.random.random(size=10), \"g\")\n",
+      "c = YTArray(np.random.random(size=10), \"s\")\n",
+      "yt.savetxt(\"my_data.dat\", [a,b,c], header='My cool data', footer='Data is over', delimiter=\"\\t\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The file we wrote can then be easily used in other contexts, such as plotting in Gnuplot, or loading into a spreadsheet, or just for causal examination. We can quickly check it here:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%%bash \n",
+      "more my_data.dat"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "You can see that the header comes first, and then right before the data we have a subheader marking the units of each column. The footer comes after the data. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`yt.loadtxt` can be used to read the same data with units back in, or read data that has been generated from some other source. Just make sure it's in the format above. `loadtxt` can also selectively read from particular columns in the file with the `usecols` keyword argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "bb, cc = yt.loadtxt(\"my_data.dat\", usecols=(1,2), delimiter=\"\\t\")\n",
+      "print bb\n",
+      "print b\n",
+      "print\n",
+      "print cc\n",
+      "print c"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
+  "signature": "sha256:6a06d5720eb6316ac0d322ef0898ec20f33d65ea3eeeacef35ae1d869af12607"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -352,7 +352,7 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
+      "Round-Trip Conversions to and from Other Unit Systems"
      ]
     },
     {
@@ -503,6 +503,58 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also do the same thing with unitful quantities from the [Pint package](http://pint.readthedocs.org), using essentially the same procedure:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from pint import UnitRegistry\n",
+      "ureg = UnitRegistry()\n",
+      "v = 1000.*ureg.km/ureg.s\n",
+      "w = yt.YTQuantity.from_pint(v)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print v, type(v)\n",
+      "print w, type(w)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ptemp = temp.to_pint()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print ptemp, type(ptemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -34,7 +34,7 @@
     try:
         subprocess.check_call(cmd)
         result = True
-    except subprocess.CalledProcessError, e:
+    except subprocess.CalledProcessError as e:
         print(("Stdout output:\n", e.output))
         result = False
     assert result

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    building_the_docs
    testing
    debugdrive
+   releasing
    creating_datatypes
    creating_derived_fields
    creating_derived_quantities

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/developing/releasing.rst
--- /dev/null
+++ b/doc/source/developing/releasing.rst
@@ -0,0 +1,208 @@
+How to Do a Release
+-------------------
+
+Periodically, the yt development community issues new releases. Since yt follows
+`semantic versioning <http://semver.org/>`_, the type of release can be read off
+from the version number used. Version numbers should follow the scheme
+``MAJOR.MINOR.PATCH``. There are three kinds of possible releases:
+
+* Bugfix releases
+
+  These releases are regularly scheduled and will optimally happen approximately
+  once a month. These releases should contain only fixes for bugs discovered in
+  earlier releases and should not contain new features or API changes. Bugfix
+  releases should increment the ``PATCH`` version number. Bugfix releases should
+  *not* be generated by merging from the ``yt`` branch, instead bugfix pull
+  requests should be manually backported using the PR backport script, described
+  below. Version ``3.2.2`` is a bugfix release.
+
+* Minor releases
+
+  These releases happen when new features are deemed ready to be merged into the
+  ``stable`` branch and should not happen on a regular schedule. Minor releases
+  can also include fixes for bugs if the fix is determined to be too invasive
+  for a bugfix release. Minor releases should *not* include
+  backwards-incompatible changes and should not change APIs.  If an API change
+  is deemed to be necessary, the old API should continue to function but might
+  trigger deprecation warnings. Minor releases should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Minor releases should increment the
+  ``MINOR`` version number and reset the ``PATCH`` version number to zero.
+  Version ``3.3.0`` is a minor release.
+
+* Major releases
+
+  These releases happen when the development community decides to make major
+  backwards-incompatible changes. In principle a major version release could
+  include arbitrary changes to the library. Major version releases should only
+  happen after extensive discussion and vetting among the developer and user
+  community. Like minor releases, a major release should happen by merging the
+  ``yt`` branch into the ``stable`` branch. Major releases should increment the
+  ``MAJOR`` version number and reset the ``MINOR`` and ``PATCH`` version numbers
+  to zero. If it ever happens, version ``4.0.0`` will be a major release.
+
+The job of doing a release differs depending on the kind of release. Below, we
+describe the necessary steps for each kind of release in detail.
+
+Doing a Bugfix Release
+~~~~~~~~~~~~~~~~~~~~~~
+
+As described above, bugfix releases are regularly scheduled updates for minor
+releases to ensure fixes for bugs make their way out to users in a timely
+manner. Since bugfix releases should not include new features, we do not issue
+bugfix releases by simply merging from the development ``yt`` branch into the
+``stable`` branch.  Instead, we make use of the ``pr_backport.py`` script to
+manually cherry-pick bugfixes from the ``yt`` branch onto the ``stable``
+branch.
+
+The backport script issues interactive prompts to backport individual pull
+requests to the ``stable`` branch in a temporary clone of the main yt mercurial
+repository on bitbucket. The script is written this way to avoid editing
+history in a clone of the repository that a developer uses for day-to-day work
+and to avoid mixing work-in-progress changes with changes that have made their
+way to the "canonical" yt repository on bitbucket.
+
+Rather than automatically manipulating the temporary repository by scripting
+mercurial commands using ``python-hglib``, the script must be "operated" by a
+human who is ready to think carefully about what the script is telling them
+to do. Most operations will merely require copy/pasting a suggested mercurial
+command. However, some changes will require manual backporting.
+
+To run the backport script, first open two terminal windows. The first window
+will be used to run the backport script. The second terminal will be used to
+manipulate a temporary clone of the yt mercurial repository. In the first
+window, navigate to the ``scripts`` directory at the root of the yt repository
+and run the backport script,
+
+.. code-block:: bash
+
+   $ cd $YT_HG/scripts
+   $ python pr_backport.py
+
+You will then need to wait for about a minute (depending on the speed of your
+internet connection and bitbucket's servers) while the script makes a clone of
+the main yt repository and then gathers information about pull requests that
+have been merged since the last tagged release. Once this step finishes, you
+will be prompted to navigate to the temporary folder in a new separate terminal
+session. Do so, and then hit the enter key in the original terminal session.
+
+For each pull request in the set of pull requests that were merged since the
+last tagged release that were pointed at the "main" line of development
+(e.g. not the ``experimental`` bookmark), you will be prompted by the script
+with the PR number, title, description, and a suggested mercurial
+command to use to backport the pull request. If the pull request consists of a
+single changeset, you will be prompted to use ``hg graft``. If it contains more
+than one changeset, you will be prompted to use ``hg rebase``. Note that
+``rebase`` is an optional extension for mercurial that is not turned on by
+default. To enable it, add a section like the following in your ``.hgrc`` file:
+
+.. code-block:: none
+
+   [extensions]
+   rebase=
+
+Since ``rebase`` is bundled with core mercurial, you do not need to specify a
+path to the rebase extension, just say ``rebase=`` and mercurial will find the
+version of ``rebase`` bundled with mercurial. Note also that mercurial does not
+automatically update to the tip of the rebased head after executing ``hg
+rebase`` so you will need to manually issue ``hg update stable`` to move your
+working directory to the new head of the stable branch. The backport script
+should prompt you with a suggestion to update as well.
+
+If the pull request contains merge commits, you must take care to *not* backport
+commits that merge with the main line of development on the ``yt`` branch. Doing
+so may bring unrelated changes, including new features, into a bugfix
+release. If the pull request you'd like to backport contains merge commits, the
+backport script should warn you to be extra careful.
+
+Once you've finished backporting, the script will let you know that you are done
+and warn you to push your work. The temporary repository you have been working
+with will be deleted as soon as the script exits, so take care to push your work
+on the ``stable`` branch to your fork on bitbucket. Once you've pushed to your
+fork, you will be able to issue a pull request containing the backported fixes
+just like any other yt pull request.
+
+Doing a Minor or Major Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is much simpler than a bugfix release.  All that needs to happen is the
+``yt`` branch must get merged into the ``stable`` branch, and any conflicts that
+happen must be resolved, almost certainly in favor of the yt branch. This can
+happen either using a merge tool like ``vimdiff`` and ``kdiff3`` or by telling
+mercurial to write merge markers. If you prefer merge markers, the following
+configuration options should be turned on in your ``hgrc`` to get more detail
+during the merge:
+
+.. code-block:: none
+
+   [ui]
+   merge = internal:merge3
+   mergemarkers = detailed
+
+The first option tells mercurial to write merge markers that show the state of
+the conflicted region of the code on both sides of the merge as well as the
+"base" most recent common ancestor changeset. The second option tells mercurial
+to add extra information about the code near the merge markers.
+
+
+Incrementing Version Numbers and Tagging a Release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before creating the tag for the release, you must increment the version numbers
+that are hard-coded in a few files in the yt source so that version metadata
+for the code is generated correctly. This includes things like ``yt.__version__``
+and the version that gets read by the Python Package Index (PyPI) infrastructure.
+
+The paths relative to the root of the repository for the three files that need
+to be edited are:
+
+* ``doc/source/conf.py``
+
+  The ``version`` and ``release`` variables need to be updated.
+
+* ``setup.py``
+
+  The ``VERSION`` variable needs to be updated
+
+* ``yt/__init__.py``
+
+  The ``__version__`` variable must be updated.
+
+Once these files have been updated, commit these updates. This is the commit we
+will tag for the release.
+
+To actually create the tag, issue the following command:
+
+.. code-block:: bash
+
+   hg tag <tag-name>
+
+Where ``<tag-name>`` follows the project's naming scheme for tags
+(e.g. ``yt-3.2.1``). Commit the tag, and you should be ready to upload the
+release to pypi.
+
+If you are doing a minor or major version number release, you will also need to
+update back to the development branch and update the development version numbers
+in the same files.
+
+
+Uploading to PyPI
+~~~~~~~~~~~~~~~~~
+
+To actually upload the release to the Python Package Index, you just need to
+issue the following command:
+
+.. code-block:: bash
+
+   python setup.py sdist upload -r https://pypi.python.org/pypi
+
+You will be prompted for your PyPI credentials and then the package should
+upload. Note that for this to complete successfully, you will need an account on
+PyPI and that account will need to be registered as an "owner" of the yt
+package. Right now there are three owners: Matt Turk, Britton Smith, and Nathan
+Goldbaum.
+
+After the release is uploaded to PyPI, you should send out an announcement
+e-mail to the yt mailing lists as well as other possibly interested mailing
+lists for all but bugfix releases. In addition, you should contact John ZuHone
+about uploading binary wheels to PyPI for Windows and OS X users and contact
+Nathan Goldbaum about getting the Anaconda packages updated.
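
A sketch of the three version-number edits the new document calls for (the
values shown are purely illustrative):

    # doc/source/conf.py (Sphinx metadata)
    version = '3.2'
    release = '3.2.2'

    # setup.py
    VERSION = "3.2.2"

    # yt/__init__.py
    __version__ = "3.2.2"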

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/developing/testing.rst
--- a/doc/source/developing/testing.rst
+++ b/doc/source/developing/testing.rst
@@ -84,6 +84,9 @@
 * :func:`~yt.testing.assert_equal` can operate on arrays.
 * :func:`~yt.testing.assert_almost_equal` can operate on arrays and accepts a
   relative allowable difference.
+* :func:`~yt.testing.assert_allclose_units` raises an error if two arrays are
+  not equal up to a desired absolute or relative tolerance. This wraps numpy's
+  assert_allclose to correctly verify unit consistency as well.
 * :func:`~yt.testing.amrspace` provides the ability to create AMR grid
   structures.
 * :func:`~yt.testing.expand_keywords` provides the ability to iterate over
@@ -99,9 +102,10 @@
 #. Inside that directory, create a new python file prefixed with ``test_`` and
    including the name of the functionality.
 #. Inside that file, create one or more routines prefixed with ``test_`` that
-   accept no arguments.  These should ``yield`` a set of values of the form
-   ``function``, ``arguments``.  For example ``yield assert_equal, 1.0, 1.0``
-   would evaluate that 1.0 equaled 1.0.
+   accept no arguments.  These should ``yield`` a tuple of the form
+   ``function``, ``argument_one``, ``argument_two``, etc.  For example
+   ``yield assert_equal, 1.0, 1.0`` would be captured by nose as a test that
+   asserts that 1.0 is equal to 1.0.
 #. Use ``fake_random_ds`` to test on datasets, and be sure to test for
    several combinations of ``nproc``, so that domain decomposition can be
    tested as well.
@@ -113,6 +117,53 @@
 ``yt/data_objects/tests/test_covering_grid.py``, which covers a great deal of
 functionality.
 
+Debugging failing tests
+^^^^^^^^^^^^^^^^^^^^^^^
+
+When writing new tests, often one exposes bugs or writes a test incorrectly,
+causing an exception to be raised or a failed test. To help debug issues like
+this, ``nose`` can drop into a debugger whenever a test fails or raises an
+exception. This can be accomplished by passing ``--pdb`` and ``--pdb-failures``
+to the ``nosetests`` executable. These options will drop into the pdb debugger
+whenever an error is raised or a failure happens, respectively. Inside the
+debugger you can interactively print out variables and go up and down the call
+stack to determine the context for your failure or error.
+
+.. code-block:: bash
+
+    nosetests --pdb --pdb-failures
+
+In addition, one can debug more crudely using print statements. To do this,
+you can add print statements to the code as normal. However, the test runner
+will capture all print output by default. To ensure that output gets printed
+to your terminal while the tests are running, pass ``-s`` to the ``nosetests``
+executable.
+
+Lastly, to quickly debug a specific failing test, it is best to only run that
+one test during your testing session. This can be accomplished by explicitly
+passing the name of the test function or class to ``nosetests``, as in the
+following example:
+
+.. code-block:: bash
+
+    $ nosetests yt.visualization.tests.test_plotwindow:TestSetWidth
+
+This nosetests invocation will only run the tests defined by the
+``TestSetWidth`` class.
+
+Finally, to determine which test is failing while the tests are running, it helps
+to run the tests in "verbose" mode. This can be done by passing the ``-v`` option
+to the ``nosetests`` executable.
+
+All of the above ``nosetests`` options can be combined. So, for example to run
+the ``TestSetWidth`` tests with verbose output, letting the output of print
+statements come out on the terminal prompt, and enabling pdb debugging on errors
+or test failures, one would do:
+
+.. code-block:: bash
+
+    $ nosetests --pdb --pdb-failures -v -s yt.visualization.tests.test_plotwindow:TestSetWidth
+
 .. _answer_testing:
 
 Answer Testing
@@ -122,8 +173,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^
 
 Answer tests test **actual data**, and many operations on that data, to make
-sure that answers don't drift over time.  This is how we will be testing
-frontends, as opposed to operations, in yt.
+sure that answers don't drift over time.  This is how we test frontends, as
+opposed to operations, in yt.
 
 .. _run_answer_testing:
 
@@ -133,20 +184,104 @@
 The very first step is to make a directory and copy over the data against which
 you want to test.  Currently, we test:
 
+NMSU ART
+~~~~~~~~
+
+* ``D9p_500/10MpcBox_HartGal_csf_a0.500.d``
+
+ARTIO
+~~~~~
+
+* ``sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art``
+
+Athena
+~~~~~~
+
+* ``ShockCloud/id0/Cloud.0050.vtk``
+* ``MHDBlast/id0/Blast.0100.vtk``
+* ``RamPressureStripping/id0/rps.0062.vtk``
+* ``MHDSloshing/virgo_low_res.0054.vtk``
+
+Boxlib
+~~~~~~
+
+* ``RadAdvect/plt00000``
+* ``RadTube/plt00500``
+* ``StarParticles/plrd01000``
+
+Chombo
+~~~~~~
+
+* ``TurbBoxLowRes/data.0005.3d.hdf5``
+* ``GaussianCloud/data.0077.3d.hdf5``
+* ``IsothermalSphere/data.0000.3d.hdf5``
+* ``ZeldovichPancake/plt32.2d.hdf5``
+* ``KelvinHelmholtz/data.0004.hdf5``
+
+Enzo
+~~~~
+
 * ``DD0010/moving7_0010`` (available in ``tests/`` in the yt distribution)
 * ``IsolatedGalaxy/galaxy0030/galaxy0030``
+* ``enzo_tiny_cosmology/DD0046/DD0046``
+* ``enzo_cosmology_plus/DD0046/DD0046``
+
+FITS
+~~~~
+
+* ``radio_fits/grs-50-cube.fits``
+* ``UnigridData/velocity_field_20.fits``
+
+FLASH
+~~~~~
+
 * ``WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030``
 * ``GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300``
-* ``TurbBoxLowRes/data.0005.3d.hdf5``
-* ``GaussianCloud/data.0077.3d.hdf5``
-* ``RadAdvect/plt00000``
-* ``RadTube/plt00500``
+
+Gadget
+~~~~~~
+
+* ``IsothermalCollapse/snap_505``
+* ``IsothermalCollapse/snap_505.hdf5``
+* ``GadgetDiskGalaxy/snapshot_200.hdf5``
+
+Halo Catalog
+~~~~~~~~~~~~
+
+* ``owls_fof_halos/groups_001/group_001.0.hdf5``
+* ``owls_fof_halos/groups_008/group_008.0.hdf5``
+* ``gadget_fof_halos/groups_005/fof_subhalo_tab_005.0.hdf5``
+* ``gadget_fof_halos/groups_042/fof_subhalo_tab_042.0.hdf5``
+* ``rockstar_halos/halos_0.0.bin``
+
+MOAB
+~~~~
+
+* ``c5/c5.h5m``
+
+
+RAMSES
+~~~~~~
+
+* ``output_00080/info_00080.txt``
+
+Tipsy
+~~~~~
+
+* ``halo1e11_run1.00400/halo1e11_run1.00400``
+* ``agora_1e11.00400/agora_1e11.00400``
+* ``TipsyGalaxy/galaxy.00300``
+
+OWLS
+~~~~
+
+* ``snapshot_033/snap_033.0.hdf5``
 
 These datasets are available at http://yt-project.org/data/.
 
 Next, modify the file ``~/.yt/config`` to include a section ``[yt]``
 with the parameter ``test_data_dir``.  Set this to point to the
-directory with the test data you want to compare.  Here is an example
+directory with the test data you want to use.  Here is an example
 config file:
 
 .. code-block:: none
@@ -154,47 +289,46 @@
    [yt]
    test_data_dir = /Users/tomservo/src/yt-data
 
-More data will be added over time.  To run the tests, you can import the yt
-module and invoke ``yt.run_nose()`` with a new keyword argument:
+More data will be added over time.  To run the answer tests, you must first
+generate a set of test answers locally on a "known good" revision, then update
+to the revision you want to test, and run the tests again using the locally
+stored answers.
 
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True)
-
-If you have installed yt using ``python setup.py develop`` you can also
-optionally invoke nose using the ``nosetests`` command line interface:
+Let's focus on running the answer tests for a single frontend. It's possible to
+run the answer tests for **all** the frontends, but due to the large number of
+test datasets we currently use, this is not normally done except on the yt
+project's continuous integration server.
 
 .. code-block:: bash
 
    $ cd $YT_HG
-   $ nosetests --with-answer-testing
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-name=local-tipsy frontends.tipsy
 
-In either case, the current gold standard results will be downloaded from the
-rackspace cloud and compared to what is generated locally.  The results from a
-nose testing session are pretty straightforward to understand, the results for
-each test are printed directly to STDOUT. If a test passes, nose prints a
-period, F if a test fails, and E if the test encounters an exception or errors
-out for some reason.  If you want to also run tests for the 'big' datasets,
-then you can use the ``answer_big_data`` keyword argument:
-
-.. code-block:: python
-
-   import yt
-   yt.run_nose(run_answer_tests=True, answer_big_data=True)
-
-or, in the base directory of the yt mercurial repository:
+This command will create a set of local answers from the tipsy frontend tests
+and store them in ``$HOME/Documents/test`` (this can but does not have to be the
+same directory as the ``test_data_dir`` configuration variable defined in your
+``.yt/config`` file) in a file named ``local-tipsy``. To run the tipsy
+frontend's answer tests using a different yt changeset, update to that
+changeset, recompile if necessary, and run the tests using the following
+command:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing --answer-big-data
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-name=local-tipsy frontends.tipsy
 
-It's also possible to only run the answer tests for one frontend.  For example,
-to run only the enzo answers tests, one can do,
+The results from a nose testing session are pretty straightforward to
+understand: the result for each test is printed directly to STDOUT.  If a test
+passes, nose prints a period; it prints an F if a test fails, and an E if the
+test encounters an exception or errors out for some reason.  Explicit
+descriptions for each test are also printed if you pass ``-v`` to the
+``nosetests`` executable.  If you also want to run tests for the 'big'
+datasets, then you will need to pass ``--answer-big-data`` to ``nosetests``.
+For example, to run the tests for the OWLS frontend, do the following:
 
 .. code-block:: bash
 
-   $ nosetests --with-answer-testing yt.frontends.enzo
+   $ nosetests --with-answer-testing --local --local-dir $HOME/Documents/test --answer-store --answer-big-data frontends.owls
+
 
 How to Write Answer Tests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -260,38 +394,21 @@
   directory.
 
 * Create a new routine that operates similarly to the routines you can see
-  in Enzo's outputs.
+  in Enzo's output tests.
 
   * This routine should test a number of different fields and data objects.
 
   * The test routine itself should be decorated with
-    ``@requires_ds(file_name)``  This decorate can accept the argument
-    ``big_data`` for if this data is too big to run all the time.
+    ``@requires_ds(path_to_test_dataset)``. This decorator can accept the
+    argument ``big_data=True`` if the test is expensive.
 
-  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that
-    you can yield from to execute a bunch of standard tests.  This is where
-    you should start, and then yield additional tests that stress the
-    outputs in whatever ways are necessary to ensure functionality.
+  * There are ``small_patch_amr`` and ``big_patch_amr`` routines that you can
+    yield from to execute a set of standard tests. In addition, ``sph_answer``
+    is available and is better suited to SPH particle datasets. This is where
+    you should start, and then yield additional tests that stress the outputs in
+    whatever ways are necessary to ensure functionality (a sketch follows this
+    list).
 
   * **All tests should be yielded!**
 
 If you are adding to a frontend that has a few tests already, skip the first
 two steps.
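+
+To make the steps above concrete, here is a minimal sketch of an answer test
+(the dataset path and fields are illustrative, and helper signatures may vary
+slightly between yt versions):
+
+.. code-block:: python
+
+   from yt.utilities.answer_testing.framework import \
+       requires_ds, data_dir_load, small_patch_amr
+
+   my_ds = "MyFrontend/output_0001"  # hypothetical test dataset
+   _fields = ("density", "velocity_magnitude")
+
+   @requires_ds(my_ds, big_data=False)
+   def test_my_frontend():
+       ds = data_dir_load(my_ds)
+       # yield the standard battery of small-patch AMR answer tests
+       for test in small_patch_amr(my_ds, _fields):
+           yield test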
-
-How to Upload Answers
-^^^^^^^^^^^^^^^^^^^^^
-
-To upload answers you can execute this command:
-
-.. code-block:: bash
-
-   $ nosetests --with-answer-testing frontends/enzo/ --answer-store --answer-name=whatever
-
-The current version of the gold standard can be found in the variable
-``_latest`` inside ``yt/utilities/answer_testing/framework.py``  As of
-the time of this writing, it is ``gold007``  Note that the name of the
-suite of results is now disconnected from the dataset's name, so you
-can upload multiple outputs with the same name and not collide.
-
-To upload answers, you **must** have the package boto installed, and you
-**must** have an Amazon key provided by Matt.  Contact Matt for these keys.

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -771,7 +771,7 @@
 
 .. code-block:: python
 
-   from yt.frontends.sph.definitions import gadget_field_specs
+   from yt.frontends.gadget.definitions import gadget_field_specs
    gadget_field_specs["my_field_def"] = my_field_def
 
 Please also feel free to issue a pull request with any new field
@@ -871,7 +871,7 @@
 ----------------
 
 See :ref:`loading-numpy-array` and
-:func:`~yt.frontends.sph.data_structures.load_amr_grids` for more detail.
+:func:`~yt.frontends.stream.data_structures.load_amr_grids` for more detail.
 
 It is possible to create native yt dataset from Python's dictionary
 that describes set of rectangular patches of data of possibly varying
@@ -1257,8 +1257,8 @@
 
 .. _specifying-cosmology-tipsy:
 
-Specifying Tipsy Cosmological Parameters
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Specifying Tipsy Cosmological Parameters and Setting Default Units
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Cosmological parameters can be specified to Tipsy to enable computation of
 default units.  The parameters recognized are of this form:
@@ -1270,5 +1270,27 @@
                            'omega_matter': 0.272,
                            'hubble_constant': 0.702}
 
-These will be used set the units, if they are specified.
+If you wish to set the default units directly, you can do so by using the
+``unit_base`` keyword in the load statement.
 
+.. code-block:: python
+
+    import yt
+    ds = yt.load(filename, unit_base={'length': (1.0, 'Mpc')})
+
+
+Loading Cosmological Simulations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are not using a parameter file (i.e. you are not a Gasoline user), then
+you must use the ``cosmology_parameters`` keyword when loading your dataset to
+indicate to yt that it is a cosmological dataset. If you do not wish to set any
+non-default cosmological parameters, you may pass an empty dictionary.
+
+.. code-block:: python
+
+    import yt
+    ds = yt.load(filename, cosmology_parameters={})
+
+
+

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/faq/index.rst
--- a/doc/source/faq/index.rst
+++ b/doc/source/faq/index.rst
@@ -329,8 +329,8 @@
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Using the Ray objects 
-(:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` and 
-:class:`~yt.data_objects.selection_data_containers.YTRayBase`) with AMR data 
+(:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` and 
+:class:`~yt.data_objects.selection_data_containers.YTRay`) with AMR data 
 gives non-contiguous cell information in the Ray's data array. The 
 higher-resolution cells are appended to the end of the array.  Unfortunately, 
 due to how data is loaded by chunks for data containers, there is really no 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,6 +11,18 @@
 :ref:`sample data for each format <getting-sample-data>` with 
 :ref:`instructions on how to load and examine each data type <examining-data>`.
 
+.. raw:: html
+
+   <form action="search.html" method="get" _lpchecked="1">
+     <div class="form-group">
+       <input type="text" name="q" class="form-control" placeholder="Search" style="width: 70%">
+     </div>
+     <input type="hidden" name="check_keywords" value="yes">
+     <input type="hidden" name="area" value="default">
+   </form>
+
+
+
 Table of Contents
 -----------------
 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -39,6 +39,12 @@
  have the necessary compilers installed (e.g. the ``build-essentials``
   package on debian and ubuntu).
 
+.. note::
+  See `Parallel Computation
+  <http://yt-project.org/docs/dev/analyzing/parallel_computation.html>`_
+  for a discussion on using yt in parallel.
+
+
 .. _branches-of-yt:
 
 Branches of yt: ``yt``, ``stable``, and ``yt-2.x``
@@ -201,7 +207,8 @@
 
   bash Miniconda-3.3.0-Linux-x86_64.sh
 
-Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
+For both the Anaconda and Miniconda installations, make sure that the Anaconda
+``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
@@ -209,6 +216,34 @@
 
 which will install yt along with all of its dependencies.
 
+Obtaining Source Code
+^^^^^^^^^^^^^^^^^^^^^
+
+There are two ways to get the yt source code when using an Anaconda
+installation.
+
+Option 1:
+
+Clone the yt repository with:
+
+.. code-block:: bash
+
+  hg clone https://bitbucket.org/yt_analysis/yt
+
+Once inside the yt directory, update to the appropriate branch and
+run ``setup.py``. For example, the following commands will allow you
+to see the tip of the development branch.
+
+.. code-block:: bash
+
+  hg up yt
+  python setup.py develop
+
+This will make sure you are running a version of yt corresponding to the 
+most up-to-date source code.
+
+Option 2:
+
 Recipes to build conda packages for yt are available at
 https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
 clone the conda-recipes repository

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -87,17 +87,17 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.selection_data_containers.YTPointBase
-   ~yt.data_objects.selection_data_containers.YTOrthoRayBase
-   ~yt.data_objects.selection_data_containers.YTRayBase
-   ~yt.data_objects.selection_data_containers.YTSliceBase
-   ~yt.data_objects.selection_data_containers.YTCuttingPlaneBase
-   ~yt.data_objects.selection_data_containers.YTDiskBase
-   ~yt.data_objects.selection_data_containers.YTRegionBase
-   ~yt.data_objects.selection_data_containers.YTDataCollectionBase
-   ~yt.data_objects.selection_data_containers.YTSphereBase
-   ~yt.data_objects.selection_data_containers.YTEllipsoidBase
-   ~yt.data_objects.selection_data_containers.YTCutRegionBase
+   ~yt.data_objects.selection_data_containers.YTPoint
+   ~yt.data_objects.selection_data_containers.YTOrthoRay
+   ~yt.data_objects.selection_data_containers.YTRay
+   ~yt.data_objects.selection_data_containers.YTSlice
+   ~yt.data_objects.selection_data_containers.YTCuttingPlane
+   ~yt.data_objects.selection_data_containers.YTDisk
+   ~yt.data_objects.selection_data_containers.YTRegion
+   ~yt.data_objects.selection_data_containers.YTDataCollection
+   ~yt.data_objects.selection_data_containers.YTSphere
+   ~yt.data_objects.selection_data_containers.YTEllipsoid
+   ~yt.data_objects.selection_data_containers.YTCutRegion
    ~yt.data_objects.grid_patch.AMRGridPatch
 
 Construction Objects
@@ -110,12 +110,12 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.data_objects.construction_data_containers.YTStreamlineBase
-   ~yt.data_objects.construction_data_containers.YTQuadTreeProjBase
-   ~yt.data_objects.construction_data_containers.YTCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTArbitraryGridBase
-   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGridBase
-   ~yt.data_objects.construction_data_containers.YTSurfaceBase
+   ~yt.data_objects.construction_data_containers.YTStreamline
+   ~yt.data_objects.construction_data_containers.YTQuadTreeProj
+   ~yt.data_objects.construction_data_containers.YTCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTArbitraryGrid
+   ~yt.data_objects.construction_data_containers.YTSmoothedCoveringGrid
+   ~yt.data_objects.construction_data_containers.YTSurface
 
 Time Series Objects
 ^^^^^^^^^^^^^^^^^^^
@@ -211,8 +211,6 @@
    ~yt.frontends.boxlib.data_structures.OrionDataset
    ~yt.frontends.boxlib.fields.BoxlibFieldInfo
    ~yt.frontends.boxlib.io.IOHandlerBoxlib
-   ~yt.frontends.boxlib.io.IOHandlerCastro
-   ~yt.frontends.boxlib.io.IOHandlerNyx
    ~yt.frontends.boxlib.io.IOHandlerOrion
 
 Chombo
@@ -610,10 +608,8 @@
 .. autosummary::
    :toctree: generated/
 
-   ~yt.visualization.volume_rendering.camera.MosaicFisheyeCamera
    ~yt.visualization.volume_rendering.camera.FisheyeCamera
    ~yt.visualization.volume_rendering.camera.MosaicCamera
-   ~yt.visualization.volume_rendering.camera.plot_allsky_healpix
    ~yt.visualization.volume_rendering.camera.PerspectiveCamera
    ~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree
    ~yt.visualization.volume_rendering.camera.StereoPairCamera

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -22,7 +22,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Athena                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
+| Castro                |     Y      |     N     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
@@ -42,7 +42,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | MOAB                  |     Y      |    N/A    |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Nyx                   |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
+| Nyx                   |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Orion                 |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -42,9 +42,9 @@
 ~~~~~~~~~~~~~~~~~~~~~~
 
 The :code:`yt` command-line tool allows you to access some of yt's basic
-funcionality without opening a python interpreter.  The tools is a collection of
+functionality without opening a python interpreter.  The tool is a collection of
 subcommands.  These can quickly make plots of slices and projections through a
-dataset, updating yt's codebase, print basic statistics about a dataset, laucnh
+dataset, update yt's codebase, print basic statistics about a dataset, launch
 an IPython notebook session, and more.  To get a quick list of what is
 available, just type:
 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -34,7 +34,7 @@
 called on it.  ``dir()`` will return the available commands and objects that
 can be directly called, and ``dir(something)`` will return information about
 all the commands that ``something`` provides.  This probably sounds a bit
-opaque, but it will become clearer with time -- it's also probably heldsul to
+opaque, but it will become clearer with time -- it's also probably helpful to
 call ``help`` on any or all of the objects we create during this orientation.
 
 To start up Python, at your prompt simply type:

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/visualizing/callbacks.rst
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -463,6 +463,35 @@
    s.annotate_streamlines('velocity_x', 'velocity_y')
    s.save()
 
+.. _annotate-line-integral-convolution:
+
+Overplot Line Integral Convolution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: annotate_line_integral_convolution(self, field_x, field_y, \
+                                                 texture=None, kernellen=50., \
+                                                 lim=(0.5,0.6), cmap='binary', \
+                                                 alpha=0.8, const_alpha=False)
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.LineIntegralConvolutionCallback`.)
+
+   Add a line integral convolution to any plot, using the ``field_x`` and
+   ``field_y`` from the associated data. A white noise background is used for
+   ``texture`` by default. The bounds given by ``lim``, in the range ``[0, 1]``,
+   apply upper and lower limits to the values of the line integral convolution
+   to enhance the visibility of the plot. When ``const_alpha=False``, alpha is
+   weighted spatially by the values of the line integral convolution; otherwise
+   a constant value of the given alpha is used.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
+   s.annotate_line_integral_convolution('velocity_x', 'velocity_y', lim=(0.5,0.65))
+   s.save()
+
 .. _annotate-text:
 
 Overplot Text
@@ -568,23 +597,27 @@
 
 Add a Physical Scale Bar
 ~~~~~~~~~~~~~~~~~~~~~~~~
-
 .. function:: annotate_scale(corner='lower_right', coeff=None, \
-                             unit=None, pos=None, max_frac=0.2, \
-                             min_frac=0.018, text_args=None, \
-                             inset_box_args=None)
+                             unit=None, pos=None, max_frac=0.16, \
+                             min_frac=0.015, coord_system='axis', \
+                             text_args=None, size_bar_args=None, \
+                             draw_inset_box=False, inset_box_args=None)
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.ScaleCallback`.)
 
     Annotates the scale of the plot at a specified location in the image
     (either in a preset corner, or by specifying (x,y) image coordinates with
-    the pos argument.  Coeff and units (e.g. 1 Mpc) refer to the distance scale
-    you desire to show on the plot.  If no coeff and units are specified,
-    an appropriate pair will be determined such that your scale bar is never
-    smaller than min_frac or greater than max_frac of your plottable axis
-    length.  For additional text and plot arguments for the text and line,
-    include them as dictionaries to pass to text_args and plot_args.
+    the pos argument.  Coeff and units (e.g. 1 Mpc or 100 kpc) refer to the
+    distance scale you desire to show on the plot.  If no coeff and units are
+    specified, an appropriate pair will be determined such that your scale bar
+    is never smaller than min_frac or greater than max_frac of your plottable
+    axis length.  Additional customization of the scale bar is possible by
+    adjusting the text_args and size_bar_args dictionaries.  The text_args
+    dictionary accepts matplotlib's font_properties arguments to override
+    the default font_properties for the current plot.  The size_bar_args
+    dictionary accepts keyword arguments for the AnchoredSizeBar class in
+    matplotlib's axes_grid toolkit.
 
 .. python-script::
 
@@ -644,9 +677,13 @@
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.RayCallback`.)
 
-    Adds a line representing the projected path of a ray across the plot.
-    The ray can be either a YTOrthoRayBase, YTRayBase, or a LightRay object.
-    annotate_ray() will properly account for periodic rays across the volume.
+    Adds a line representing the projected path of a ray across the plot.  The
+    ray can be either a
+    :class:`~yt.data_objects.selection_data_containers.YTOrthoRay`,
+    :class:`~yt.data_objects.selection_data_containers.YTRay`, or a
+    :class:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay`
+    object.  annotate_ray() will properly account for periodic rays across the
+    volume.
 
 .. python-script::
 

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/visualizing/ffmpeg_volume_rendering.py
--- a/doc/source/visualizing/ffmpeg_volume_rendering.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#This is an example of how to make videos of 
-#uniform grid data using Theia and ffmpeg
-
-#The Scene object to hold the ray caster and view camera
-from yt.visualization.volume_rendering.theia.scene import TheiaScene
-
-#GPU based raycasting algorithm to use 
-from yt.visualization.volume_rendering.theia.algorithms.front_to_back import FrontToBackRaycaster
-
-#These will be used to define how to color the data
-from yt.visualization.volume_rendering.transfer_functions import ColorTransferFunction
-from yt.visualization.color_maps import *
-
-#This will be used to launch ffmpeg
-import subprocess as sp
-
-#Of course we need numpy for math magic
-import numpy as np
-
-#Opacity scaling function
-def scale_func(v, mi, ma):
-      return  np.minimum(1.0, (v-mi)/(ma-mi) + 0.0)
-
-#load the uniform grid from a numpy array file
-bolshoi = "/home/bogert/log_densities_1024.npy"
-density_grid = np.load(bolshoi)
-
-#Set the TheiaScene to use the density_grid and 
-#setup the raycaster for a resulting 1080p image
-ts = TheiaScene(volume = density_grid, raycaster = FrontToBackRaycaster(size = (1920,1080) ))
-
-#the min and max values in the data to color
-mi, ma = 0.0, 3.6
-
-#setup colortransferfunction
-bins = 5000
-tf = ColorTransferFunction( (mi, ma), bins)
-tf.map_to_colormap(0.5, ma, colormap="spring", scale_func = scale_func)
-
-#pass the transfer function to the ray caster
-ts.source.raycaster.set_transfer(tf)
-
-#Initial configuration for start of video
-#set initial opacity and brightness values
-#then zoom into the center of the data 30%
-ts.source.raycaster.set_opacity(0.03)
-ts.source.raycaster.set_brightness(2.3)
-ts.camera.zoom(30.0)
-
-#path to ffmpeg executable
-FFMPEG_BIN = "/usr/local/bin/ffmpeg"
-
-pipe = sp.Popen([ FFMPEG_BIN,
-        '-y', # (optional) overwrite the output file if it already exists
-	#This must be set to rawvideo because the image is an array
-        '-f', 'rawvideo', 
-	#This must be set to rawvideo because the image is an array
-        '-vcodec','rawvideo',
-	#The size of the image array and resulting video
-        '-s', '1920x1080', 
-	#This must be rgba to match array format (uint32)
-        '-pix_fmt', 'rgba',
-	#frame rate of video
-        '-r', '29.97', 
-        #Indicate that the input to ffmpeg comes from a pipe
-        '-i', '-', 
-        # Tells FFMPEG not to expect any audio
-        '-an', 
-        #Setup video encoder
-	#Use any encoder you life available from ffmpeg
-        '-vcodec', 'libx264', '-preset', 'ultrafast', '-qp', '0',
-        '-pix_fmt', 'yuv420p',
-        #Name of the output
-        'bolshoiplanck2.mkv' ],
-        stdin=sp.PIPE,stdout=sp.PIPE)
-		
-		
-#Now we loop and produce 500 frames
-for k in range (0,500) :
-    #update the scene resulting in a new image
-    ts.update()
-
-    #get the image array from the ray caster
-    array = ts.source.get_results()
-
-    #send the image array to ffmpeg
-    array.tofile(pipe.stdin)
-
-    #rotate the scene by 0.01 rads in x,y & z
-    ts.camera.rotateX(0.01)
-    ts.camera.rotateZ(0.01)
-    ts.camera.rotateY(0.01)
-
-    #zoom in 0.01% for a total of a 5% zoom
-    ts.camera.zoom(0.01)
-
-
-#Close the pipe to ffmpeg
-pipe.terminate()

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/visualizing/hardware_volume_rendering.rst
--- a/doc/source/visualizing/hardware_volume_rendering.rst
+++ /dev/null
@@ -1,89 +0,0 @@
-.. _hardware_volume_rendering:
-
-Hardware Volume Rendering on NVidia Graphics cards
---------------------------------------------------
-
-Theia is a hardware volume renderer that takes advantage of NVidias CUDA language
-to peform ray casting with GPUs instead of the CPU. 
-
-Only unigrid rendering is supported, but yt provides a grid mapping function
-to get unigrid data from amr or sph formats, see :ref:`extract_frb`.
-
-System Requirements
-+++++++++++++++++++
-
-Nvidia graphics card - The memory limit of the graphics card sets the limit
-                       on the size of the data source.
-
-CUDA 5 or later and
-
-The environment variable CUDA_SAMPLES must be set pointing to
-the common/inc samples shipped with CUDA. The following shows an example
-in bash with CUDA 5.5 installed in /usr/local :
-
-    export CUDA_SAMPLES=/usr/local/cuda-5.5/samples/common/inc
-
-PyCUDA must also be installed to use Theia. 
-
-PyCUDA can be installed following these instructions :
-
-    git clone --recursive http://git.tiker.net/trees/pycuda.git
-
-    python configure.py
-    python setup.py install
-
-
-Tutorial
-++++++++
-
-Currently rendering only works on uniform grids. Here is an example
-on a 1024 cube of float32 scalars.
-
-.. code-block:: python
-
-   from yt.visualization.volume_rendering.theia.scene import TheiaScene
-   from yt.visualization.volume_rendering.algorithms.front_to_back import FrontToBackRaycaster
-   import numpy as np
-
-   #load 3D numpy array of float32
-   volume = np.load("/home/bogert/log_densities_1024.npy")
-
-   scene = TheiaScene( volume = volume, raycaster = FrontToBackRaycaster() )
-
-   scene.camera.rotateX(1.0)
-   scene.update()
-
-   surface = scene.get_results()
-   #surface now contains an image array 2x2 int32 rbga values
-
-.. _the-theiascene-interface:
-
-The TheiaScene Interface
-++++++++++++++++++++++++
-
-A TheiaScene object has been created to provide a high level entry point for
-controlling the raycaster's view onto the data. The class
-:class:`~yt.visualization.volume_rendering.theia.TheiaScene` encapsulates a
-Camera object and a TheiaSource that intern encapsulates a volume. The
-:class:`~yt.visualization.volume_rendering.theia.Camera` provides controls for
-rotating, translating, and zooming into the volume.  Using the
-:class:`~yt.visualization.volume_rendering.theia.TheiaSource` automatically
-transfers the volume to the graphic's card texture memory.
-
-Example Cookbooks
-+++++++++++++++++
-
-OpenGL Example for interactive volume rendering:
-
-.. literalinclude:: opengl_volume_rendering.py
-
-.. warning::  Frame rate will suffer significantly from stereoscopic rendering.
-              ~2x slower since the volume must be rendered twice.
-
-OpenGL Stereoscopic Example: 
-
-.. literalinclude:: opengl_stereo_volume_rendering.py
-
-Pseudo-Realtime video rendering with ffmpeg:
-
-.. literalinclude:: ffmpeg_volume_rendering.py

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -15,7 +15,6 @@
    callbacks
    manual_plotting
    volume_rendering
-   hardware_volume_rendering
    sketchfab
    mapserver
    streamlines

diff -r be9fb8fc3cb54cefea1d1f9776c5ee22ea342185 -r b80648a5b8e7f41a029528512e33e72e246a09a8 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -125,7 +125,7 @@
 This is perhaps the simplest thing to do. yt provides a number of one
 dimensional objects, and these return a 1-D numpy array of their contents with
 direct dictionary access. As a simple example, take a
-:class:`~yt.data_objects.selection_data_containers.YTOrthoRayBase` object, which can be
+:class:`~yt.data_objects.selection_data_containers.YTOrthoRay` object, which can be
 created from a index by calling ``pf.ortho_ray(axis, center)``.
 
 .. python-script::

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/41abf53c12ec/
Changeset:   41abf53c12ec
Branch:      yt
User:        qobilidop
Date:        2015-10-23 03:23:50+00:00
Summary:     failed to delete the merge confliction info in the last commit, fix it here
Affected #:  1 file

diff -r b80648a5b8e7f41a029528512e33e72e246a09a8 -r 41abf53c12ec651eef15192dca4fe5312bb3a92b yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -826,8 +826,6 @@
                        units = "code_length")
     return [field_name]
 
-<<<<<<< local: be9fb8fc3cb5  bd_kernel_function yt - qobilidop: remove add_de...
-||||||| base
 def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
                        kernel_name = 'cubic'):
     if kernel_name == 'cubic':
@@ -856,7 +854,6 @@
                        units = "g/cm**3")
     return [field_name]
 
-=======
 def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
                        kernel_name = 'cubic'):
     if kernel_name == 'cubic':
@@ -885,7 +882,6 @@
                        units = "g/cm**3")
     return [field_name]
 
->>>>>>> other: a5d76f647fbb development yt - brittonsmith: Merged in ngoldbau...
 def add_union_field(registry, ptype, field_name, units):
     """
     Create a field that is the concatenation of multiple particle types.


https://bitbucket.org/yt_analysis/yt/commits/36f6d19476a9/
Changeset:   36f6d19476a9
Branch:      yt
User:        qobilidop
Date:        2015-10-23 03:52:52+00:00
Summary:     delete duplication
Affected #:  1 file

diff -r 41abf53c12ec651eef15192dca4fe5312bb3a92b -r 36f6d19476a908c9e779cc02800f744aa2d7a0b5 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -832,34 +832,6 @@
         field_name = (ptype, "smoothed_density")
     else:
         field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-    field_units = registry[ptype, mass_name].units
-    def _nth_neighbor(field, data):
-        pos = data[ptype, coord_name]
-        pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name]
-        mass.convert_to_units("g")
-        densities = mass * 0.0
-        data.particle_operation(pos, [mass, densities],
-                         method="density",
-                         nneighbors = nneighbors,
-                         kernel_name = kernel_name)
-        ones = pos.prod(axis=1) # Get us in code_length**3
-        ones[:] = 1.0
-        densities /= ones
-        # Now some quick unit conversions.
-        return densities
-    registry.add_field(field_name, function = _nth_neighbor,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = True,
-                       units = "g/cm**3")
-    return [field_name]
-
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
-                       kernel_name = 'cubic'):
-    if kernel_name == 'cubic':
-        field_name = (ptype, "smoothed_density")
-    else:
-        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
 
     def _nth_neighbor(field, data):
         pos = data[ptype, coord_name]


https://bitbucket.org/yt_analysis/yt/commits/4357a16fb2cd/
Changeset:   4357a16fb2cd
Branch:      yt
User:        qobilidop
Date:        2015-10-30 06:39:10+00:00
Summary:     add doc
Affected #:  1 file

diff -r 36f6d19476a908c9e779cc02800f744aa2d7a0b5 -r 4357a16fb2cd818f6425a4ae3e0ebfe91682097d doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -374,6 +374,17 @@
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
 
+Other smoothing kernels besides the cubic spline one are available through a
+keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
+Current available ``kernel_name``s include:
+
+* ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
+* ``wendland2``, ``wendland4`` and ``wendland6`` - Wendland kernels.
+
+The added smoothed particle field can be accessed by
+``("deposit", "particletype_kernelname_smoothed_fieldname")`` (except for the
+cubic spline kernel, which obeys the naming scheme given above).
+
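+For example (the dataset and particle/field names below are illustrative):
+
+.. code-block:: python
+
+   import yt
+
+   ds = yt.load("GadgetDiskGalaxy/snapshot_200.hdf5")
+   # smooth the gas temperature with the Wendland C4 kernel
+   ds.add_smoothed_particle_field(("PartType0", "Temperature"),
+                                  kernel_name="wendland4")
+   ad = ds.all_data()
+   print(ad["deposit", "PartType0_wendland4_smoothed_Temperature"])
+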
 Computing the Nth Nearest Neighbor
 ----------------------------------
 


https://bitbucket.org/yt_analysis/yt/commits/e4fd38cbb2aa/
Changeset:   e4fd38cbb2aa
Branch:      yt
User:        qobilidop
Date:        2015-10-30 06:45:02+00:00
Summary:     fix a format problem
Affected #:  1 file

diff -r 4357a16fb2cd818f6425a4ae3e0ebfe91682097d -r e4fd38cbb2aa2c63dce7dfed8778b0fc339a5c32 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -376,7 +376,7 @@
 
 Other smoothing kernels besides the cubic spline one are available through a
 keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
-Current available ``kernel_name``s include:
+Current available kernel names include:
 
 * ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
 * ``wendland2``, ``wendland4`` and ``wendland6`` - Wendland kernels.


https://bitbucket.org/yt_analysis/yt/commits/2d7d171519c7/
Changeset:   2d7d171519c7
Branch:      yt
User:        qobilidop
Date:        2015-10-30 17:56:41+00:00
Summary:     remove add_density_kernel (to be consistent with the YTEP discussion)
Affected #:  1 file

diff -r e4fd38cbb2aa2c63dce7dfed8778b0fc339a5c32 -r 2d7d171519c7c30762c4cfbf73bd79aebf98e0e7 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -826,34 +826,6 @@
                        units = "code_length")
     return [field_name]
 
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
-                       kernel_name = 'cubic'):
-    if kernel_name == 'cubic':
-        field_name = (ptype, "smoothed_density")
-    else:
-        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-
-    def _nth_neighbor(field, data):
-        pos = data[ptype, coord_name]
-        pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name]
-        mass.convert_to_units("g")
-        densities = mass * 0.0
-        data.particle_operation(pos, [mass, densities],
-                         method="density",
-                         nneighbors = nneighbors,
-                         kernel_name = kernel_name)
-        ones = pos.prod(axis=1) # Get us in code_length**3
-        ones[:] = 1.0
-        densities /= ones
-        # Now some quick unit conversions.
-        return densities
-    registry.add_field(field_name, function = _nth_neighbor,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = True,
-                       units = "g/cm**3")
-    return [field_name]
-
 def add_union_field(registry, ptype, field_name, units):
     """
     Create a field that is the concatenation of multiple particle types.


https://bitbucket.org/yt_analysis/yt/commits/664170fe4c37/
Changeset:   664170fe4c37
Branch:      yt
User:        qobilidop
Date:        2015-11-02 17:38:04+00:00
Summary:     update docstring
Affected #:  1 file

diff -r 2d7d171519c7c30762c4cfbf73bd79aebf98e0e7 -r 664170fe4c37b13fa007d0bbc723a603d650b34c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -928,9 +928,10 @@
            methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
            `weighted_mean`, `mesh_id`, and `nearest`.
         kernel_name : string, default 'cubic'
-           This is the name of the smoothing kernel to use. Current supported
-           kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
-           `wendland4`, and `wendland6`.
+           This is the name of the smoothing kernel to use. It is only used for
+           the `simple_smooth` method and is otherwise ignored. Current
+           supported kernel names include `cubic`, `quartic`, `quintic`,
+           `wendland2`, `wendland4`, and `wendland6`.
 
         Returns
         -------


https://bitbucket.org/yt_analysis/yt/commits/d5b7f5d362c5/
Changeset:   d5b7f5d362c5
Branch:      yt
User:        bwkeller
Date:        2015-11-02 19:42:23+00:00
Summary:     Merged in qobilidop/yt (pull request #1830)

Alternative Smoothing Kernels (Reissued)
Affected #:  5 files

diff -r 697ca7baf306df33d900376704a185a48ff08723 -r d5b7f5d362c59fd408dbd5fd31a5cfa55c933e01 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -374,6 +374,17 @@
 "Gas_smoothed_Temperature")``, which in most cases would be aliased to the
 field ``("gas", "temperature")`` for convenience.
 
+Other smoothing kernels besides the cubic spline one are available through a
+keyword argument ``kernel_name`` of the method ``add_smoothed_particle_field``.
+Current available kernel names include:
+
+* ``cubic``, ``quartic``, and ``quintic`` - spline kernels.
+* ``wendland2``, ``wendland4`` and ``wendland6`` - Wendland kernels.
+
+The added smoothed particle field can be accessed by
+``("deposit", "particletype_kernelname_smoothed_fieldname")`` (except for the
+cubic spline kernel, which obeys the naming scheme given above).
+
 Computing the Nth Nearest Neighbor
 ----------------------------------
 

diff -r 697ca7baf306df33d900376704a185a48ff08723 -r d5b7f5d362c59fd408dbd5fd31a5cfa55c933e01 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -165,6 +165,10 @@
             `particle_deposit` namespace as `methodname_deposit`.  Current
             methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
             `weighted_mean`, `mesh_id`, and `nearest`.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -228,6 +232,10 @@
             we are able to find and identify all relevant particles.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -313,6 +321,10 @@
             `particle_smooth` namespace as `methodname_smooth`.
         nneighbors : int, default 64
             The number of neighbors to examine during the process.
+        kernel_name : string, default 'cubic'
+            This is the name of the smoothing kernel to use. Current supported
+            kernel names include `cubic`, `quartic`, `quintic`, `wendland2`,
+            `wendland4`, and `wendland6`.
 
         Returns
         -------

diff -r 697ca7baf306df33d900376704a185a48ff08723 -r d5b7f5d362c59fd408dbd5fd31a5cfa55c933e01 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -913,7 +913,7 @@
         deps, _ = self.field_info.check_derived_fields([name])
         self.field_dependencies.update(deps)
 
-    def add_deposited_particle_field(self, deposit_field, method):
+    def add_deposited_particle_field(self, deposit_field, method, kernel_name='cubic'):
         """Add a new deposited particle field
 
         Creates a new deposited field based on the particle *deposit_field*.
@@ -925,8 +925,16 @@
            The field name tuple of the particle field the deposited field will
            be created from.  This must be a field name tuple so yt can
            appropriately infer the correct particle type.
-        method : one of 'count', 'sum', or 'cic'
-           The particle deposition method to use.
+        method : string
+           This is the "method name" which will be looked up in the
+           `particle_deposit` namespace as `methodname_deposit`.  Current
+           methods include `count`, `simple_smooth`, `sum`, `std`, `cic`,
+           `weighted_mean`, `mesh_id`, and `nearest`.
+        kernel_name : string, default 'cubic'
+           This is the name of the smoothing kernel to use. It is only used for
+           the `simple_smooth` method and is otherwise ignored. Current
+           supported kernel names include `cubic`, `quartic`, `quintic`,
+           `wendland2`, `wendland4`, and `wendland6`.
 
         Returns
         -------
@@ -950,15 +958,17 @@
             if method != 'count':
                 pden = data[ptype, "particle_mass"]
                 top = data.deposit(pos, [data[(ptype, deposit_field)]*pden],
-                                   method=method)
-                bottom = data.deposit(pos, [pden], method=method)
+                                   method=method, kernel_name=kernel_name)
+                bottom = data.deposit(pos, [pden], method=method,
+                                      kernel_name=kernel_name)
                 top[bottom == 0] = 0.0
                 bnz = bottom.nonzero()
                 top[bnz] /= bottom[bnz]
                 d = data.ds.arr(top, input_units=units)
             else:
                 d = data.ds.arr(data.deposit(pos, [data[ptype, deposit_field]],
-                                             method=method))
+                                             method=method,
+                                             kernel_name=kernel_name))
             return d
         name_map = {"cic": "cic", "sum": "nn", "count": "count"}
         field_name = "%s_" + name_map[method] + "_%s"

diff -r 697ca7baf306df33d900376704a185a48ff08723 -r d5b7f5d362c59fd408dbd5fd31a5cfa55c933e01 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -826,34 +826,6 @@
                        units = "code_length")
     return [field_name]
 
-def add_density_kernel(ptype, coord_name, mass_name, registry, nneighbors = 64,
-                       kernel_name = 'cubic'):
-    if kernel_name == 'cubic':
-        field_name = (ptype, "smoothed_density")
-    else:
-        field_name = (ptype, "%s_smoothed_density" % (kernel_name))
-
-    def _nth_neighbor(field, data):
-        pos = data[ptype, coord_name]
-        pos.convert_to_units("code_length")
-        mass = data[ptype, mass_name]
-        mass.convert_to_units("g")
-        densities = mass * 0.0
-        data.particle_operation(pos, [mass, densities],
-                         method="density",
-                         nneighbors = nneighbors,
-                         kernel_name = kernel_name)
-        ones = pos.prod(axis=1) # Get us in code_length**3
-        ones[:] = 1.0
-        densities /= ones
-        # Now some quick unit conversions.
-        return densities
-    registry.add_field(field_name, function = _nth_neighbor,
-                       validators = [ValidateSpatial(0)],
-                       particle_type = True,
-                       units = "g/cm**3")
-    return [field_name]
-
 def add_union_field(registry, ptype, field_name, units):
     """
     Create a field that is the concatenation of multiple particle types.

diff -r 697ca7baf306df33d900376704a185a48ff08723 -r d5b7f5d362c59fd408dbd5fd31a5cfa55c933e01 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -81,6 +81,36 @@
         kernel = 0.
     return kernel * C
 
+# Wendland C2
+cdef inline np.float64_t sph_kernel_wendland2(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 21./2/np.pi
+    if x < 1:
+        kernel = (1.-x)**4 * (1+4*x)
+    else:
+        kernel = 0.
+    return kernel * C
+
+# Wendland C4
+cdef inline np.float64_t sph_kernel_wendland4(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 495./32/np.pi
+    if x < 1:
+        kernel = (1.-x)**6 * (1+6*x+35./3*x**2)
+    else:
+        kernel = 0.
+    return kernel * C
+
+# Wendland C6
+cdef inline np.float64_t sph_kernel_wendland6(np.float64_t x):
+    cdef np.float64_t kernel
+    cdef np.float64_t C = 1365./64/np.pi
+    if x < 1:
+        kernel = (1.-x)**8 * (1+8*x+25*x**2+32*x**3)
+    else:
+        kernel = 0.
+    return kernel * C
+
 # I don't know the way to use a dict in a cdef class.
 # So in order to mimic a registry functionality,
 # I manually created a function to lookup the kernel functions.
@@ -92,6 +122,12 @@
         return sph_kernel_quartic
     elif kernel_name == 'quintic':
         return sph_kernel_quintic
+    elif kernel_name == 'wendland2':
+        return sph_kernel_wendland2
+    elif kernel_name == 'wendland4':
+        return sph_kernel_wendland4
+    elif kernel_name == 'wendland6':
+        return sph_kernel_wendland6
     else:
         raise NotImplementedError
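
As a quick sanity check on the Wendland normalization constants added above,
each kernel should integrate to one over the unit ball. A minimal NumPy/SciPy
sketch, shown here for the C2 kernel only:

.. code-block:: python

    import numpy as np
    from scipy.integrate import quad

    def wendland2(x):
        # same form as sph_kernel_wendland2 above, with support radius 1
        C = 21. / 2 / np.pi
        return C * (1. - x)**4 * (1 + 4 * x) if x < 1 else 0.

    # integrate W(r) over the unit ball in 3D; the result should be ~1.0
    norm, _ = quad(lambda r: 4 * np.pi * r**2 * wendland2(r), 0, 1)
    print(norm)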

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list