[yt-svn] commit/yt: 6 new changesets

commits-noreply at bitbucket.org
Sat Jul 19 16:03:50 PDT 2014


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/bcd13a74d685/
Changeset:   bcd13a74d685
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-21 15:14:10
Summary:     Enable vector fields and add some SPH smoothing helpers.
Affected #:  3 files

diff -r c7a7d59373100ec97b0bb8e263f1c24c0ca2c8ff -r bcd13a74d685b40bcd4a963f0786869761ebd272 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -117,6 +117,16 @@
         for _, alias_name in self.field_aliases:
             if alias_name in ("particle_position", "particle_velocity"):
                 continue
+            if (ptype, alias_name) not in self: continue
+            fn = add_volume_weighted_smoothed_field(ptype,
+                "particle_position", "particle_mass",
+                sml_name, "density", alias_name, self,
+                num_neighbors)
+            new_aliases.append(((ftype, alias_name), fn[0]))
+        for ptype2, alias_name in self.keys():
+            if ptype2 != ptype: continue
+            if alias_name in ("particle_position", "particle_velocity"):
+                continue
             fn = add_volume_weighted_smoothed_field(ptype,
                 "particle_position", "particle_mass",
                 sml_name, "density", alias_name, self,

diff -r c7a7d59373100ec97b0bb8e263f1c24c0ca2c8ff -r bcd13a74d685b40bcd4a963f0786869761ebd272 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -54,9 +54,11 @@
     )
 
     known_particle_fields = (
+        ("particle_position", ("code_length", [], None)),
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity", ("code_length/code_time", [], None)),
         ("particle_velocity_x", ("code_length/code_time", [], None)),
         ("particle_velocity_y", ("code_length/code_time", [], None)),
         ("particle_velocity_z", ("code_length/code_time", [], None)),
@@ -64,6 +66,8 @@
         ("particle_gas_density", ("code_mass/code_length**3", [], None)),
         ("particle_gas_temperature", ("K", [], None)),
         ("particle_mass", ("code_mass", [], None)),
+        ("smoothing_length", ("code_length", [], None)),
+        ("density", ("code_mass/code_length**3", [], None)),
     )
         
     def setup_fluid_fields(self):

diff -r c7a7d59373100ec97b0bb8e263f1c24c0ca2c8ff -r bcd13a74d685b40bcd4a963f0786869761ebd272 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -28,6 +28,7 @@
 class IOHandlerStream(BaseIOHandler):
 
     _dataset_type = "stream"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, pf):
         self.fields = pf.stream_handler.fields
@@ -97,6 +98,7 @@
 class StreamParticleIOHandler(BaseIOHandler):
 
     _dataset_type = "stream_particles"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, pf):
         self.fields = pf.stream_handler.fields
@@ -163,6 +165,7 @@
 
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _dataset_type = "stream_hexahedral"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, pf):
         self.fields = pf.stream_handler.fields
@@ -192,6 +195,7 @@
 
 class IOHandlerStreamOctree(BaseIOHandler):
     _dataset_type = "stream_octree"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, pf):
         self.fields = pf.stream_handler.fields

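As a rough illustration of what the new `_vector_fields` support enables (a sketch only, assuming the stream frontend now accepts (N, 3) arrays for the vector fields registered in `known_particle_fields` above):

    import numpy as np
    import yt

    n = 10000
    data = {
        "particle_position": np.random.random((n, 3)),  # one (N, 3) vector field
        "particle_velocity": np.zeros((n, 3)),          # instead of _x/_y/_z scalars
        "particle_mass": np.ones(n),
        "smoothing_length": 0.05*np.ones(n),
        "density": np.ones(n),
    }
    ds = yt.load_particles(data, length_unit=3.086e24, mass_unit=1.989e33)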

https://bitbucket.org/yt_analysis/yt/commits/71e6f828c652/
Changeset:   71e6f828c652
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-03 20:18:41
Summary:     Enable YTArrays to be passed in to stream frontends.
Affected #:  1 file

diff -r bcd13a74d685b40bcd4a963f0786869761ebd272 -r 71e6f828c6526d0409da920598da4a34dd036999 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -465,7 +465,13 @@
         pf.stream_handler.particle_count[gi] = npart
                                         
 def unitify_data(data):
-    if all([isinstance(val, np.ndarray) for val in data.values()]):
+    if all([hasattr(val, 'units') for val in data.values()]):
+        new_data, field_units = {}, {}
+        for k, v in data.items():
+            field_units[k] = v.units
+            new_data[k] = v.copy().d
+        data = new_data
+    elif all([isinstance(val, np.ndarray) for val in data.values()]):
         field_units = {field:'' for field in data.keys()}
     elif all([(len(val) == 2) for val in data.values()]):
         new_data, field_units = {}, {}

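A minimal sketch of the new branch in `unitify_data`: any dict whose values all carry a `units` attribute (i.e. YTArrays) is now accepted directly, with the units read off the arrays themselves:

    import numpy as np
    from yt.mods import *
    import yt.units as u

    dens = np.random.random((16, 16, 16)) * u.g / u.cm**3  # a YTArray; units travel with it
    data = {"density": dens}                               # no (array, "unit string") tuple needed
    ds = load_uniform_grid(data, dens.shape, length_unit=(1.0, "Mpc"))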

https://bitbucket.org/yt_analysis/yt/commits/a60d6c6efe78/
Changeset:   a60d6c6efe78
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-06-11 23:51:27
Summary:     Merging from tip
Affected #:  42 files

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -41,6 +41,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,18 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +56,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,11 +97,31 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
       "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
@@ -146,7 +188,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +222,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "pf = load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +244,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = pf.domain_center\n",
+      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +270,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -93,10 +93,10 @@
       "from yt.mods import *\n",
       "from yt.analysis_modules.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +108,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +127,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +144,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,8 +216,16 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -226,4 +226,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -23,7 +23,8 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from matplotlib import pylab"
+      "from matplotlib import pylab\n",
+      "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
      "language": "python",
      "metadata": {},

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -0,0 +1,156 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:6da8ec00f414307f27544fbdbc6b4fa476e5e96809003426279b2a1c898b4546"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example creates a fake in-memory particle dataset and then loads it as a yt dataset using the `load_particles` function.\n",
+      "\n",
+      "Our \"fake\" dataset will be numpy arrays filled with normally distributed random particle positions and uniform particle masses.  Since real data is often scaled, I arbitrarily multiply by 1e6 to show how to deal with scaled data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "\n",
+      "n_particles = 5e6\n",
+      "\n",
+      "ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])\n",
+      "\n",
+      "ppm = np.ones(n_particles)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function accepts a dictionary populated with particle data fields loaded in memory as numpy arrays or python lists:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {'particle_position_x': ppx,\n",
+      "        'particle_position_y': ppy,\n",
+      "        'particle_position_z': ppz,\n",
+      "        'particle_mass': ppm}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To hook up with yt's internal field system, the dictionary keys must be 'particle_position_x', 'particle_position_y', 'particle_position_z', and 'particle_mass', as well as any other particle field provided by one of the particle frontends."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function transforms the `data` dictionary into an in-memory yt `Dataset` object, providing an interface for further analysis with `yt`. The example below illustrates how to load the data dictionary we created above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.units import parsec, Msun\n",
+      "\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])\n",
+      "\n",
+      "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `length_unit` and `mass_unit` are the conversion factors from the units used in the `data` dictionary to CGS.  I've arbitrarily chosen one parsec and 10^8 Msun for this example. \n",
+      "\n",
+      "The `n_ref` parameter controls how many particles it takes to accumulate in an oct-tree cell to trigger refinement.  Larger `n_ref` will decrease Poisson noise at the cost of resolution in the octree.  \n",
+      "\n",
+      "Finally, the `bbox` parameter is a bounding box in the units of the dataset that contains all of the particles.  This is used to set the size of the base octree block."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This new dataset acts like any other `yt` `Dataset` object, and can be used to create data objects and query for yt fields.  This example shows how to access \"deposit\" fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# This is generated with \"cloud-in-cell\" interpolation.\n",
+      "cic_density = ad[\"deposit\", \"all_cic\"]\n",
+      "\n",
+      "# These three are based on nearest-neighbor cell deposition\n",
+      "nn_density = ad[\"deposit\", \"all_density\"]\n",
+      "nn_deposited_mass = ad[\"deposit\", \"all_mass\"]\n",
+      "particle_count_per_cell = ad[\"deposit\", \"all_count\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+      "slc.set_width((8, 'Mpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -898,3 +898,4 @@
 Generic Particle Data
 ---------------------
 
+.. notebook:: Loading_Generic_Particle_Data.ipynb

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -348,7 +348,7 @@
    there
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
- * Adaptive HEALPix support (see :ref:`adaptive_healpix_volume_rendering`)
+ * Adaptive HEALPix support
  * Column density calculations (see :ref:`radial-column-density`)
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -93,5 +93,5 @@
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.
-* ``stdoutStreamLogging`` (default: ``'False'``): If three, logging is directed
+* ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,7 +120,7 @@
 .. python-script::
    
    from yt.mods import *
-   data_pf = load('Enzo_64/RD0006/RD0006')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 
    hc = HaloCatalog(halos_pf=halos_pf)

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -81,13 +81,13 @@
     dpf = halo.halo_catalog.data_pf
     hpf = halo.halo_catalog.halos_pf
     center = dpf.arr([halo.quantities["particle_position_%s" % axis] \
-                      for axis in "xyz"]) / dpf.length_unit
-    radius = factor * halo.quantities[radius_field] / dpf.length_unit
+                      for axis in "xyz"])
+    radius = factor * halo.quantities[radius_field]
     if radius <= 0.0:
         halo.data_object = None
         return
     try:
-        sphere = dpf.sphere(center, (radius, "code_length"))
+        sphere = dpf.sphere(center, radius)
     except YTSphereTooSmall:
         halo.data_object = None
         return

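A hedged sketch of the calling pattern this hunk moves to, assuming `halo.quantities` values now come back unit-aware (the field name below is illustrative only):

    radius = factor * halo.quantities["virial_radius"]  # already carries units
    sphere = dpf.sphere(center, radius)                 # no (value, "code_length") tuple
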
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -43,6 +43,10 @@
         collection is instantiated.
         Default : None (will default to the fields 'particle_position_x',
         'particle_position_y', 'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series.
+        Default : False
 
     Examples
     ________
@@ -59,7 +63,7 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, outputs, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
@@ -74,6 +78,7 @@
         self.num_indices = len(indices)
         self.num_steps = len(outputs)
         self.times = []
+        self.suppress_logging = suppress_logging
 
         # Default fields 
         
@@ -83,8 +88,9 @@
         fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
-        old_level = int(ytcfg.get("yt","loglevel"))
-        mylog.setLevel(40)
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
@@ -101,7 +107,8 @@
             pbar.update(i)
         pbar.finish()
 
-        mylog.setLevel(old_level)
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
         times = []
         for fn, time in sorted(my_storage.items()):
@@ -191,14 +198,16 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
+            if self.suppress_logging:
+                old_level = int(ytcfg.get("yt","loglevel"))
+                mylog.setLevel(40)
             dd_first = self.data_series[0].all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
+            particles = np.empty((self.num_indices,self.num_steps))
+            particles[:] = np.nan
             step = int(0)
             pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
             my_storage={}
@@ -232,7 +241,8 @@
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
                 particles[indices,i] = pfield
             self.field_data[field] = array_like_field(dd_first, particles, fd)
-            mylog.setLevel(old_level)
+            if self.suppress_logging:
+                mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):

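A short usage sketch for the new keyword, following the docstring above (`my_fns` and `indices` are placeholders):

    >>> trajs = ParticleTrajectories(my_fns, indices,
    ...                              fields=["particle_velocity_x"],
    ...                              suppress_logging=True)
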
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -17,14 +17,6 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.funcs import get_pbar
 
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = (data["v_los"] >= vmin) & (data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
 def create_vlos(z_hat):
     def _v_los(field, data):
         vz = data["velocity_x"]*z_hat[0] + \
@@ -90,9 +82,11 @@
             self.v_bnd = -vmax, vmax
         else:
             self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+                          ds.quan(velocity_bounds[1], velocity_bounds[2]))
 
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
+        self.dv = (self.v_bnd[1]-self.v_bnd[0])/self.nv
 
         _vlos = create_vlos(orient.unit_vectors[2])
         ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
@@ -100,11 +94,8 @@
         self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
+            _intensity = self._create_intensity(i)
+            ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
             self.data[:,:,i] = prj[:,:]
@@ -145,7 +136,7 @@
 
         dx = length_unit[0]/self.nx
         dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units("m/s").value/self.nv
+        dv = self.dv.in_units("m/s").value
 
         if length_unit[1] == "deg":
             dx *= -1.
@@ -162,3 +153,11 @@
         fib[0].header["btype"] = self.field
 
         fib.writeto(filename, clobber=clobber)
+
+    def _create_intensity(self, i):
+        def _intensity(field, data):
+            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            w = 1.-w
+            w[w < 0.0] = 0.0
+            return data[self.field]*w
+        return _intensity

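Numerically, the new `_create_intensity` applies a triangular kernel in velocity: channel i weights each cell's emission by 1 - |v_los - vmid[i]|/dv, clipped at zero, rather than the old top-hat selection. A quick sketch of the weights:

    import numpy as np
    vmid_i, dv = 0.0, 1.0                           # channel center and width
    v_los = np.array([-1.5, -0.5, 0.0, 0.4, 1.2])
    w = 1.0 - np.abs(v_los - vmid_i)/dv
    w[w < 0.0] = 0.0                                # -> [0., 0.5, 1., 0.6, 0.]
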
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -36,35 +36,30 @@
     pass
 
 vlist = "xyz"
-def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
+def setup_sunyaev_zeldovich_fields(ds):
     def _t_squared(field, data):
         return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
-    registry.add_field(("gas", "t_squared"),
-                       function = _t_squared,
-                       units="g*keV**2/cm**3")
+    ds.add_field(("gas", "t_squared"), function = _t_squared,
+                 units="g*keV**2/cm**3")
     def _beta_perp_squared(field, data):
         return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
-    registry.add_field(("gas","beta_perp_squared"), 
-                       function = _beta_perp_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_perp_squared"), function = _beta_perp_squared,
+                 units="g/cm**3")
 
     def _beta_par_squared(field, data):
         return data["gas","beta_par"]**2/data["gas","density"]
-    registry.add_field(("gas","beta_par_squared"),
-                       function = _beta_par_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_par_squared"), function = _beta_par_squared,
+                 units="g/cm**3")
 
     def _t_beta_par(field, data):
         return data["gas","kT"]*data["gas","beta_par"]
-    registry.add_field(("gas","t_beta_par"),
-                       function = _t_beta_par,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_beta_par"), function = _t_beta_par,
+                 units="keV*g/cm**3")
 
     def _t_sz(field, data):
         return data["gas","density"]*data["gas","kT"]
-    registry.add_field(("gas","t_sz"),
-                       function = _t_sz,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_sz"), function = _t_sz,
+                 units="keV*g/cm**3")
 
 def generate_beta_par(L):
     def _beta_par(field, data):
@@ -79,8 +74,8 @@
 
     Parameters
     ----------
-    pf : parameter_file
-        The parameter file.
+    ds : Dataset
+        The dataset
     freqs : array_like
         The frequencies (in GHz) at which to compute the SZ spectral distortion.
     mue : float, optional
@@ -91,15 +86,15 @@
     Examples
     --------
     >>> freqs = [90., 180., 240.]
-    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    >>> szprj = SZProjection(ds, freqs, high_order=True)
     """
-    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+    def __init__(self, ds, freqs, mue=1.143, high_order=False):
 
-        self.pf = pf
-        pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields)
+        self.ds = ds
+        setup_sunyaev_zeldovich_fields(self.ds)
         self.num_freqs = len(freqs)
         self.high_order = high_order
-        self.freqs = pf.arr(freqs, "GHz")
+        self.freqs = ds.arr(freqs, "GHz")
         self.mueinv = 1./mue
         self.xinit = hcgs*self.freqs.in_units("Hz")/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
@@ -132,12 +127,12 @@
         --------
         >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
         """
-        axis = fix_axis(axis, self.pf)
+        axis = fix_axis(axis, self.ds)
 
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -145,8 +140,8 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
-        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.ds.proj("density", axis, center=ctr, data_source=source)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -171,7 +166,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
@@ -196,15 +191,15 @@
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
         if iterable(width):
-            w = self.pf.quan(width[0], width[1]).in_units("code_length").value
+            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
         elif isinstance(width, YTQuantity):
             w = width.in_units("code_length").value
         else:
             w = width
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -213,18 +208,18 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
-        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
-        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens
-        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par")/dens
-        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_squared")/dens
+        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
+        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
+        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
+        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
         omega1  = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_perp_squared")/dens
-            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_beta_par")/dens
+            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
+            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
             sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par_squared")/dens
+            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))
@@ -241,7 +236,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
@@ -278,8 +273,8 @@
 
         for i, field in enumerate(self.freq_fields):
             self.data[field] = I0*self.xinit[i]**3*signal[i,:,:]
-        self.data["Tau"] = self.pf.arr(tau, "dimensionless")
-        self.data["TeSZ"] = self.pf.arr(Te, "keV")
+        self.data["Tau"] = self.ds.arr(tau, "dimensionless")
+        self.data["TeSZ"] = self.ds.arr(Te, "keV")
 
     @parallel_root_only
     def write_fits(self, filename, units="kpc", sky_center=None, sky_scale=None,
@@ -327,7 +322,7 @@
         fib = FITSImageBuffer(self.data, fields=self.data.keys(),
                               center=center, units=units,
                               scale=deltas)
-        fib.update_all_headers("Time", float(self.pf.current_time.in_units(time_units).value))
+        fib.update_all_headers("Time", float(self.ds.current_time.in_units(time_units).value))
         fib.writeto(filename, clobber=clobber)
         
     @parallel_root_only

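With the field-registry plumbing replaced by `ds.add_field`, the module is driven directly from a dataset, as in the updated SZ notebook above:

    >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
    >>> szprj = SZProjection(ds, [90., 180., 240.])
    >>> szprj.on_axis("z", center="max", width=(10.0, "Mpc"), nx=400)
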
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -137,19 +137,25 @@
             self.center = None
             self.set_field_parameter('center', self.center)
             return
+        elif isinstance(center, YTArray):
+            self.center = self.pf.arr(center.in_cgs())
+            self.center.convert_to_units('code_length')
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
-            center = self.pf.arr(center, 'code_length')
+            if isinstance(center[0], YTQuantity):
+                self.center = self.pf.arr([c.in_cgs() for c in center])
+                self.center.convert_to_units('code_length')
+            else:
+                self.center = self.pf.arr(center, 'code_length')
         elif isinstance(center, basestring):
             if center.lower() in ("c", "center"):
-                center = self.pf.domain_center
+                self.center = self.pf.domain_center
              # is this dangerous for race conditions?
             elif center.lower() in ("max", "m"):
-                center = self.pf.h.find_max(("gas", "density"))[1]
+                self.center = self.pf.h.find_max(("gas", "density"))[1]
             elif center.startswith("max_"):
-                center = self.pf.h.find_max(center[4:])[1]
+                self.center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = np.array(center, dtype='float64')
-        self.center = self.pf.arr(center, 'code_length')
+            self.center = self.pf.arr(center, 'code_length', dtype='float64')
         self.set_field_parameter('center', self.center)
 
     def get_field_parameter(self, name, default=None):
@@ -593,6 +599,10 @@
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
+        # Track which ones we want in the end
+        ofields = set(self.field_data.keys()
+                    + fields_to_get
+                    + fields_to_generate)
         # At this point, we want to figure out *all* our dependencies.
         fields_to_get = self._identify_dependencies(fields_to_get,
             self._spatial)
@@ -621,6 +631,9 @@
 
         fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
+        for field in self.field_data.keys():
+            if field not in ofields:
+                self.field_data.pop(field)
 
     def _generate_fields(self, fields_to_generate):
         index = 0

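A sketch of the center forms `_set_center` now handles (unit-aware inputs are converted to code_length; plain sequences are still assumed to be in code_length):

    import yt.units as u
    sp1 = ds.sphere(ds.arr([5., 5., 5.], "Mpc"), (1., "Mpc"))     # YTArray center
    sp2 = ds.sphere([5.*u.Mpc, 5.*u.Mpc, 5.*u.Mpc], (1., "Mpc"))  # list of YTQuantities
    sp3 = ds.sphere("max", (1., "Mpc"))                           # string shortcut
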
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,8 +21,6 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.fields.field_info_container import \
-    FieldDetector
 from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -52,12 +52,6 @@
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
-    _container_fields = (("index", "dx"),
-                         ("index", "dy"),
-                         ("index", "dz"),
-                         ("index", "x"),
-                         ("index", "y"),
-                         ("index", "z"))
     _domain_offset = 0
     _cell_count = -1
 
@@ -78,19 +72,6 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
-    def _generate_container_field(self, field):
-        if self._current_chunk is None:
-            self.index._identify_base_chunk(self)
-        if isinstance(field, tuple): field = field[1]
-        if field == "dx":
-            return self._current_chunk.fwidth[:,0]
-        elif field == "dy":
-            return self._current_chunk.fwidth[:,1]
-        elif field == "dz":
-            return self._current_chunk.fwidth[:,2]
-        else:
-            raise RuntimeError
-
     def __getitem__(self, key):
         tr = super(OctreeSubset, self).__getitem__(key)
         try:
@@ -111,16 +92,20 @@
         return self._num_zones + 2*self._num_ghost_zones
 
     def _reshape_vals(self, arr):
-        if len(arr.shape) == 4: return arr
+        if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
+            return arr
         nz = self.nz
         n_oct = arr.shape[0] / (nz**3.0)
         if arr.size == nz*nz*nz*n_oct:
-            arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+            new_shape = (nz, nz, nz, n_oct)
         elif arr.size == nz*nz*nz*n_oct * 3:
-            arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+            new_shape = (nz, nz, nz, n_oct, 3)
         else:
             raise RuntimeError
-        arr = np.asfortranarray(arr)
+        # This will retain units now.
+        arr.shape = new_shape
+        if not arr.flags["F_CONTIGUOUS"]:
+            arr = arr.reshape(new_shape, order="F")
         return arr
 
     _domain_ind = None

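The "retain units" comment above hinges on reshaping in place: assigning to `.shape` keeps the YTArray subclass, whereas the old `np.asfortranarray` call returned a plain ndarray and dropped the units. A minimal sketch:

    import numpy as np
    from yt.units.yt_array import YTArray

    a = YTArray(np.arange(8.0), "g")
    a.shape = (2, 2, 2)       # still a YTArray, still in grams
    b = np.asfortranarray(a)  # plain ndarray; the units are lost
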
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1154,7 +1154,10 @@
         for bin_field in bin_fields:
             bf_units = data_source.pf._get_field_info(bin_field[0],
                                                       bin_field[1]).units
-            field_ex = list(extrema[bin_field[-1]])
+            try:
+                field_ex = list(extrema[bin_field[-1]])
+            except KeyError:
+                field_ex = list(extrema[bin_field])
             if iterable(field_ex[0]):
                 field_ex[0] = data_source.pf.quan(field_ex[0][0], field_ex[0][1])
                 field_ex[0] = field_ex[0].in_units(bf_units)

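The try/except above lets `extrema` be keyed either by the bare field name or by the full field tuple. A hedged sketch, assuming this module's `create_profile` wrapper and an existing data object `ad`:

    prof = create_profile(ad, ("gas", "density"), ("gas", "temperature"),
                          extrema={("gas", "density"): (1e-30, 1e-24)})
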
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -527,7 +527,7 @@
             source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
-        return max_val, np.array([mx, my, mz], dtype="float64")
+        return max_val, self.arr([mx, my, mz], 'code_length', dtype="float64")
 
     def find_min(self, field):
         """
@@ -539,7 +539,7 @@
             source.quantities.min_location(field)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
               min_val, mx, my, mz)
-        return min_val, np.array([mx, my, mz], dtype="float64")
+        return min_val, self.arr([mx, my, mz], 'code_length', dtype="float64")
 
     # Now all the object related stuff
     def all_data(self, find_max=False):

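Since `find_max` and `find_min` now return the location as a YTArray in code_length, the result converts directly instead of being a bare float array:

    >>> val, loc = ds.find_max("density")
    >>> print loc.in_units("kpc")
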
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -27,8 +27,6 @@
     NullFunc, \
     TranslationFunc, \
     ValidateSpatial
-from .field_detector import \
-    FieldDetector
 from yt.utilities.exceptions import \
     YTFieldNotFound
 from .field_plugin_registry import \

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -17,6 +17,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.units.yt_array import YTArray
 from yt.fields.derived_field import \
     ValidateParameter, \
     ValidateSpatial
@@ -225,12 +226,12 @@
         yv = data[ptype, svel % 'y'] - bv[1]
         zv = data[ptype, svel % 'z'] - bv[2]
         center = data.get_field_parameter('center')
-        coords = np.array([data[ptype, spos % 'x'],
+        coords = YTArray([data[ptype, spos % 'x'],
                            data[ptype, spos % 'y'],
                            data[ptype, spos % 'z']], dtype=np.float64)
         new_shape = tuple([3] + [1]*(len(coords.shape)-1))
         r_vec = coords - np.reshape(center,new_shape)
-        v_vec = np.array([xv,yv,zv], dtype=np.float64)
+        v_vec = YTArray([xv,yv,zv], dtype=np.float64)
         return np.cross(r_vec, v_vec, axis=0)
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -340,7 +341,7 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -358,7 +359,7 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -376,12 +377,11 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
-        sphp = get_sph_phi_component(pos, theta, phi, normal)
+        sphp = get_sph_phi_component(pos, phi, normal)
         return sphp
 
     registry.add_field((ptype, "particle_phi_spherical"),
@@ -395,9 +395,9 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         vel = svel
-        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -416,9 +416,9 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         vel = svel
-        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -436,8 +436,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = np.array([data[ptype, spos % ax] for ax in "xyz"])
-        vel = np.array([data[ptype, svel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))

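The np.array -> YTArray swaps above matter because stacking component arrays with np.array strips their units, while the YTArray constructor keeps them (taking the units from the first element). A minimal sketch:

    import numpy as np
    from yt.units.yt_array import YTArray

    x = YTArray([1., 2.], "cm")
    y = YTArray([3., 4.], "cm")
    np.array([x, y])  # plain 2x2 ndarray, units gone
    YTArray([x, y])   # 2x2 YTArray, still in cm
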
diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -188,7 +188,7 @@
         # Specific entropy
         ("entropy", ("erg/(g*K)", ["entropy"], None)),
         ("entropypert", ("", [], None)),
-        ("enucdot", ("ergs/(g*s)", [], None)),
+        ("enucdot", ("erg/(g*s)", [], None)),
         ("gpi_x", ("dyne/cm**3", [], None)), # Perturbational pressure grad
         ("gpi_y", ("dyne/cm**3", [], None)),
         ("gpi_z", ("dyne/cm**3", [], None)),

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -162,7 +162,7 @@
 
     def _parse_index(self):
         f = self._handle # shortcut
-        self.max_level = f.attrs['max_level']
+        self.max_level = f.attrs['num_levels'] - 1
 
         grids = []
         self.dds_list = []

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -171,7 +171,7 @@
             return np.array([], dtype=np.float64)
 
         data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
+        return np.asarray(data[field_index::items_per_particle], dtype=np.float64, order='F')
 
 class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo2d_hdf5"

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -235,7 +235,18 @@
         BaseIOHandler.__init__(self, pf)
 
     def _read_field_names(self, grid):
-        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
+        fields = []
+        add_io = "io" in grid.pf.particle_types
+        for name, v in self.grids_in_memory[grid.id].items():
+
+            # NOTE: This won't work with 1D datasets or references.
+            if not hasattr(v, "shape") or v.dtype == "O":
+                continue
+            elif v.ndim == 1:
+                if add_io: fields.append( ("io", str(name)) )
+            else:
+                fields.append( ("enzo", str(name)) )
+        return fields
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -296,7 +307,7 @@
                     x, y, z = self.grids_in_memory[g.id]['particle_position_x'], \
                                         self.grids_in_memory[g.id]['particle_position_y'], \
                                         self.grids_in_memory[g.id]['particle_position_z']
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = self.grids_in_memory[g.id][field]

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -49,9 +49,9 @@
 regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
 spec_names = {"V":"Velocity",
-              "FREQ":"Frequency",
-              "ENER":"Energy",
-              "WAV":"Wavelength"}
+              "F":"Frequency",
+              "E":"Energy",
+              "W":"Wavelength"}
 
 field_from_unit = {"Jy":"intensity",
                    "K":"temperature"}

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -991,8 +991,12 @@
         This is a dict of numpy arrays, where the keys are the field names.
         Particles positions must be named "particle_position_x",
         "particle_position_y", "particle_position_z".
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
+    length_unit : float
+        Conversion factor from simulation length units to centimeters
+    mass_unit : float
+        Conversion factor from simulation mass units to grams
+    time_unit : float
+        Conversion factor from simulation time units to seconds
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
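
With the single sim_unit_to_cm factor split into length, mass, and time
conversions, a stream dataset can be loaded with one CGS factor per dimension.
A usage sketch, assuming load_uniform_grid accepts the keywords documented in
the hunk above (the numeric values are illustrative):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    data = {"density": np.random.random((32, 32, 32))}
    pf = load_uniform_grid(data, [32, 32, 32],
                           length_unit=3.086e24,  # cm per code length (~1 Mpc)
                           mass_unit=1.989e33,    # g per code mass (~1 Msun)
                           time_unit=3.156e13)    # s per code time (~1 Myr)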

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -34,10 +34,10 @@
 
 def _get_coord_fields(axi, units = "code_length"):
     def _dds(field, data):
-        rv = data.pf.arr(data.fwidth[...,axi], units)
+        rv = data.pf.arr(data.fwidth[...,axi].copy(), units)
         return data._reshape_vals(rv)
     def _coords(field, data):
-        rv = data.pf.arr(data.fcoords[...,axi], units)
+        rv = data.pf.arr(data.fcoords[...,axi].copy(), units)
         return data._reshape_vals(rv)
     return _dds, _coords
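
fcoords and fwidth slices are views into shared coordinate buffers, so wrapping
them in a unit array without a copy lets any later in-place operation on the
returned array corrupt the cache; copying first severs that link. The aliasing
hazard in plain NumPy:

    import numpy as np

    fcoords = np.ones((4, 3))
    view = fcoords[..., 0]          # a view, not a copy
    view *= 100.0                   # e.g. an in-place unit conversion
    print(fcoords[0, 0])            # 100.0: the shared buffer changed

    safe = fcoords[..., 1].copy()   # independent buffer
    safe *= 100.0
    print(fcoords[0, 1])            # 1.0: original untouched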
 

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -122,6 +122,8 @@
         cdef int i, j, k, n
         data.global_index = -1
         data.level = 0
+        data.oref = 0
+        data.nz = 1
         assert(ref_mask.shape[0] / float(data.nz) ==
             <int>(ref_mask.shape[0]/float(data.nz)))
         obj.allocate_domains([ref_mask.shape[0] / data.nz])
@@ -496,7 +498,7 @@
             coords[:,i] += self.DLE[i]
         return coords
 
-    def save_octree(self, always_descend = False):
+    def save_octree(self):
         # Get the header
         header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
                       left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
@@ -507,13 +509,12 @@
         # domain_id = -1 here, because we want *every* oct
         cdef OctVisitorData data
         self.setup_data(&data, -1)
-        data.oref = 1
+        data.oref = 0
+        data.nz = 1
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
         ref_mask = np.zeros(self.nocts * data.nz, dtype="uint8") - 1
-        cdef void *p[2]
-        cdef np.uint8_t ad = int(always_descend)
-        p[0] = <void *> &ad
-        p[1] = ref_mask.data
+        cdef void *p[1]
+        p[0] = ref_mask.data
         data.array = p
         # Enforce partial_coverage here
         self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
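
With always_descend gone, save_octree visits exactly one value per oct
(oref = 0, nz = 1) and data.array carries a single pointer to the ref_mask
buffer. The mask is seeded with np.zeros(..., dtype="uint8") - 1, which wraps
to 255 and acts as an "unvisited" sentinel:

    import numpy as np

    nocts, nz = 8, 1
    ref_mask = np.zeros(nocts * nz, dtype="uint8") - 1
    print(ref_mask[0])    # 255: uint8 wrap-around marks unvisited slots
    ref_mask[3] = 1       # the visitor later stores 0 (leaf) or 1 (refined)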

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -179,12 +179,8 @@
     cdef np.uint8_t *arr, res, ii, *always_descend
     ii = cind(data.ind[0], data.ind[1], data.ind[2])
     cdef void **p = <void **> data.array
-    always_descend = <np.uint8_t *> p[0]
-    arr = <np.uint8_t *> p[1]
-    if always_descend[0] == 1 and data.last == o.domain_ind:
-        return
-    data.last = o.domain_ind
-    if o.children == NULL or o.children[ii] == NULL:
+    arr = <np.uint8_t *> p[0]
+    if o.children == NULL:
         # Not refined.
         res = 0
     else:
@@ -216,7 +212,7 @@
             o.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 o.children[i] = NULL
-        for i in range(arr[data.index]):
+        for i in range(8):
             o.children[ii + i] = &octs[nocts[0]]
             o.children[ii + i].domain_ind = nocts[0]
             o.children[ii + i].file_ind = -1

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -300,6 +300,7 @@
             # Now we locate the particle
             for i in range(3):
                 ind[i] = <int> ((pos[p, i] - self.left_edge[i])*self.idds[i])
+                ind[i] = iclip(ind[i], 0, self.dims[i])
             mask[ind[0],ind[1],ind[2]] |= val
 
     def identify_data_files(self, SelectorObject selector):
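
A particle sitting exactly on the domain's right edge maps to
ind[i] == dims[i], one past the last cell, so the new iclip call clamps the
index before it touches the mask. The failure mode and the clamp in NumPy,
with np.clip into the valid cell range standing in for the inline iclip
helper:

    import numpy as np

    left_edge, right_edge = 0.0, 1.0
    dims = np.array([8, 8, 8])
    idds = dims / (right_edge - left_edge)

    pos = np.array([0.5, 1.0, 0.25])        # y is exactly on the right edge
    ind = ((pos - left_edge) * idds).astype(int)
    print(ind)                              # [4 8 2]: 8 is out of bounds

    ind = np.clip(ind, 0, dims - 1)
    print(ind)                              # [4 7 2]: safe mask index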

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -541,7 +541,7 @@
 
     def __init__(self, dobj):
         for i in range(3):
-            self.center[i] = dobj.center[i]
+            self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
 
@@ -702,7 +702,7 @@
         cdef int i
         for i in range(3):
             self.norm_vec[i] = dobj._norm_vec[i]
-            self.center[i] = dobj.center[i]
+            self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj._radius)
         self.radius2 = self.radius * self.radius
         self.height = _ensure_code(dobj._height)

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -36,7 +36,8 @@
     YTUnitOperationError, YTUfuncUnitError
 from yt.testing import fake_random_pf, requires_module
 from yt.funcs import fix_length
-
+from yt.units.unit_symbols import \
+    cm, m, g
 
 def operate_and_compare(a, b, op, answer):
     # Test generator for YTArrays tests
@@ -56,32 +57,46 @@
     # Same units
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'cm')
+    a3 = [4*cm, 5*cm, 6*cm]
     answer = YTArray([5, 7, 9], 'cm')
 
     yield operate_and_compare, a1, a2, operator.add, answer
     yield operate_and_compare, a2, a1, operator.add, answer
+    yield operate_and_compare, a1, a3, operator.add, answer
+    yield operate_and_compare, a3, a1, operator.add, answer
+    yield operate_and_compare, a2, a1, np.add, answer
     yield operate_and_compare, a1, a2, np.add, answer
-    yield operate_and_compare, a2, a1, np.add, answer
+    yield operate_and_compare, a1, a3, np.add, answer
+    yield operate_and_compare, a3, a1, np.add, answer
 
     # different units
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'm')
+    a3 = [4*m, 5*m, 6*m]
     answer1 = YTArray([401, 502, 603], 'cm')
     answer2 = YTArray([4.01, 5.02, 6.03], 'm')
 
     yield operate_and_compare, a1, a2, operator.add, answer1
     yield operate_and_compare, a2, a1, operator.add, answer2
+    yield operate_and_compare, a1, a3, operator.add, answer1
+    yield operate_and_compare, a3, a1, operator.add, answer1
     yield assert_raises, YTUfuncUnitError, np.add, a1, a2
+    yield assert_raises, YTUfuncUnitError, np.add, a1, a3
 
     # Test dimensionless quantities
     a1 = YTArray([1, 2, 3])
     a2 = array([4, 5, 6])
+    a3 = [4, 5, 6]
     answer = YTArray([5, 7, 9])
 
     yield operate_and_compare, a1, a2, operator.add, answer
     yield operate_and_compare, a2, a1, operator.add, answer
+    yield operate_and_compare, a1, a3, operator.add, answer
+    yield operate_and_compare, a3, a1, operator.add, answer
     yield operate_and_compare, a1, a2, np.add, answer
     yield operate_and_compare, a2, a1, np.add, answer
+    yield operate_and_compare, a1, a3, np.add, answer
+    yield operate_and_compare, a3, a1, np.add, answer
 
     # Catch the different dimensions error
     a1 = YTArray([1, 2, 3], 'm')
@@ -100,34 +115,49 @@
     # Same units
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'cm')
+    a3 = [4*cm, 5*cm, 6*cm]
     answer1 = YTArray([-3, -3, -3], 'cm')
     answer2 = YTArray([3, 3, 3], 'cm')
 
     yield operate_and_compare, a1, a2, operator.sub, answer1
     yield operate_and_compare, a2, a1, operator.sub, answer2
+    yield operate_and_compare, a1, a3, operator.sub, answer1
+    yield operate_and_compare, a3, a1, operator.sub, answer2
     yield operate_and_compare, a1, a2, np.subtract, answer1
     yield operate_and_compare, a2, a1, np.subtract, answer2
+    yield operate_and_compare, a1, a3, np.subtract, answer1
+    yield operate_and_compare, a3, a1, np.subtract, answer2
 
     # different units
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'm')
+    a3 = [4*m, 5*m, 6*m]
     answer1 = YTArray([-399, -498, -597], 'cm')
     answer2 = YTArray([3.99, 4.98, 5.97], 'm')
+    answer3 = YTArray([399, 498, 597], 'cm')
 
     yield operate_and_compare, a1, a2, operator.sub, answer1
     yield operate_and_compare, a2, a1, operator.sub, answer2
+    yield operate_and_compare, a1, a3, operator.sub, answer1
+    yield operate_and_compare, a3, a1, operator.sub, answer3
     yield assert_raises, YTUfuncUnitError, np.subtract, a1, a2
+    yield assert_raises, YTUfuncUnitError, np.subtract, a1, a3
 
     # Test dimensionless quantities
     a1 = YTArray([1, 2, 3])
     a2 = array([4, 5, 6])
+    a3 = [4, 5, 6]
     answer1 = YTArray([-3, -3, -3])
     answer2 = YTArray([3, 3, 3])
 
     yield operate_and_compare, a1, a2, operator.sub, answer1
     yield operate_and_compare, a2, a1, operator.sub, answer2
+    yield operate_and_compare, a1, a3, operator.sub, answer1
+    yield operate_and_compare, a3, a1, operator.sub, answer2
     yield operate_and_compare, a1, a2, np.subtract, answer1
     yield operate_and_compare, a2, a1, np.subtract, answer2
+    yield operate_and_compare, a1, a3, np.subtract, answer1
+    yield operate_and_compare, a3, a1, np.subtract, answer2
 
     # Catch the different dimensions error
     a1 = YTArray([1, 2, 3], 'm')
@@ -146,54 +176,79 @@
     # Same units
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'cm')
+    a3 = [4*cm, 5*cm, 6*cm]
     answer = YTArray([4, 10, 18], 'cm**2')
 
     yield operate_and_compare, a1, a2, operator.mul, answer
     yield operate_and_compare, a2, a1, operator.mul, answer
+    yield operate_and_compare, a1, a3, operator.mul, answer
+    yield operate_and_compare, a3, a1, operator.mul, answer
     yield operate_and_compare, a1, a2, np.multiply, answer
     yield operate_and_compare, a2, a1, np.multiply, answer
+    yield operate_and_compare, a1, a3, np.multiply, answer
+    yield operate_and_compare, a3, a1, np.multiply, answer
 
     # different units, same dimension
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'm')
+    a3 = [4*m, 5*m, 6*m]
     answer1 = YTArray([400, 1000, 1800], 'cm**2')
     answer2 = YTArray([.04, .10, .18], 'm**2')
     answer3 = YTArray([4, 10, 18], 'cm*m')
 
     yield operate_and_compare, a1, a2, operator.mul, answer1
     yield operate_and_compare, a2, a1, operator.mul, answer2
+    yield operate_and_compare, a1, a3, operator.mul, answer1
+    yield operate_and_compare, a3, a1, operator.mul, answer2
     yield operate_and_compare, a1, a2, np.multiply, answer3
     yield operate_and_compare, a2, a1, np.multiply, answer3
+    yield operate_and_compare, a1, a3, np.multiply, answer3
+    yield operate_and_compare, a3, a1, np.multiply, answer3
 
     # different dimensions
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = YTArray([4, 5, 6], 'g')
+    a3 = [4*g, 5*g, 6*g]
     answer = YTArray([4, 10, 18], 'cm*g')
 
     yield operate_and_compare, a1, a2, operator.mul, answer
     yield operate_and_compare, a2, a1, operator.mul, answer
+    yield operate_and_compare, a1, a3, operator.mul, answer
+    yield operate_and_compare, a3, a1, operator.mul, answer
     yield operate_and_compare, a1, a2, np.multiply, answer
     yield operate_and_compare, a2, a1, np.multiply, answer
+    yield operate_and_compare, a1, a3, np.multiply, answer
+    yield operate_and_compare, a3, a1, np.multiply, answer
 
     # One dimensionless, one unitful
     a1 = YTArray([1, 2, 3], 'cm')
     a2 = array([4, 5, 6])
+    a3 = [4, 5, 6]
     answer = YTArray([4, 10, 18], 'cm')
 
     yield operate_and_compare, a1, a2, operator.mul, answer
     yield operate_and_compare, a2, a1, operator.mul, answer
+    yield operate_and_compare, a1, a3, operator.mul, answer
+    yield operate_and_compare, a3, a1, operator.mul, answer
     yield operate_and_compare, a1, a2, np.multiply, answer
     yield operate_and_compare, a2, a1, np.multiply, answer
+    yield operate_and_compare, a1, a3, np.multiply, answer
+    yield operate_and_compare, a3, a1, np.multiply, answer
 
     # Both dimensionless quantities
     a1 = YTArray([1, 2, 3])
     a2 = array([4, 5, 6])
+    a3 = [4, 5, 6]
     answer = YTArray([4, 10, 18])
 
     yield operate_and_compare, a1, a2, operator.mul, answer
     yield operate_and_compare, a2, a1, operator.mul, answer
+    yield operate_and_compare, a1, a3, operator.mul, answer
+    yield operate_and_compare, a3, a1, operator.mul, answer
     yield operate_and_compare, a1, a2, np.multiply, answer
     yield operate_and_compare, a2, a1, np.multiply, answer
+    yield operate_and_compare, a1, a3, np.multiply, answer
+    yield operate_and_compare, a3, a1, np.multiply, answer
 
 
 def test_division():
@@ -205,17 +260,23 @@
     # Same units
     a1 = YTArray([1., 2., 3.], 'cm')
     a2 = YTArray([4., 5., 6.], 'cm')
+    a3 = [4*cm, 5*cm, 6*cm]
     answer1 = YTArray([0.25, 0.4, 0.5])
     answer2 = YTArray([4, 2.5, 2])
 
     yield operate_and_compare, a1, a2, operator.div, answer1
     yield operate_and_compare, a2, a1, operator.div, answer2
+    yield operate_and_compare, a1, a3, operator.div, answer1
+    yield operate_and_compare, a3, a1, operator.div, answer2
     yield operate_and_compare, a1, a2, np.divide, answer1
     yield operate_and_compare, a2, a1, np.divide, answer2
+    yield operate_and_compare, a1, a3, np.divide, answer1
+    yield operate_and_compare, a3, a1, np.divide, answer2
 
     # different units, same dimension
     a1 = YTArray([1., 2., 3.], 'cm')
     a2 = YTArray([4., 5., 6.], 'm')
+    a3 = [4*m, 5*m, 6*m]
     answer1 = YTArray([.0025, .004, .005])
     answer2 = YTArray([400, 250, 200])
     answer3 = YTArray([0.25, 0.4, 0.5], 'cm/m')
@@ -223,41 +284,60 @@
 
     yield operate_and_compare, a1, a2, operator.div, answer1
     yield operate_and_compare, a2, a1, operator.div, answer2
+    yield operate_and_compare, a1, a3, operator.div, answer1
+    yield operate_and_compare, a3, a1, operator.div, answer2
     yield operate_and_compare, a1, a2, np.divide, answer3
     yield operate_and_compare, a2, a1, np.divide, answer4
+    yield operate_and_compare, a1, a3, np.divide, answer3
+    yield operate_and_compare, a3, a1, np.divide, answer4
 
     # different dimensions
     a1 = YTArray([1., 2., 3.], 'cm')
     a2 = YTArray([4., 5., 6.], 'g')
+    a3 = [4*g, 5*g, 6*g]
     answer1 = YTArray([0.25, 0.4, 0.5], 'cm/g')
     answer2 = YTArray([4, 2.5, 2], 'g/cm')
 
     yield operate_and_compare, a1, a2, operator.div, answer1
     yield operate_and_compare, a2, a1, operator.div, answer2
+    yield operate_and_compare, a1, a3, operator.div, answer1
+    yield operate_and_compare, a3, a1, operator.div, answer2
     yield operate_and_compare, a1, a2, np.divide, answer1
     yield operate_and_compare, a2, a1, np.divide, answer2
+    yield operate_and_compare, a1, a3, np.divide, answer1
+    yield operate_and_compare, a3, a1, np.divide, answer2
 
     # One dimensionless, one unitful
     a1 = YTArray([1., 2., 3.], 'cm')
     a2 = array([4., 5., 6.])
+    a3 = [4, 5, 6]
     answer1 = YTArray([0.25, 0.4, 0.5], 'cm')
     answer2 = YTArray([4, 2.5, 2], '1/cm')
 
     yield operate_and_compare, a1, a2, operator.div, answer1
     yield operate_and_compare, a2, a1, operator.div, answer2
+    yield operate_and_compare, a1, a3, operator.div, answer1
+    yield operate_and_compare, a3, a1, operator.div, answer2
     yield operate_and_compare, a1, a2, np.divide, answer1
     yield operate_and_compare, a2, a1, np.divide, answer2
+    yield operate_and_compare, a1, a3, np.divide, answer1
+    yield operate_and_compare, a3, a1, np.divide, answer2
 
     # Both dimensionless quantities
     a1 = YTArray([1., 2., 3.])
     a2 = array([4., 5., 6.])
+    a3 = [4, 5, 6]
     answer1 = YTArray([0.25, 0.4, 0.5])
     answer2 = YTArray([4, 2.5, 2])
 
     yield operate_and_compare, a1, a2, operator.div, answer1
     yield operate_and_compare, a2, a1, operator.div, answer2
-    yield operate_and_compare, a1, a2, np.divide, answer1
-    yield operate_and_compare, a2, a1, np.divide, answer2
+    yield operate_and_compare, a1, a3, operator.div, answer1
+    yield operate_and_compare, a3, a1, operator.div, answer2
+    yield operate_and_compare, a1, a3, np.divide, answer1
+    yield operate_and_compare, a3, a1, np.divide, answer2
+    yield operate_and_compare, a1, a3, np.divide, answer1
+    yield operate_and_compare, a3, a1, np.divide, answer2
 
 
 def test_power():
@@ -662,7 +742,7 @@
     yt_arr2 = YTArray.from_astropy(ap_arr)
 
     ap_quan = 10.*_astropy.units.Msun**0.5/(_astropy.units.kpc**3)
-    yt_quan = YTQuantity(10.,"sqrt(Msun)/kpc**3")
+    yt_quan = YTQuantity(10., "sqrt(Msun)/kpc**3")
     yt_quan2 = YTQuantity.from_astropy(ap_quan)
 
     yield assert_array_equal, ap_arr, yt_arr.to_astropy()
@@ -687,6 +767,7 @@
     nu = YTASubclass([10, 11, 12], '')
     nda = np.array([3, 4, 5])
     yta = YTArray([6, 7, 8], 'mg')
+    loq = [YTQuantity(6, 'mg'), YTQuantity(7, 'mg'), YTQuantity(8, 'mg')]
     ytq = YTQuantity(4, 'cm')
     ndf = np.float64(3)
 
@@ -695,7 +776,7 @@
         assert_isinstance(op(inst2, inst1), compare_class)
 
     for op in (operator.mul, operator.div, operator.truediv):
-        for inst in (b, ytq, ndf, yta, nda):
+        for inst in (b, ytq, ndf, yta, nda, loq):
             yield op_comparison, op, a, inst, YTASubclass
 
         yield op_comparison, op, ytq, nda, YTArray
@@ -705,6 +786,7 @@
         yield op_comparison, op, nu, nda, YTASubclass
         yield op_comparison, op, a, b, YTASubclass
         yield op_comparison, op, a, yta, YTASubclass
+        yield op_comparison, op, a, loq, YTASubclass
 
     yield assert_isinstance, a[0], YTQuantity
     yield assert_isinstance, a[:], YTASubclass
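
The new a3 cases exercise arithmetic between YTArrays and plain Python lists
of unitful quantities, which the yt_array.py changes below coerce to a YTArray
when the units are uniform. In short, per these tests:

    from yt.units.yt_array import YTArray
    from yt.units.unit_symbols import cm

    a1 = YTArray([1, 2, 3], 'cm')
    a3 = [4*cm, 5*cm, 6*cm]   # a plain list of YTQuantity values

    print(a1 + a3)            # [5 7 9] cm
    print(a3 + a1)            # coercion works on either side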

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -33,7 +33,7 @@
 from yt.units.dimensions import dimensionless
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUnitConversionError, \
-    YTUfuncUnitError
+    YTUfuncUnitError, YTIterableUnitCoercionError
 from numbers import Number as numeric_type
 from yt.utilities.on_demand_imports import _astropy
 from sympy import Rational
@@ -118,31 +118,47 @@
 def comparison_unit(unit1, unit2):
     return None
 
+NULL_UNIT = Unit()
+
+def coerce_iterable_units(input_object):
+    if isinstance(input_object, np.ndarray):
+        return input_object
+    if iterable(input_object):
+        if any([isinstance(o, YTArray) for o in input_object]):
+            ff = getattr(input_object[0], 'units', NULL_UNIT, )
+            if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]):
+                raise YTIterableUnitCoercionError(input_object)
+            # This will create a copy of the data in the iterable.
+            return YTArray(input_object)
+        return input_object
+    else:
+        return input_object
+
 def sanitize_units_mul(this_object, other_object):
-    ret = other_object
+    inp = coerce_iterable_units(this_object)
+    ret = coerce_iterable_units(other_object)
     # If the other object is a YTArray and has the same dimensions as the object
     # under consideration, convert so we don't mix units with the same
     # dimensions.
-    if isinstance(other_object, YTArray):
-        if this_object.units.same_dimensions_as(other_object.units):
-            ret = other_object.in_units(this_object.units)
+    if isinstance(ret, YTArray):
+        if inp.units.same_dimensions_as(ret.units):
+            ret.in_units(inp.units)
     return ret
 
 def sanitize_units_add(this_object, other_object, op_string):
+    inp = coerce_iterable_units(this_object)
+    ret = coerce_iterable_units(other_object)
     # Make sure the other object is a YTArray before we use the `units`
     # attribute.
-    if isinstance(other_object, YTArray):
-        if not this_object.units.same_dimensions_as(other_object.units):
-            raise YTUnitOperationError(op_string, this_object.units,
-                                       other_object.units)
-        ret = other_object.in_units(this_object.units)
+    if isinstance(ret, YTArray):
+        if not inp.units.same_dimensions_as(ret.units):
+            raise YTUnitOperationError(op_string, inp.units, ret.units)
+        ret = ret.in_units(inp.units)
     # If the other object is not a YTArray, the only valid case is adding
     # dimensionless things.
     else:
-        if not this_object.units.is_dimensionless:
-            raise YTUnitOperationError(op_string, this_object.units,
-                                       dimensionless)
-        ret = other_object
+        if not inp.units.is_dimensionless:
+            raise YTUnitOperationError(op_string, inp.units, dimensionless)
     return ret
 
 unary_operators = (
@@ -268,8 +284,8 @@
             return input_array
         elif isinstance(input_array, np.ndarray):
             pass
-        elif iterable(input_array):
-            if isinstance(input_array[0], YTQuantity):
+        elif iterable(input_array) and input_array:
+            if isinstance(input_array[0], YTArray):
                 return YTArray(np.array(input_array, dtype=dtype),
                                input_array[0].units)
 
@@ -903,17 +919,19 @@
                 raise YTUnitOperationError(context[0], u)
             ret_class = type(self)
         elif context[0] in binary_operators:
-            unit1 = getattr(context[1][0], 'units', None)
-            unit2 = getattr(context[1][1], 'units', None)
-            cls1 = type(context[1][0])
-            cls2 = type(context[1][1])
+            oper1 = coerce_iterable_units(context[1][0])
+            oper2 = coerce_iterable_units(context[1][1])
+            cls1 = type(oper1)
+            cls2 = type(oper2)
+            unit1 = getattr(oper1, 'units', None)
+            unit2 = getattr(oper2, 'units', None)
             ret_class = get_binary_op_return_class(cls1, cls2)
             if unit1 is None:
                 unit1 = Unit(registry=getattr(unit2, 'registry', None))
             if unit2 is None and context[0] is not power:
                 unit2 = Unit(registry=getattr(unit1, 'registry', None))
             elif context[0] is power:
-                unit2 = context[1][1]
+                unit2 = oper2
                 if isinstance(unit2, np.ndarray):
                     if isinstance(unit2, YTArray):
                         if unit2.units.is_dimensionless:
@@ -937,7 +955,7 @@
         else:
             raise RuntimeError("Operation is not defined.")
         if unit is None:
-            out_arr = np.array(out_arr)
+            out_arr = np.array(out_arr, copy=False)
             return out_arr
         out_arr.units = unit
         if out_arr.size == 1:
@@ -947,7 +965,7 @@
                 # This happens if you do ndarray * YTQuantity. Explicitly
                 # casting to YTArray avoids creating a YTQuantity with size > 1
                 return YTArray(np.array(out_arr, unit))
-            return ret_class(np.array(out_arr), unit)
+            return ret_class(np.array(out_arr, copy=False), unit)
 
 
     def __reduce__(self):
@@ -1029,9 +1047,9 @@
 def get_binary_op_return_class(cls1, cls2):
     if cls1 is cls2:
         return cls1
-    if cls1 is np.ndarray or issubclass(cls1, numeric_type):
+    if cls1 is np.ndarray or issubclass(cls1, (numeric_type, np.number, list, tuple)):
         return cls2
-    if cls2 is np.ndarray or issubclass(cls2, numeric_type):
+    if cls2 is np.ndarray or issubclass(cls2, (numeric_type, np.number, list, tuple)):
         return cls1
     if issubclass(cls1, YTQuantity):
         return cls2
@@ -1042,5 +1060,5 @@
     if issubclass(cls2, cls1):
         return cls2
     else:
-        raise RuntimeError("Operations are only defined on pairs of objects"
-                           "in which one is a subclass of the other")
+        raise RuntimeError("Undefined operation for a YTArray subclass. "
+                           "Received operand types (%s) and (%s)" % (cls1, cls2))

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -193,9 +193,18 @@
 
     def __str__(self):
         err = "The NumPy %s operation is only allowed on objects with " \
-        "identical units. Convert one of the arrays to the other\'s " \
-        "units first. Received units (%s) and (%s)." % \
-        (self.ufunc, self.unit1, self.unit2)
+              "identical units. Convert one of the arrays to the other\'s " \
+              "units first. Received units (%s) and (%s)." % \
+              (self.ufunc, self.unit1, self.unit2)
+        return err
+
+class YTIterableUnitCoercionError(YTException):
+    def __init__(self, quantity_list):
+        self.quantity_list = quantity_list
+
+    def __str__(self):
+        err = "Received a list or tuple of quantities with nonuniform units: " \
+              "%s" % self.quantity_list
         return err
 
 class YTHubRegisterError(YTException):
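
coerce_iterable_units is what makes the list-of-quantities tests above pass:
an iterable containing any YTArray is promoted to a YTArray when its units are
uniform, and rejected with the new YTIterableUnitCoercionError when they are
not. A condensed demonstration following the diff:

    from yt.units.yt_array import YTArray
    from yt.units.unit_symbols import cm, g
    from yt.utilities.exceptions import YTIterableUnitCoercionError

    a = YTArray([1., 2., 3.], 'cm')
    print(a + [4*cm, 5*cm, 6*cm])     # uniform units: promoted, then added

    try:
        a + [4*cm, 5*g, 6*cm]         # mixed units cannot be coerced
    except YTIterableUnitCoercionError as err:
        print(err)   # "Received a list or tuple of quantities with
                     #  nonuniform units: ..."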

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -1,5 +1,6 @@
 ### Auto-generated colormap tables, taken from Matplotlib ###
 
+import numpy as np
 from numpy import array
 color_map_luts = {}
 
@@ -7757,6 +7758,64 @@
          1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
 
+color_map_luts["doom"] = (
+array([
+   0,  31,  23,  75, 255,  27,  19,  11,   7,  47,  35,  23,  15,  79,  71,  63,
+ 255, 247, 243, 235, 231, 223, 219, 211, 203, 199, 191, 187, 179, 175, 167, 163,
+ 155, 151, 143, 139, 131, 127, 119, 115, 107, 103,  95,  91,  83,  79,  71,  67,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 247, 239, 231, 223, 215, 207, 203,
+ 191, 179, 171, 163, 155, 143, 135, 127, 119, 107,  95,  83,  75,  63,  51,  43,
+ 239, 231, 223, 219, 211, 203, 199, 191, 183, 179, 171, 167, 159, 151, 147, 139,
+ 131, 127, 119, 111, 107,  99,  91,  87,  79,  71,  67,  59,  55,  47,  39,  35,
+ 119, 111, 103,  95,  91,  83,  75,  67,  63,  55,  47,  39,  31,  23,  19,  11,
+ 191, 183, 175, 167, 159, 155, 147, 139, 131, 123, 119, 111, 103,  95,  87,  83,
+ 159, 143, 131, 119, 103,  91,  79,  67, 123, 111, 103,  91,  83,  71,  63,  55,
+ 255, 235, 215, 195, 175, 155, 135, 115, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 239, 227, 215, 203, 191, 179, 167, 155, 139, 127, 115, 103,  91,  79,  67,
+ 231, 199, 171, 143, 115,  83,  55,  27,   0,   0,   0,   0,   0,   0,   0,   0,
+ 255, 255, 255, 255, 255, 255, 255, 255, 243, 235, 223, 215, 203, 195, 183, 175,
+ 255, 255, 255, 255, 255, 255, 255, 255, 167, 159, 147, 135,  79,  67,  55,  47,
+   0,   0,   0,   0,   0,   0,   0,   0, 255, 255, 255, 255, 207, 159, 111,
+   167]) / 255.0,
+array([
+   0,  23,  15,  75, 255,  27,  19,  11,   7,  55,  43,  31,  23,  59,  51,  43,
+ 183, 171, 163, 151, 143, 135, 123, 115, 107,  99,  91,  87,  79,  71,  63,  59,
+  51,  47,  43,  35,  31,  27,  23,  19,  15,  11,   7,   7,   7,   0,   0,   0,
+ 235, 227, 219, 211, 207, 199, 191, 187, 179, 171, 163, 155, 147, 139, 131, 127,
+ 123, 115, 111, 107,  99,  95,  87,  83,  79,  71,  67,  63,  55,  47,  43,  35,
+ 239, 231, 223, 219, 211, 203, 199, 191, 183, 179, 171, 167, 159, 151, 147, 139,
+ 131, 127, 119, 111, 107,  99,  91,  87,  79,  71,  67,  59,  55,  47,  39,  35,
+ 255, 239, 223, 207, 191, 175, 159, 147, 131, 115,  99,  83,  67,  51,  35,  23,
+ 167, 159, 151, 143, 135, 127, 123, 115, 107,  99,  95,  87,  83,  75,  67,  63,
+ 131, 119, 107,  95,  83,  71,  59,  51, 127, 115, 107,  99,  87,  79,  71,  63,
+ 255, 219, 187, 155, 123,  91,  67,  43, 255, 219, 187, 155, 123,  95,  63,  31,
+   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+ 231, 199, 171, 143, 115,  83,  55,  27,   0,   0,   0,   0,   0,   0,   0,   0,
+ 255, 235, 215, 199, 179, 163, 143, 127, 115, 111, 103,  95,  87,  79,  71,  67,
+ 255, 255, 255, 255, 255, 255, 255, 255,  63,  55,  47,  35,  59,  47,  35,  27,
+   0,   0,   0,   0,   0,   0,   0,   0, 159, 231, 123,   0,   0,   0,   0,
+   107]) / 255.0,
+array([
+   0,  11,   7,  75, 255,  27,  19,  11,   7,  31,  15,   7,   0,  43,  35,  27,
+ 183, 171, 163, 151, 143, 135, 123, 115, 107,  99,  91,  87,  79,  71,  63,  59,
+  51,  47,  43,  35,  31,  27,  23,  19,  15,  11,   7,   7,   7,   0,   0,   0,
+ 223, 211, 199, 187, 179, 167, 155, 147, 131, 123, 115, 107,  99,  91,  83,  79,
+  75,  71,  67,  63,  59,  55,  51,  47,  43,  39,  35,  31,  27,  23,  19,  15,
+ 239, 231, 223, 219, 211, 203, 199, 191, 183, 179, 171, 167, 159, 151, 147, 139,
+ 131, 127, 119, 111, 107,  99,  91,  87,  79,  71,  67,  59,  55,  47,  39,  35,
+ 111, 103,  95,  87,  79,  71,  63,  55,  47,  43,  35,  27,  23,  15,  11,   7,
+ 143, 135, 127, 119, 111, 107,  99,  91,  87,  79,  75,  67,  63,  55,  51,  47,
+  99,  83,  75,  63,  51,  43,  35,  27,  99,  87,  79,  71,  59,  51,  43,  39,
+ 115,  87,  67,  47,  31,  19,   7,   0, 255, 219, 187, 155, 123,  95,  63,  31,
+   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 227, 203, 179, 155, 131, 107,  83,
+ 255, 219, 187, 155, 123,  91,  59,  27,  23,  15,  15,  11,   7,   0,   0,   0,
+ 255, 215, 179, 143, 107,  71,  35,   0,   0,   0,   0,   0,  39,  27,  19,  11,
+  83,  71,  59,  47,  35,  23,  11,   0,  67,  75, 255, 255, 207, 155, 107,
+  107]) / 255.0,
+np.ones(256),
+)
+
 color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
 color_map_luts['BLUE'] = color_map_luts['idl01']
 color_map_luts['GRN-RED-BLU-WHT'] = color_map_luts['idl02']

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -83,9 +83,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':izip(_vs,_kamae_red,_kamae_red),
-         'green':izip(_vs,_kamae_grn,_kamae_grn),
-         'blue':izip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
+         'green':zip(_vs,_kamae_grn,_kamae_grn),
+         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map
@@ -145,12 +145,12 @@
 add_cmap("cubehelix", _cubehelix_data)
 
 # Add colormaps in _colormap_data.py that weren't defined here
-_vs = np.linspace(0,1,255)
+_vs = np.linspace(0,1,256)
 for k,v in _cm.color_map_luts.iteritems():
     if k not in yt_colormaps and k not in mcm.cmap_d:
-        cdict = { 'red': izip(_vs,v[0],v[0]),
-                  'green': izip(_vs,v[1],v[1]),
-                  'blue': izip(_vs,v[2],v[2]) }
+        cdict = { 'red': zip(_vs,v[0],v[0]),
+                  'green': zip(_vs,v[1],v[1]),
+                  'blue': zip(_vs,v[2],v[2]) }
         add_cmap(k, cdict)
 
 def _extract_lookup_table(cmap_name):
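
itertools.izip returns a one-shot iterator, but Matplotlib's
LinearSegmentedColormap needs a reusable sequence of (x, y0, y1) anchor
tuples, hence the switch to zip (a real list on Python 2); _vs also grows to
256 samples so each LUT entry gets an anchor. The shape Matplotlib expects,
sketched with a stand-in gray ramp:

    import numpy as np
    from matplotlib.colors import LinearSegmentedColormap

    lut = np.linspace(0.0, 1.0, 256)       # stand-in channel values
    _vs = np.linspace(0, 1, 256)           # one anchor per LUT entry

    cdict = {'red':   list(zip(_vs, lut, lut)),  # list() for Python 3 parity
             'green': list(zip(_vs, lut, lut)),
             'blue':  list(zip(_vs, lut, lut))}
    cmap = LinearSegmentedColormap('gray_ramp', cdict, 256)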

diff -r 71e6f828c6526d0409da920598da4a34dd036999 -r a60d6c6efe78b6da733c34b0cd83123a57269118 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -30,10 +30,10 @@
 
 from yt.funcs import \
     defaultdict, get_image_suffix, \
-    get_ipython_api_version, iterable
+    get_ipython_api_version, iterable, \
+    ensure_list
 from yt.utilities.exceptions import \
     YTNotInsideNotebook
-from ._mpl_imports import FigureCanvasAgg
 
 def invalidate_data(f):
     @wraps(f)
@@ -241,7 +241,7 @@
         if field is 'all':
             fields = self.plots.keys()
         else:
-            fields = [field]
+            fields = ensure_list(field)
         for field in self.data_source._determine_fields(fields):
             myzmin = zmin
             myzmax = zmax
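
Swapping the bare [field] for ensure_list(field) lets this method accept
either a single field or a list of fields. A simplified sketch of the helper's
contract (the real yt.funcs.ensure_list also special-cases None and a few
other types):

    def ensure_list(obj):
        # Wrap scalars in a list; pass sequences through as lists.
        if isinstance(obj, (list, tuple)):
            return list(obj)
        return [obj]

    print(ensure_list("density"))                    # ['density']
    print(ensure_list(["density", "temperature"]))   # unchanged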

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/9ebb03f4dcfb/
Changeset:   9ebb03f4dcfb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-19 16:31:26
Summary:     Merging.
Affected #:  469 files

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -7,6 +7,7 @@
 rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -2,15 +2,21 @@
 
 Contributors:   
                 Tom Abel (tabel at stanford.edu)
-                David Collins (dcollins at physics.ucsd.edu)
+                Gabriel Altay (gabriel.altay at gmail.com)
+                Kenza Arraki (karraki at gmail.com)
+                Alex Bogert (fbogert at ucsc.edu)
+                David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
+                Miguel de Val-Borro (miguel.deval at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
                 John Forces (jforbes at ucolick.org)
+                Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
@@ -21,18 +27,23 @@
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
                 Chris Moody (cemoody at ucsc.edu)
+                Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
                 Jill Naiman (jnaiman at ucolick.org)
+                Desika Narayanan (dnarayan at haverford.edu)
                 Kaylea Nelson (kaylea.nelson at yale.edu)
                 Jeff Oishi (jsoishi at gmail.com)
+                Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
+                John Regan (john.regan at helsinki.fi)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
-                Devin Silvia (devin.silvia at colorado.edu)
+                Pat Shriwise (shriwise at wisc.edu)
+                Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
                 Britton Smith (brittonsmith at gmail.com)
@@ -42,8 +53,10 @@
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)
+                Michael S. Warren (mswarren at gmail.com)
                 Andrew Wetzel (andrew.wetzel at yale.edu)
                 John Wise (jwise at physics.gatech.edu)
+                Michael Zingale (michael.zingale at stonybrook.edu)
                 John ZuHone (jzuhone at gmail.com)
 
 Several items included in the yt/extern directory were written by other

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,4 +12,3 @@
 prune tests
 graft yt/gui/reason/html/resources
 exclude clean.sh .hgchurn
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -208,38 +208,38 @@
 After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
-\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{ds = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
 \texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field} into the
 numpy array \texttt{a}. Similarly for other data containers.\\
-\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
-\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
-\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal},with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
+ \texttt{bl = ds.boolean({\it constructor})} \textemdash\ Create a boolean data
  container. {\it constructor} is a list of pre-defined non-boolean 
  data containers with nested boolean logic using the
  ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
  {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
  by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
  
-\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields \& Quantities}
@@ -261,15 +261,15 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+\texttt{slc = SlicePlot(ds, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
 perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
 {\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
 \texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
-\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = OffAxisSlicePlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
+\texttt{prj = OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -365,8 +365,8 @@
 \subsection{FAQ}
 \settowidth{\MyLen}{\texttt{multicol}}
 
-\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
-Must enter \texttt{pf.h} before this command. \\
+\texttt{ds.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
+Must enter \texttt{ds.index} before this command. \\
 
 
 %\rule{0.3\linewidth}{0.25pt}

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -49,7 +49,7 @@
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
- * Parameter files are a last resort.
+ * Parameter files external to yt are a last resort.
  * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
@@ -61,7 +61,7 @@
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
      parameters are now properties on a Dataset subclass: you access them
-     like pf.refine_by .
+     like ds.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality
      * TopGridDimensions => domain_dimensions

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -73,7 +73,7 @@
     Examples
     --------
     These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
     a plot collection, 'c' for a center, and 'L' for a vector. 
 
     >>> a=[1,2,3]

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -19,7 +19,7 @@
 useful variable names that correspond to specific instances that the user is
 presupposed to have created.
 
-   * `pf`: a parameter file, loaded successfully
+   * `ds`: a dataset, loaded successfully
    * `sp`: a sphere
    * `c`: a 3-component "center"
    * `L`: a 3-component vector that corresponds to either angular momentum or a

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/helper_scripts/parse_cb_list.py
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/visualizing/_cb_docstrings.inc", "w")
 

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/helper_scripts/parse_dq_list.py
--- a/doc/helper_scripts/parse_dq_list.py
+++ b/doc/helper_scripts/parse_dq_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_dq_docstrings.inc", "w")
 
@@ -29,7 +29,7 @@
                             docstring = docstring))
                             #docstring = "\n".join(tw.wrap(docstring))))
 
-dd = pf.h.all_data()
+dd = ds.all_data()
 for n,func in sorted(dd.quantities.functions.items()):
     print n, func
     write_docstring(output, n, func[1])

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/helper_scripts/parse_object_list.py
--- a/doc/helper_scripts/parse_object_list.py
+++ b/doc/helper_scripts/parse_object_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_obj_docstrings.inc", "w")
 
@@ -27,7 +27,7 @@
     f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
                             docstring = 'physical-object-api'))
 
-for n,c in sorted(pf.h.__dict__.items()):
+for n,c in sorted(ds.__dict__.items()):
     if hasattr(c, '_con_args'):
         print n
         write_docstring(output, n, c)

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,15 +17,15 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
+Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
 .. notebook-cell::
 
   from yt.mods import *
-  pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.field_list):
+  ds = load("Enzo_64/DD0043/data0043")
+  for i in sorted(ds.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -567,8 +567,10 @@
 
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
-echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-get_ytdata xray_emissivity.h5
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  cloudy_emissivity.h5' > cloudy_emissivity.h5.sha512
+[ ! -e cloudy_emissivity.h5 ] && get_ytdata cloudy_emissivity.h5
+echo '0f714ae2eace0141b1381abf1160dc8f8a521335e886f99919caf3beb31df1fe271d67c7b2a804b1467949eb16b0ef87a3d53abad0e8160fccac1e90d8d9e85f  apec_emissivity.h5' > apec_emissivity.h5.sha512
+[ ! -e apec_emissivity.h5 ] && get_ytdata apec_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
@@ -608,7 +610,6 @@
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
@@ -624,7 +625,6 @@
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
 echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
@@ -657,7 +657,6 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
-get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -816,6 +815,7 @@
         YT_DIR=`dirname $ORIG_PWD`
     elif [ ! -e yt-hg ]
     then
+        echo "Cloning yt"
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
@@ -824,9 +824,9 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-3.0-hg ] 
+    elif [ -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
@@ -943,14 +943,19 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar/done ]
+    if [ ! -e rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
-        cd Rockstar
+        if [ ! -e rockstar ]
+        then
+            ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+        fi
+        cd rockstar
+        ( hg pull 2>&1 ) 1>> ${LOG_FILE}
+        ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ b/doc/source/analyzing/_obj_docstrings.inc
@@ -1,12 +1,12 @@
 
 
-.. class:: boolean(self, regions, fields=None, pf=None, **field_parameters):
+.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
 
 
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, pf=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
+.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
@@ -24,13 +24,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
 
 
-.. class:: disk(self, center, normal, radius, height, fields=None, pf=None, **field_parameters):
+.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
 
 
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, pf=None, **field_parameters):
+.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
@@ -48,79 +48,79 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
 
 
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, pf=None, **field_parameters):
+.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
 
 
-.. class:: grid_collection(self, center, grid_list, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
 
 
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
 
 
-.. class:: inclined_box(self, origin, box_vectors, fields=None, pf=None, **field_parameters):
+.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
 
 
-.. class:: ortho_ray(self, axis, coords, fields=None, pf=None, **field_parameters):
+.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
 
 
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
+.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
 
 
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
 
 
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
 
 
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
+.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
 
 
-.. class:: ray(self, start_point, end_point, fields=None, pf=None, **field_parameters):
+.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
 
 
-.. class:: region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
 
 
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
 
 
-.. class:: slice(self, axis, coord, fields=None, center=None, pf=None, node_name=False, **field_parameters):
+.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
@@ -132,13 +132,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
 
 
-.. class:: sphere(self, center, radius, fields=None, pf=None, **field_parameters):
+.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
 
 
-.. class:: streamline(self, positions, length=1.0, fields=None, pf=None, **field_parameters):
+.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -44,7 +44,7 @@
       "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -62,7 +62,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+      "halos_ds = load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -80,7 +80,7 @@
      "collapsed": false,
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
       "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -295,9 +295,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -407,4 +407,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
+  "signature": "sha256:ba8b6a53571695ae1d0c236ad43875823746e979a329a9d35ab0a8b899cebbba"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "from yt.analysis_modules.ppv_cube.api import PPVCube"
      ],
      "language": "python",
      "metadata": {},
@@ -222,7 +222,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"cube.fits\")"
+      "ds = load(\"cube.fits\")"
      ],
      "language": "python",
      "metadata": {},
@@ -233,7 +233,7 @@
      "collapsed": false,
      "input": [
       "# Specifying no center gives us the center slice\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -246,9 +246,9 @@
      "input": [
       "import yt.units as u\n",
       "# Picking different velocities for the slices\n",
-      "new_center = pf.domain_center\n",
-      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -259,8 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -271,8 +271,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -290,7 +290,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -303,4 +303,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
+  "signature": "sha256:4745a15abb6512547b50280b92c22567f89255189fd968ca706ef7c39d48024f"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -91,7 +91,7 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import SZProjection\n",
+      "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
       "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -35,7 +35,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import AbsorptionSpectrum
+  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
 

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -84,8 +84,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=True, opening_angle=2.0)
@@ -97,8 +97,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=False)

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,8 +58,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(pf)
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters
@@ -69,8 +69,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  haloes = LoadHaloes(pf, 'MyHaloList')
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
 object "haloes"; you can then loop over the list of haloes and do
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.ellipsoid(ell_param[0],
+  ell = ds.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -8,6 +8,7 @@
    :maxdepth: 1
 
    halo_catalogs
+   halo_transition
    halo_finding
    halo_mass_function
    halo_analysis_example

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,9 +7,11 @@
 together into a single framework. This framework is substantially
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
+For a direct translation of various halo analysis tasks from yt-2.x
+to yt-3.0, please see :ref:`halo_transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
-catalog through data_pf. These halos can be found using friends-of-friends,
+catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
 use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
 details on the relative differences between these halo finders see 
@@ -19,32 +21,32 @@
 
    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
 HOP finders. Even though rockstar creates one file per processor, 
 specifying any one file allows the full catalog to be loaded. Here we 
 only specify the file output by the processor with ID 0. Note that the 
-argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+argument for supplying a rockstar output is ``halos_ds``, not ``data_ds``.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
 any new information about the identified halos. To associate the halos 
 with the dataset from which they were found, supply arguments to both 
-halos_pf and data_pf.
+halos_ds and data_ds.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -215,8 +217,8 @@
 
 .. code-block:: python
 
-   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-   hc = HaloCatalog(halos_pf=hpf,
+   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_finders.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -0,0 +1,192 @@
+.. _halo_finding:
+
+Halo Finding
+============
+
+There are three methods of finding particle haloes in yt. The 
+recommended and default method is called HOP, a method described 
+in `Eisenstein and Hut (1998) 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
+friends-of-friends (e.g. `Efstathiou et al. (1985) 
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
+finder is also implemented. Finally, Rockstar (`Behroozi et al. 
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
+a 6D phase-space halo finder developed by Peter Behroozi that 
+excels in finding subhalos and substructure, but does not allow 
+multiple particle masses.
+
+HOP
+---
+
+The version of HOP used in yt is an upgraded version of the 
+`publicly available HOP code 
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
+for 64-bit floats and integers has been added, as well as 
+parallel analysis through spatial decomposition. HOP builds 
+groups in this fashion:
+
+  1. Estimates the local density at each particle using a 
+     smoothing kernel.
+  2. Builds chains of linked particles by 'hopping' from one 
+     particle to its densest neighbor. A particle which is 
+     its own densest neighbor is the end of the chain.
+  3. All chains that share the same densest particle are 
+     grouped together.
+  4. Groups are included, linked together, or discarded 
+     depending on the user-supplied overdensity
+     threshold parameter. The default is 160.0.
+
+Please see the `HOP method paper 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
+full details.
+
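+As a sketch, HOP can be run through the halo catalog machinery (the 
+dataset path is illustrative, and ``threshold`` is assumed to map to 
+the overdensity parameter described above):
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+  ds = load("Enzo_64/RD0006/RedshiftOutput0006")
+  # 'threshold' is HOP's overdensity threshold (default 160.0)
+  hc = HaloCatalog(data_ds=ds, finder_method='hop',
+                   finder_kwargs={'threshold': 160.0})
+  hc.create()
+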
+.. warning:: The FoF halo finder in yt is not thoroughly tested! 
+    It is probably fine to use, but you are strongly encouraged 
+    to check your results against the data for errors.
+
+Rockstar Halo Finding
+---------------------
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
+groups in six phase-space dimensions and one time dimension, which 
+allows for robust (grid-independent, shape-independent, and 
+noise-resilient) tracking of substructure. The code is prepackaged with yt, 
+but also `separately available <http://code.google.com/p/rockstar>`_. The lead 
+developer is Peter Behroozi, and the methods are described in `Behroozi
+et al. 2011 <http://rockstar.googlecode.com/files/rockstar_ap101911.pdf>`_. 
+
+.. note:: At the moment, Rockstar does not support multiple particle masses, 
+  instead using a fixed particle mass. This will not affect most dark matter 
+  simulations, but it does make Rockstar less useful for finding halos based 
+  on stellar mass. In simulations where the highest-resolution particles all 
+  have the same mass (i.e. zoom-in, grid-based simulations), one can set up 
+  a particle filter to select the lowest-mass particles and perform the halo 
+  finding only on those, as sketched below.
+
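+A minimal sketch of such a filter is given below (the filter name, the 
+dataset path, and the high-resolution particle mass of 1e7 Msun are 
+illustrative assumptions):
+
+.. code-block:: python
+
+  from yt.mods import *
+  import yt.units as u
+  from yt.data_objects.particle_filters import add_particle_filter
+
+  def highres_dm(pfilter, data):
+      # assumption: the high-resolution DM particles have a known mass
+      # (1e7 Msun here, illustrative); the 1% tolerance absorbs round-off
+      return data[(pfilter.filtered_type, "particle_mass")] <= 1.01e7 * u.Msun
+
+  add_particle_filter("highres_dm", function=highres_dm,
+                      filtered_type="all", requires=["particle_mass"])
+
+  ds = load("Enzo_64/RD0006/RedshiftOutput0006")
+  ds.add_particle_filter("highres_dm")
+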
+To run Rockstar halo finding, you must launch Python with MPI and 
+parallelization enabled. While Rockstar itself does not require MPI to run, 
+the MPI libraries allow yt to distribute particle information across multiple 
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+A driver script such as the ``run_rockstar.py`` above configures the halo 
+finder and launches a server process, which disseminates run information 
+and coordinates the reader-writer processes. Afterwards, it launches reader 
+and writer tasks, filling the available MPI slots, which alternately read 
+particle information and analyze for halo content.
+
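+A minimal sketch of what such a driver script might contain is shown below 
+(the dataset path is illustrative):
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+  # launched as, e.g.:
+  #   mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel
+  ds = load("Enzo_64/RD0006/RedshiftOutput0006")
+  hc = HaloCatalog(data_ds=ds, finder_method='rockstar')
+  hc.create()
+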
+The ``RockstarHaloFinder`` class has the following options, which can be 
+supplied to the halo catalog through the ``finder_kwargs`` argument (see 
+the sketch after this list):
+
+  * ``dm_type``, the index of the dark matter particle. Default is 1. 
+  * ``outbase``, this is where the out*list files that Rockstar makes should 
+    be placed. Default is 'rockstar_halos'.
+  * ``num_readers``, the number of reader tasks (which are idle most of the 
+    time). Default is 1.
+  * ``num_writers``, the number of writer tasks (which are fed particles and
+    do most of the analysis). Default is MPI_TASKS-num_readers-1. 
+    If left undefined, the above options are automatically 
+    configured from the number of available MPI tasks.
+  * ``force_res``, the resolution that Rockstar uses for various calculations
+    and smoothing lengths. This is in units of Mpc/h.
+    If no value is provided, this parameter is automatically set to
+    the width of the smallest grid element in the simulation from the
+    last data snapshot (i.e. the one where time has evolved the
+    longest) in the time series:
+    ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  * ``total_particles``, if supplied, this is a pre-calculated
+    total number of dark matter
+    particles present in the simulation. For example, this is useful
+    when analyzing a series of snapshots where the number of dark
+    matter particles should not change and this will save some disk
+    access time. If left unspecified, it will
+    be calculated automatically. Default: ``None``.
+  * ``dm_only``, if set to ``True``, it will be assumed that there are
+    only dark matter particles present in the simulation.
+    This option does not modify the halos found by Rockstar, however
+    this option can save disk access time if there are no star particles
+    (or other non-dark matter particles) in the simulation. Default: ``False``.
+
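+Continuing the sketch above, these options might be supplied as follows 
+(the values are illustrative and depend on your MPI layout):
+
+.. code-block:: python
+
+  hc = HaloCatalog(data_ds=ds, finder_method='rockstar',
+                   finder_kwargs={'num_readers': 1,
+                                  'num_writers': 22,
+                                  'outbase': 'rockstar_halos'})
+  hc.create()
+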
+Rockstar dumps halo information in a series of text (halo*list and 
+out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
+We use the halo list classes to recover the information. 
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+Parallel HOP and FOF
+--------------------
+
+Both the HOP and FoF halo finders can run in parallel using simple 
+spatial decomposition. In order to run them in parallel it is helpful 
+to understand how it works. Below in the first plot (i) is a simplified 
+depiction of three haloes labeled 1, 2, and 3:
+
+.. image:: _images/ParallelHaloFinder.png
+   :width: 500
+
+Halo 3 is twice reflected around the periodic boundary conditions.
+
+In (ii), the volume has been sub-divided into four equal subregions, 
+A,B,C and D, shown with dotted lines. Notice that halo 2 is now in 
+two different subregions, C and D, and that halo 3 is now in three, 
+A, B and D. If the halo finder is run on these four separate subregions,
+halo 1 will be identified as a single halo, but haloes 2 and 3 are split 
+up into multiple haloes, which is incorrect. The solution is to give 
+each subregion padding to oversample into neighboring regions.
+
+In (iii), subregion C has oversampled into the other three regions, 
+with the periodic boundary conditions taken into account, shown by 
+dot-dashed lines. The other subregions oversample in a similar way.
+
+The halo finder is then run on each padded subregion independently 
+and simultaneously. By oversampling like this, haloes 2 and 3 will 
+both be enclosed fully in at least one subregion and identified 
+completely.
+
+Haloes identified with centers of mass inside the padded part of a 
+subregion are thrown out, eliminating the problem of halo duplication. 
+The centers for the three haloes are shown with stars. Halo 1 will
+belong to subregion A, 2 to C and 3 to B.
+
+To run with parallel halo finding, you must supply a value for 
+padding in the ``finder_kwargs`` argument. The ``padding`` parameter 
+is in simulation units and defaults to 0.02. It sets how much 
+padding is added to each of the six sides of a subregion. 
+This value should be 2x-3x larger than the largest expected halo 
+in the simulation. It is unlikely, of course, that the largest 
+object in the simulation will be on a subregion boundary, but there 
+is no way of knowing before the halo finder is run.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import *
+  ds = load("data0001")
+  hc = HaloCatalog(data_ds=ds, finder_method='hop',
+                   finder_kwargs={'padding': 0.02})
+  # --or--
+  hc = HaloCatalog(data_ds=ds, finder_method='fof',
+                   finder_kwargs={'padding': 0.02})
+
+
+In general, a little bit of padding goes a long way, and too much 
+just slows down the analysis and doesn't improve the answer (but 
+doesn't change it).  It may be worth your time to run the parallel 
+halo finder at a few paddings to find the right amount, especially 
+if you're analyzing many similar datasets.
+
+Rockstar Installation
+---------------------
+
+The Rockstar code is slightly patched and modified to run as a library 
+inside of yt. By default it will be built with yt using the ``install_script.sh``.
+If it wasn't installed, please make sure that the installation setting
+``INST_ROCKSTAR=1`` is defined in the ``install_script.sh`` and re-run
+the installation script.

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -60,8 +60,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", num_sigma_bins=200,
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", num_sigma_bins=200,
   mass_column=5)
 
 Attached to ``hmf`` is the convenience function ``write_out``, which saves
@@ -102,8 +102,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", 
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4)
   hmf.write_out(prefix='hmf')

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_profiling.rst
--- a/doc/source/analyzing/analysis_modules/halo_profiling.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-.. _halo_profiling:
-
-Halo Profiling
-==============
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>,
-   Stephen Skory <s at skory.us>
-
-The ``HaloProfiler`` provides a means of performing analysis on multiple halos 
-in a parallel-safe way.
-
-The halo profiler performs three primary functions: radial profiles, 
-projections, and custom analysis.  See the cookbook for a recipe demonstrating 
-all of these features.
-
-Configuring the Halo Profiler
------------------------------
-
-The only argument required to create a ``HaloProfiler`` object is the path 
-to the dataset.
-
-.. code-block:: python
-
-  from yt.analysis_modules.halo_profiler.api import *
-  hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046")
-
-Most of the halo profiler's options are configured with additional keyword 
-arguments:
-
- * **output_dir** (*str*): if specified, all output will be put into this path
-   instead of in the dataset directories.  Default: None.
-
- * **halos** (*str*): "multiple" for profiling more than one halo.  In this mode
-   halos are read in from a list or identified with a
-   `halo finder <../cookbook/running_halofinder.html>`_.  In "single" mode, the
-   one and only halo center is identified automatically as the location of the
-   peak in the density field.  Default: "multiple".
-
- * **halo_list_file** (*str*): name of file containing the list of halos.
-   The halo profiler will look for this file in the data directory.
-   Default: "HopAnalysis.out".
-
- * **halo_list_format** (*str* or *dict*): the format of the halo list file.
-   "yt_hop" for the format given by yt's halo finders.  "enzo_hop" for the
-   format written by enzo_hop.  This keyword can also be given in the form of a
-   dictionary specifying the column in which various properties can be found.
-   For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
-   Default: "yt_hop".
-
- * **halo_finder_function** (*function*): If halos is set to multiple and the
-   file given by halo_list_file does not exit, the halo finding function
-   specified here will be called.  Default: HaloFinder (yt_hop).
-
- * **halo_finder_args** (*tuple*): args given with call to halo finder function.
-   Default: None.
-
- * **halo_finder_kwargs** (*dict*): kwargs given with call to halo finder
-   function. Default: None.
-
- * **recenter** (*string* or function name): The name of a function
-   that will be used to move the center of the halo for the purposes of
-   analysis. See explanation and examples, below. Default: None, which
-   is equivalent to the center of mass of the halo as output by the halo
-   finder.
-
- * **halo_radius** (*float*): if no halo radii are provided in the halo list
-   file, this parameter is used to specify the radius out to which radial
-   profiles will be made.  This keyword is also used when halos is set to
-   single.  Default: 0.1.
-
- * **radius_units** (*str*): the units of **halo_radius**. 
-   Default: "1" (code units).
-
- * **n_profile_bins** (*int*): the number of bins in the radial profiles.
-   Default: 50.
-
- * **profile_output_dir** (*str*): the subdirectory, inside the data directory,
-   in which radial profile output files will be created.  The directory will be
-   created if it does not exist.  Default: "radial_profiles".
-
- * **projection_output_dir** (*str*): the subdirectory, inside the data
-   directory, in which projection output files will be created.  The directory
-   will be created if it does not exist.  Default: "projections".
-
- * **projection_width** (*float*): the width of halo projections.
-   Default: 8.0.
-
- * **projection_width_units** (*str*): the units of projection_width.
-   Default: "mpc".
-
- * **project_at_level** (*int* or "max"): the maximum refinement level to be
-   included in projections.  Default: "max" (maximum level within the dataset).
-
- * **velocity_center** (*list*): the method in which the halo bulk velocity is
-   calculated (used for calculation of radial and tangential velocities.  Valid
-   options are:
-   - ["bulk", "halo"] (Default): the velocity provided in the halo list
-   - ["bulk", "sphere"]: the bulk velocity of the sphere centered on the halo center.
-   - ["max", field]: the velocity of the cell that is the location of the maximum of the field specified.
-
- * **filter_quantities** (*list*): quantities from the original halo list
-   file to be written out in the filtered list file.  Default: ['id','center'].
-
- * **use_critical_density** (*bool*): if True, the definition of overdensity 
-     for virial quantities is calculated with respect to the critical 
-     density.  If False, overdensity is with respect to mean matter density, 
-     which is lower by a factor of Omega_M.  Default: False.
-
-Profiles
---------
-
-Once the halo profiler object has been instantiated, fields can be added for 
-profiling with the :meth:`add_profile` method:
-
-.. code-block:: python
-
-  hp.add_profile('cell_volume', weight_field=None, accumulation=True)
-  hp.add_profile('TotalMassMsun', weight_field=None, accumulation=True)
-  hp.add_profile('density', weight_field=None, accumulation=False)
-  hp.add_profile('temperature', weight_field='cell_mass', accumulation=False)
-  hp.make_profiles(njobs=-1, prefilters=["halo['mass'] > 1e13"],
-                   filename='VirialQuantities.h5')
-
-The :meth:`make_profiles` method will begin the profiling.  Use the
-**njobs** keyword to control the number of jobs over which the
-profiling is divided.  Setting to -1 results in a single processor per
-halo.  Setting to 1 results in all available processors working on the
-same halo.  The prefilters keyword tells the profiler to skip all halos with 
-masses (as loaded from the halo finder) less than a given amount.  See below 
-for more information.  Additional keyword arguments are:
-
- * **filename** (*str*): If set, a file will be written with all of the 
-   filtered halos and the quantities returned by the filter functions.
-   Default: None.
-
- * **prefilters** (*list*): A single dataset can contain thousands or tens of 
-   thousands of halos. Significant time can be saved by not profiling halos
-   that are certain to not pass any filter functions in place.  Simple filters 
-   based on quantities provided in the initial halo list can be used to filter 
-   out unwanted halos using this parameter.  Default: None.
-
- * **njobs** (*int*): The number of jobs over which to split the profiling.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
- * **profile_format** (*str*): The file format for the radial profiles, 
-   'ascii' or 'hdf5'.  Default: 'ascii'.
-
-.. image:: _images/profiles.png
-   :width: 500
-
-Radial profiles of Overdensity (left) and Temperature (right) for five halos.
-
-Projections
------------
-
-The process of making projections is similar to that of profiles:
-
-.. code-block:: python
-
-  hp.add_projection('density', weight_field=None)
-  hp.add_projection('temperature', weight_field='density')
-  hp.add_projection('metallicity', weight_field='density')
-  hp.make_projections(axes=[0, 1, 2], save_cube=True, save_images=True, 
-                      halo_list="filtered", njobs=-1)
-
-If **save_cube** is set to True, the projection data
-will be written to a set of hdf5 files 
-in the directory given by **projection_output_dir**. 
-The keyword, **halo_list**, can be 
-used to select between the full list of halos ("all"),
-the filtered list ("filtered"), or 
-an entirely new list given in the form of a file name.
-See :ref:`filter_functions` for a 
-discussion of filtering halos.  Use the **njobs** keyword to control
-the number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.  The keyword arguments are:
-
- * **axes** (*list*): A list of the axes to project along, using the usual 
-   0,1,2 convention. Default=[0,1,2].
-
- * **halo_list** (*str*) {'filtered', 'all'}: Which set of halos to make 
-   profiles of, either ones passed by the halo filters (if enabled/added), or 
-   all halos.  Default='filtered'.
-
- * **save_images** (*bool*): Whether or not to save images of the projections. 
-   Default=False.
-
- * **save_cube** (*bool*): Whether or not to save the HDF5 files of the halo 
-   projections.  Default=True.
-
- * **njobs** (*int*): The number of jobs over which to split the projections.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
-.. image:: _images/projections.png
-   :width: 500
-
-Projections of Density (top) and Temperature,
-weighted by Density (bottom), in the x (left), 
-y (middle), and z (right) directions for a single halo with a width of 8 Mpc.
-
-Halo Filters
-------------
-
-Filters can be added to create a refined list of
-halos based on their profiles or to avoid 
-profiling halos altogether based on information
-given in the halo list file.
-
-.. _filter_functions:
-
-Filter Functions
-^^^^^^^^^^^^^^^^
-
-It is often the case that one is looking to
-identify halos with a specific set of 
-properties.  This can be accomplished through the creation
-of filter functions.  A filter 
-function can take as many args and kwargs as you like,
-as long as the first argument is a 
-profile object, or at least a dictionary which contains
-the profile arrays for each field.  
-Filter functions must return a list of two things.
-The first is a True or False indicating 
-whether the halo passed the filter. 
-The second is a dictionary containing quantities 
-calculated for that halo that will be written to a
-file if the halo passes the filter.
-A  sample filter function based on virial quantities can be found in 
-``yt/analysis_modules/halo_profiler/halo_filters.py``.
-
-Halo filtering takes place during the call to :meth:`make_profiles`.
-The  :meth:`add_halo_filter` method is used to add a filter to be used
-during the profiling:
-
-.. code-block:: python
-
-  hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True, 
-                     overdensity_field='ActualOverdensity', 
-		     virial_overdensity=200, 
-		     virial_filters=[['TotalMassMsun','>=','1e14']],
-		     virial_quantities=['TotalMassMsun','RadiusMpc'],
-		     use_log=True)
-
-The addition above will calculate and return virial quantities,
-mass and radius, for an 
-overdensity of 200.  In order to pass the filter, at least one
-point in the profile must be 
-above the specified overdensity and the virial mass must be at
-least 1e14 solar masses.  The **use_log** keyword indicates that interpolation 
-should be done in log space.  If 
-the VirialFilter function has been added to the filter list,
-the halo profiler will make 
-sure that the fields necessary for calculating virial quantities are added.
-As  many filters as desired can be added.  If filters have been added,
-the next call to :meth:`make_profiles` will filter by all of
-the added filter functions:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out")
-
-If the **filename** keyword is set, a file will be written with all of the 
-filtered halos and the quantities returned by the filter functions.
-
-.. note:: If the profiles have already been run, the halo profiler will read
-   in the previously created output files instead of re-running the profiles.
-   The halo profiler will check to make sure the output file contains all of
-   the requested halo fields.  If not, the profile will be made again from
-   scratch.
-
-.. _halo_profiler_pre_filters:
-
-Pre-filters
-^^^^^^^^^^^
-
-A single dataset can contain thousands or tens of thousands of halos.
-Significant time can 
-be saved by not profiling halos that are certain to not pass any filter
-functions in place.  
-Simple filters based on quantities provided in the initial halo list
-can be used to filter 
-out unwanted halos using the **prefilters** keyword:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out",
-		   prefilters=["halo['mass'] > 1e13"])
-
-Arguments provided with the **prefilters** keyword should be given
-as a list of strings.  
-Each string in the list will be evaluated with an *eval*.
-
-.. note:: If a VirialFilter function has been added with a filter based
-   on mass (as in the example above), a prefilter will be automatically
-   added to filter out halos with masses greater or less than (depending
-   on the conditional of the filter) a factor of ten of the specified
-   virial mass.
-
-Recentering the Halo For Analysis
----------------------------------
-
-It is possible to move the center of the halo to a new point using an
-arbitrary function for making profiles.
-By default, the center is provided by the halo finder,
-which outputs the center of mass of the particles. For the purposes of
-analysis, it may be important to recenter onto a gas density maximum,
-or a temperature minimum.
-
-There are a number of built-in functions to do this, listed below.
-Each of the functions uses mass-weighted fields for the calculations
-of new center points.
-To use
-them, supply the HaloProfiler with the ``recenter`` option and 
-the name of the function, as in the example below.
-
-.. code-block:: python
-
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter="Max_Dark_Matter_Density")
-
-Additional options are:
-
-  * *Min_Dark_Matter_Density* - Recenter on the point of minimum dark matter
-    density in the halo.
-
-  * *Max_Dark_Matter_Density* - Recenter on the point of maximum dark matter
-    density in the halo.
-
-  * *CoM_Dark_Matter_Density* - Recenter on the center of mass of the dark
-    matter density field. This will be very similar to what the halo finder
-    provides, but not precisely similar.
-
-  * *Min_Gas_Density* - Recenter on the point of minimum gas density in the
-    halo.
-
-  * *Max_Gas_Density* - Recenter on the point of maximum gas density in the
-    halo.
-
-  * *CoM_Gas_Density* - Recenter on the center of mass of the gas density field
-    in the halo.
-
-  * *Min_Total_Density* - Recenter on the point of minimum total (gas + dark
-    matter) density in the halo.
-
-  * *Max_Total_Density* - Recenter on the point of maximum total density in the
-    halo.
-
-  * *CoM_Total_Density* - Recenter on the center of mass for the total density
-    in the halo.
-
-  * *Min_Temperature* - Recenter on the point of minimum temperature in the
-    halo.
-
-  * *Max_Temperature* - Recenter on the point of maximum temperature in the
-    halo.
-
-It is also possible to supply a user-defined function to the HaloProfiler.
-This can be used if the pre-defined functions above are not sufficient.
-The function takes a single argument, a data container for the halo,
-which is a sphere. The function returns a 3-list with the new center.
-
-In this example below, a function is used such that the halos will be
-re-centered on the point of absolute minimum temperature, that is not
-mass weighted.
-
-.. code-block:: python
-
-   from yt.mods import *
-   
-   def find_min_temp(sphere):
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", recenter=find_min_temp)
-
-It is possible to make more complicated functions. This example below extends
-the example above to include a distance control that prevents the center from
-being moved too far. If the recenter moves too far, ``[-1, -1, -1]`` is
-returned which will prevent the halo from being profiled.
-Any triplet of values less than the ``domain_left_edge`` will suffice.
-There will be a note made in the output (stderr) showing which halos were
-skipped.
-
-.. code-block:: python
-
-   from yt.mods import *
-   from yt.utilities.math_utils import periodic_dist
-   
-   def find_min_temp_dist(sphere):
-       old = sphere.center
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       d = sphere.pf['kpc'] * periodic_dist(old, [mx, my, mz],
-           sphere.pf.domain_right_edge - sphere.pf.domain_left_edge)
-       # If new center farther than 5 kpc away, don't recenter
-       if d > 5.: return [-1, -1, -1]
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter=find_min_temp_dist)
-
-Custom Halo Analysis
---------------------
-
-Besides radial profiles and projections, the halo profiler has the
-ability to run custom analysis functions on each halo.  Custom halo
-analysis functions take two arguments: a halo dictionary containing
-the id, center, etc; and a sphere object.  The example function shown
-below creates a 2D profile of the total mass in bins of density and
-temperature for a given halo.
-
-.. code-block:: python
-
-   from yt.mods import *
-   from yt.data_objects.profiles import BinnedProfile2D
-
-   def halo_2D_profile(halo, sphere):
-       "Make a 2D profile for a halo."
-       my_profile = BinnedProfile2D(sphere,
-             128, 'density', 1e-30, 1e-24, True,
-             128, 'temperature', 1e2, 1e7, True,
-             end_collect=False)
-       my_profile.add_fields('cell_mass', weight=None, fractional=False)
-       my_filename = os.path.join(sphere.pf.fullpath, '2D_profiles', 
-             'Halo_%04d.h5' % halo['id'])
-       my_profile.write_out_h5(my_filename)
-
-Using the  :meth:`analyze_halo_spheres` function, the halo profiler
-will create a sphere centered on each halo, and perform the analysis
-from the custom routine.
-
-.. code-block:: python
-
-    hp.analyze_halo_sphere(halo_2D_profile, halo_list='filtered',
-                           analysis_output_dir='2D_profiles', 
-                           njobs=-1, dynamic=False)
-
-Just like with the :meth:`make_projections` function, the keyword,
-**halo_list**, can be used to select between the full list of halos
-("all"), the filtered list ("filtered"), or an entirely new list given
-in the form of a file name.  If the **analysis_output_dir** keyword is
-set, the halo profiler will make sure the desired directory exists in
-a parallel-safe manner.  Use the **njobs** keyword to control the
-number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/halo_transition.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -0,0 +1,106 @@
+.. _halo_transition:
+
+Getting up to Speed with Halo Analysis in yt-3.0
+================================================
+
+If you're used to halo analysis in yt-2.x, here's a guide to
+updating your analysis pipeline to take advantage of
+the new halo catalog infrastructure. 
+
+Finding Halos
+-------------
+
+Previously, halos were found using calls to ``HaloFinder``, 
+``FOFHaloFinder`` and ``RockstarHaloFinder``. Now you are 
+encouraged to find the halos upon creation of the halo catalog 
+by supplying a value to the ``finder_method`` keyword when calling
+``HaloCatalog``. Currently, only halos found using Rockstar or a 
+previous instance of a halo catalog can be loaded 
+using the ``halos_ds`` keyword.
+
+To pass additional arguments to the halo finders 
+themselves, supply a dictionary to ``finder_kwargs`` where
+each key in the dictionary is a keyword of the halo finder
+and the corresponding value is the value to be passed for
+that keyword.
+
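+For instance (a sketch reusing the options shown in :ref:`halo_finding`), 
+the ``padding`` used by the parallel HOP and FoF finders can be passed 
+through ``finder_kwargs``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={'padding': 0.02})
+   hc.create()
+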
+Getting Halo Information
+------------------------
+
+All quantities that used to be present in a ``halo_list`` can
+still be obtained, but they are not necessarily included by default.
+Every halo will by default have the following properties:
+
+* particle_position_i (where i can be x, y, or z)
+* particle_mass
+* virial_radius
+* particle_identifier
+
+If other quantities are desired, they can be included by adding
+the corresponding quantity before the catalog is created. See
+the full halo catalog documentation for further information about
+how to add these quantities and what quantities are available.
+
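+As a sketch (the quantity name here is illustrative; consult the full halo 
+catalog documentation for the registered quantities):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   # 'center_of_mass' is an illustrative quantity name
+   hc.add_quantity("center_of_mass")
+   hc.create()
+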
+You no longer have to iterate over halos in the ``halo_list``.
+Now a halo dataset can be treated as a regular dataset and 
+all quantities are available by accessing ``all_data``.
+Specifically, all quantities can be accessed as shown:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+   ad = hc.all_data()
+   masses = ad['particle_mass'][:]
+
+
+Prefiltering Halos
+------------------
+
+Prefiltering halos before analysis takes place is now done
+by adding a filter before the call to ``create``. An example
+is shown below:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
+   hc.create()
+
+Profiling Halos
+---------------
+
+The halo profiler available in yt-2.x has been removed, and
+profiling functionality is now completely contained within the
+halo catalog. A complete example of how to profile halos by 
+radius using the new infrastructure is given in 
+:ref:`halo_analysis_example`. 
+
+Plotting Halos
+--------------
+
+Annotating halo locations onto a slice or projection works in 
+the same way as in yt-2.x, but now a halo catalog must be
+passed to the ``annotate_halos`` call rather than a halo list.
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+
+   prj = ProjectionPlot(data_ds, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
+
+Written Data
+------------
+
+Data is now written out in the form of HDF5 (.h5) files rather than
+text files. The directory to which they are written is 
+controlled by the keyword ``output_dir``. Each quantity
+is a field in the file.
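+
+A written catalog can itself be reloaded and attached to a new 
+``HaloCatalog`` (a sketch; the path follows from ``output_dir``):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   # reload the catalog written above
+   halos_ds = load('halo_catalog/halo_catalog.0.h5')
+   hc = HaloCatalog(halos_ds=halos_ds, output_dir='halo_catalog')
+   hc.load()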

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ b/doc/source/analyzing/analysis_modules/hmf_howto.rst
@@ -27,8 +27,8 @@
 .. code-block:: python
 
   from yt.mods import *
-  pf = load("data0001")
-  halo_list = HaloFinder(pf)
+  ds = load("data0001")
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
 
 The only important columns of data in the text file ``HopAnalysis.out``
@@ -79,8 +79,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0001")
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  ds = load("data0001")
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')
@@ -107,9 +107,9 @@
   from yt.analysis_modules.halo_mass_function.api import *
   
   # If desired, start loop here.
-  pf = load("data0001")
+  ds = load("data0001")
   
-  halo_list = HaloFinder(pf)
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
   
   hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
@@ -120,7 +120,7 @@
                 virial_quantities=['TotalMassMsun','RadiusMpc'])
   hp.make_profiles(filename="VirialHaloes.out")
   
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')

diff -r a60d6c6efe78b6da733c34b0cd83123a57269118 -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -2,15 +2,15 @@
 
 Light Cone Generator
 ====================
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>
 
-Light cones are projections made by stacking multiple datasets together to 
-continuously span a given redshift interval.  The width of individual 
-projection slices is adjusted such that each slice has the same angular size.  
-Each projection slice is randomly shifted and projected along a random axis to 
-ensure that the same structures are not sampled multiple times.  Since deeper 
-images sample earlier epochs of the simulation, light cones represent the 
-closest thing to synthetic imaging observations.
+Light cones are created by stacking multiple datasets together to 
+continuously span a given redshift interval.  To make a projection of a 
+field through a light cone, the width of individual slices is adjusted 
+such that each slice has the same angular size.  
+Each slice is randomly shifted and projected along a random axis to 
+ensure that the same structures are not sampled multiple times.  A 
+recipe for creating a simple light cone projection can be found in 
+the cookbook under :ref:`cookbook-light_cone`.
 
 .. image:: _images/LightCone_full_small.png
    :width: 500
@@ -23,49 +23,44 @@
 Configuring the Light Cone Generator
 ------------------------------------
 
-A recipe for creating a simple light cone projection can be found in the 
-cookbook.  The required arguments to instantiate a ``LightCone`` objects are 
+The required arguments to instantiate a ``LightCone`` object are 
 the path to the simulation parameter file, the simulation type, the nearest 
 redshift, and the furthest redshift of the light cone.
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import LightCone
+  from yt.analysis_modules.cosmological_observation.api import \
+       LightCone
 
   lc = LightCone('enzo_tiny_cosmology/32Mpc_32.enzo',
                  'Enzo', 0., 0.1)
 
 The additional keyword arguments are:
 
- * **field_of_view_in_arcminutes** (*float*): The field of view of the image 
-   in units of arcminutes.  Default: 600.0.
-
- * **image_resolution_in_arcseconds** (*float*): The size of each image pixel 
-   in units of arcseconds.  Default: 60.0.
-
- * **use_minimum_datasets** (*bool*):  If True, the minimum number of datasets 
-   is used to connect the initial and final redshift.  If false, the light 
-   cone solution will contain as many entries as possible within the redshift 
-   interval.  Default: True.
+ * **use_minimum_datasets** (*bool*):  If True, the minimum number of 
+   datasets is used to connect the initial and final redshift.  If False, 
+   the light cone solution will contain as many entries as possible within 
+   the redshift interval.  Default: True.
 
  * **deltaz_min** (*float*): Specifies the minimum Delta-z between 
    consecutive datasets in the returned list.  Default: 0.0.
 
- * **minimum_coherent_box_fraction** (*float*): Used with use_minimum_datasets 
-   set to False, this parameter specifies the fraction of the total box size 
-   to be traversed before rerandomizing the projection axis and center.  This 
-   was invented to allow light cones with thin slices to sample coherent large 
-   scale structure, but in practice does not work so well.  Try setting this 
-   parameter to 1 and see what happens.  Default: 0.0.
+ * **minimum_coherent_box_fraction** (*float*): Used with 
+   **use_minimum_datasets** set to False, this parameter specifies the 
+   fraction of the total box size to be traversed before rerandomizing the 
+   projection axis and center.  This was invented to allow light cones with 
+   thin slices to sample coherent large scale structure, but in practice does 
+   not work so well.  Try setting this parameter to 1 and see what happens.  
+   Default: 0.0.
 
  * **time_data** (*bool*): Whether or not to include time outputs when 
    gathering datasets for time series.  Default: True.
 
- * **redshift_data** (*bool*): Whether or not to include redshift outputs when 
-   gathering datasets for time series.  Default: True.
+ * **redshift_data** (*bool*): Whether or not to include redshift outputs 
+   when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
-   pf.parameters.  Default: None.
+   ds.parameters.  Default: None.
 
  * **output_dir** (*string*): The directory in which images and data files
     will be written.  Default: 'LC'.
@@ -76,10 +71,10 @@
 Creating Light Cone Solutions
 -----------------------------
 
-A light cone solution consists of a list of datasets and the width, depth, 
-center, and axis of the projection to be made for that slice.  The 
-:meth:`LightCone.calculate_light_cone_solution` function is used to 
-calculate the random shifting and projection axis:
+A light cone solution consists of a list of datasets spanning a redshift 
+interval with a random orientation for each dataset.  A new solution 
+is calculated with the :meth:`LightCone.calculate_light_cone_solution` 
+function:
 
 .. code-block:: python
 
@@ -87,70 +82,39 @@
 
 The keyword arguments (see the sketch after this list) are:
 
- * **seed** (*int*): the seed for the random number generator.  Any light cone 
-   solution can be reproduced by giving the same random seed.  Default: None 
-   (each solution will be distinct).
+ * **seed** (*int*): the seed for the random number generator.  Any light 
+   cone solution can be reproduced by giving the same random seed.  
+   Default: None.
 
  * **filename** (*str*): if given, a text file detailing the solution will be 
    written out.  Default: None.
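
Both keywords together, as a short sketch (the seed and filename are 
arbitrary illustrative values):

.. code-block:: python

  # A fixed seed makes the solution reproducible; the text file
  # records the chosen shifts and projection axes.
  lc.calculate_light_cone_solution(seed=123456789,
                                   filename='lightcone.dat')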
 
-If a new solution for the same LightCone object is desired, the 
-:meth:`rerandomize_light_cone_solution` method should be called in place of 
-:meth:`calculate_light_cone_solution`:
-
-.. code-block:: python
-
-  new_seed = 987654321
-  lc.rerandomize_light_cone_solution(new_seed, Recycle=True, 
-                                     filename='new_lightcone.dat')
-
-Additional keyword arguments are:
-
- * **recycle** (*bool*): if True, the new solution will have the same shift in 
-   the line of sight as the original solution.  Since the projections of each 
-   slice are serialized and stored for the entire width of the box (even if 
-   the width used is left than the total box), the projection data can be 
-   deserialized instead of being remade from scratch.  This can greatly speed 
-   up the creation of a large number of light cone projections.  Default: True.
-
- * **filename** (*str*): if given, a text file detailing the solution will be 
-   written out.  Default: None.
-
-If :meth:`rerandomize_light_cone_solution` is used, the LightCone object will 
-keep a copy of the original solution that can be returned to at any time by 
-calling :meth:`restore_master_solution`:
-
-.. code-block:: python
-
-  lc.restore_master_solution()
-
-.. note:: All light cone solutions made with the above method will still use 
-   the same list of datasets.  Only the shifting and projection axis will be 
-   different.
-
 Making a Light Cone Projection
 ------------------------------
 
-With the light cone solution set, projections can be made of any available 
-field:
+With the light cone solution in place, projections with a given field of 
+view and resolution can be made of any available field:
 
 .. code-block:: python
 
   field = 'density'
-  lc.project_light_cone(field , weight_field=None, 
+  field_of_view = (600.0, "arcmin")
+  resolution = (60.0, "arcsec")
+  lc.project_light_cone(field_of_view, resolution,
+                        field, weight_field=None, 
                         save_stack=True, 
                         save_slice_images=True)
 
+The field of view and resolution can be specified either as a tuple of 
+value and unit string or as a unitful ``YTQuantity``; a fully unitful 
+variant of this call is sketched after the keyword list below.  
 Additional keyword arguments:
 
- * **weight_field** (*str*): the weight field of the projection.  This has the 
-   same meaning as in standard projections.  Default: None.
+ * **weight_field** (*str*): the weight field of the projection.  This has 
+   the same meaning as in standard projections.  Default: None.
 
- * **apply_halo_mask** (*bool*): if True, a boolean mask is apply to the light 
-   cone projection.  See below for a description of halo masks.  Default: False.
-
- * **node** (*str*): a prefix to be prepended to the node name under which the 
-   projection data is serialized.  Default: None.
+ * **photon_field** (*bool*): if True, the projection data for each slice is 
+   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
+   distance between the observer and the slice redshift.  Default: False.
 
 * **save_stack** (*bool*): if True, the unflattened light cone data including 
    each individual slice is written to an hdf5 file.  Default: True.
@@ -161,13 +125,7 @@
  * **save_slice_images** (*bool*): save images for each individual projection 
    slice.  Default: False.
 
- * **flatten_stack** (*bool*): if True, the light cone stack is continually 
-   flattened each time a slice is added in order to save memory.  This is 
-   generally not necessary.  Default: False.
-
- * **photon_field** (*bool*): if True, the projection data for each slice is 
-   decremented by 4 pi R :superscript:`2` , where R is the luminosity 
-   distance between the observer and the slice redshift.  Default: False.
+ * **cmap_name** (*string*): color map for images.  Default: "algae".
 
  * **njobs** (*int*): The number of parallel jobs over which the light cone 
    projection will be split.  Choose -1 for one processor per individual
@@ -177,34 +135,4 @@
  * **dynamic** (*bool*): If True, use dynamic load balancing to create the 
    projections.  Default: False.
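
A minimal sketch of the same call with unitful quantities (assuming 
``YTQuantity`` is importable from ``yt.units.yt_array``, as in yt-3.0):

.. code-block:: python

  from yt.units.yt_array import YTQuantity

  field_of_view = YTQuantity(600.0, "arcmin")
  resolution = YTQuantity(60.0, "arcsec")
  lc.project_light_cone(field_of_view, resolution, 'density',
                        weight_field=None, save_stack=True,
                        save_slice_images=True)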
 
-Sampling Unique Light Cone Volumes
-----------------------------------
-
-When making a large number of light cones, particularly for statistical 
-analysis, it is important to have a handle on the amount of sampled volume in 
-common from one projection to another.  Any statistics may untrustworthy if a 
-set of light cones have too much volume in common, even if they may all be 
-entirely different in appearance.  LightCone objects have the ability to 
-calculate the volume in common between two solutions with the same dataset 
-ist.  The :meth:`find_unique_solutions` and 
-:meth:`project_unique_light_cones` functions can be used to create a set of 
-light cone solutions that have some maximum volume in common and create light 
-cone projections for those solutions.  If specified, the code will attempt to 
-use recycled solutions that can use the same serialized projection objects 
-that have already been created.  This can greatly increase the speed of making 
-multiple light cone projections.  See the cookbook for an example of doing this.
-
-Making Light Cones with a Halo Mask
------------------------------------
-
-The situation may arise where it is necessary or desirable to know the 
-location of halos within the light cone volume, and specifically their 
-location in the final image.  This can be useful for developing algorithms to 
-find galaxies or clusters in image data.  The light cone generator does this 
-by running the HaloProfiler (see :ref:`halo_profiling`) on each of the 
-datasets used in the light cone and shifting them accordingly with the light 
-cone solution.  The ability also exists to create a boolean mask with the 
-dimensions of the final light cone image that can be used to mask out the 
-halos in the image.  It is left as an exercise to the reader to find a use for 
-this functionality.  This process is somewhat complicated, but not terribly.  
-See the recipe in the cookbook for an example of this functionality.
+.. note:: As of :code:`yt-3.0`, the halo mask and unique light cone 
+   functionality no longer exist.  These are still available in 
+   :code:`yt-2.x`.  If you would like to use these features in 
+   :code:`yt-3.x`, help is needed to port them over.  Contact the 
+   yt-users mailing list if you are interested in doing this.
\ No newline at end of file

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0134396f650a/
Changeset:   0134396f650a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-19 23:59:06
Summary:     Two fixes to ensure that units are correctly set in smoothed fields.
Affected #:  3 files

diff -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca -r 0134396f650a50d32ffb1d01732cf50c3287e2a4 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -78,8 +78,8 @@
             if (ptype, f) not in self.field_list:
                 continue
             self.add_output_field((ptype, f),
-                units = units, particle_type = True, display_name = dn,
-                output_units = output_units)
+                units = units, particle_type = True,
+                display_name = dn, output_units = output_units)
             for alias in aliases:
                 self.alias((ptype, alias), (ptype, f), units = output_units)
 

diff -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca -r 0134396f650a50d32ffb1d01732cf50c3287e2a4 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -508,7 +508,7 @@
         pos = data[ptype, coord_name].in_units("code_length")
         mass = data[ptype, mass_name].in_cgs()
         dens = data[ptype, density_name].in_cgs()
-        quan = data[ptype, smoothed_field]
+        quan = data[ptype, smoothed_field].in_units(field_units)
         if smoothing_length_name is None:
             hsml = np.zeros(quan.shape, dtype='float64') - 1
             hsml = data.apply_units(hsml, "code_length")
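
The ``in_units`` call above is the crux of the fix: smoothing combines 
raw array values, so the smoothed quantity must first be converted to 
the field's declared units.  A standalone illustration (not yt's 
smoothing code; values are arbitrary):

    import numpy as np
    from yt.units.yt_array import YTArray

    quan = YTArray(np.array([1.0, 2.0]), "km")
    # Converting before any raw-value arithmetic keeps the numbers
    # consistent with other data expressed in "cm".
    quan_cm = quan.in_units("cm")
    assert np.allclose(quan_cm.d, quan.d * 1.0e5)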

diff -r 9ebb03f4dcfb5fc2246fb0df6982cf8ba7ce9eca -r 0134396f650a50d32ffb1d01732cf50c3287e2a4 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -69,10 +69,13 @@
         ("smoothing_length", ("code_length", [], None)),
         ("density", ("code_mass/code_length**3", [], None)),
     )
-        
+
     def setup_fluid_fields(self):
         for field in self.ds.stream_handler.field_units:
             units = self.ds.stream_handler.field_units[field]
             if units != '': self.add_output_field(field, units=units)
 
-        
+    def add_output_field(self, name, **kwargs):
+        if name in self.ds.stream_handler.field_units:
+            kwargs['units'] = self.ds.stream_handler.field_units[name]
+        super(StreamFieldInfo, self).add_output_field(name, **kwargs)
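
The ``add_output_field`` override above follows a simple pattern: 
re-apply any user-specified units whenever a field is registered, so 
that caller defaults never silently win.  A self-contained sketch of 
the pattern (toy classes, not the actual yt ones):

    class FieldInfo(object):
        def add_output_field(self, name, units=""):
            print("registering %s with units %r" % (name, units))

    class StreamFieldInfo(FieldInfo):
        def __init__(self, field_units):
            # units the user handed to the stream dataset
            self.field_units = field_units

        def add_output_field(self, name, **kwargs):
            # user-specified units take precedence over the default
            if name in self.field_units:
                kwargs['units'] = self.field_units[name]
            super(StreamFieldInfo, self).add_output_field(name, **kwargs)

    fi = StreamFieldInfo({"density": "g/cm**3"})
    fi.add_output_field("density", units="code_mass/code_length**3")
    # -> registering density with units 'g/cm**3'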


https://bitbucket.org/yt_analysis/yt/commits/bcc3145074dd/
Changeset:   bcc3145074dd
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-20 01:03:43
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #915)

Fixes for SPH smoothing in Stream outputs
Affected #:  5 files

diff -r c44a150175c9fb3ee59ad73bda43af21dd2e6c21 -r bcc3145074ddc908d41861917e277371edf62f65 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -78,8 +78,8 @@
             if (ptype, f) not in self.field_list:
                 continue
             self.add_output_field((ptype, f),
-                units = units, particle_type = True, display_name = dn,
-                output_units = output_units)
+                units = units, particle_type = True,
+                display_name = dn, output_units = output_units)
             for alias in aliases:
                 self.alias((ptype, alias), (ptype, f), units = output_units)
 
@@ -133,6 +133,16 @@
         for _, alias_name in self.field_aliases:
             if alias_name in ("particle_position", "particle_velocity"):
                 continue
+            if (ptype, alias_name) not in self: continue
+            fn = add_volume_weighted_smoothed_field(ptype,
+                "particle_position", "particle_mass",
+                sml_name, "density", alias_name, self,
+                num_neighbors)
+            new_aliases.append(((ftype, alias_name), fn[0]))
+        for ptype2, alias_name in self.keys():
+            if ptype2 != ptype: continue
+            if alias_name in ("particle_position", "particle_velocity"):
+                continue
             fn = add_volume_weighted_smoothed_field(ptype,
                 "particle_position", "particle_mass",
                 sml_name, "density", alias_name, self,

diff -r c44a150175c9fb3ee59ad73bda43af21dd2e6c21 -r bcc3145074ddc908d41861917e277371edf62f65 yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -508,7 +508,7 @@
         pos = data[ptype, coord_name].in_units("code_length")
         mass = data[ptype, mass_name].in_cgs()
         dens = data[ptype, density_name].in_cgs()
-        quan = data[ptype, smoothed_field]
+        quan = data[ptype, smoothed_field].in_units(field_units)
         if smoothing_length_name is None:
             hsml = np.zeros(quan.shape, dtype='float64') - 1
             hsml = data.apply_units(hsml, "code_length")

diff -r c44a150175c9fb3ee59ad73bda43af21dd2e6c21 -r bcc3145074ddc908d41861917e277371edf62f65 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -465,7 +465,13 @@
         ds.stream_handler.particle_count[gi] = npart
                                         
 def unitify_data(data):
-    if all([isinstance(val, np.ndarray) for val in data.values()]):
+    if all([hasattr(val, 'units') for val in data.values()]):
+        new_data, field_units = {}, {}
+        for k, v in data.items():
+            field_units[k] = v.units
+            new_data[k] = v.copy().d
+        data = new_data
+    elif all([isinstance(val, np.ndarray) for val in data.values()]):
         field_units = {field:'' for field in data.keys()}
     elif all([(len(val) == 2) for val in data.values()]):
         new_data, field_units = {}, {}
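
With this change, ``unitify_data`` accepts three field-dictionary 
forms when building a stream dataset.  A sketch (the field name and 
values are illustrative):

    import numpy as np
    from yt.units.yt_array import YTArray

    arr = np.random.random(64)

    # 1. bare ndarrays: units default to ''
    data = {"density": arr}
    # 2. (array, unit-string) tuples
    data = {"density": (arr, "g/cm**3")}
    # 3. anything carrying a .units attribute, e.g. a YTArray
    data = {"density": YTArray(arr, "g/cm**3")}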

diff -r c44a150175c9fb3ee59ad73bda43af21dd2e6c21 -r bcc3145074ddc908d41861917e277371edf62f65 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -58,6 +58,7 @@
         ("particle_position_x", ("code_length", [], None)),
         ("particle_position_y", ("code_length", [], None)),
         ("particle_position_z", ("code_length", [], None)),
+        ("particle_velocity", ("code_length/code_time", [], None)),
         ("particle_velocity_x", ("code_length/code_time", [], None)),
         ("particle_velocity_y", ("code_length/code_time", [], None)),
         ("particle_velocity_z", ("code_length/code_time", [], None)),
@@ -65,11 +66,16 @@
         ("particle_gas_density", ("code_mass/code_length**3", [], None)),
         ("particle_gas_temperature", ("K", [], None)),
         ("particle_mass", ("code_mass", [], None)),
+        ("smoothing_length", ("code_length", [], None)),
+        ("density", ("code_mass/code_length**3", [], None)),
     )
-        
+
     def setup_fluid_fields(self):
         for field in self.ds.stream_handler.field_units:
             units = self.ds.stream_handler.field_units[field]
             if units != '': self.add_output_field(field, units=units)
 
-        
+    def add_output_field(self, name, **kwargs):
+        if name in self.ds.stream_handler.field_units:
+            kwargs['units'] = self.ds.stream_handler.field_units[name]
+        super(StreamFieldInfo, self).add_output_field(name, **kwargs)

diff -r c44a150175c9fb3ee59ad73bda43af21dd2e6c21 -r bcc3145074ddc908d41861917e277371edf62f65 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -28,6 +28,7 @@
 class IOHandlerStream(BaseIOHandler):
 
     _dataset_type = "stream"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
@@ -98,6 +99,7 @@
 
     _vector_fields = ("particle_position", "particle_velocity")
     _dataset_type = "stream_particles"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
@@ -182,6 +184,7 @@
 
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _dataset_type = "stream_hexahedral"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields
@@ -211,6 +214,7 @@
 
 class IOHandlerStreamOctree(BaseIOHandler):
     _dataset_type = "stream_octree"
+    _vector_fields = ("particle_velocity", "particle_position")
 
     def __init__(self, ds):
         self.fields = ds.stream_handler.fields

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list