[yt-svn] commit/yt: 10 new changesets

commits-noreply at bitbucket.org
Mon Jun 16 10:46:45 PDT 2014


10 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/1c0ed5320ceb/
Changeset:   1c0ed5320ceb
Branch:      yt-3.0
User:        chummels
Date:        2014-05-18 23:00:09
Summary:     Adding an exception and error message when trying to load yt before "python setup.py" has been run.
Affected #:  2 files

diff -r 036f745796c12e66b4636835800f2b029bbe7b4b -r 1c0ed5320cebbf0a9173b829b4f04bd2f89c70fe yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -95,23 +95,29 @@
     memory_checker, \
     deprecated_class
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.exceptions import YTImportFailure
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
 from yt.units.yt_array import YTArray, YTQuantity
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
+try:
+    from yt.fields.api import \
+        field_plugins, \
+        DerivedField, \
+        FieldDetector, \
+        FieldInfoContainer, \
+        ValidateParameter, \
+        ValidateDataField, \
+        ValidateProperty, \
+        ValidateSpatial, \
+        ValidateGridType, \
+        add_field, \
+        derived_field
+except ImportError:
+    raise YTImportFailure
+
+
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \

diff -r 036f745796c12e66b4636835800f2b029bbe7b4b -r 1c0ed5320cebbf0a9173b829b4f04bd2f89c70fe yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -377,3 +377,10 @@
         r = """Position arrays must be length and shape (N,3).
                But this one has %s and %s.""" % (self.dimensions, self.shape)
         return r
+
+class YTImportFailure(YTException):
+    def __str__(self):
+        return "You cannot import yt because you have not setup your environment for this \n" + \
+               "version of yt.  To resolve this, follow these steps at the command line: \n\n" + \
+               "$ cd $YT_DEST/src/yt-hg \n" + \
+               "$ python setup.py develop \n"
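
For reference, the import guard added in this changeset reduces to the following standalone sketch (YTException is stubbed here so the snippet runs outside yt; the real base class lives in yt/utilities/exceptions.py):

    class YTException(Exception):
        # Stand-in for yt's base exception class.
        pass

    class YTImportFailure(YTException):
        def __str__(self):
            return ("You cannot import yt because you have not set up your "
                    "environment for this version of yt. To resolve this, run:\n"
                    "$ cd $YT_DEST/src/yt-hg\n"
                    "$ python setup.py develop\n")

    try:
        # Any import that touches yt's built extension modules will do here.
        from yt.fields.api import add_field
    except ImportError:
        raise YTImportFailure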


https://bitbucket.org/yt_analysis/yt/commits/4cafede7f67b/
Changeset:   4cafede7f67b
Branch:      yt-3.0
User:        chummels
Date:        2014-05-19 23:11:35
Summary:     Merging.
Affected #:  2 files

diff -r 7a08af110b448af6365b9167f0d3a9db019719aa -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -95,23 +95,29 @@
     memory_checker, \
     deprecated_class
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.exceptions import YTImportFailure
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
 from yt.units.yt_array import YTArray, YTQuantity
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
+try:
+    from yt.fields.api import \
+        field_plugins, \
+        DerivedField, \
+        FieldDetector, \
+        FieldInfoContainer, \
+        ValidateParameter, \
+        ValidateDataField, \
+        ValidateProperty, \
+        ValidateSpatial, \
+        ValidateGridType, \
+        add_field, \
+        derived_field
+except ImportError:
+    raise YTImportFailure
+
+
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \

diff -r 7a08af110b448af6365b9167f0d3a9db019719aa -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -377,3 +377,10 @@
         r = """Position arrays must be length and shape (N,3).
                But this one has %s and %s.""" % (self.dimensions, self.shape)
         return r
+
+class YTImportFailure(YTException):
+    def __str__(self):
+        return "You cannot import yt because you have not setup your environment for this \n" + \
+               "version of yt.  To resolve this, follow these steps at the command line: \n\n" + \
+               "$ cd $YT_DEST/src/yt-hg \n" + \
+               "$ python setup.py develop \n"


https://bitbucket.org/yt_analysis/yt/commits/3ffe6d5372f8/
Changeset:   3ffe6d5372f8
Branch:      yt-3.0
User:        chummels
Date:        2014-05-31 19:13:25
Summary:     Merging.
Affected #:  30 files

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:3f810954006851303837edb8fd85ee6583a883122b0f4867903562546c4f19d2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,18 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +56,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,11 +97,31 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
       "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
@@ -146,7 +188,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +222,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "pf = load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +244,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = pf.domain_center\n",
+      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +270,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:7fc053480ba7896bfa5905bd69f7b3dd326364fbab324975b76f79640f2e0adf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -93,10 +93,10 @@
       "from yt.mods import *\n",
       "from yt.analysis_modules.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +108,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +127,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +144,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,8 +216,16 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -36,20 +36,20 @@
 .. code:: python
 
     from yt.mods import *
-    from yt.analysis_modules.api import *
+    from yt.analysis_modules.photon_simulator.api import *
     from yt.utilities.cosmology import Cosmology
 
 We're going to load up an Athena dataset of a galaxy cluster core:
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk", 
-              parameters={"TimeUnits":3.1557e13,
-                          "LengthUnits":3.0856e24,
-                          "DensityUnits":6.770424595218825e-27})
+    ds = load("MHDSloshing/virgo_low_res.0054.vtk",
+              parameters={"time_unit":(1.0,"Myr"),
+                          "length_unit":(1.0,"Mpc"),
+                          "mass_unit":(1.0e14,"Msun")}) 
 
 First, to get a sense of what the resulting image will look like, let's
-make a new ``yt`` field called ``"DensitySquared"``, since the X-ray
+make a new ``yt`` field called ``"density_squared"``, since the X-ray
 emission is proportional to :math:`\rho^2`, and a weak function of
 temperature and metallicity.
 
@@ -57,14 +57,14 @@
 
     def _density_squared(field, data):
         return data["density"]**2
-    add_field("DensitySquared", function=_density_squared)
+    add_field("density_squared", function=_density_squared)
 
 Then we'll project this field along the z-axis.
 
 .. code:: python
 
-    prj = ProjectionPlot(pf, "z", ["DensitySquared"], width=(500., "kpc"))
-    prj.set_cmap("DensitySquared", "gray_r")
+    prj = ProjectionPlot(ds, "z", ["density_squared"], width=(500., "kpc"))
+    prj.set_cmap("density_squared", "gray_r")
     prj.show()
 
 .. image:: _images/dsquared.png
@@ -89,7 +89,7 @@
 
 .. code:: python
 
-    sp = pf.sphere("c", (250., "kpc"))
+    sp = ds.sphere("c", (250., "kpc"))
 
 This will serve as our ``data_source`` that we will use later. Next, we
 need to create the ``SpectralModel`` instance that will determine how
@@ -258,11 +258,6 @@
     events = photons.project_photons(L, exp_time_new=2.0e5, redshift_new=0.07, absorb_model=abs_model,
                                      sky_center=(187.5,12.333), responses=[ARF,RMF])
 
-.. parsed-literal::
-
-    WARNING:yt:This routine has not been tested to work with all RMFs. YMMV.
-
-
 Also, the optional keyword ``psf_sigma`` specifies a Gaussian standard
 deviation to scatter the photon sky positions around with, providing a
 crude representation of a PSF.
@@ -282,17 +277,17 @@
 
 .. code:: python
 
-    {'eobs': array([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
-             9.73534237,  10.21614256]), 
-     'xsky': array([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
-            187.49897546,  187.47307048]), 
-     'ysky': array([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
-            12.33327653,  12.32955225]), 
-     'ypix': array([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
-            129.17278711,  120.11508562]), 
+    {'eobs': YTArray([  0.32086522,   0.32271389,   0.32562708, ...,   8.90600621,
+             9.73534237,  10.21614256]) keV, 
+     'xsky': YTArray([ 187.5177707 ,  187.4887825 ,  187.50733609, ...,  187.5059345 ,
+            187.49897546,  187.47307048]) degree, 
+     'ysky': YTArray([ 12.33519996,  12.3544496 ,  12.32750903, ...,  12.34907707,
+            12.33327653,  12.32955225]) degree, 
+     'ypix': YTArray([ 133.85374195,  180.68583074,  115.14110561, ...,  167.61447493,
+            129.17278711,  120.11508562]) (dimensionless), 
      'PI': array([ 27,  15,  25, ..., 609, 611, 672]), 
-     'xpix': array([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
-            130.93509652,  192.50639633])}
+     'xpix': YTArray([  86.26331108,  155.15934197,  111.06337043, ...,  114.39586907,
+            130.93509652,  192.50639633]) (dimensionless)}
 
 
 We can bin up the events into an image and save it to a FITS file. The
@@ -436,7 +431,7 @@
 
    bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
 
-   pf = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
+   ds = load_uniform_grid(data, ddims, 2*R*cm_per_kpc, bbox=bbox)
 
 where for simplicity we have set the velocities to zero, though we
 could have created a realistic velocity field as well. Now, we
@@ -445,7 +440,7 @@
 
 .. code:: python
 
-   sphere = pf.sphere(pf.domain_center, 1.0/pf["mpc"])
+   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -236,7 +236,7 @@
 -------------------------------
 
 Data objects can be cut by their field values using the ``cut_region`` 
-method.  For example, this could be used to compute the total mass within 
+method.  For example, this could be used to compute the total gas mass within
 a certain temperature range, as in the following example.
 
 .. notebook-cell::
@@ -244,11 +244,11 @@
    from yt.mods import *
    ds = load("enzo_tiny_cosmology/DD0046/DD0046")
    ad = ds.all_data()
-   total_mass = ad.quantities.total_mass()
+   total_mass = ad.quantities.total_quantity('cell_mass')
    # now select only gas with 1e5 K < T < 1e7 K.
    new_region = ad.cut_region(['obj["temperature"] > 1e5',
                                'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities.total_mass()
+   cut_mass = new_region.quantities.total_quantity('cell_mass')
    print "The fraction of mass in this temperature range is %f." % \
      (cut_mass / total_mass)
 

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:dbc41f6f836cdeb88a549d85e389d6e4e43d163d8c4c267baea8cce0ebdbf441"
+  "signature": "sha256:40add63976fd633e0542cf7674b166507985aa14685be6b4e4e53bd9a23befc2"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -23,7 +23,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-velocity\" cubes of radio data. "
+      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-spectrum\" cubes of radio data. "
      ]
     },
     {
@@ -82,7 +82,7 @@
      "input": [
       "from yt.frontends.fits.misc import PlotWindowWCS\n",
       "wcs_slc = PlotWindowWCS(slc)\n",
-      "wcs_slc.show()"
+      "wcs_slc[\"intensity\"]"
      ],
      "language": "python",
      "metadata": {},
@@ -109,14 +109,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+      "We can also take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. To pick specific velocity values for slices, we will need to use the dataset's `spec2pixel` method to determine which pixels to slice on:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print ds.domain_left_edge[2], ds.domain_center[2], ds.domain_right_edge[2]"
+      "import yt.units as u\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-250000.*u.m/u.s)"
      ],
      "language": "python",
      "metadata": {},
@@ -126,15 +128,32 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Now, we'll choose a few values for the velocity within this range:"
+      "Now we can use this new center to create a new slice:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -250000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do this a few more times for different values of the velocity:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center[2] = ds.spec2pixel(-100000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -146,21 +165,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -100000.\n",
-      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
-      "slc.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "new_center = ds.domain_center \n",
-      "new_center[2] = -150000.\n",
+      "new_center[2] = ds.spec2pixel(-150000.*u.m/u.s)\n",
       "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
       "slc.show()"
      ],
@@ -179,14 +184,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also make a projection of all the emission along the line of sight:"
+      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], origin=\"native\", proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], proj_style=\"sum\", origin=\"native\")\n",
       "prj.show()"
      ],
      "language": "python",
@@ -197,13 +202,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "We can also look at the slices perpendicular to the other axes, which will show us the structure along the velocity axis:"
      ]
     },
@@ -211,8 +209,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"intensity\"], origin=\"native\", window_size=(8,8))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -223,8 +220,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", \n",
-      "                   aspect=\"auto\", window_size=(8.0,8.0))\n",
+      "slc = yt.SlicePlot(ds, \"y\", [\"intensity\"], origin=\"native\", window_size=(8,8))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -235,7 +231,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "In these cases, we needed to set `aspect=\"auto\"` and explicitly declare a square `window_size` to get a figure that looks good. "
+      "In these cases, we needed to explicitly declare a square `window_size` to get a figure that looks good. "
      ]
     },
     {
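
In plain script form, the velocity-slicing pattern the notebook now uses looks like this (a sketch; the m33_hi.fits cube and the spec2pixel conversion are the ones introduced above):

    import yt
    import yt.units as u

    ds = yt.load("m33_hi.fits", nan_mask=0.0)

    # Convert a line-of-sight velocity to a pixel coordinate along the
    # spectral ("z") axis, then slice there.
    new_center = ds.domain_center
    new_center[2] = ds.spec2pixel(-250000.*u.m/u.s)
    slc = yt.SlicePlot(ds, "z", ["intensity"], center=new_center, origin="native")
    slc.show()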

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -111,6 +111,8 @@
 out with them.  In :ref:`code-style-guide` there is a list of handy tips for
 how to structure and write your code.
 
+.. _mercurial-with-yt:
+
 How to Use Mercurial with yt
 ++++++++++++++++++++++++++++
 
@@ -135,6 +137,8 @@
   * If you run into any troubles, stop by IRC (see :ref:`irc`) or the mailing
     list.
 
+.. _building-yt:
+
 Building yt
 +++++++++++
 
@@ -148,19 +152,31 @@
 
 .. code-block:: bash
 
-   python2.7 setup.py develop
+  $ python2.7 setup.py develop
 
 If you have previously "installed" via ``setup.py install`` you have to
 re-install:
 
 .. code-block:: bash
 
-   python2.7 setup.py install
+  $ python2.7 setup.py install
 
-Only one of these two options is needed.  yt may require you to specify the
-location to libpng and hdf5.  This can be done through files named ``png.cfg``
-and ``hdf5.cfg``.  If you are using the installation script, these will already
-exist.
+Only one of these two options is needed.
+
+If you plan to develop yt on Windows, we recommend using the `MinGW <http://www.mingw.org/>`_ gcc
+compiler that can be installed using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the syntax for the
+setup command is slightly different; you must type:
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 develop
+
+or
+
+.. code-block:: bash
+
+  $ python2.7 setup.py build --compiler=mingw32 install
 
 Making and Sharing Changes
 ++++++++++++++++++++++++++

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -676,8 +676,14 @@
 Additional Options
 ++++++++++++++++++
 
+The following are additional options that may be passed to the ``load`` command when analyzing
+FITS data:
+
+``nan_mask``
+~~~~~~~~~~~~
+
 FITS image data may include ``NaNs``. If you wish to mask this data out,
-you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+you may supply a ``nan_mask`` parameter, which may either be a
 single floating-point number (applies to all fields) or a Python dictionary
 containing different mask values for different fields:
 
@@ -689,9 +695,27 @@
    # passing a dict
    ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
 
+``suppress_astropy_warnings``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
 Generally, AstroPy may generate a lot of warnings about individual FITS
 files, many of which you may want to ignore. If you want to see these
-warnings, set ``suppress_astropy_warnings = False`` in the call to ``load``.
+warnings, set ``suppress_astropy_warnings = False``.
+
+``z_axis_decomp``
+~~~~~~~~~~~~~~~~~
+
+For some applications, decomposing 3D FITS data into grids that span the x-y plane with short
+strides along the z-axis may result in a significant improvement in I/O speed. To enable this feature, set ``z_axis_decomp=True``.
+
+``spectral_factor``
+~~~~~~~~~~~~~~~~~~~
+
+Often, the aspect ratio of 3D spectral cubes can be far from unity. Because yt sets the pixel
+scale as the ``code_length``, certain visualizations (such as volume renderings) may look extended
+or distended in ways that are undesirable. To adjust the width in ``code_length`` of the spectral
+axis, set ``spectral_factor`` equal to a constant which gives the desired scaling, or set it to
+``"auto"`` to make the width the same as the largest axis in the sky plane.
 
 Miscellaneous Tools for Use with FITS Data
 ++++++++++++++++++++++++++++++++++++++++++
@@ -703,7 +727,6 @@
 
   from yt.frontends.fits.misc import setup_counts_fields, PlotWindowWCS, ds9_region
 
-
 ``setup_counts_fields``
 ~~~~~~~~~~~~~~~~~~~~~~~
 

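Taken together, the new FITS load options documented above combine like this (a sketch; the file name comes from the nan_mask example and the option values are illustrative):

    from yt.mods import *

    ds = load("m33_hi.fits",
              nan_mask={"intensity": -1.0, "temperature": 0.0},
              suppress_astropy_warnings=True,
              z_axis_decomp=True,
              spectral_factor="auto")
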
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -14,8 +14,7 @@
 be time-consuming, yt provides an installation script which downloads and builds
 a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
-other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
-supported.
+other Unix-like systems (XSEDE resources, clusters, etc.).
 
 Since the install is fully-isolated, if you get tired of having yt on your 
 system, you can just delete its directory, and yt and all of its dependencies
@@ -83,14 +82,73 @@
 will also need to set ``LD_LIBRARY_PATH`` and ``PYTHONPATH`` to contain 
 ``$YT_DEST/lib`` and ``$YT_DEST/python2.7/site-packages``, respectively.
 
+.. _testing-installation:
+
+Testing Your Installation
+-------------------------
+
+To test to make sure everything is installed properly, try running yt at
+the command line:
+
+.. code-block:: bash
+
+  $ yt --help
+
+If this works, you should get a list of the various command-line options for
+yt, which means you have successfully installed yt.  Congratulations!
+
+If you get an error, follow the instructions it gives you to debug the problem.
+Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you
+figure it out.
+
+If you like, this might be a good time :ref:`to run the test suite <testing>`.
+
+.. _updating-yt:
+
+Updating yt and its dependencies
+--------------------------------
+
+With many active developers, code development sometimes occurs at a furious
+pace in yt.  To make sure you're using the latest version of the code, run
+this command at a command-line:
+
+.. code-block:: bash
+
+  $ yt update
+
+Additionally, if you want to make sure you have the latest dependencies
+associated with yt and update the codebase simultaneously, type this:
+
+.. code-block:: bash
+
+  $ yt update --all
+
+.. _removing-yt:
+
+Removing yt and its dependencies
+--------------------------------
+
+Because yt and its dependencies are installed in an isolated directory when
+you use the script installer, you can easily remove yt and all of its
+dependencies cleanly.  Simply remove the install directory and its
+subdirectories and you're done.  If you *really* had problems with the
+code, this is a last line of defense: remove everything and then fully
+:ref:`re-install <installing-yt>` from the install script again.
+
+.. _alternative-installation:
+
 Alternative Installation Methods
 --------------------------------
 
+.. _pip-installation:
+
+Installing yt Using pip or from Source
+++++++++++++++++++++++++++++++++++++++
+
 If you want to forego the use of the install script, you need to make sure you
 have yt's dependencies installed on your system.  These include: a C compiler,
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
-install yt as:
+``HDF5``, ``python``, ``cython``, ``NumPy``, ``matplotlib``, and ``h5py``. From here,
+you can use ``pip`` (which comes with ``Python``) to install yt as:
 
 .. code-block:: bash
 
@@ -110,67 +168,46 @@
 It will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
 Please refer to ``setuptools`` documentation for the additional options.
 
-Provided that the required dependencies are in a predictable location, yt should
-be able to find them automatically. However, you can manually specify prefix used
-for installation of ``HDF5``, ``Freetype`` and ``libpng`` by using ``hdf5.cfg``,
-``freetype.cfg``, ``png.cfg`` or setting ``HDF5_DIR``, ``FTYPE_DIR``, ``PNG_DIR``
-environmental variables respectively, e.g.
+If you choose this installation method, you do not need to run the activation
+script as it is unnecessary.
+
+.. _anaconda-installation:
+
+Installing yt Using Anaconda
+++++++++++++++++++++++++++++
+
+Perhaps the quickest way to get yt up and running is to install it using the `Anaconda Python
+Distribution <https://store.continuum.io/cshop/anaconda/>`_, which will provide you with a
+easy-to-use environment for installing Python packages. To install a bare-bones Python
+installation with yt, first visit http://repo.continuum.io/miniconda/ and download a recent
+version of the ``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
+system architecture. Next, run the script, e.g.:
 
 .. code-block:: bash
 
-  $ echo '/usr/local' > hdf5.cfg
-  $ export FTYPE_DIR=/opt/freetype
+  $ bash Miniconda-3.3.0-Linux-x86_64.sh
 
-If you choose this installation method, you do not need to run the activation
-script as it is unnecessary.
-
-.. _testing-installation:
-
-Testing Your Installation
--------------------------
-
-To test to make sure everything is installed properly, try running yt at 
-the command line:
+Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
 
 .. code-block:: bash
 
-  $ yt --help
+  $ conda install yt
 
-If this works, you should get a list of the various command-line options for
-yt, which means you have successfully installed yt.  Congratulations!  
+which will install yt along with all of its dependencies.
 
-If you get an error, follow the instructions it gives you to debug the problem.  
-Do not hesitate to :ref:`contact us <asking-for-help>` so we can help you 
-figure it out.
+.. _windows-installation:
 
-.. _updating-yt:
+Installing yt on Windows
+++++++++++++++++++++++++
 
-Updating yt and its dependencies
---------------------------------
+Installation on Microsoft Windows is only supported for Windows XP Service Pack 3 and
+higher (both 32-bit and 64-bit) using Anaconda.
 
-With many active developers, code development sometimes occurs at a furious 
-pace in yt.  To make sure you're using the latest version of the code, run
-this command at a command-line:
+Keeping yt Updated via Mercurial
+++++++++++++++++++++++++++++++++
 
-.. code-block:: bash
+If you want to maintain your yt installation via updates straight from the Bitbucket repository,
+or if you want to do some development on your own, we suggest you check out some of the
+:ref:`development docs <contributing-code>`, especially the sections on :ref:`Mercurial
+<mercurial-with-yt>` and :ref:`building yt from source <building-yt>`.
 
-  $ yt update
-
-Additionally, if you want to make sure you have the latest dependencies 
-associated with yt and update the codebase simultaneously, type this:
-
-.. code-block:: bash
-
-  $ yt update --all
-
-.. _removing-yt:
-
-Removing yt and its dependencies
---------------------------------
-
-Because yt and its dependencies are installed in an isolated directory when
-you use the script installer, you can easily remove yt and all of its 
-dependencies cleanly.  Simply remove the install directory and its 
-subdirectories and you're done.  If you *really* had problems with the
-code, this is a last defense for solving: remove and then fully
-:ref:`re-install <installing-yt>` from the install script again.

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -17,14 +17,6 @@
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.funcs import get_pbar
 
-def create_intensity(vmin, vmax, ifield):
-    def _intensity(field, data):
-        idxs = (data["v_los"] >= vmin) & (data["v_los"] < vmax)
-        f = np.zeros(data[ifield].shape)
-        f[idxs] = data[ifield][idxs]
-        return f
-    return _intensity
-
 def create_vlos(z_hat):
     def _v_los(field, data):
         vz = data["velocity_x"]*z_hat[0] + \
@@ -90,9 +82,11 @@
             self.v_bnd = -vmax, vmax
         else:
             self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
-                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+                          ds.quan(velocity_bounds[1], velocity_bounds[2]))
 
-        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+        self.vmid = 0.5*(self.vbins[1:]+self.vbins[:-1])
+        self.dv = (self.v_bnd[1]-self.v_bnd[0])/self.nv
 
         _vlos = create_vlos(orient.unit_vectors[2])
         ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
@@ -100,11 +94,8 @@
         self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
         pbar = get_pbar("Generating cube.", self.nv)
         for i in xrange(self.nv):
-            v1 = vbins[i]
-            v2 = vbins[i+1]
-            _intensity = create_intensity(v1, v2, field)
-            ds.field_info.add_field(("gas","intensity"),
-                                    function=_intensity, units=self.field_units)
+            _intensity = self._create_intensity(i)
+            ds.add_field(("gas","intensity"), function=_intensity, units=self.field_units)
             prj = off_axis_projection(ds, ds.domain_center, normal, width,
                                       (self.nx, self.ny), "intensity")
             self.data[:,:,i] = prj[:,:]
@@ -145,7 +136,7 @@
 
         dx = length_unit[0]/self.nx
         dy = length_unit[0]/self.ny
-        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units("m/s").value/self.nv
+        dv = self.dv.in_units("m/s").value
 
         if length_unit[1] == "deg":
             dx *= -1.
@@ -162,3 +153,11 @@
         fib[0].header["btype"] = self.field
 
         fib.writeto(filename, clobber=clobber)
+
+    def _create_intensity(self, i):
+        def _intensity(field, data):
+            w = np.abs(data["v_los"]-self.vmid[i])/self.dv
+            w = 1.-w
+            w[w < 0.0] = 0.0
+            return data[self.field]*w
+        return _intensity

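The refactored _create_intensity above swaps the old top-hat channel selection for a triangular weighting centered on each channel midpoint vmid[i] with width dv. A standalone NumPy sketch of that kernel (names mirror the class attributes):

    import numpy as np

    def channel_weight(v_los, vmid_i, dv):
        # Weight is 1 at the channel center and falls linearly to 0 at +/- dv,
        # exactly as in the new _create_intensity closure.
        w = 1. - np.abs(v_los - vmid_i)/dv
        w[w < 0.0] = 0.0
        return w

    v = np.linspace(-2., 2., 9)          # toy line-of-sight velocities
    print(channel_weight(v, 0.0, 1.0))   # 0 for |v| >= dv, 1 at the center
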
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -36,35 +36,30 @@
     pass
 
 vlist = "xyz"
-def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
+def setup_sunyaev_zeldovich_fields(ds):
     def _t_squared(field, data):
         return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
-    registry.add_field(("gas", "t_squared"),
-                       function = _t_squared,
-                       units="g*keV**2/cm**3")
+    ds.add_field(("gas", "t_squared"), function = _t_squared,
+                 units="g*keV**2/cm**3")
     def _beta_perp_squared(field, data):
         return data["gas","density"]*data["gas","velocity_magnitude"]**2/clight/clight - data["gas","beta_par_squared"]
-    registry.add_field(("gas","beta_perp_squared"), 
-                       function = _beta_perp_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_perp_squared"), function = _beta_perp_squared,
+                 units="g/cm**3")
 
     def _beta_par_squared(field, data):
         return data["gas","beta_par"]**2/data["gas","density"]
-    registry.add_field(("gas","beta_par_squared"),
-                       function = _beta_par_squared,
-                       units="g/cm**3")
+    ds.add_field(("gas","beta_par_squared"), function = _beta_par_squared,
+                 units="g/cm**3")
 
     def _t_beta_par(field, data):
         return data["gas","kT"]*data["gas","beta_par"]
-    registry.add_field(("gas","t_beta_par"),
-                       function = _t_beta_par,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_beta_par"), function = _t_beta_par,
+                 units="keV*g/cm**3")
 
     def _t_sz(field, data):
         return data["gas","density"]*data["gas","kT"]
-    registry.add_field(("gas","t_sz"),
-                       function = _t_sz,
-                       units="keV*g/cm**3")
+    ds.add_field(("gas","t_sz"), function = _t_sz,
+                 units="keV*g/cm**3")
 
 def generate_beta_par(L):
     def _beta_par(field, data):
@@ -79,8 +74,8 @@
 
     Parameters
     ----------
-    pf : parameter_file
-        The parameter file.
+    ds : Dataset
+        The dataset
     freqs : array_like
         The frequencies (in GHz) at which to compute the SZ spectral distortion.
     mue : float, optional
@@ -91,15 +86,15 @@
     Examples
     --------
     >>> freqs = [90., 180., 240.]
-    >>> szprj = SZProjection(pf, freqs, high_order=True)
+    >>> szprj = SZProjection(ds, freqs, high_order=True)
     """
-    def __init__(self, pf, freqs, mue=1.143, high_order=False):
+    def __init__(self, ds, freqs, mue=1.143, high_order=False):
 
-        self.pf = pf
-        pf.field_info.load_plugin(setup_sunyaev_zeldovich_fields)
+        self.ds = ds
+        setup_sunyaev_zeldovich_fields(self.ds)
         self.num_freqs = len(freqs)
         self.high_order = high_order
-        self.freqs = pf.arr(freqs, "GHz")
+        self.freqs = ds.arr(freqs, "GHz")
         self.mueinv = 1./mue
         self.xinit = hcgs*self.freqs.in_units("Hz")/(kboltz*Tcmb)
         self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
@@ -132,12 +127,12 @@
         --------
         >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
         """
-        axis = fix_axis(axis, self.pf)
+        axis = fix_axis(axis, self.ds)
 
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -145,8 +140,8 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
-        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.ds.proj("density", axis, center=ctr, data_source=source)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -171,7 +166,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
         r""" Make an off-axis projection of the SZ signal.
@@ -196,15 +191,15 @@
         >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
         """
         if iterable(width):
-            w = self.pf.quan(width[0], width[1]).in_units("code_length").value
+            w = self.ds.quan(width[0], width[1]).in_units("code_length").value
         elif isinstance(width, YTQuantity):
             w = width.in_units("code_length").value
         else:
             w = width
         if center == "c":
-            ctr = self.pf.domain_center
+            ctr = self.ds.domain_center
         elif center == "max":
-            v, ctr = self.pf.h.find_max("density")
+            v, ctr = self.ds.h.find_max("density")
         else:
             ctr = center
 
@@ -213,18 +208,18 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.ds.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
-        dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
-        Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens
-        bpar    = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par")/dens
-        omega1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_squared")/dens
+        dens    = off_axis_projection(self.ds, ctr, L, w, nx, "density")
+        Te      = off_axis_projection(self.ds, ctr, L, w, nx, "t_sz")/dens
+        bpar    = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par")/dens
+        omega1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_squared")/dens
         omega1  = omega1/(Te*Te) - 1.
         if self.high_order:
-            bperp2  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_perp_squared")/dens
-            sigma1  = off_axis_projection(self.pf, ctr, L, w, nx, "t_beta_par")/dens
+            bperp2  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_perp_squared")/dens
+            sigma1  = off_axis_projection(self.ds, ctr, L, w, nx, "t_beta_par")/dens
             sigma1  = sigma1/Te - bpar
-            kappa1  = off_axis_projection(self.pf, ctr, L, w, nx, "beta_par_squared")/dens
+            kappa1  = off_axis_projection(self.ds, ctr, L, w, nx, "beta_par_squared")/dens
             kappa1 -= bpar
         else:
             bperp2 = np.zeros((nx,nx))
@@ -241,7 +236,7 @@
                                 np.array(omega1), np.array(sigma1),
                                 np.array(kappa1), np.array(bperp2))
 
-        self.pf.field_info.pop(("gas","beta_par"))
+        self.ds.field_info.pop(("gas","beta_par"))
 
     def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
 
@@ -278,8 +273,8 @@
 
         for i, field in enumerate(self.freq_fields):
             self.data[field] = I0*self.xinit[i]**3*signal[i,:,:]
-        self.data["Tau"] = self.pf.arr(tau, "dimensionless")
-        self.data["TeSZ"] = self.pf.arr(Te, "keV")
+        self.data["Tau"] = self.ds.arr(tau, "dimensionless")
+        self.data["TeSZ"] = self.ds.arr(Te, "keV")
 
     @parallel_root_only
     def write_fits(self, filename, units="kpc", sky_center=None, sky_scale=None,
@@ -327,7 +322,7 @@
         fib = FITSImageBuffer(self.data, fields=self.data.keys(),
                               center=center, units=units,
                               scale=deltas)
-        fib.update_all_headers("Time", float(self.pf.current_time.in_units(time_units).value))
+        fib.update_all_headers("Time", float(self.ds.current_time.in_units(time_units).value))
         fib.writeto(filename, clobber=clobber)
         
     @parallel_root_only

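The recurring change in this file is moving field registration from pf.field_info.add_field to the dataset-level ds.add_field. A minimal sketch of that pattern (dataset and field definition taken from the code above):

    from yt.mods import *

    ds = load("enzo_tiny_cosmology/DD0046/DD0046")

    def _t_sz(field, data):
        # Density-weighted temperature, as in setup_sunyaev_zeldovich_fields.
        return data["gas", "density"]*data["gas", "kT"]

    # Registered on the dataset itself rather than on a field registry.
    ds.add_field(("gas", "t_sz"), function=_t_sz, units="keV*g/cm**3")
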
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1154,7 +1154,10 @@
         for bin_field in bin_fields:
             bf_units = data_source.pf._get_field_info(bin_field[0],
                                                       bin_field[1]).units
-            field_ex = list(extrema[bin_field[-1]])
+            try:
+                field_ex = list(extrema[bin_field[-1]])
+            except KeyError:
+                field_ex = list(extrema[bin_field])
             if iterable(field_ex[0]):
                 field_ex[0] = data_source.pf.quan(field_ex[0][0], field_ex[0][1])
                 field_ex[0] = field_ex[0].in_units(bf_units)

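The try/except above lets the extrema dictionary be keyed either by a bare field name or by a full (ftype, fname) tuple. A toy illustration of the fallback (values are made up):

    extrema = {("gas", "density"): (1e-30, 1e-24)}
    bin_field = ("gas", "density")
    try:
        field_ex = list(extrema[bin_field[-1]])  # keyed by name, e.g. "density"
    except KeyError:
        field_ex = list(extrema[bin_field])      # keyed by the full field tuple
    print(field_ex)
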
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -24,7 +24,10 @@
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
 from yt.data_objects.derived_quantities import \
     DerivedQuantityCollection
-from yt.utilities.exceptions import YTSphereTooSmall
+from yt.utilities.exceptions import \
+    YTSphereTooSmall, \
+    YTIllDefinedCutRegion, \
+    YTMixedCutRegion
 from yt.utilities.linear_interpolators import TrilinearFieldInterpolator
 from yt.utilities.minimal_representation import \
     MinimalSliceData
@@ -683,6 +686,9 @@
         self.base_object.get_data(fields)
         ind = self._cond_ind
         for field in fields:
+            f = self.base_object[field]
+            if f.shape != ind.shape:
+                raise YTMixedCutRegion(self.conditionals, field)
             self.field_data[field] = self.base_object[field][ind]
 
     @property
@@ -693,6 +699,8 @@
             for cond in self.conditionals:
                 res = eval(cond)
                 if ind is None: ind = res
+                if ind.shape != res.shape:
+                    raise YTIllDefinedCutRegion(self.conditionals)
                 np.logical_and(res, ind, ind)
         return ind
 

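The shape checks above catch conditionals whose boolean masks cannot be combined, e.g. a mix of cell and particle fields. A standalone sketch of the failure mode (a plain ValueError stands in for YTIllDefinedCutRegion):

    import numpy as np

    # Two conditionals that evaluate to masks of different shapes.
    masks = [np.ones(100, dtype=bool), np.ones(80, dtype=bool)]

    ind = None
    for res in masks:
        if ind is None:
            ind = res
        elif ind.shape != res.shape:
            raise ValueError("ill-defined cut region: shapes %s vs %s"
                             % (ind.shape, res.shape))
        else:
            np.logical_and(res, ind, ind)
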
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,8 +55,8 @@
     SphericalCoordinateHandler
 from yt.geometry.geographic_coordinates import \
     GeographicCoordinateHandler
-from yt.geometry.ppv_coordinates import \
-    PPVCoordinateHandler
+from yt.geometry.spec_cube_coordinates import \
+    SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is contant up
@@ -361,8 +361,8 @@
             self.coordinates = SphericalCoordinateHandler(self)
         elif self.geometry == "geographic":
             self.coordinates = GeographicCoordinateHandler(self)
-        elif self.geometry == "ppv":
-            self.coordinates = PPVCoordinateHandler(self)
+        elif self.geometry == "spectral_cube":
+            self.coordinates = SpectralCubeCoordinateHandler(self)
         else:
             raise YTGeometryNotSupported(self.geometry)
 
@@ -519,16 +519,28 @@
 
     def find_max(self, field):
         """
-        Returns (value, center) of location of maximum for a given field.
+        Returns (value, location) of the maximum of a given field.
         """
         mylog.debug("Searching for maximum value of %s", field)
         source = self.all_data()
         max_val, maxi, mx, my, mz = \
-            source.quantities["MaxLocation"](field)
+            source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
         return max_val, np.array([mx, my, mz], dtype="float64")
 
+    def find_min(self, field):
+        """
+        Returns (value, location) of the minimum of a given field.
+        """
+        mylog.debug("Searching for minimum value of %s", field)
+        source = self.all_data()
+        min_val, maxi, mx, my, mz = \
+            source.quantities.min_location(field)
+        mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
+              min_val, mx, my, mz)
+        return min_val, np.array([mx, my, mz], dtype="float64")
+
     # Now all the object related stuff
     def all_data(self, find_max=False):
         if find_max: c = self.find_max("density")[1]

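With the new find_min, locating field extrema is symmetric (a sketch; any loaded dataset works, here the one used elsewhere in these docs):

    from yt.mods import *

    ds = load("enzo_tiny_cosmology/DD0046/DD0046")
    max_val, max_loc = ds.find_max("density")  # (value, location) of the maximum
    min_val, min_loc = ds.find_min("density")  # (value, location) of the minimum
    print(max_loc)
    print(min_loc)
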
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -44,11 +44,15 @@
 
 lon_prefixes = ["X","RA","GLON"]
 lat_prefixes = ["Y","DEC","GLAT"]
-vel_prefixes = ["V","ENER","FREQ","WAV"]
 delimiters = ["*", "/", "-", "^"]
 delimiters += [str(i) for i in xrange(10)]
 regex_pattern = '|'.join(re.escape(_) for _ in delimiters)
 
+spec_names = {"V":"Velocity",
+              "F":"Frequency",
+              "E":"Energy",
+              "W":"Wavelength"}
+
 field_from_unit = {"Jy":"intensity",
                    "K":"temperature"}
 
@@ -136,6 +140,7 @@
         self._file_map = {}
         self._ext_map = {}
         self._scale_map = {}
+        dup_field_index = {}
         # Since FITS header keywords are case-insensitive, we only pick a subset of
         # prefixes, ones that we expect to end up in headers.
         known_units = dict([(unit.lower(),unit) for unit in self.pf.unit_registry.lut])
@@ -162,13 +167,19 @@
                         if fname is None: fname = "image_%d" % (j)
                     if self.pf.num_files > 1 and fname.startswith("image"):
                         fname += "_file_%d" % (i)
+                    if ("fits", fname) in self.field_list:
+                        if fname in dup_field_index:
+                            dup_field_index[fname] += 1
+                        else:
+                            dup_field_index[fname] = 1
+                        mylog.warning("This field has the same name as a previously loaded " +
+                                      "field. Changing the name from %s to %s_%d. To avoid " %
+                                      (fname, fname, dup_field_index[fname]) +
+                                      " this, change one of the BTYPE header keywords.")
+                        fname += "_%d" % (dup_field_index[fname])
                     for k in xrange(naxis4):
                         if naxis4 > 1:
                             fname += "_%s_%d" % (hdu.header["CTYPE4"], k+1)
-                        if fname in self.field_list:
-                            mylog.error("You have two fields with the same name. Change one of " +
-                                        "the names in the BTYPE header keyword to distinguish " +
-                                        "them.")
                         self._axis_map[fname] = k
                         self._file_map[fname] = fits_file
                         self._ext_map[fname] = j
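
The renaming above can be read in isolation; a standalone sketch, with a
plain list standing in for self.field_list (all names illustrative):

    dup_field_index = {}
    field_list = []

    def register(fname):
        # Append a running counter whenever a BTYPE name repeats.
        if fname in field_list:
            dup_field_index[fname] = dup_field_index.get(fname, 0) + 1
            fname = "%s_%d" % (fname, dup_field_index[fname])
        field_list.append(fname)
        return fname

    print register("intensity")   # intensity
    print register("intensity")   # intensity_1
    print register("intensity")   # intensity_2
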
@@ -210,7 +221,7 @@
         # If nprocs > 1, decompose the domain into virtual grids
         if self.num_grids > 1:
             if self.pf.z_axis_decomp:
-                dz = (pf.domain_width/pf.domain_dimensions)[2]
+                dz = pf.quan(1.0, "code_length")*pf.spectral_factor
                 self.grid_dimensions[:,2] = np.around(float(pf.domain_dimensions[2])/
                                                             self.num_grids).astype("int")
                 self.grid_dimensions[-1,2] += (pf.domain_dimensions[2] % self.num_grids)
@@ -227,7 +238,7 @@
                 dims = np.array(pf.domain_dimensions)
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    dims[pf.vel_axis] = 1
+                    dims[pf.spec_axis] = 1
                 psize = get_psize(dims, self.num_grids)
                 gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
                 self.grid_left_edge = self.pf.arr(gle, "code_length")
@@ -235,9 +246,9 @@
                 self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
                 # If we are creating a dataset of lines, only decompose along the position axes
                 if len(pf.line_database) > 0:
-                    self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
-                    self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
-                    self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
+                    self.grid_left_edge[:,pf.spec_axis] = pf.domain_left_edge[pf.spec_axis]
+                    self.grid_right_edge[:,pf.spec_axis] = pf.domain_right_edge[pf.spec_axis]
+                    self.grid_dimensions[:,pf.spec_axis] = pf.domain_dimensions[pf.spec_axis]
         else:
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
@@ -303,6 +314,7 @@
                  nprocs = None,
                  storage_filename = None,
                  nan_mask = None,
+                 spectral_factor = 1.0,
                  z_axis_decomp = False,
                  line_database = None,
                  line_width = None,
@@ -315,10 +327,13 @@
         self.specified_parameters = parameters
 
         self.z_axis_decomp = z_axis_decomp
+        self.spectral_factor = spectral_factor
 
         if line_width is not None:
             self.line_width = YTQuantity(line_width[0], line_width[1])
             self.line_units = line_width[1]
+            mylog.info("For line folding, spectral_factor = 1.0")
+            self.spectral_factor = 1.0
         else:
             self.line_width = None
 
@@ -356,8 +371,8 @@
                 else:
                     fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
                 f = _astropy.pyfits.open(fn, memmap=True,
-                                   do_not_scale_image_data=True,
-                                   ignore_blank=True)
+                                         do_not_scale_image_data=True,
+                                         ignore_blank=True)
                 self._fits_files.append(f)
 
         if len(self._handle) > 1 and self._handle[1].name == "EVENTS":
@@ -399,13 +414,23 @@
             self.events_data = False
             self.first_image = 0
             self.primary_header = self._handle[self.first_image].header
-            self.wcs = _astropy.pywcs.WCS(header=self.primary_header)
             self.naxis = self.primary_header["naxis"]
             self.axis_names = [self.primary_header["ctype%d" % (i+1)]
                                for i in xrange(self.naxis)]
             self.dims = [self.primary_header["naxis%d" % (i+1)]
                          for i in xrange(self.naxis)]
 
+            wcs = _astropy.pywcs.WCS(header=self.primary_header)
+            if self.naxis == 4:
+                self.wcs = _astropy.pywcs.WCS(naxis=3)
+                self.wcs.wcs.crpix = wcs.wcs.crpix[:3]
+                self.wcs.wcs.cdelt = wcs.wcs.cdelt[:3]
+                self.wcs.wcs.crval = wcs.wcs.crval[:3]
+                self.wcs.wcs.cunit = [str(unit) for unit in wcs.wcs.cunit][:3]
+                self.wcs.wcs.ctype = [ctype for ctype in wcs.wcs.ctype][:3]
+            else:
+                self.wcs = wcs
+
         self.refine_by = 2
 
         Dataset.__init__(self, fn, dataset_type)
@@ -441,11 +466,12 @@
         self.time_unit = self.quan(1.0, "s")
         self.velocity_unit = self.quan(1.0, "cm/s")
         if "beam_size" in self.specified_parameters:
+            beam_size = self.specified_parameters["beam_size"]
             beam_size = self.quan(beam_size[0], beam_size[1]).in_cgs().value
         else:
             beam_size = 1.0
         self.unit_registry.add("beam",beam_size,dimensions=dimensions.solid_angle)
-        if self.ppv_data:
+        if self.spec_cube:
             units = self.wcs_2d.wcs.cunit[0]
             if units == "deg": units = "degree"
             if units == "rad": units = "radian"
@@ -520,17 +546,17 @@
         self.reversed = False
 
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
-        self.ppv_data = False
+        self.spec_cube = False
         x = 0
-        for p in lon_prefixes+lat_prefixes+vel_prefixes:
+        for p in lon_prefixes+lat_prefixes+spec_names.keys():
             y = np_char.startswith(self.axis_names[:self.dimensionality], p)
             x += y.sum()
-        if x == self.dimensionality: self._setup_ppv()
+        if x == self.dimensionality: self._setup_spec_cube()
 
-    def _setup_ppv(self):
+    def _setup_spec_cube(self):
 
-        self.ppv_data = True
-        self.geometry = "ppv"
+        self.spec_cube = True
+        self.geometry = "spectral_cube"
 
         end = min(self.dimensionality+1,4)
         if self.events_data:
@@ -556,11 +582,11 @@
 
         if self.wcs.naxis > 2:
 
-            self.vel_axis = np.zeros((end-1), dtype="bool")
-            for p in vel_prefixes:
-                self.vel_axis += np_char.startswith(ctypes, p)
-            self.vel_axis = np.where(self.vel_axis)[0][0]
-            self.vel_name = ctypes[self.vel_axis].split("-")[0].lower()
+            self.spec_axis = np.zeros((end-1), dtype="bool")
+            for p in spec_names.keys():
+                self.spec_axis += np_char.startswith(ctypes, p)
+            self.spec_axis = np.where(self.spec_axis)[0][0]
+            self.spec_name = spec_names[ctypes[self.spec_axis].split("-")[0][0]]
 
             self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
             self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.lon_axis, self.lat_axis]]
@@ -571,41 +597,60 @@
             self.wcs_2d.wcs.ctype = [self.wcs.wcs.ctype[self.lon_axis],
                                      self.wcs.wcs.ctype[self.lat_axis]]
 
-            x0 = self.wcs.wcs.crpix[self.vel_axis]
-            dz = self.wcs.wcs.cdelt[self.vel_axis]
-            z0 = self.wcs.wcs.crval[self.vel_axis]
-            self.vel_unit = str(self.wcs.wcs.cunit[self.vel_axis])
-
-            if dz < 0.0:
-                self.reversed = True
-                le = self.dims[self.vel_axis]+0.5
-                re = 0.5
-            else:
-                le = 0.5
-                re = self.dims[self.vel_axis]+0.5
-            self.domain_left_edge[self.vel_axis] = (le-x0)*dz + z0
-            self.domain_right_edge[self.vel_axis] = (re-x0)*dz + z0
-            if self.reversed: dz *= -1
+            self._p0 = self.wcs.wcs.crpix[self.spec_axis]
+            self._dz = self.wcs.wcs.cdelt[self.spec_axis]
+            self._z0 = self.wcs.wcs.crval[self.spec_axis]
+            self.spec_unit = str(self.wcs.wcs.cunit[self.spec_axis])
 
             if self.line_width is not None:
-                self.line_width = self.line_width.in_units(self.vel_unit)
-                self.freq_begin = self.domain_left_edge[self.vel_axis]
-                nz = np.rint(self.line_width.value/dz).astype("int")
-                self.line_width = dz*nz
-                self.domain_left_edge[self.vel_axis] = -self.line_width/2.
-                self.domain_right_edge[self.vel_axis] = self.line_width/2.
-                self.domain_dimensions[self.vel_axis] = nz
-
+                if self._dz < 0.0:
+                    self.reversed = True
+                    le = self.dims[self.spec_axis]+0.5
+                else:
+                    le = 0.5
+                self.line_width = self.line_width.in_units(self.spec_unit)
+                self.freq_begin = (le-self._p0)*self._dz + self._z0
+                # We now reset these so that they are consistent
+                # with the new setup
+                self._dz = np.abs(self._dz)
+                self._p0 = 0.0
+                self._z0 = 0.0
+                nz = np.rint(self.line_width.value/self._dz).astype("int")
+                self.line_width = self._dz*nz
+                self.domain_left_edge[self.spec_axis] = -0.5*float(nz)
+                self.domain_right_edge[self.spec_axis] = 0.5*float(nz)
+                self.domain_dimensions[self.spec_axis] = nz
+            else:
+                if self.spectral_factor == "auto":
+                    self.spectral_factor = float(max(self.domain_dimensions[[self.lon_axis,
+                                                                             self.lat_axis]]))
+                    self.spectral_factor /= self.domain_dimensions[self.spec_axis]
+                    mylog.info("Setting the spectral factor to %f" % (self.spectral_factor))
+                Dz = self.domain_right_edge[self.spec_axis]-self.domain_left_edge[self.spec_axis]
+                self.domain_right_edge[self.spec_axis] = self.domain_left_edge[self.spec_axis] + \
+                                                        self.spectral_factor*Dz
+                self._dz /= self.spectral_factor
+                self._p0 = (self._p0-0.5)*self.spectral_factor + 0.5
         else:
 
             self.wcs_2d = self.wcs
-            self.vel_axis = 2
-            self.vel_name = "z"
-            self.vel_unit = "code length"
+            self.spec_axis = 2
+            self.spec_name = "z"
+            self.spec_unit = "code length"
+
+    def spec2pixel(self, spec_value):
+        sv = self.arr(spec_value).in_units(self.spec_unit)
+        return self.arr((sv.v-self._z0)/self._dz+self._p0,
+                        "code_length")
+
+    def pixel2spec(self, pixel_value):
+        pv = self.arr(pixel_value, "code_length")
+        return self.arr((pv.v-self._p0)*self._dz+self._z0,
+                        self.spec_unit)
 
     def __del__(self):
-        for file in self._fits_files:
-            file.close()
-            del file
+        for f in self._fits_files:
+            f.close()
+            del f
         self._handle.close()
         del self._handle

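spec2pixel and pixel2spec above are inverses of one linear WCS relation
along the spectral axis; a sketch with plain floats, where p0, z0, and dz
stand in for the dataset's _p0, _z0, and _dz (values illustrative). When
spectral_factor != 1, _dz and _p0 are rescaled first, so the same two
formulas still apply:

    p0, z0, dz = 1.0, 1.420e9, 1.0e5   # reference pixel, reference value (Hz), increment

    def spec2pixel(sv):
        # world value -> pixel coordinate
        return (sv - z0)/dz + p0

    def pixel2spec(pv):
        # pixel coordinate -> world value
        return (pv - p0)*dz + z0

    assert pixel2spec(spec2pixel(1.421e9)) == 1.421e9
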
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -23,7 +23,7 @@
         for field in pf.field_list:
             if field[0] == "fits": self[field].take_log = False
 
-    def _setup_ppv_fields(self):
+    def _setup_spec_cube_fields(self):
 
         def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
@@ -42,17 +42,18 @@
             self.add_field(("fits",name), function=world_f(axis, unit), units=unit)
 
         if self.pf.dimensionality == 3:
-            def _vel_los(field, data):
-                axis = "xyz"[data.pf.vel_axis]
-                return data.pf.arr(data[axis].ndarray_view(),data.pf.vel_unit)
-            self.add_field(("fits",self.pf.vel_name), function=_vel_los,
-                           units=self.pf.vel_unit)
+            def _spec(field, data):
+                axis = "xyz"[data.pf.spec_axis]
+                sp = (data[axis].ndarray_view()-self.pf._p0)*self.pf._dz + self.pf._z0
+                return data.pf.arr(sp, data.pf.spec_unit)
+            self.add_field(("fits","spectral"), function=_spec,
+                           units=self.pf.spec_unit, display_name=self.pf.spec_name)
 
     def setup_fluid_fields(self):
 
-        if self.pf.ppv_data:
+        if self.pf.spec_cube:
             def _pixel(field, data):
                 return data.pf.arr(data["ones"], "pixel")
             self.add_field(("fits","pixel"), function=_pixel, units="pixel")
-            self._setup_ppv_fields()
+            self._setup_spec_cube_fields()
             return

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -26,8 +26,10 @@
         self._handle = pf._handle
         if self.pf.line_width is not None:
             self.line_db = self.pf.line_database
+            self.dz = self.pf.line_width/self.domain_dimensions[self.pf.spec_axis]
         else:
             self.line_db = None
+            self.dz = 1.
 
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
@@ -90,19 +92,19 @@
                     start = ((g.LeftEdge-self.pf.domain_left_edge)/dx).to_ndarray().astype("int")
                     end = start + g.ActiveDimensions
                     if self.line_db is not None and fname in self.line_db:
-                        my_off = self.line_db.get(fname).in_units(self.pf.vel_unit).value
+                        my_off = self.line_db.get(fname).in_units(self.pf.spec_unit).value
                         my_off = my_off - 0.5*self.pf.line_width
-                        my_off = int((my_off-self.pf.freq_begin)/dx[self.pf.vel_axis].value)
+                        my_off = int((my_off-self.pf.freq_begin)/self.dz)
                         my_off = max(my_off, 0)
-                        my_off = min(my_off, self.pf.dims[self.pf.vel_axis]-1)
-                        start[self.pf.vel_axis] += my_off
-                        end[self.pf.vel_axis] += my_off
+                        my_off = min(my_off, self.pf.dims[self.pf.spec_axis]-1)
+                        start[self.pf.spec_axis] += my_off
+                        end[self.pf.spec_axis] += my_off
                         mylog.debug("Reading from " + str(start) + str(end))
                     slices = [slice(start[i],end[i]) for i in xrange(3)]
                     if self.pf.reversed:
-                        new_start = self.pf.dims[self.pf.vel_axis]-1-start[self.pf.vel_axis]
-                        new_end = max(self.pf.dims[self.pf.vel_axis]-1-end[self.pf.vel_axis],0)
-                        slices[self.pf.vel_axis] = slice(new_start,new_end,-1)
+                        new_start = self.pf.dims[self.pf.spec_axis]-1-start[self.pf.spec_axis]
+                        new_end = max(self.pf.dims[self.pf.spec_axis]-1-end[self.pf.spec_axis],0)
+                        slices[self.pf.spec_axis] = slice(new_start,new_end,-1)
                     if self.pf.dimensionality == 2:
                         nx, ny = g.ActiveDimensions[:2]
                         nz = 1
@@ -114,8 +116,8 @@
                     else:
                         data = ds.data[slices[2],slices[1],slices[0]].transpose()
                     if self.line_db is not None:
-                        nz1 = data.shape[self.pf.vel_axis]
-                        nz2 = g.ActiveDimensions[self.pf.vel_axis]
+                        nz1 = data.shape[self.pf.spec_axis]
+                        nz2 = g.ActiveDimensions[self.pf.spec_axis]
                         if nz1 != nz2:
                             old_data = data.copy()
                             data = np.zeros(g.ActiveDimensions)

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -15,6 +15,7 @@
 from yt.utilities.on_demand_imports import _astropy
 from yt.funcs import mylog, get_image_suffix
 from yt.visualization._mpl_imports import FigureCanvasAgg
+
 import os
 
 def _make_counts(emin, emax):
@@ -130,26 +131,17 @@
             raise NotImplementedError("WCS axes are not implemented for oblique plots.")
         if not hasattr(pw.pf, "wcs_2d"):
             raise NotImplementedError("WCS axes are not implemented for this dataset.")
-        if pw.data_source.axis != pw.pf.vel_axis:
+        if pw.data_source.axis != pw.pf.spec_axis:
             raise NotImplementedError("WCS axes are not implemented for this axis.")
-        self.pf = pw.pf
+        self.plots = {}
         self.pw = pw
-        self.plots = {}
-        self.wcs_axes = []
         for f in pw.plots:
             rect = pw.plots[f]._get_best_layout()[1]
             fig = pw.plots[f].figure
-            ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
-            fig.add_axes(ax)
-            self.wcs_axes.append(ax)
-        self._setup_plots()
-
-    def _setup_plots(self):
-        pw = self.pw
-        for f, ax in zip(pw.plots, self.wcs_axes):
-            wcs = ax.wcs.wcs
-            pw.plots[f].axes.get_xaxis().set_visible(False)
-            pw.plots[f].axes.get_yaxis().set_visible(False)
+            ax = fig.axes[0]
+            wcs_ax = WCSAxes(fig, rect, wcs=pw.pf.wcs_2d, frameon=False)
+            fig.add_axes(wcs_ax)
+            wcs = pw.pf.wcs_2d.wcs
             xax = pw.pf.coordinates.x_axis[pw.data_source.axis]
             yax = pw.pf.coordinates.y_axis[pw.data_source.axis]
             xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0],
@@ -157,18 +149,18 @@
             ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0],
                                   wcs.cunit[yax])
             fp = pw._font_properties
-            ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
-            ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
-            ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
-            ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
-            ax.coords[0].ticklabels.set_fontproperties(fp)
-            ax.coords[1].ticklabels.set_fontproperties(fp)
-            self.plots[f] = pw.plots[f]
-        self.pw = pw
-        self.pf = self.pw.pf
-
-    def refresh(self):
-        self._setup_plots(self)
+            wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp)
+            wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp)
+            wcs_ax.coords[0].ticklabels.set_fontproperties(fp)
+            wcs_ax.coords[1].ticklabels.set_fontproperties(fp)
+            ax.xaxis.set_visible(False)
+            ax.yaxis.set_visible(False)
+            wcs_ax.set_xlim(pw.xlim[0].value, pw.xlim[1].value)
+            wcs_ax.set_ylim(pw.ylim[0].value, pw.ylim[1].value)
+            wcs_ax.coords.frame._update_cache = []
+            self.plots[f] = fig
 
     def keys(self):
         return self.plots.keys()
@@ -187,8 +179,8 @@
     def show(self):
         from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
-            canvas = FigureCanvasAgg(v.figure)
-            display(v.figure)
+            canvas = FigureCanvasAgg(v)
+            display(v)
 
     def save(self, name=None, mpl_kwargs=None):
         if mpl_kwargs is None:

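A sketch of how the reworked PlotWindowWCS is meant to be driven, assuming
a FITS spectral cube, an installed WCSAxes, and a slice along the spectral
axis; the filename and field name are hypothetical:

    import yt
    from yt.frontends.fits.misc import PlotWindowWCS

    ds = yt.load("m33_hi.fits")               # hypothetical cube
    slc = yt.SlicePlot(ds, "z", "intensity")  # slice along the spectral axis
    wcs_slc = PlotWindowWCS(slc)              # adds RA/Dec WCSAxes to each figure
    wcs_slc.show()                            # inline display under IPython
    wcs_slc.save("m33_hi_slice")
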
diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/geometry/ppv_coordinates.py
--- a/yt/geometry/ppv_coordinates.py
+++ /dev/null
@@ -1,58 +0,0 @@
-"""
-Cartesian fields
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-from .cartesian_coordinates import \
-    CartesianCoordinateHandler
-
-class PPVCoordinateHandler(CartesianCoordinateHandler):
-
-    def __init__(self, pf):
-        super(PPVCoordinateHandler, self).__init__(pf)
-
-        self.axis_name = {}
-        self.axis_id = {}
-
-        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.vel_axis],
-                                   ["Image\ x", "Image\ y", pf.vel_name]):
-            lower_ax = "xyz"[axis]
-            upper_ax = lower_ax.upper()
-
-            self.axis_name[axis] = axis_name
-            self.axis_name[lower_ax] = axis_name
-            self.axis_name[upper_ax] = axis_name
-            self.axis_name[axis_name] = axis_name
-
-            self.axis_id[lower_ax] = axis
-            self.axis_id[axis] = axis
-            self.axis_id[axis_name] = axis
-
-        self.default_unit_label = {}
-        self.default_unit_label[pf.lon_axis] = "pixel"
-        self.default_unit_label[pf.lat_axis] = "pixel"
-        self.default_unit_label[pf.vel_axis] = pf.vel_unit
-
-    def convert_to_cylindrical(self, coord):
-        raise NotImplementedError
-
-    def convert_from_cylindrical(self, coord):
-        raise NotImplementedError
-
-    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
-                0  : 1,  1  : 0,  2  : 0}
-
-    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
-                0  : 2,  1  : 2,  2  : 1}

diff -r 4cafede7f67b1e6bb3a2a6f0076b4dc3be8dfecb -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 yt/geometry/spec_cube_coordinates.py
--- /dev/null
+++ b/yt/geometry/spec_cube_coordinates.py
@@ -0,0 +1,65 @@
+"""
+Coordinate handler for spectral cube (position-position-spectral) data
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from .cartesian_coordinates import \
+    CartesianCoordinateHandler
+
+class SpectralCubeCoordinateHandler(CartesianCoordinateHandler):
+
+    def __init__(self, pf):
+        super(SpectralCubeCoordinateHandler, self).__init__(pf)
+
+        self.axis_name = {}
+        self.axis_id = {}
+
+        for axis, axis_name in zip([pf.lon_axis, pf.lat_axis, pf.spec_axis],
+                                   ["Image\ x", "Image\ y", pf.spec_name]):
+            lower_ax = "xyz"[axis]
+            upper_ax = lower_ax.upper()
+
+            self.axis_name[axis] = axis_name
+            self.axis_name[lower_ax] = axis_name
+            self.axis_name[upper_ax] = axis_name
+            self.axis_name[axis_name] = axis_name
+
+            self.axis_id[lower_ax] = axis
+            self.axis_id[axis] = axis
+            self.axis_id[axis_name] = axis
+
+        self.default_unit_label = {}
+        self.default_unit_label[pf.lon_axis] = "pixel"
+        self.default_unit_label[pf.lat_axis] = "pixel"
+        self.default_unit_label[pf.spec_axis] = pf.spec_unit
+
+        def _spec_axis(ax, x, y):
+            p = (x,y)[ax]
+            return [self.pf.pixel2spec(pp).v for pp in p]
+
+        self.axis_field = {}
+        self.axis_field[self.pf.spec_axis] = _spec_axis
+
+    def convert_to_cylindrical(self, coord):
+        raise NotImplementedError
+
+    def convert_from_cylindrical(self, coord):
+        raise NotImplementedError
+
+    x_axis = { 'x' : 1, 'y' : 0, 'z' : 0,
+                0  : 1,  1  : 0,  2  : 0}
+
+    y_axis = { 'x' : 2, 'y' : 2, 'z' : 1,
+                0  : 2,  1  : 2,  2  : 1}

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a5f2cac5c799/
Changeset:   a5f2cac5c799
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 01:30:47
Summary:     Backing out changeset 1c0ed53, which added an exception when importing yt fails.
Affected #:  2 files

diff -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 -r a5f2cac5c7997ca5005cf3e707061e4f133d371d yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -95,29 +95,23 @@
     memory_checker, \
     deprecated_class
 from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.exceptions import YTImportFailure
 
 import yt.utilities.physical_constants as physical_constants
 import yt.units as units
 from yt.units.yt_array import YTArray, YTQuantity
 
-try:
-    from yt.fields.api import \
-        field_plugins, \
-        DerivedField, \
-        FieldDetector, \
-        FieldInfoContainer, \
-        ValidateParameter, \
-        ValidateDataField, \
-        ValidateProperty, \
-        ValidateSpatial, \
-        ValidateGridType, \
-        add_field, \
-        derived_field
-except ImportError:
-    raise YTImportFailure
-
-
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
 
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \

diff -r 3ffe6d5372f88d620e023ebad6a61d99e40019f7 -r a5f2cac5c7997ca5005cf3e707061e4f133d371d yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -387,13 +387,6 @@
                But this one has %s and %s.""" % (self.dimensions, self.shape)
         return r
 
-class YTImportFailure(YTException):
-    def __str__(self):
-        return "You cannot import yt because you have not setup your environment for this \n" + \
-               "version of yt.  To resolve this, follow these steps at the command line: \n\n" + \
-               "$ cd $YT_DEST/src/yt-hg \n" + \
-               "$ python setup.py develop \n"
-
 class YTIllDefinedCutRegion(Exception):
     def __init__(self, conditions):
         self.conditions = conditions


https://bitbucket.org/yt_analysis/yt/commits/5a37363c2829/
Changeset:   5a37363c2829
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 01:54:36
Summary:     Merging.
Affected #:  32 files

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -41,6 +41,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -226,4 +226,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -23,7 +23,8 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from matplotlib import pylab"
+      "from matplotlib import pylab\n",
+      "from yt.analysis_modules.halo_finding.api import HaloFinder"
      ],
      "language": "python",
      "metadata": {},

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -0,0 +1,156 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:6da8ec00f414307f27544fbdbc6b4fa476e5e96809003426279b2a1c898b4546"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example creates a fake in-memory particle dataset and then loads it as a yt dataset using the `load_particles` function.\n",
+      "\n",
+      "Our \"fake\" dataset will be numpy arrays filled with normally distributed randoml particle positions and uniform particle masses.  Since real data is often scaled, I arbitrarily multiply by 1e6 to show how to deal with scaled data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "\n",
+      "n_particles = 5e6\n",
+      "\n",
+      "ppx, ppy, ppz = 1e6*np.random.normal(size=[3, n_particles])\n",
+      "\n",
+      "ppm = np.ones(n_particles)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function accepts a dictionary populated with particle data fields loaded in memory as numpy arrays or python lists:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {'particle_position_x': ppx,\n",
+      "        'particle_position_y': ppy,\n",
+      "        'particle_position_z': ppz,\n",
+      "        'particle_mass': ppm}"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To hook up with yt's internal field system, the dictionary keys must be 'particle_position_x', 'particle_position_y', 'particle_position_z', and 'particle_mass', as well as any other particle field provided by one of the particle frontends."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `load_particles` function transforms the `data` dictionary into an in-memory yt `Dataset` object, providing an interface for further analysis with `yt`. The example below illustrates how to load the data dictionary we created above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "from yt.units import parsec, Msun\n",
+      "\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "\n",
+      "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The `length_unit` and `mass_unit` are the conversion from the units used in the `data` dictionary to CGS.  I've arbitrarily chosen one parsec and 10^8 Msun for this example. \n",
+      "\n",
+      "The `n_ref` parameter controls how many particle it takes to accumulate in an oct-tree cell to trigger refinement.  Larger `n_ref` will decrease poisson noise at the cost of resolution in the octree.  \n",
+      "\n",
+      "Finally, the `bbox` parameter is a bounding box in the units of the dataset that contains all of the particles.  This is used to set the size of the base octree block."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This new dataset acts like any other `yt` `Dataset` object, and can be used to create data objects and query for yt fields.  This example shows how to access \"deposit\" fields:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# This is generated with \"cloud-in-cell\" interpolation.\n",
+      "cic_density = ad[\"deposit\", \"all_cic\"]\n",
+      "\n",
+      "# These three are based on nearest-neighbor cell deposition\n",
+      "nn_density = ad[\"deposit\", \"all_density\"]\n",
+      "nn_deposited_mass = ad[\"deposit\", \"all_mass\"]\n",
+      "particle_count_per_cell = ad[\"deposit\", \"all_count\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, 2, ('deposit', 'all_cic'))\n",
+      "slc.set_width((8, 'Mpc'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -898,3 +898,4 @@
 Generic Particle Data
 ---------------------
 
+.. notebook:: Loading_Generic_Particle_Data.ipynb

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -348,7 +348,7 @@
    there
  * WebGL interface for isocontours and a pannable map widget added to Reason
  * Performance improvements for volume rendering
- * Adaptive HEALPix support (see :ref:`adaptive_healpix_volume_rendering`)
+ * Adaptive HEALPix support
  * Column density calculations (see :ref:`radial-column-density`)
  * Massive speedup for 1D profiles
  * Lots more, bug fixes etc.

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -93,5 +93,5 @@
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.
-* ``stdoutStreamLogging`` (default: ``'False'``): If three, logging is directed
+* ``stdoutStreamLogging`` (default: ``'False'``): If true, logging is directed
   to stdout rather than stderr

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -120,7 +120,7 @@
 .. python-script::
    
    from yt.mods import *
-   data_pf = load('Enzo_64/RD0006/RD0006')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_pf = load('rockstar_halos/halos_0.0.bin')
 
    hc = HaloCatalog(halos_pf=halos_pf)

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -16,7 +16,7 @@
 packages.
 
 Note that the index object associated with your snapshot file contains a
-list of plots you've made in ``pf.h.plots``.
+list of plots you've made in ``ds.plots``.
 
 .. _fixed-resolution-buffers:
 

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -81,13 +81,13 @@
     dpf = halo.halo_catalog.data_pf
     hpf = halo.halo_catalog.halos_pf
     center = dpf.arr([halo.quantities["particle_position_%s" % axis] \
-                      for axis in "xyz"]) / dpf.length_unit
-    radius = factor * halo.quantities[radius_field] / dpf.length_unit
+                      for axis in "xyz"])
+    radius = factor * halo.quantities[radius_field]
     if radius <= 0.0:
         halo.data_object = None
         return
     try:
-        sphere = dpf.sphere(center, (radius, "code_length"))
+        sphere = dpf.sphere(center, radius)
     except YTSphereTooSmall:
         halo.data_object = None
         return

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -43,6 +43,10 @@
         collection is instantiated.
         Default : None (will default to the fields 'particle_position_x',
         'particle_position_y', 'particle_position_z')
+    suppress_logging : boolean
+        Suppress yt's logging when iterating over the simulation time
+        series.
+        Default : False
 
     Examples
      --------
@@ -59,7 +63,7 @@
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
     """
-    def __init__(self, outputs, indices, fields=None) :
+    def __init__(self, outputs, indices, fields=None, suppress_logging=False):
 
         indices.sort() # Just in case the caller wasn't careful
         self.field_data = YTFieldData()
@@ -74,6 +78,7 @@
         self.num_indices = len(indices)
         self.num_steps = len(outputs)
         self.times = []
+        self.suppress_logging = suppress_logging
 
         # Default fields 
         
@@ -83,8 +88,9 @@
         fields.append("particle_position_z")
         fields = list(OrderedDict.fromkeys(fields))
 
-        old_level = int(ytcfg.get("yt","loglevel"))
-        mylog.setLevel(40)
+        if self.suppress_logging:
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
         my_storage = {}
         pbar = get_pbar("Constructing trajectory information", len(self.data_series))
         for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
@@ -101,7 +107,8 @@
             pbar.update(i)
         pbar.finish()
 
-        mylog.setLevel(old_level)
+        if self.suppress_logging:
+            mylog.setLevel(old_level)
 
         times = []
         for fn, time in sorted(my_storage.items()):
@@ -191,14 +198,16 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            old_level = int(ytcfg.get("yt","loglevel"))
-            mylog.setLevel(40)
+            if self.suppress_logging:
+                old_level = int(ytcfg.get("yt","loglevel"))
+                mylog.setLevel(40)
             dd_first = self.data_series[0].all_data()
             fd = dd_first._determine_fields(field)[0]
             if field not in self.particle_fields:
                 if self.data_series[0].field_info[fd].particle_type:
                     self.particle_fields.append(field)
-            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
+            particles = np.empty((self.num_indices,self.num_steps))
+            particles[:] = np.nan
             step = int(0)
             pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
             my_storage={}
@@ -232,7 +241,8 @@
             for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
                 particles[indices,i] = pfield
             self.field_data[field] = array_like_field(dd_first, particles, fd)
-            mylog.setLevel(old_level)
+            if self.suppress_logging:
+                mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):

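With the new keyword the log-level juggling is opt-in; a sketch of building
a trajectory collection quietly (output names and indices illustrative, and
the api import path is assumed):

    from yt.analysis_modules.particle_trajectories.api import \
        ParticleTrajectories

    my_fns = ["DD%04d/output_%04d" % (i, i) for i in range(6)]
    indices = [1, 5, 10]
    # suppress_logging=True drops the log level to 40 only while the time
    # series is traversed, then restores the configured level afterwards.
    trajs = ParticleTrajectories(my_fns, indices,
                                 fields=["particle_velocity_x"],
                                 suppress_logging=True)
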
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -137,19 +137,25 @@
             self.center = None
             self.set_field_parameter('center', self.center)
             return
+        elif isinstance(center, YTArray):
+            self.center = self.pf.arr(center.in_cgs())
+            self.center.convert_to_units('code_length')
         elif isinstance(center, (types.ListType, types.TupleType, np.ndarray)):
-            center = self.pf.arr(center, 'code_length')
+            if isinstance(center[0], YTQuantity):
+                self.center = self.pf.arr([c.in_cgs() for c in center])
+                self.center.convert_to_units('code_length')
+            else:
+                self.center = self.pf.arr(center, 'code_length')
         elif isinstance(center, basestring):
             if center.lower() in ("c", "center"):
-                center = self.pf.domain_center
+                self.center = self.pf.domain_center
              # is this dangerous for race conditions?
             elif center.lower() in ("max", "m"):
-                center = self.pf.h.find_max(("gas", "density"))[1]
+                self.center = self.pf.h.find_max(("gas", "density"))[1]
             elif center.startswith("max_"):
-                center = self.pf.h.find_max(center[4:])[1]
+                self.center = self.pf.h.find_max(center[4:])[1]
         else:
-            center = np.array(center, dtype='float64')
-        self.center = self.pf.arr(center, 'code_length')
+            self.center = self.pf.arr(center, 'code_length', dtype='float64')
         self.set_field_parameter('center', self.center)
 
     def get_field_parameter(self, name, default=None):
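
The net effect of the new branches is that all of these center spellings
normalize to the same code_length array; a sketch against a hypothetical
dataset:

    import yt
    from yt.units.yt_array import YTQuantity

    ds = yt.load("MyDataset/data0001")                           # hypothetical
    sp1 = ds.sphere([0.5, 0.5, 0.5], (10, "kpc"))                # floats: code_length
    sp2 = ds.sphere(ds.arr([100.0]*3, "kpc"), (10, "kpc"))       # YTArray, converted
    sp3 = ds.sphere([YTQuantity(100.0, "kpc")]*3, (10, "kpc"))   # list of YTQuantity
    sp4 = ds.sphere("max", (10, "kpc"))                          # peak-density location
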
@@ -593,6 +599,10 @@
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
+        # Track which ones we want in the end
+        ofields = set(self.field_data.keys()
+                    + fields_to_get
+                    + fields_to_generate)
         # At this point, we want to figure out *all* our dependencies.
         fields_to_get = self._identify_dependencies(fields_to_get,
             self._spatial)
@@ -621,6 +631,9 @@
 
         fields_to_generate += gen_fluids + gen_particles
         self._generate_fields(fields_to_generate)
+        for field in self.field_data.keys():
+            if field not in ofields:
+                self.field_data.pop(field)
 
     def _generate_fields(self, fields_to_generate):
         index = 0
@@ -801,7 +814,10 @@
                     "Currently we only support images centered at R=0. " +
                     "We plan to generalize this in the near future")
             from yt.visualization.fixed_resolution import CylindricalFixedResolutionBuffer
-            if iterable(width): radius = max(width)
+            if iterable(width):
+                radius = max(width)
+            else:
+                radius = width
             if iterable(resolution): resolution = max(resolution)
             frb = CylindricalFixedResolutionBuffer(self, radius, resolution)
             return frb

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,8 +21,6 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.fields.field_info_container import \
-    FieldDetector
 from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -52,12 +52,6 @@
     _type_name = 'octree_subset'
     _skip_add = True
     _con_args = ('base_region', 'domain', 'pf')
-    _container_fields = (("index", "dx"),
-                         ("index", "dy"),
-                         ("index", "dz"),
-                         ("index", "x"),
-                         ("index", "y"),
-                         ("index", "z"))
     _domain_offset = 0
     _cell_count = -1
 
@@ -78,19 +72,6 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
-    def _generate_container_field(self, field):
-        if self._current_chunk is None:
-            self.index._identify_base_chunk(self)
-        if isinstance(field, tuple): field = field[1]
-        if field == "dx":
-            return self._current_chunk.fwidth[:,0]
-        elif field == "dy":
-            return self._current_chunk.fwidth[:,1]
-        elif field == "dz":
-            return self._current_chunk.fwidth[:,2]
-        else:
-            raise RuntimeError
-
     def __getitem__(self, key):
         tr = super(OctreeSubset, self).__getitem__(key)
         try:
@@ -111,16 +92,20 @@
         return self._num_zones + 2*self._num_ghost_zones
 
     def _reshape_vals(self, arr):
-        if len(arr.shape) == 4: return arr
+        if len(arr.shape) == 4 and arr.flags["F_CONTIGUOUS"]:
+            return arr
         nz = self.nz
         n_oct = arr.shape[0] / (nz**3.0)
         if arr.size == nz*nz*nz*n_oct:
-            arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+            new_shape = (nz, nz, nz, n_oct)
         elif arr.size == nz*nz*nz*n_oct * 3:
-            arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+            new_shape = (nz, nz, nz, n_oct, 3)
         else:
             raise RuntimeError
-        arr = np.asfortranarray(arr)
+        # This will retain units now.
+        arr.shape = new_shape
+        if not arr.flags["F_CONTIGUOUS"]:
+            arr = arr.reshape(new_shape, order="F")
         return arr
 
     _domain_ind = None

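The rewritten _reshape_vals takes a fast path when the buffer is already
4-D and Fortran-ordered, and otherwise assigns to .shape so that a YTArray
keeps its units instead of being rebuilt; a plain-numpy illustration of the
contiguity checks (shapes illustrative):

    import numpy as np

    a = np.zeros((2, 2, 2, 5), order="F")
    print a.flags["F_CONTIGUOUS"]    # True: the fast path returns it untouched

    b = np.zeros(2*2*2*5)
    b.shape = (2, 2, 2, 5)           # in-place: same buffer, no copy
    print b.flags["F_CONTIGUOUS"]    # False: falls through to the order="F" reshape
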
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -527,7 +527,7 @@
             source.quantities.max_location(field)
         mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f",
               max_val, mx, my, mz)
-        return max_val, np.array([mx, my, mz], dtype="float64")
+        return max_val, self.arr([mx, my, mz], 'code_length', dtype="float64")
 
     def find_min(self, field):
         """
@@ -539,7 +539,7 @@
             source.quantities.min_location(field)
         mylog.info("Min Value is %0.5e at %0.16f %0.16f %0.16f",
               min_val, mx, my, mz)
-        return min_val, np.array([mx, my, mz], dtype="float64")
+        return min_val, self.arr([mx, my, mz], 'code_length', dtype="float64")
 
     # Now all the object related stuff
     def all_data(self, find_max=False):

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -27,8 +27,6 @@
     NullFunc, \
     TranslationFunc, \
     ValidateSpatial
-from .field_detector import \
-    FieldDetector
 from yt.utilities.exceptions import \
     YTFieldNotFound
 from .field_plugin_registry import \

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -17,6 +17,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.units.yt_array import YTArray
 from yt.fields.derived_field import \
     ValidateParameter, \
     ValidateSpatial
@@ -225,12 +226,12 @@
         yv = data[ptype, svel % 'y'] - bv[1]
         zv = data[ptype, svel % 'z'] - bv[2]
         center = data.get_field_parameter('center')
-        coords = np.array([data[ptype, spos % 'x'],
+        coords = YTArray([data[ptype, spos % 'x'],
                            data[ptype, spos % 'y'],
                            data[ptype, spos % 'z']], dtype=np.float64)
         new_shape = tuple([3] + [1]*(len(coords.shape)-1))
         r_vec = coords - np.reshape(center,new_shape)
-        v_vec = np.array([xv,yv,zv], dtype=np.float64)
+        v_vec = YTArray([xv,yv,zv], dtype=np.float64)
         return np.cross(r_vec, v_vec, axis=0)
 
     registry.add_field((ptype, "particle_specific_angular_momentum"),
@@ -340,7 +341,7 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -358,7 +359,7 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -376,12 +377,11 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
-        vel = vel - np.reshape(bv, (3, 1))
-        sphp = get_sph_phi_component(pos, theta, phi, normal)
+        sphp = get_sph_phi_component(pos, phi, normal)
         return sphp
 
     registry.add_field((ptype, "particle_phi_spherical"),
@@ -395,9 +395,9 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         vel = svel
-        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -416,9 +416,9 @@
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
         pos = spos
-        pos = np.array([data[ptype, pos % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, pos % ax] for ax in "xyz"])
         vel = svel
-        vel = np.array([data[ptype, vel % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, vel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))
@@ -436,8 +436,8 @@
         normal = data.get_field_parameter('normal')
         center = data.get_field_parameter('center')
         bv = data.get_field_parameter("bulk_velocity")
-        pos = np.array([data[ptype, spos % ax] for ax in "xyz"])
-        vel = np.array([data[ptype, svel % ax] for ax in "xyz"])
+        pos = YTArray([data[ptype, spos % ax] for ax in "xyz"])
+        vel = YTArray([data[ptype, svel % ax] for ax in "xyz"])
         theta = get_sph_theta(pos, center)
         phi = get_sph_phi(pos, center)
         pos = pos - np.reshape(center, (3, 1))

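The motivation for swapping np.array for YTArray in these particle fields:
np.array silently strips units from YTArray inputs, while the YTArray
constructor keeps them (the list-of-arrays case is assumed here to adopt
the first element's units):

    import numpy as np
    from yt.units.yt_array import YTArray

    x = YTArray([1.0, 2.0, 3.0], "km/s")
    print type(np.array(x))    # plain ndarray: units are dropped
    pos = YTArray([x, x, x])   # stacks components; assumed to keep x's units
    print pos.units            # km/s
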
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -188,7 +188,7 @@
         # Specific entropy
         ("entropy", ("erg/(g*K)", ["entropy"], None)),
         ("entropypert", ("", [], None)),
-        ("enucdot", ("ergs/(g*s)", [], None)),
+        ("enucdot", ("erg/(g*s)", [], None)),
         ("gpi_x", ("dyne/cm**3", [], None)), # Perturbational pressure grad
         ("gpi_y", ("dyne/cm**3", [], None)),
         ("gpi_z", ("dyne/cm**3", [], None)),

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -162,7 +162,7 @@
 
     def _parse_index(self):
         f = self._handle # shortcut
-        self.max_level = f.attrs['max_level']
+        self.max_level = f.attrs['num_levels'] - 1
 
         grids = []
         self.dds_list = []

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -171,7 +171,7 @@
             return np.array([], dtype=np.float64)
 
         data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
+        return np.asarray(data[field_index::items_per_particle], dtype=np.float64, order='F')
 
 class IOHandlerChombo2DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo2d_hdf5"

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -235,7 +235,18 @@
         BaseIOHandler.__init__(self, pf)
 
     def _read_field_names(self, grid):
-        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
+        fields = []
+        add_io = "io" in grid.pf.particle_types
+        for name, v in self.grids_in_memory[grid.id].items():
+
+            # NOTE: This won't work with 1D datasets or references.
+            if not hasattr(v, "shape") or v.dtype == "O":
+                continue
+            elif v.ndim == 1:
+                if add_io: fields.append( ("io", str(name)) )
+            else:
+                fields.append( ("enzo", str(name)) )
+        return fields
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -296,7 +307,7 @@
                     x, y, z = self.grids_in_memory[g.id]['particle_position_x'], \
                                         self.grids_in_memory[g.id]['particle_position_y'], \
                                         self.grids_in_memory[g.id]['particle_position_z']
-                    mask = selector.select_points(x, y, z)
+                    mask = selector.select_points(x, y, z, 0.0)
                     if mask is None: continue
                     for field in field_list:
                         data = self.grids_in_memory[g.id][field]

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -985,8 +985,12 @@
         This is a dict of numpy arrays, where the keys are the field names.
         Particle positions must be named "particle_position_x",
         "particle_position_y", "particle_position_z".
-    sim_unit_to_cm : float
-        Conversion factor from simulation units to centimeters
+    length_unit : float
+        Conversion factor from simulation length units to centimeters
+    mass_unit : float
+        Conversion factor from simulation mass units to grams
+    time_unit : float
+        Conversion factor from simulation time units to seconds
     bbox : array_like (xdim:zdim, LE:RE), optional
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -34,10 +34,10 @@
 
 def _get_coord_fields(axi, units = "code_length"):
     def _dds(field, data):
-        rv = data.pf.arr(data.fwidth[...,axi], units)
+        rv = data.pf.arr(data.fwidth[...,axi].copy(), units)
         return data._reshape_vals(rv)
     def _coords(field, data):
-        rv = data.pf.arr(data.fcoords[...,axi], units)
+        rv = data.pf.arr(data.fcoords[...,axi].copy(), units)
         return data._reshape_vals(rv)
     return _dds, _coords
 

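The added .copy() calls matter because data.fcoords[..., axi] is a view into a cached array; wrapping a view and handing it to callers lets in-place operations silently corrupt the cache. A minimal numpy sketch:

    import numpy as np

    fcoords = np.arange(1., 13.).reshape(4, 3)   # stand-in for cached coords
    axi = 0

    view = fcoords[..., axi]          # shares memory with the cache
    safe = fcoords[..., axi].copy()   # independent buffer

    view *= 100.0
    print(fcoords[0, 0])   # 100.0 -- the cache was mutated through the view
    safe *= 100.0
    print(fcoords[1, 0])   # still 4.0 -- the copy left the cache intact
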
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -122,6 +122,8 @@
         cdef int i, j, k, n
         data.global_index = -1
         data.level = 0
+        data.oref = 0
+        data.nz = 1
         assert(ref_mask.shape[0] / float(data.nz) ==
             <int>(ref_mask.shape[0]/float(data.nz)))
         obj.allocate_domains([ref_mask.shape[0] / data.nz])
@@ -496,7 +498,7 @@
             coords[:,i] += self.DLE[i]
         return coords
 
-    def save_octree(self, always_descend = False):
+    def save_octree(self):
         # Get the header
         header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
                       left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
@@ -507,13 +509,12 @@
         # domain_id = -1 here, because we want *every* oct
         cdef OctVisitorData data
         self.setup_data(&data, -1)
-        data.oref = 1
+        data.oref = 0
+        data.nz = 1
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
         ref_mask = np.zeros(self.nocts * data.nz, dtype="uint8") - 1
-        cdef void *p[2]
-        cdef np.uint8_t ad = int(always_descend)
-        p[0] = <void *> &ad
-        p[1] = ref_mask.data
+        cdef void *p[1]
+        p[0] = ref_mask.data
         data.array = p
         # Enforce partial_coverage here
         self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)

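One detail worth noting in save_octree: np.zeros(n, dtype="uint8") - 1 relies on unsigned wraparound to fill the mask with 255, an "unvisited" sentinel that the visitor then overwrites. A minimal sketch:

    import numpy as np

    ref_mask = np.zeros(8, dtype="uint8") - 1   # 0 - 1 wraps to 255
    print(ref_mask)                             # [255 255 255 255 255 255 255 255]
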
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -179,12 +179,8 @@
     cdef np.uint8_t *arr, res, ii, *always_descend
     ii = cind(data.ind[0], data.ind[1], data.ind[2])
     cdef void **p = <void **> data.array
-    always_descend = <np.uint8_t *> p[0]
-    arr = <np.uint8_t *> p[1]
-    if always_descend[0] == 1 and data.last == o.domain_ind:
-        return
-    data.last = o.domain_ind
-    if o.children == NULL or o.children[ii] == NULL:
+    arr = <np.uint8_t *> p[0]
+    if o.children == NULL:
         # Not refined.
         res = 0
     else:
@@ -216,7 +212,7 @@
             o.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 o.children[i] = NULL
-        for i in range(arr[data.index]):
+        for i in range(8):
             o.children[ii + i] = &octs[nocts[0]]
             o.children[ii + i].domain_ind = nocts[0]
             o.children[ii + i].file_ind = -1

diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -300,6 +300,7 @@
             # Now we locate the particle
             for i in range(3):
                 ind[i] = <int> ((pos[p, i] - self.left_edge[i])*self.idds[i])
+                ind[i] = iclip(ind[i], 0, self.dims[i])
             mask[ind[0],ind[1],ind[2]] |= val
 
     def identify_data_files(self, SelectorObject selector):

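The added iclip call guards against a particle sitting exactly on the right domain edge, for which the index computation lands one past the last cell. A pure-Python sketch of the clamp (the sketch assumes a mask of shape (dims,), so it clamps to dims - 1; the diff clamps to self.dims[i], whose value depends on how the mask is allocated):

    def iclip(v, lo, hi):
        return max(lo, min(v, hi))

    left_edge, right_edge, dims = 0.0, 1.0, 64
    idds = dims / (right_edge - left_edge)

    pos = 1.0                              # exactly on the right edge
    ind = int((pos - left_edge) * idds)    # 64: one past the last valid cell
    ind = iclip(ind, 0, dims - 1)          # clamped back into [0, 63]
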
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -541,7 +541,7 @@
 
     def __init__(self, dobj):
         for i in range(3):
-            self.center[i] = dobj.center[i]
+            self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj.radius)
         self.radius2 = self.radius * self.radius
 
@@ -702,7 +702,7 @@
         cdef int i
         for i in range(3):
             self.norm_vec[i] = dobj._norm_vec[i]
-            self.center[i] = dobj.center[i]
+            self.center[i] = _ensure_code(dobj.center[i])
         self.radius = _ensure_code(dobj._radius)
         self.radius2 = self.radius * self.radius
         self.height = _ensure_code(dobj._height)

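Routing the center components through _ensure_code keeps all of the selector's geometry in code units. A hedged sketch of the failure mode it prevents, with made-up numbers (not yt's actual implementation):

    code_length_cm = 3.0e24                     # hypothetical cm per code length
    center_cm = 1.5e24                          # a center accidentally left in cm
    center_code = center_cm / code_length_cm    # what _ensure_code guarantees
    radius_code = 0.1

    point = 0.55                                # position in code units
    print(abs(point - center_cm) <= radius_code)    # False for every sane point
    print(abs(point - center_code) <= radius_code)  # True: 0.55 within 0.5 +/- 0.1
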
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -284,7 +284,7 @@
             return input_array
         elif isinstance(input_array, np.ndarray):
             pass
-        elif iterable(input_array):
+        elif iterable(input_array) and input_array:
             if isinstance(input_array[0], YTArray):
                 return YTArray(np.array(input_array, dtype=dtype),
                                input_array[0].units)
@@ -955,7 +955,7 @@
         else:
             raise RuntimeError("Operation is not defined.")
         if unit is None:
-            out_arr = np.array(out_arr)
+            out_arr = np.array(out_arr, copy=False)
             return out_arr
         out_arr.units = unit
         if out_arr.size == 1:
@@ -965,7 +965,7 @@
                 # This happens if you do ndarray * YTQuantity. Explicitly
                 # casting to YTArray avoids creating a YTQuantity with size > 1
                 return YTArray(np.array(out_arr, unit))
-            return ret_class(np.array(out_arr), unit)
+            return ret_class(np.array(out_arr, copy=False), unit)
 
 
     def __reduce__(self):

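Two small but useful fixes here. First, iterable(input_array) and input_array short-circuits on empty sequences, so the branch never indexes input_array[0]. Second, np.array(..., copy=False) hands back the input unchanged when it is already a suitable ndarray, instead of always duplicating the buffer. A minimal sketch of both:

    import numpy as np

    x = []
    print(bool(x) and x[0])              # False: short-circuits, no IndexError

    a = np.arange(4)
    print(np.array(a) is a)              # False: a fresh copy every time
    print(np.array(a, copy=False) is a)  # True: no copy when none is needed
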
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -83,9 +83,9 @@
                 194.5*_vs**2.88+99.72*np.exp(-77.24*(_vs-0.742)**2.0)
               + 45.40*_vs**0.089+10.0)/255.0
 
-cdict = {'red':izip(_vs,_kamae_red,_kamae_red),
-         'green':izip(_vs,_kamae_grn,_kamae_grn),
-         'blue':izip(_vs,_kamae_blu,_kamae_blu)}
+cdict = {'red':zip(_vs,_kamae_red,_kamae_red),
+         'green':zip(_vs,_kamae_grn,_kamae_grn),
+         'blue':zip(_vs,_kamae_blu,_kamae_blu)}
 add_cmap('kamae', cdict)
 
 # This one is a simple black & green map

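Under Python 2, itertools.izip returns a one-shot iterator, while matplotlib may traverse each channel's (x, y0, y1) data more than once; zip returns a real list there, which survives reuse. A minimal sketch of the failure mode:

    vals = [0.0, 0.5, 1.0]

    one_shot = iter(zip(vals, vals, vals))   # behaves like Py2's izip
    print(list(one_shot))                    # [(0.0, 0.0, 0.0), ...]
    print(list(one_shot))                    # []  -- already exhausted

    as_list = list(zip(vals, vals, vals))    # what Py2's zip hands matplotlib
    print(len(as_list), len(as_list))        # 3 3  -- reusable
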
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -90,9 +90,9 @@
         self.axis = data_source.axis
         self.periodic = periodic
 
-        #h = getattr(data_source, "index", None)
-        #if h is not None:
-        #    h.plots.append(weakref.proxy(self))
+        ds = getattr(data_source, "pf", None)
+        if ds is not None:
+            ds.plots.append(weakref.proxy(self))
 
         # Handle periodicity, just in case
         if self.data_source.axis < 3:
@@ -349,9 +349,9 @@
         self.antialias = antialias
         self.data = {}
         
-        h = getattr(data_source, "index", None)
-        if h is not None:
-            h.plots.append(weakref.proxy(self))
+        ds = getattr(data_source, "pf", None)
+        if ds is not None:
+            ds.plots.append(weakref.proxy(self))
 
     def __getitem__(self, item) :
         if item in self.data: return self.data[item]

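Registering the buffer on data_source.pf via weakref.proxy lets the dataset invalidate its plots without keeping them alive; when the plot object dies, the proxy dies with it. A minimal sketch with hypothetical stand-ins for the dataset and plot classes:

    import weakref

    class Dataset(object):
        def __init__(self):
            self.plots = []

    class Plot(object):
        def __init__(self, ds):
            ds.plots.append(weakref.proxy(self))   # no strong reference held

    ds = Dataset()
    p = Plot(ds)
    del p                      # plot is collected; the proxy is now dead
    try:
        ds.plots[0].__class__
    except ReferenceError:
        print("stale plot detected")
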
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -448,6 +448,44 @@
             self.frb[f].convert_to_units(u)
         return self
 
+    @invalidate_plot
+    def set_origin(self, origin):
+        """Set the plot origin.
+
+        Parameters
+        ----------
+        origin : string or length 1, 2, or 3 sequence of strings
+            The location of the origin of the plot coordinate system.  This is
+            represented by a '-' separated string or a tuple of strings.  In the
+            first index, the y-location is given by 'lower', 'upper', or 'center'.
+            The second index is the x-location, given as 'left', 'right', or
+            'center'.  Finally, whether the origin is applied in 'domain'
+            space, plot 'window' space, or the 'native' simulation coordinate
+            system is given. For example, 'upper-right-domain' and ['upper',
+            'right', 'domain'] both place the origin in the upper right hand
+            corner of domain space. If x or y are not given, a value is inferred.
+            For instance, 'left-domain' corresponds to the lower-left hand corner
+            of the simulation domain, 'center-domain' corresponds to the center
+            of the simulation domain, or 'center-window' for the center of the
+            plot window. Further examples:
+
+            ==================================     ============================
+            format                                 example
+            ==================================     ============================
+            '{space}'                              'domain'
+            '{xloc}-{space}'                       'left-window'
+            '{yloc}-{space}'                       'upper-domain'
+            '{yloc}-{xloc}-{space}'                'lower-right-window'
+            ('{space}',)                           ('window',)
+            ('{xloc}', '{space}')                  ('right', 'domain')
+            ('{yloc}', '{space}')                  ('lower', 'window')
+            ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
+            ==================================     ============================
+
+        """
+        self.origin = origin
+        return self
+
     @invalidate_data
     def _set_window(self, bounds):
         """Set the bounds of the plot window.
@@ -761,12 +799,20 @@
 
             image = self.frb[f]
 
-            if image.max() == image.min() and zlim == (None, None):
-                if self._field_transform[f] == log_transform:
-                    mylog.warning("Plot image for field %s has zero dynamic "
-                                  "range. Min = Max = %d." % (f, image.max()))
+            if self._field_transform[f] == log_transform:
+                msg = None
+                if zlim != (None, None):
+                    pass
+                elif image.max() == image.min():
+                    msg = "Plot image for field %s has zero dynamic " \
+                          "range. Min = Max = %d." % (f, image.max())
+                elif image.max() <= 0:
+                    msg = "Plot image for field %s has no positive " \
+                          "values.  Max = %d." % (f, image.max())
+                if msg is not None:
+                    mylog.warning(msg)
                     mylog.warning("Switching to linear colorbar scaling.")
-                self._field_transform[f] = linear_transform
+                    self._field_transform[f] = linear_transform
 
             fp = self._font_properties
 

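The restructured warning logic now only applies to log-scaled fields, and adds a second fallback case: an image with no positive values cannot be log-scaled at all. The decision, restated as a standalone sketch:

    import numpy as np

    def linear_fallback_reason(image, zlim=(None, None)):
        if zlim != (None, None):
            return None                      # explicit limits win
        if image.max() == image.min():
            return "zero dynamic range"
        if image.max() <= 0:
            return "no positive values"      # log10 undefined everywhere
        return None

    print(linear_fallback_reason(np.full((4, 4), 2.0)))     # zero dynamic range
    print(linear_fallback_reason(np.array([[-1.0, 0.0]])))  # no positive values
    print(linear_fallback_reason(np.array([[1.0, 10.0]])))  # None: log is fine
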
diff -r a5f2cac5c7997ca5005cf3e707061e4f133d371d -r 5a37363c2829d7280002fdf137bc0e1d8120401f yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -620,13 +620,13 @@
             label = '$\\rm{log}\\/ $' + label
         self.transfer_function.vert_cbar(ax=cb.ax, label=label)
 
-    def show_mpl(self, im, enhance=True):
+    def show_mpl(self, im, enhance=True, clear_fig=True):
         if self._pylab is None:
             import pylab
             self._pylab = pylab
         if self._render_figure is None:
             self._render_figure = self._pylab.figure(1)
-        self._render_figure.clf()
+        if clear_fig: self._render_figure.clf()
 
         if enhance:
             nz = im[im > 0.0]
@@ -642,9 +642,9 @@
     def draw(self):
         self._pylab.draw()
     
-    def save_annotated(self, fn, image, enhance=True, dpi=100):
+    def save_annotated(self, fn, image, enhance=True, dpi=100, clear_fig=True):
         image = image.swapaxes(0,1) 
-        ax = self.show_mpl(image, enhance=enhance)
+        ax = self.show_mpl(image, enhance=enhance, clear_fig=clear_fig)
         self.annotate(ax.axes, enhance)
         self._pylab.savefig(fn, bbox_inches='tight', facecolor='black', dpi=dpi)
         

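A hedged usage sketch of the new clear_fig keyword: leaving the figure uncleared lets a second render draw over the first. Here cam stands for an existing Camera, and whether snapshot() is the right rendering call for a given setup may vary:

    im1 = cam.snapshot()
    cam.save_annotated("frame1.png", im1)                     # default: clears figure
    im2 = cam.snapshot()
    cam.save_annotated("overlay.png", im2, clear_fig=False)   # composited on frame1
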

https://bitbucket.org/yt_analysis/yt/commits/436bf4d4c9e2/
Changeset:   436bf4d4c9e2
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 01:58:12
Summary:     Updating README to have an accurate link.
Affected #:  1 file

diff -r 5a37363c2829d7280002fdf137bc0e1d8120401f -r 436bf4d4c9e28672dc147d7e44ecf6181c791339 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/


https://bitbucket.org/yt_analysis/yt/commits/468c4128dcd4/
Changeset:   468c4128dcd4
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 02:40:16
Summary:     Updating the documentation on how to write documentation.
Affected #:  1 file

diff -r 436bf4d4c9e28672dc147d7e44ecf6181c791339 -r 468c4128dcd4384f2cf413450ea853e63a4c4c53 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -74,6 +74,8 @@
 this manner, but still want to contribute, please consider creating an external
 package, which we'll happily link to.
 
+.. _requirements-for-code-submission:
+
 Requirements for Code Submission
 ++++++++++++++++++++++++++++++++
 
@@ -88,22 +90,22 @@
   * New Features
 
     * New unit tests (possibly new answer tests) (See :ref:`testing`)
-    * Docstrings for public API
-    * Addition of new feature to the narrative documentation
-    * Addition of cookbook recipe
+    * Docstrings in the source code for the public API
+    * Addition of new feature to the narrative documentation (See :ref:`writing_documentation`)
+    * Addition of cookbook recipe (See :ref:`writing_documentation`) 
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Extension or Breakage of API in Existing Features
 
-    * Update existing narrative docs and docstrings
-    * Update existing cookbook recipes
+    * Update existing narrative docs and docstrings (See :ref:`writing_documentation`) 
+    * Update existing cookbook recipes (See :ref:`writing_documentation`) 
     * Modify or create new unit tests (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Bug fixes
 
     * Unit test is encouraged, to ensure breakage does not happen again in the
-      future.
+      future. (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
 When submitting, you will be asked to make sure that your changes meet all of
@@ -178,14 +180,14 @@
 
   $ python2.7 setup.py build --compiler=mingw32 install
 
+.. _sharing-changes:
+
 Making and Sharing Changes
 ++++++++++++++++++++++++++
 
 The simplest way to submit changes to yt is to commit changes in your
 ``$YT_DEST/src/yt-hg`` directory, fork the repository on BitBucket,  push the
-changesets to your fork, and then issue a pull request.  If you will be
-developing much more in-depth features for yt, you will also
-likely want to edit the paths in your 
+changesets to your fork, and then issue a pull request.  
 
 Here's a more detailed flowchart of how to submit changes.
 
@@ -230,19 +232,59 @@
 .. _writing_documentation:
 
 How to Write Documentation
-++++++++++++++++++++++++++
+--------------------------
 
-The process for writing documentation is identical to the above, except that
-you're modifying source files in the doc directory (i.e. ``$YT_DEST/src/yt-hg/doc``) 
-instead of the src directory (i.e. ``$YT_DEST/src/yt-hg/yt``) of the yt repository.
+Writing documentation is one of the most important but often overlooked tasks
+for increasing yt's impact in the community.  It is the way in which the 
+world will understand how to use our code, so it needs to be done concisely
+and understandably.  Typically, when a developer submits some piece of code 
+with new functionality, she should also include documentation on how to use 
+that functionality (as per :ref:`requirements-for-code-submission`).  
+Depending on the nature of the code addition, this could be a new narrative 
+docs section describing how the new code works and how to use it, it could 
+include a recipe in the cookbook section, or it could simply be adding a note 
+in the relevant docs text somewhere.
+
+The documentation exists in the main mercurial code repository for yt in the 
+``doc`` directory (i.e. ``$YT_DEST/src/yt-hg/doc/source`` on systems installed 
+using the installer script).  It is organized hierarchically into the main 
+categories of:
+
+ * Visualizing
+ * Analyzing
+ * Examining
+ * Cookbook
+ * Bootcamp
+ * Developing
+ * Reference
+ * Help
+
+You will have to figure out where your new/modified doc fits into this, but 
+browsing through the pre-built documentation is a good way to sort that out.
+
 All the source for the documentation is written in 
-`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.
+`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.  ReST is very
+straightforward to mark up in a text editor, and if you are new to it, we
+recommend just using other .rst files in the existing documentation as 
+templates or checking out the link for more specific references.
 
-Cookbook recipes go in ``source/cookbook/`` and must be added to one of the
-``.rst`` files in that directory.  
+New cookbook recipes (see :ref:`cookbook`) are very helpful for the community 
+as they provide simple annotated recipes on how to use specific functionality.  
+To add one, create a concise python script which demonstrates some 
+functionality and pare it down to its minimum.  Add some comment lines to 
+describe what it is that you're doing along the way.  Place this ``.py`` file 
+in the ``source/cookbook/`` directory, and then link to it explicitly in one 
+of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, 
+``cosmological_analysis.rst``, etc.), and add some description of what the script 
+actually does.  
 
-For more information on how to build the documentation to make sure it looks
-the way you expect it to after modifying it, see :ref:`docs_build`.
+After you have made your modifications to the docs, you will want to make sure
+that they render the way you expect them to render.  For more information on
+this, see the section on :ref:`docs_build`.
+
+When you have completed your documentation additions, commit your changes 
+to your repository and make a pull request in the same way you would contribute 
+a change to the codebase, as described in the section on :ref:`sharing-changes`.
 
 How To Get The Source Code For Editing
 --------------------------------------

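For reference, a hedged sketch of what a minimal cookbook recipe along these lines might look like; "IsolatedGalaxy" is one of the sample datasets at http://yt-project.org/data, and the plotting calls reflect the yt-3.0 API:

    import yt

    # Load one of the sample datasets from http://yt-project.org/data.
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

    # Slice through the z-axis and save; when the full docs are built,
    # the resulting image is attached inline after the script.
    slc = yt.SlicePlot(ds, "z", "density")
    slc.save()
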

https://bitbucket.org/yt_analysis/yt/commits/3bd706bee3f0/
Changeset:   3bd706bee3f0
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 23:14:46
Summary:     Adding some more notes on how to write documentation.
Affected #:  1 file

diff -r 468c4128dcd4384f2cf413450ea853e63a4c4c53 -r 3bd706bee3f064ef0ef12f7b072c94205cc72b32 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -265,8 +265,9 @@
 All the source for the documentation is written in 
 `Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.  ReST is very
 straightforward to mark up in a text editor, and if you are new to it, we
-recommend just using other .rst files in the existing documentation as 
-templates or checking out the link for more specific references.
+recommend just using other .rst files in the existing yt documentation as 
+templates or checking out the 
+`ReST reference documentation <http://sphinx-doc.org/rest.html>`_.
 
 New cookbook recipes (see :ref:`cookbook`) are very helpful for the community 
 as they provide simple annotated recipes on how to use specific functionality.  
@@ -276,11 +277,18 @@
 in the ``source/cookbook/`` directory, and then link to it explicitly in one 
 of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, 
 ``cosmological_analysis.rst``, etc.), and add some description of what the script 
-actually does.  
+actually does.  We recommend that you use one of the 
+`sample data sets <http://yt-project.org/data>`_ in your recipe.  When the full
+docs are built, each of the cookbook recipes are executed dynamically on 
+a system which has access to all of the sample datasets.  Any output images 
+generated by your script will then be attached inline in the built documentation 
+directly following your script.
 
 After you have made your modifications to the docs, you will want to make sure
 that they render the way you expect them to render.  For more information on
-this, see the section on :ref:`docs_build`.
+this, see the section on :ref:`docs_build`.  Unless you're contributing cookbook
+recipes or notebooks which require a dynamic build, you can probably get 
+away with just doing a 'quick' docs build.
 
 When you have completed your documentation additions, commit your changes 
 to your repository and make a pull request in the same way you would contribute 


https://bitbucket.org/yt_analysis/yt/commits/c180be94199d/
Changeset:   c180be94199d
Branch:      yt-3.0
User:        chummels
Date:        2014-06-14 23:17:39
Summary:     Adding note in docs about the auto-update pull request feature.
Affected #:  1 file

diff -r 3bd706bee3f064ef0ef12f7b072c94205cc72b32 -r c180be94199d28a6e50b5316ef78ef16e50275cf doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -226,8 +226,7 @@
 
         hg push https://bitbucket.org/YourUsername/yt/
 
-  #. Update your pull request by visiting
-     https://bitbucket.org/YourUsername/yt/pull-request/new
+  #. Your pull request will be automatically updated.
 
 .. _writing_documentation:
 


https://bitbucket.org/yt_analysis/yt/commits/4e6992f76dfc/
Changeset:   4e6992f76dfc
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-06-16 19:46:37
Summary:     Merged in chummels/yt/yt-3.0 (pull request #951)

Updating docs on how to write docs
Affected #:  4 files

diff -r f20d58ca2848dd2df5c6e97ae1627b0a623f130a -r 4e6992f76dfc157cfc5a7b2336ac8ed0d14811e2 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r f20d58ca2848dd2df5c6e97ae1627b0a623f130a -r 4e6992f76dfc157cfc5a7b2336ac8ed0d14811e2 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -74,6 +74,8 @@
 this manner, but still want to contribute, please consider creating an external
 package, which we'll happily link to.
 
+.. _requirements-for-code-submission:
+
 Requirements for Code Submission
 ++++++++++++++++++++++++++++++++
 
@@ -88,22 +90,22 @@
   * New Features
 
     * New unit tests (possibly new answer tests) (See :ref:`testing`)
-    * Docstrings for public API
-    * Addition of new feature to the narrative documentation
-    * Addition of cookbook recipe
+    * Docstrings in the source code for the public API
+    * Addition of new feature to the narrative documentation (See :ref:`writing_documentation`)
+    * Addition of cookbook recipe (See :ref:`writing_documentation`) 
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Extension or Breakage of API in Existing Features
 
-    * Update existing narrative docs and docstrings
-    * Update existing cookbook recipes
+    * Update existing narrative docs and docstrings (See :ref:`writing_documentation`) 
+    * Update existing cookbook recipes (See :ref:`writing_documentation`) 
     * Modify or create new unit tests (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
   * Bug fixes
 
     * Unit test is encouraged, to ensure breakage does not happen again in the
-      future.
+      future. (See :ref:`testing`)
     * Issue created on issue tracker, to ensure this is added to the changelog
 
 When submitting, you will be asked to make sure that your changes meet all of
@@ -178,14 +180,14 @@
 
   $ python2.7 setup.py build --compiler=mingw32 install
 
+.. _sharing-changes:
+
 Making and Sharing Changes
 ++++++++++++++++++++++++++
 
 The simplest way to submit changes to yt is to commit changes in your
 ``$YT_DEST/src/yt-hg`` directory, fork the repository on BitBucket,  push the
-changesets to your fork, and then issue a pull request.  If you will be
-developing much more in-depth features for yt, you will also
-likely want to edit the paths in your 
+changesets to your fork, and then issue a pull request.  
 
 Here's a more detailed flowchart of how to submit changes.
 
@@ -224,25 +226,72 @@
 
         hg push https://bitbucket.org/YourUsername/yt/
 
-  #. Update your pull request by visiting
-     https://bitbucket.org/YourUsername/yt/pull-request/new
+  #. Your pull request will be automatically updated.
 
 .. _writing_documentation:
 
 How to Write Documentation
-++++++++++++++++++++++++++
+--------------------------
 
-The process for writing documentation is identical to the above, except that
-you're modifying source files in the doc directory (i.e. ``$YT_DEST/src/yt-hg/doc``) 
-instead of the src directory (i.e. ``$YT_DEST/src/yt-hg/yt``) of the yt repository.
+Writing documentation is one of the most important but often overlooked tasks
+for increasing yt's impact in the community.  It is the way in which the 
+world will understand how to use our code, so it needs to be done concisely
+and understandably.  Typically, when a developer submits some piece of code 
+with new functionality, she should also include documentation on how to use 
+that functionality (as per :ref:`requirements-for-code-submission`).  
+Depending on the nature of the code addition, this could be a new narrative 
+docs section describing how the new code works and how to use it, it could 
+include a recipe in the cookbook section, or it could simply be adding a note 
+in the relevant docs text somewhere.
+
+The documentation exists in the main mercurial code repository for yt in the 
+``doc`` directory (i.e. ``$YT_DEST/src/yt-hg/doc/source`` on systems installed 
+using the installer script).  It is organized hierarchically into the main 
+categories of:
+
+ * Visualizing
+ * Analyzing
+ * Examining
+ * Cookbook
+ * Bootcamp
+ * Developing
+ * Reference
+ * Help
+
+You will have to figure out where your new/modified doc fits into this, but 
+browsing through the pre-built documentation is a good way to sort that out.
+
 All the source for the documentation is written in 
-`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.
+`Sphinx <http://sphinx-doc.org/>`_, which uses ReST for markup.  ReST is very
+straightforward to mark up in a text editor, and if you are new to it, we
+recommend just using other .rst files in the existing yt documentation as 
+templates or checking out the 
+`ReST reference documentation <http://sphinx-doc.org/rest.html>`_.
 
-Cookbook recipes go in ``source/cookbook/`` and must be added to one of the
-``.rst`` files in that directory.  
+New cookbook recipes (see :ref:`cookbook`) are very helpful for the community 
+as they provide simple annotated recipes on how to use specific functionality.  
+To add one, create a concise python script which demonstrates some 
+functionality and pare it down to its minimum.  Add some comment lines to 
+describe what it is that you're doing along the way.  Place this ``.py`` file 
+in the ``source/cookbook/`` directory, and then link to it explicitly in one 
+of the relevant ``.rst`` files in that directory (e.g. ``complex_plots.rst``, 
+``cosmological_analysis.rst``, etc.), and add some description of what the script 
+actually does.  We recommend that you use one of the 
+`sample data sets <http://yt-project.org/data>`_ in your recipe.  When the full
+docs are built, each of the cookbook recipes are executed dynamically on 
+a system which has access to all of the sample datasets.  Any output images 
+generated by your script will then be attached inline in the built documentation 
+directly following your script.
 
-For more information on how to build the documentation to make sure it looks
-the way you expect it to after modifying it, see :ref:`docs_build`.
+After you have made your modifications to the docs, you will want to make sure
+that they render the way you expect them to render.  For more information on
+this, see the section on :ref:`docs_build`.  Unless you're contributing cookbook
+recipes or notebooks which require a dynamic build, you can probably get 
+away with just doing a 'quick' docs build.
+
+When you have completed your documentation additions, commit your changes 
+to your repository and make a pull request in the same way you would contribute 
+a change to the codebase, as described in the section on :ref:`sharing-changes`.
 
 How To Get The Source Code For Editing
 --------------------------------------

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list