[yt-svn] commit/yt: 9 new changesets

commits-noreply at bitbucket.org
Wed Oct 22 07:43:25 PDT 2014


9 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3ee77daca8fd/
Changeset:   3ee77daca8fd
Branch:      yt
User:        mzingale
Date:        2014-10-01 00:39:02+00:00
Summary:     This change addresses issue #914 -- the orientation of draw_domain()
and draw_coordinate_vectors() is now correct.
Affected #:  1 file

diff -r 7c48b53f10cb25b0b2aa820706f5f54ec292e9ae -r 3ee77daca8fd28dd2fe5230c0c31978d0daf607b yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -316,8 +316,13 @@
         nim = im.rescale(inline=False)
         enhance_rgba(nim)
         nim.add_background_color('black', inline=True)
-       
+
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]       
         lines(nim, px, py, colors, 24)
+        im = im[:,::-1,:]
+
         return nim
 
     def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
@@ -370,11 +375,15 @@
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
 
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
                   np.array([color, color]), 1, thickness)
+        im = im[:,::-1,:]
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -415,7 +424,12 @@
         py1 = int(self.resolution[0]*(dx1/self.width[0]))
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
+
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]))
+        im = im[:,::-1,:]
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -497,7 +511,11 @@
 
         px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
        
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         lines(im, px, py, color.reshape(1,4), 24)
+        im = im[:,::-1,:]
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.


https://bitbucket.org/yt_analysis/yt/commits/67186b163c69/
Changeset:   67186b163c69
Branch:      yt
User:        mzingale
Date:        2014-08-27 00:01:34+00:00
Summary:     merge
Affected #:  1 file

diff -r 5615739b9085637b4d1b61d390c22abfc52a591f -r 67186b163c69bbce247b608a8c88089adbd18ff0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -432,6 +432,12 @@
         min_level = self.min_level or 0
         max_level = self.max_level or levels.max()
 
+        # sorts the three arrays in order of ascending level - this makes images look nicer
+        new_indices = np.argsort(levels)
+        levels = levels[new_indices]
+        GLE = GLE[new_indices]
+        GRE = GRE[new_indices]
+        
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * DW[px_index]
             pyo = py_off * DW[py_index]
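
The new ordering is plain NumPy fancy indexing: one argsort produces a
permutation that co-sorts all three arrays, so coarse grids are drawn
first and finer grids land on top. A minimal sketch with made-up
values:

    import numpy as np

    levels = np.array([2, 0, 1])             # refinement level per grid
    GLE = np.array([[0.4], [0.0], [0.2]])    # toy grid left edges
    GRE = np.array([[0.6], [1.0], [0.8]])    # toy grid right edges

    new_indices = np.argsort(levels)         # -> array([1, 2, 0])
    levels = levels[new_indices]             # -> array([0, 1, 2])
    GLE = GLE[new_indices]                   # rows follow the same order
    GRE = GRE[new_indices]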


https://bitbucket.org/yt_analysis/yt/commits/1364dd1c2fab/
Changeset:   1364dd1c2fab
Branch:      yt
User:        mzingale
Date:        2014-09-03 21:44:04+00:00
Summary:     merge
Affected #:  63 files

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -1026,7 +1037,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -37,7 +37,7 @@
 .. note::
 
    The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
+   http://yt-project.org/data.  See :ref:`quickstart-introduction` for more
    details.
 
 Let us know if you would like to contribute other example notebooks, or have

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/1)_Introduction.ipynb
--- a/doc/source/bootcamp/1)_Introduction.ipynb
+++ /dev/null
@@ -1,72 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:39620670ce7751b23f30d2123fd3598de1c7843331f65de13e29f4ae9f759e0f"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Welcome to the yt bootcamp!\n",
-      "\n",
-      "In this brief tutorial, we'll go over how to load up data, analyze things, inspect your data, and make some visualizations.\n",
-      "\n",
-      "Our documentation page can provide information on a variety of the commands that are used here, both in narrative documentation as well as recipes for specific functionality in our cookbook.  The documentation exists at http://yt-project.org/doc/.  If you encounter problems, look for help here: http://yt-project.org/doc/help/index.html.\n",
-      "\n",
-      "## Acquiring the datasets for this tutorial\n",
-      "\n",
-      "If you are executing these tutorials interactively, you need some sample datasets on which to run the code.  You can download these datasets at http://yt-project.org/data/.  The datasets necessary for each lesson are noted next to the corresponding tutorial.\n",
-      "\n",
-      "## What's Next?\n",
-      "\n",
-      "The Notebooks are meant to be explored in this order:\n",
-      "\n",
-      "1. Introduction\n",
-      "2. Data Inspection (IsolatedGalaxy dataset)\n",
-      "3. Simple Visualization (enzo_tiny_cosmology & Enzo_64 datasets)\n",
-      "4. Data Objects and Time Series (IsolatedGalaxy dataset)\n",
-      "5. Derived Fields and Profiles (IsolatedGalaxy dataset)\n",
-      "6. Volume Rendering (IsolatedGalaxy dataset)"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The following code will download the data needed for this tutorial automatically using `curl`. It may take some time so please wait when the kernel is busy. You will need to set `download_datasets` to True before using it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "download_datasets = False\n",
-      "if download_datasets:\n",
-      "    !curl -sSO http://yt-project.org/data/enzo_tiny_cosmology.tar\n",
-      "    print \"Got enzo_tiny_cosmology\"\n",
-      "    !tar xf enzo_tiny_cosmology.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/Enzo_64.tar\n",
-      "    print \"Got Enzo_64\"\n",
-      "    !tar xf Enzo_64.tar\n",
-      "    \n",
-      "    !curl -sSO http://yt-project.org/data/IsolatedGalaxy.tar\n",
-      "    print \"Got IsolatedGalaxy\"\n",
-      "    !tar xf IsolatedGalaxy.tar\n",
-      "    \n",
-      "    print \"All done!\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/2)_Data_Inspection.ipynb
--- a/doc/source/bootcamp/2)_Data_Inspection.ipynb
+++ /dev/null
@@ -1,384 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a8fe78715c1f3900c37c675d84320fe65f0ba8734abba60fd12e74d957e5d8ee"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Starting Out and Loading Data\n",
-      "\n",
-      "We're going to get started by loading up yt.  This next command brings all of the libraries into memory and sets up our environment."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we've loaded yt, we can load up some data.  Let's load the `IsolatedGalaxy` dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Fields and Facts\n",
-      "\n",
-      "When you call the `load` function, yt tries to do very little -- this is designed to be a fast operation, just setting up some information about the simulation.  Now, the first time you access the \"index\" it will read and load the mesh and then determine where data is placed in the physical domain and on disk.  Once it knows that, yt can tell you some statistics about the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.print_stats()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also tell you the fields it found on disk:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, all of the fields it thinks it knows how to generate:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.derived_field_list"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also transparently generate fields.  However, we encourage you to examine exactly what yt is doing when it generates those fields.  To see, you can ask for the source of a given field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.field_info[\"gas\", \"vorticity_x\"].get_source()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt stores information about the domain of the simulation:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt can also convert this into various units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.domain_width.in_units(\"kpc\")\n",
-      "print ds.domain_width.in_units(\"au\")\n",
-      "print ds.domain_width.in_units(\"mile\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Mesh Structure\n",
-      "\n",
-      "If you're using a simulation type that has grids (for instance, here we're using an Enzo simulation) you can examine the structure of the mesh.  For the most part, you probably won't have to use this unless you're debugging a simulation or examining in detail what is going on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grid_left_edge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "But, you may have to access information about individual grid objects!  Each grid object mediates accessing data from the disk and has a number of attributes that tell you about it.  The index (`ds.index` here) has an attribute `grids` which is all of the grid objects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ds.index.grids[1]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g = ds.index.grids[1]\n",
-      "print g"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Grids have dimensions, extents, level, and even a list of Child grids."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.ActiveDimensions"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.LeftEdge, g.RightEdge"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Level"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g.Children"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Advanced Grid Inspection\n",
-      "\n",
-      "If we want to examine grids only at a given level, we can!  Not only that, but we can load data and take a look at various fields.\n",
-      "\n",
-      "*This section can be skipped!*"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "gs = ds.index.select_grids(ds.index.max_level)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "g2 = gs[0]\n",
-      "print g2\n",
-      "print g2.Parent\n",
-      "print g2.get_global_startindex()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print g2[\"density\"][:,:,0]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (g2.Parent.child_mask == 0).sum() * 8\n",
-      "print g2.ActiveDimensions.prod()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "for f in ds.field_list:\n",
-      "    fv = g[f]\n",
-      "    if fv.size == 0: continue\n",
-      "    print f, fv.min(), fv.max()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Examining Data in Regions\n",
-      "\n",
-      "yt provides data object selectors.  In subsequent notebooks we'll examine these in more detail, but we can select a sphere of data and perform a number of operations on it.  yt makes it easy to operate on fluid fields in an object in *bulk*, but you can also examine individual field values.\n",
-      "\n",
-      "This creates a sphere selector positioned at the most dense point in the simulation that has a radius of 10 kpc."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10, 'kpc'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can calculate a bunch of bulk quantities.  Here's that list, but there's a list in the docs, too!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Let's look at the total mass.  This is how you call a given quantity.  yt calls these \"Derived Quantities\".  We'll talk about a few in a later notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print sp.quantities.total_mass()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ /dev/null
@@ -1,275 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c00ba7fdbbd9ea957d06060ad70f06f629b1fd4ebf5379c1fdad2697ab0a4cd6"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Simple Visualizations of Data\n",
-      "\n",
-      "Just like in our first notebook, we have to load yt and then some data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "For this notebook, we'll load up a cosmology dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "print \"Redshift =\", ds.current_redshift"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the terms that yt uses, a projection is a line integral through the domain.  This can either be unweighted (in which case a column density is returned) or weighted, in which case an average value is returned.  Projections are, like all other data objects in yt, full-fledged data objects that churn through data and present that to you.  However, we also provide a simple method of creating Projections and plotting them in a single step.  This is called a Plot Window, here specifically known as a `ProjectionPlot`.  One thing to note is that in yt, we project all the way through the entire domain at a single time.  This means that the first call to projecting can be somewhat time consuming, but panning, zooming and plotting are all quite fast.\n",
-      "\n",
-      "yt is designed to make it easy to make nice plots and straightforward to modify those plots directly.  The cookbook in the documentation includes detailed examples of this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"y\", \"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The `show` command simply sends the plot to the IPython notebook.  You can also call `p.save()` which will save the plot to the file system.  This function accepts an argument, which will be pre-prended to the filename and can be used to name it based on the width or to supply a location.\n",
-      "\n",
-      "Now we'll zoom and pan a bit."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((0.1, 0.0))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.pan_rel((-0.25, -0.5))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.zoom(0.1)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we specify multiple fields, each time we call `show` we get multiple plots back.  Same for `save`!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p = yt.ProjectionPlot(ds, \"z\", [\"density\", \"temperature\"], weight_field=\"density\")\n",
-      "p.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the colormap on a field-by-field basis."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "p.set_cmap(\"temperature\", \"hot\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "And, we can re-center the plot on different locations.  One possible use of this would be to make a single `ProjectionPlot` which you move around to look at different regions in your simulation, saving at each one."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "v, c = ds.find_max(\"density\")\n",
-      "p.set_center((c[0], c[1]))\n",
-      "p.zoom(10)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Okay, let's load up a bigger simulation (from `Enzo_64` this time) and make a slice plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"Enzo_64/DD0043/data0043\")\n",
-      "s = yt.SlicePlot(ds, \"z\", [\"density\", \"velocity_magnitude\"], center=\"max\")\n",
-      "s.set_cmap(\"velocity_magnitude\", \"kamae\")\n",
-      "s.zoom(10.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can adjust the logging of various fields:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.set_log(\"velocity_magnitude\", True)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides many different annotations for your plots.  You can see all of these in the documentation, or if you type `s.annotate_` and press tab, a list will show up here.  We'll annotate with velocity arrows."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.annotate_velocity()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Contours can also be overlaid:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s = yt.SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
-      "s.annotate_contour(\"temperature\")\n",
-      "s.zoom(2.5)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, we can save out to the file system."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "s.save()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ /dev/null
@@ -1,382 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:a46e1baa90d32045c2b524100f28bad41b3665249612c9a275ee0375a6f4be20"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Data Objects and Time Series Data\n",
-      "\n",
-      "Just like before, we will load up yt.  Since we'll be using pylab to plot some data in this notebook, we additionally tell matplotlib to place plots inline inside the notebook."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from matplotlib import pylab\n",
-      "from yt.analysis_modules.halo_finding.api import HaloFinder"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Time Series Data\n",
-      "\n",
-      "Unlike before, instead of loading a single dataset, this time we'll load a bunch which we'll examine in sequence.  This command creates a `DatasetSeries` object, which can be iterated over (including in parallel, which is outside the scope of this bootcamp) and analyzed.  There are some other helpful operations it can provide, but we'll stick to the basics here.\n",
-      "\n",
-      "Note that you can specify either a list of filenames, or a glob (i.e., asterisk) pattern in this."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ts = yt.DatasetSeries(\"enzo_tiny_cosmology/*/*.hierarchy\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 1: Simple Time Series\n",
-      "\n",
-      "As a simple example of how we can use this functionality, let's find the min and max of the density as a function of time in this simulation.  To do this we use the construction `for ds in ts` where `ds` means \"Dataset\" and `ts` is the \"Time Series\" we just loaded up.  For each dataset, we'll create an object (`dd`) that covers the entire domain.  (`all_data` is a shorthand function for this.)  We'll then call the `extrema` Derived Quantity, and append the min and max to our extrema outputs."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "rho_ex = []\n",
-      "times = []\n",
-      "for ds in ts:\n",
-      "    dd = ds.all_data()\n",
-      "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
-      "rho_ex = np.array(rho_ex)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the minimum and the maximum:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogy(times, rho_ex[:,0], '-xk', label='Minimum')\n",
-      "pylab.semilogy(times, rho_ex[:,1], '-xr', label='Maximum')\n",
-      "pylab.ylabel(\"Density ($g/cm^3$)\")\n",
-      "pylab.xlabel(\"Time (Gyr)\")\n",
-      "pylab.legend()\n",
-      "pylab.ylim(1e-32, 1e-21)\n",
-      "pylab.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Example 2: Advanced Time Series\n",
-      "\n",
-      "Let's do something a bit different.  Let's calculate the total mass inside halos and outside halos.\n",
-      "\n",
-      "This actually touches a lot of different pieces of machinery in yt.  For every dataset, we will run the halo finder HOP.  Then, we calculate the total mass in the domain.  Then, for each halo, we calculate the sum of the baryon mass in that halo.  We'll keep running tallies of these two things."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units import Msun\n",
-      "\n",
-      "mass = []\n",
-      "zs = []\n",
-      "for ds in ts:\n",
-      "    halos = HaloFinder(ds)\n",
-      "    dd = ds.all_data()\n",
-      "    total_mass = dd.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    total_in_baryons = 0.0*Msun\n",
-      "    for halo in halos:\n",
-      "        sp = halo.get_sphere()\n",
-      "        total_in_baryons += sp.quantities.total_quantity(\"cell_mass\").in_units(\"Msun\")\n",
-      "    mass.append(total_in_baryons/total_mass)\n",
-      "    zs.append(ds.current_redshift)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now let's plot them!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.semilogx(zs, mass, '-xb')\n",
-      "pylab.xlabel(\"Redshift\")\n",
-      "pylab.ylabel(\"Mass in halos / Total mass\")\n",
-      "pylab.xlim(max(zs), min(zs))\n",
-      "pylab.ylim(-0.01, .18)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Data Objects\n",
-      "\n",
-      "Time series data have many applications, but most of them rely on examining the underlying data in some way.  Below, we'll see how to use and manipulate data objects.\n",
-      "\n",
-      "### Ray Queries\n",
-      "\n",
-      "yt provides the ability to examine rays, or lines, through the domain.  Note that these are not periodic, unlike most other data objects.  We create a ray object and can then examine quantities of it.  Rays have the special fields `t` and `dts`, which correspond to the time the ray enters a given cell and the distance it travels through that cell.\n",
-      "\n",
-      "To create a ray, we specify the start and end points.\n",
-      "\n",
-      "Note that we need to convert these arrays to numpy arrays due to a bug in matplotlib 1.3.1."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ray = ds.ray([0.1, 0.2, 0.3], [0.9, 0.8, 0.7])\n",
-      "pylab.semilogy(np.array(ray[\"t\"]), np.array(ray[\"density\"]))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"dts\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"t\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print ray[\"x\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Slice Queries\n",
-      "\n",
-      "While slices are often used for visualization, they can be useful for other operations as well.  yt regards slices as multi-resolution objects.  They are an array of cells that are not all the same size; it only returns the cells at the highest resolution that it intersects.  (This is true for all yt data objects.)  Slices and projections have the special fields `px`, `py`, `pdx` and `pdy`, which correspond to the coordinates and half-widths in the pixel plane."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "v, c = ds.find_max(\"density\")\n",
-      "sl = ds.slice(0, c[0])\n",
-      "print sl[\"index\", \"x\"]\n",
-      "print sl[\"index\", \"z\"]\n",
-      "print sl[\"pdx\"]\n",
-      "print sl[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to do something interesting with a `Slice`, we can turn it into a `FixedResolutionBuffer`.  This object can be queried and will return a 2D array of values."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "frb = sl.to_frb((50.0, 'kpc'), 1024)\n",
-      "print frb[\"gas\", \"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "yt provides a few functions for writing arrays to disk, particularly in image form.  Here we'll write out the log of `density`, and then use IPython to display it back here.  Note that for the most part, you will probably want to use a `PlotWindow` for this, but in the case that it is useful you can directly manipulate the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "yt.write_image(np.log10(frb[\"gas\", \"density\"]), \"temp.png\")\n",
-      "from IPython.display import Image\n",
-      "Image(filename = \"temp.png\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Off-Axis Slices\n",
-      "\n",
-      "yt provides not only slices, but off-axis slices that are sometimes called \"cutting planes.\"  These are specified by (in order) a normal vector and a center.  Here we've set the normal vector to `[0.2, 0.3, 0.5]` and the center to be the point of maximum density.\n",
-      "\n",
-      "We can then turn these directly into plot windows using `to_pw`.  Note that the `to_pw` and `to_frb` methods are available on slices, off-axis slices, and projections, and can be used on any of them."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cp = ds.cutting([0.2, 0.3, 0.5], \"max\")\n",
-      "pw = cp.to_pw(fields = [(\"gas\", \"density\")])"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Once we have our plot window from our cutting plane, we can show it here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pw.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can, as noted above, do the same with our slice:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pws = sl.to_pw(fields=[\"density\"])\n",
-      "#pws.show()\n",
-      "print pws.plots.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "### Covering Grids\n",
-      "\n",
-      "If we want to access a 3D array of data that spans multiple resolutions in our simulation, we can use a covering grid.  This will return a 3D array of data, drawing from up to the resolution level specified when creating the data.  For example, if you create a covering grid that spans two child grids of a single parent grid, it will fill those zones covered by a zone of a child grid with the data from that child grid.  Where it is covered only by the parent grid, the cells from the parent grid will be duplicated (appropriately) to fill the covering grid.\n",
-      "\n",
-      "There are two different types of covering grids: unsmoothed and smoothed.  Smoothed grids will be filled through a cascading interpolation process; they will be filled at level 0, interpolated to level 1, filled at level 1, interpolated to level 2, filled at level 2, etc.  This will help to reduce edge effects.  Unsmoothed covering grids will not be interpolated, but rather values will be duplicated multiple times.\n",
-      "\n",
-      "Here we create an unsmoothed covering grid at level 2, with the left edge at `[0.0, 0.0, 0.0]` and with dimensions equal to those that would cover the entire domain at level 2.  We can then ask for the Density field, which will be a 3D array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cg = ds.covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print cg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In this example, we do exactly the same thing: except we ask for a *smoothed* covering grid, which will reduce edge effects."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "scg = ds.smoothed_covering_grid(2, [0.0, 0.0, 0.0], ds.domain_dimensions * 2**2)\n",
-      "print scg[\"density\"].shape"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
--- a/doc/source/bootcamp/5)_Derived_Fields_and_Profiles.ipynb
+++ /dev/null
@@ -1,254 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:eca573e749829cacda0a8c07c6d5d11d07a5de657563a44b8c4ffff8f735caed"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# Derived Fields and Profiles\n",
-      "\n",
-      "One of the most powerful features in yt is the ability to create derived fields that act and look exactly like fields that exist on disk.  This means that they will be generated on demand and can be used anywhere a field that exists on disk would be used.  Additionally, you can create them by just writing python functions."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import yt\n",
-      "import numpy as np\n",
-      "from yt import derived_field\n",
-      "from matplotlib import pylab"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Derived Fields\n",
-      "\n",
-      "This is an example of the simplest possible way to create a derived field.  All derived fields are defined by a function and some metadata; that metadata can include units, LaTeX-friendly names, conversion factors, and so on.  Fields can be defined in the way in the next cell.  What this does is create a function which accepts two arguments and then provide the units for that field.  In this case, our field is `dinosaurs` and our units are `K*cm/s`.  The function itself can access any fields that are in the simulation, and it does so by requesting data from the object called `data`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "@derived_field(name = \"dinosaurs\", units = \"K * cm/s\")\n",
-      "def _dinos(field, data):\n",
-      "    return data[\"temperature\"] * data[\"velocity_magnitude\"]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One important thing to note is that derived fields must be defined *before* any datasets are loaded.  Let's load up our data and take a look at some quantities."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
-      "dd = ds.all_data()\n",
-      "print dd.quantities.keys()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One interesting question is, what are the minimum and maximum values of dinosaur production rates in our isolated galaxy?  We can do that by examining the `extrema` quantity -- the exact same way that we would for density, temperature, and so on."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.extrema(\"dinosaurs\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can do the same for the average quantities as well."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd.quantities.weighted_average_quantity(\"dinosaurs\", weight=\"temperature\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## A Few Other Quantities\n",
-      "\n",
-      "We can ask other quantities of our data, as well.  For instance, this sequence of operations will find the most dense point, center a sphere on it, calculate the bulk velocity of that sphere, calculate the baryonic angular momentum vector, and then the density extrema.  All of this is done in a memory conservative way: if you have an absolutely enormous dataset, yt will split that dataset into pieces, apply intermediate reductions and then a final reduction to calculate your quantity."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp = ds.sphere(\"max\", (10.0, 'kpc'))\n",
-      "bv = sp.quantities.bulk_velocity()\n",
-      "L = sp.quantities.angular_momentum_vector()\n",
-      "rho_min, rho_max = sp.quantities.extrema(\"density\")\n",
-      "print bv\n",
-      "print L\n",
-      "print rho_min, rho_max"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Profiles\n",
-      "\n",
-      "yt provides the ability to bin in 1, 2 and 3 dimensions.  This means discretizing in one or more dimensions of phase space (density, temperature, etc) and then calculating either the total value of a field in each bin or the average value of a field in each bin.\n",
-      "\n",
-      "We do this using the objects `Profile1D`, `Profile2D`, and `Profile3D`.  The first two are the most common since they are the easiest to visualize.\n",
-      "\n",
-      "This first set of commands manually creates a profile object the sphere we created earlier, binned in 32 bins according to density between `rho_min` and `rho_max`, and then takes the density-weighted average of the fields `temperature` and (previously-defined) `dinosaurs`.  We then plot it in a loglog plot."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=\"cell_mass\")\n",
-      "prof.add_fields([\"temperature\",\"dinosaurs\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"temperature\"]), \"-x\")\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Temperature $(K)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we plot the `dinosaurs` field."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"dinosaurs\"]), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Dinosaurs $(K cm / s)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to see the total mass in every bin, we profile the `cell_mass` field with no weight.  Specifying `weight=None` will simply take the total value in every bin and add that up."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.Profile1D(sp, \"density\", 32, rho_min, rho_max, True, weight_field=None)\n",
-      "prof.add_fields([\"cell_mass\"])\n",
-      "pylab.loglog(np.array(prof.x), np.array(prof[\"cell_mass\"].in_units(\"Msun\")), '-x')\n",
-      "pylab.xlabel('Density $(g/cm^3)$')\n",
-      "pylab.ylabel('Cell mass $(M_\\odot)$')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In addition to the low-level `ProfileND` interface, it's also quite straightforward to quickly create plots of profiles using the `ProfilePlot` class.  Let's redo the last plot using `ProfilePlot`"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "prof = yt.ProfilePlot(sp, 'density', 'cell_mass', weight_field=None)\n",
-      "prof.set_unit('cell_mass', 'Msun')\n",
-      "prof.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "## Field Parameters\n",
-      "\n",
-      "Field parameters are a method of passing information to derived fields.  For instance, you might pass in information about a vector you want to use as a basis for a coordinate transformation.  yt often uses things like `bulk_velocity` to identify velocities that should be subtracted off.  Here we show how that works:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "sp_small = ds.sphere(\"max\", (50.0, 'kpc'))\n",
-      "bv = sp_small.quantities.bulk_velocity()\n",
-      "\n",
-      "sp = ds.sphere(\"max\", (0.1, 'Mpc'))\n",
-      "rv1 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "sp.clear_data()\n",
-      "sp.set_field_parameter(\"bulk_velocity\", bv)\n",
-      "rv2 = sp.quantities.extrema(\"radial_velocity\")\n",
-      "\n",
-      "print bv\n",
-      "print rv1\n",
-      "print rv2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/6)_Volume_Rendering.ipynb
--- a/doc/source/bootcamp/6)_Volume_Rendering.ipynb
+++ /dev/null
@@ -1,96 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2a24bbe82955f9d948b39cbd1b1302968ff57f62f73afb2c7a5c4953393d00ae"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "# A Brief Demo of Volume Rendering\n",
-      "\n",
-      "This shows a small amount of volume rendering.  Really, just enough to get your feet wet!"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "To create a volume rendering, we need a camera and a transfer function.  We'll use the `ColorTransferFunction`, which accepts (in log space) the minimum and maximum bounds of our transfer function.  This means behavior for data outside these values is undefined.\n",
-      "\n",
-      "We then add on \"layers\" like an onion.  This function can accept a width (here specified) in data units, and also a color map.  Here we add on four layers.\n",
-      "\n",
-      "Finally, we create a camera.  The focal point is `[0.5, 0.5, 0.5]`, the width is 20 kpc (including front-to-back integration) and we specify a transfer function.  Once we've done that, we call `show` to actually cast our rays and display them inline."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -24))\n",
-      "tf.add_layers(4, w=0.01)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20, 'kpc'), 512, tf, fields=[\"density\"])\n",
-      "cam.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "If we want to apply a clipping, we can specify the `clip_ratio`.  This will clip the upper bounds to this value times the standard deviation of the values in the image array."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "cam.show(clip_ratio=4)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are several other options we can specify.  Note that here we have turned on the use of ghost zones, shortened the data interval for the transfer function, and widened our gaussian layers."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "tf = yt.ColorTransferFunction((-28, -25))\n",
-      "tf.add_layers(4, w=0.03)\n",
-      "cam = ds.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], (20.0, 'kpc'), 512, tf, no_ghost=False)\n",
-      "cam.show(clip_ratio=4.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/data_inspection.rst
--- a/doc/source/bootcamp/data_inspection.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _data_inspection:
-
-Data Inspection
----------------
-
-.. notebook:: 2)_Data_Inspection.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/data_objects_and_time_series.rst
--- a/doc/source/bootcamp/data_objects_and_time_series.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Data Objects and Time Series
-----------------------------
-
-.. notebook:: 4)_Data_Objects_and_Time_Series.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/derived_fields_and_profiles.rst
--- a/doc/source/bootcamp/derived_fields_and_profiles.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Derived Fields and Profiles
----------------------------
-
-.. notebook:: 5)_Derived_Fields_and_Profiles.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/index.rst
--- a/doc/source/bootcamp/index.rst
+++ /dev/null
@@ -1,59 +0,0 @@
-.. _bootcamp:
-
-yt Bootcamp
-===========
-
-The bootcamp is a series of worked examples of how to use much of the
-funtionality of yt.  These are simple, short introductions to give you a taste
-of what the code can do and are not meant to be detailed walkthroughs.
-
-There are two ways in which you can go through the bootcamp: interactively and 
-non-interactively.  We recommend the interactive method, but if you're pressed 
-on time, you can non-interactively go through the linked pages below and view the 
-worked examples.
-
-To execute the bootcamp interactively, you need to download the repository and
-start the IPython notebook.  If you do not already have the yt repository, the
-easiest way to get the repository is to clone it using mercurial:
-
-.. code-block:: bash
-
-   hg clone https://bitbucket.org/yt_analysis/yt
-
-Now start the IPython notebook from within the repository:
-
-.. code-block:: bash
-
-   cd yt/doc/source/bootcamp
-   yt notebook
-
-This command will give you information about the notebook server and how to
-access it.  You will basically just pick a password (for security reasons) and then 
-redirect your web browser to point to the notebook server.
-Once you have done so, choose "Introduction" from the list of
-notebooks, which includes an introduction and information about how to download
-the sample data.
-
-.. warning:: The pre-filled out notebooks are *far* less fun than running them
-             yourselves!  Check out the repo and give it a try.
-
-Here are the notebooks, which have been filled in for inspection:
-
-.. toctree::
-   :maxdepth: 1
-
-   introduction
-   data_inspection
-   simple_visualization
-   data_objects_and_time_series
-   derived_fields_and_profiles
-   volume_rendering
-
-.. note::
-
-   The notebooks use sample datasets that are available for download at
-   http://yt-project.org/data.  See :ref:`bootcamp-introduction` for more
-   details.
-
-Let us know if you would like to contribute other example notebooks, or have
-any suggestions for how these can be improved.

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/introduction.rst
--- a/doc/source/bootcamp/introduction.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _bootcamp-introduction:
-
-Introduction
-------------
-
-.. notebook:: 1)_Introduction.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/simple_visualization.rst
--- a/doc/source/bootcamp/simple_visualization.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Simple Visualization
---------------------
-
-.. notebook:: 3)_Simple_Visualization.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/bootcamp/volume_rendering.rst
--- a/doc/source/bootcamp/volume_rendering.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-Volume Rendering
-----------------
-
-.. notebook:: 6)_Volume_Rendering.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -122,7 +122,7 @@
     bootswatch_theme = "readable",
     navbar_links = [
         ("How to get help", "help/index"),
-        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Quickstart notebooks", "quickstart/index"),
         ("Cookbook", "cookbook/index"),
         ],
     navbar_sidebarrel = False,

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -90,3 +90,14 @@
 See :ref:`filtering-particles` for more information.
 
 .. yt_cookbook:: particle_filter_sfr.py
+
+Making a Turbulent Kinetic Energy Power Spectrum
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe shows how to use `yt` to read data and put it on a uniform
+grid to interface with the NumPy FFT routines and create a turbulent
+kinetic energy power spectrum.  (Note: the dataset used here is of low
+resolution, so the turbulence is not very well-developed.  The spike
+at high wavenumbers is due to non-periodicity in the z-direction).
+
+.. yt_cookbook:: power_spectrum_example.py

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/cookbook/custom_colorbar_tickmarks.rst
--- a/doc/source/cookbook/custom_colorbar_tickmarks.rst
+++ b/doc/source/cookbook/custom_colorbar_tickmarks.rst
@@ -1,4 +1,4 @@
-Custom Colorabar Tickmarks
---------------------------
+Custom Colorbar Tickmarks
+-------------------------
 
 .. notebook:: custom_colorbar_tickmarks.ipynb

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/cookbook/power_spectrum_example.py
--- /dev/null
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -0,0 +1,118 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import yt
+
+"""
+Make a turbulent KE power spectrum.  Since we are stratified, we use
+a rho**(1/3) scaling to the velocity to get something that would
+look Kolmogorov (if the turbulence were fully developed).
+
+Ultimately, we aim to compute:
+
+                      1  ^      ^*                                           
+     E(k) = integral  -  V(k) . V(k) dS                                      
+                      2                                                      
+ 
+             n                                               ^               
+where V = rho  U is the density-weighted velocity field, and V is the
+FFT of V.
+ 
+(Note: sometimes we normalize by 1/volume to get a spectral
+energy density spectrum).
+
+
+"""
+ 
+
+def doit(ds):
+
+    # a FFT operates on uniformly gridded data.  We'll use the yt
+    # covering grid for this.
+
+    max_level = ds.index.max_level
+
+    ref = int(np.product(ds.ref_factors[0:max_level]))
+
+    low = ds.domain_left_edge
+    dims = ds.domain_dimensions*ref
+
+    nx, ny, nz = dims
+
+    nindex_rho = 1./3.
+
+    Kk = np.zeros( (nx/2+1, ny/2+1, nz/2+1))
+
+    for vel in [("gas", "velocity_x"), ("gas", "velocity_y"), 
+                ("gas", "velocity_z")]:
+
+        Kk += 0.5*fft_comp(ds, ("gas", "density"), vel,
+                           nindex_rho, max_level, low, dims)
+
+    # wavenumbers
+    L = (ds.domain_right_edge - ds.domain_left_edge).d
+
+    kx = np.fft.rfftfreq(nx)*nx/L[0]
+    ky = np.fft.rfftfreq(ny)*ny/L[1]
+    kz = np.fft.rfftfreq(nz)*nz/L[2]
+    
+    # physical limits to the wavenumbers
+    kmin = np.min(1.0/L)
+    kmax = np.max(0.5*dims/L)
+    
+    kbins = np.arange(kmin, kmax, kmin)
+    N = len(kbins)
+
+    # bin the Fourier KE into radial kbins
+    kx3d, ky3d, kz3d = np.meshgrid(kx, ky, kz, indexing="ij")
+    k = np.sqrt(kx3d**2 + ky3d**2 + kz3d**2)
+
+    whichbin = np.digitize(k.flat, kbins)
+    ncount = np.bincount(whichbin)
+    
+    E_spectrum = np.zeros(len(ncount)-1)
+
+    for n in range(1,len(ncount)):
+        E_spectrum[n-1] = np.sum(Kk.flat[whichbin==n])
+
+    k = 0.5*(kbins[0:N-1] + kbins[1:N])
+    E_spectrum = E_spectrum[1:N]
+
+    index = np.argmax(E_spectrum)
+    kmax = k[index]
+    Emax = E_spectrum[index]
+
+    plt.loglog(k, E_spectrum)
+    plt.loglog(k, Emax*(k/kmax)**(-5./3.), ls=":", color="0.5")
+
+    plt.xlabel(r"$k$")
+    plt.ylabel(r"$E(k)dk$")
+
+    plt.savefig("spectrum.png")
+
+
+def fft_comp(ds, irho, iu, nindex_rho, level, low, delta ):
+
+    cube = ds.covering_grid(level, left_edge=low,
+                            dims=delta,
+                            fields=[irho, iu])
+
+    rho = cube[irho].d
+    u = cube[iu].d
+
+    nx, ny, nz = rho.shape
+
+    # do the FFTs -- note that since our data is real, there will be
+    # too much information here.  fftn puts the positive freq terms in
+    # the first half of the axes -- that's what we keep.  Our
+    # normalization has an '8' to account for this clipping to one
+    # octant.
+    ru = np.fft.fftn(rho**nindex_rho * u)[0:nx/2+1,0:ny/2+1,0:nz/2+1]
+    ru = 8.0*ru/(nx*ny*nz)
+
+    return np.abs(ru)**2
+
+
+if __name__ == "__main__":
+
+    ds = yt.load("maestro_xrb_lores_23437")
+    doit(ds)
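
For reference, the spectrum the docstring above sketches in ASCII is, in
LaTeX form (same definitions as the docstring; here n = 1/3):

    E(k) = \oint_{|\mathbf{k}| = k} \frac{1}{2}\,
           \hat{\mathbf{V}}(\mathbf{k}) \cdot \hat{\mathbf{V}}^{*}(\mathbf{k})\, dS,
    \qquad \mathbf{V} = \rho^{n}\,\mathbf{U},

where \hat{\mathbf{V}} is the FFT of the density-weighted velocity field V.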

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -28,7 +28,7 @@
 * Analyzing
 * Examining
 * Cookbook
-* Bootcamp
+* Quickstart
 * Developing
 * Reference
 * Help

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -170,10 +170,16 @@
 Developing yt on Windows
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-If you plan to develop yt on Windows, we recommend using the `MinGW
+If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
-Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. Also, the
-syntax for the setup command is slightly different; you must type:
+Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
+installed from Anaconda as well. These can both be installed with a single command:
+
+.. code-block:: bash
+
+  $ conda install libpython mingw
+
+Additionally, the syntax for the setup command is slightly different; you must type:
 
 .. code-block:: bash
 

diff -r 67186b163c69bbce247b608a8c88089adbd18ff0 -r 1364dd1c2fab6735316275334c66f4c1b3880c97 doc/source/examining/low_level_inspection.rst
--- a/doc/source/examining/low_level_inspection.rst
+++ b/doc/source/examining/low_level_inspection.rst
@@ -12,7 +12,7 @@
           based simulations.  For now, these are represented as patches, with
           the attendant properties.
 
-For a more basic introduction, see :ref:`bootcamp` and more specifically
+For a more basic introduction, see :ref:`quickstart` and more specifically
 :ref:`data_inspection`.
 
 .. _examining-grid-hierarchies:

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/7e77f0ff4731/
Changeset:   7e77f0ff4731
Branch:      yt
User:        mzingale
Date:        2014-09-15 15:46:13+00:00
Summary:     merge
Affected #:  66 files

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -129,7 +129,14 @@
 are center_of_mass and bulk_velocity. Their definitions are available in 
 ``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-``halo_quantities.py`` and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
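
For reference, a minimal sketch of what registering such a quantity can look
like -- illustrative only, not the file's actual example; the ``add_quantity``
import path and the use of ``halo.quantities`` are assumptions based on the
module layout:

    from yt.analysis_modules.halo_analysis.api import HaloCatalog, add_quantity

    def _mass_msun(halo):
        # illustrative: re-express the stored particle_mass in solar masses
        return halo.quantities["particle_mass"].in_units("Msun")

    add_quantity("mass_msun", _mass_msun)

    hc = HaloCatalog(data_ds=ds, finder_method="hop")  # ds: a loaded dataset
    hc.add_quantity("mass_msun")
    hc.create()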
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -75,7 +75,8 @@
   mass. In simulations where the highest-resolution particles all have the 
   same mass (ie: zoom-in grid based simulations), one can set up a particle
   filter to select the lowest mass particles and perform the halo finding
-  only on those.
+  only on those.  See this cookbook recipe for an example:
+  :ref:`cookbook-rockstar-nested-grid`.
 
 To run the Rockstar Halo finding, you must launch python with MPI and 
 parallelization enabled. While Rockstar itself does not require MPI to run, 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/cookbook/cosmological_analysis.rst
--- a/doc/source/cookbook/cosmological_analysis.rst
+++ b/doc/source/cookbook/cosmological_analysis.rst
@@ -14,6 +14,22 @@
 
 .. yt_cookbook:: halo_plotting.py
 
+.. _cookbook-rockstar-nested-grid:
+
+Running Rockstar to Find Halos on Multi-Resolution-Particle Datasets
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The version of Rockstar installed with yt does not have the capability
+to work on datasets with particles of different masses.  Unfortunately,
+many simulations possess particles of different masses, notably cosmological 
+zoom datasets.  This recipe uses Rockstar in two different ways to generate a 
+HaloCatalog from the highest resolution dark matter particles (the ones 
+inside the zoom region).  It then overlays some of those halos on a projection
+as a demonstration.  See :ref:`halo-analysis` and :ref:`annotate-halos` for
+more information.
+
+.. yt_cookbook:: rockstar_nest.py
+
 .. _cookbook-halo_finding:
 
 Halo Profiling and Custom Analysis

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/cookbook/power_spectrum_example.py
--- a/doc/source/cookbook/power_spectrum_example.py
+++ b/doc/source/cookbook/power_spectrum_example.py
@@ -57,7 +57,7 @@
     
     # physical limits to the wavenumbers
     kmin = np.min(1.0/L)
-    kmax = np.max(0.5*dims/L)
+    kmax = np.min(0.5*dims/L)
     
     kbins = np.arange(kmin, kmax, kmin)
     N = len(kbins)
@@ -112,7 +112,6 @@
     return np.abs(ru)**2
 
 
-if __name__ == "__main__":
 
-    ds = yt.load("maestro_xrb_lores_23437")
-    doit(ds)
+ds = yt.load("maestro_xrb_lores_23437")
+doit(ds)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/cookbook/rockstar_nest.py
--- /dev/null
+++ b/doc/source/cookbook/rockstar_nest.py
@@ -0,0 +1,74 @@
+# You must run this job in parallel; several mpi flags can be useful for it
+# to work correctly (see the usage line below).  It requires at least 3
+# processors because of the way in which rockstar divides up the work.
+# Make sure you have mpi4py installed as per
+# http://yt-project.org/docs/dev/analyzing/parallel_computation.html#setting-up-parallel-yt
+    
+# Usage: mpirun -np <num_procs> --mca btl ^openib python this_script.py
+
+import yt
+from yt.analysis_modules.halo_analysis.halo_catalog import HaloCatalog
+from yt.data_objects.particle_filters import add_particle_filter
+from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+yt.enable_parallelism() # rockstar halofinding requires parallelism
+
+# Create a dark matter particle filter
+# This will be code dependent, but this function is correct for Enzo
+
+def DarkMatter(pfilter, data):
+    filter = data[("all", "particle_type")] == 1 # DM = 1, Stars = 2
+    return filter
+
+add_particle_filter("dark_matter", function=DarkMatter, filtered_type='all', \
+                    requires=["particle_type"])
+
+# First, we make sure that this script is being run using mpirun with
+# at least 3 processors as indicated in the comments above.
+assert(yt.communication_system.communicators[-1].size >= 3)
+
+# Load the dataset and apply dark matter filter
+fn = "Enzo_64/DD0043/data0043"
+ds = yt.load(fn)
+ds.add_particle_filter('dark_matter')
+
+# Determine highest resolution DM particle mass in sim by looking
+# at the extrema of the dark_matter particle_mass field.
+ad = ds.all_data()
+min_dm_mass = ad.quantities.extrema(('dark_matter','particle_mass'))[0]
+
+# Define a new particle filter to isolate all highest resolution DM particles
+# and apply it to dataset
+def MaxResDarkMatter(pfilter, data):
+    return data["particle_mass"] <= 1.01 * min_dm_mass
+
+add_particle_filter("max_res_dark_matter", function=MaxResDarkMatter, \
+                    filtered_type='dark_matter', requires=["particle_mass"])
+ds.add_particle_filter('max_res_dark_matter')
+
+# If desired, we can see the total number of DM and High-res DM particles
+#if yt.is_root():
+#    print "Simulation has %d DM particles." % ad['dark_matter','particle_type'].shape
+#    print "Simulation has %d Highest Res DM particles." % ad['max_res_dark_matter', 'particle_type'].shape
+
+# Run the halo catalog on the dataset only on the highest resolution dark matter 
+# particles
+hc = HaloCatalog(data_ds=ds, finder_method='rockstar', \
+                 finder_kwargs={'dm_only':True, 'particle_type':'max_res_dark_matter'})
+hc.create()
+
+# Or alternatively, just run the RockstarHaloFinder and later import the 
+# output file as necessary.  You can skip this step if you've already run it
+# once, but be careful since subsequent halo finds will overwrite this data.
+#rhf = RockstarHaloFinder(ds, particle_type="max_res_dark_matter")
+#rhf.run()
+# Load the halo list from a rockstar output for this dataset
+# Create a projection with the halos overplot on top
+#halos = yt.load('rockstar_halos/halos_0.0.bin')
+#hc = HaloCatalog(halos_ds=halos)
+#hc.load()
+
+# Regardless of your method of creating the halo catalog, use it to overplot the
+# halos on a projection.
+p = yt.ProjectionPlot(ds, "x", "density")
+p.annotate_halos(hc, annotate_field = 'particle_identifier', width=(10,'Mpc'), factor=2)
+p.save()

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/cookbook/tests/test_cookbook.py
--- /dev/null
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+"""Module for cookbook testing
+
+
+This test should be run from the main yt directory.
+
+Example:
+
+      $ sed -e '/where/d' -i nose.cfg setup.cfg
+      $ nosetests doc/source/cookbook/tests/test_cookbook.py -P -v
+"""
+import glob
+import os
+import sys
+
+sys.path.append(os.path.join(os.getcwd(), "doc/source/cookbook"))
+
+
+def test_recipe():
+    '''Dummy test that yields a check for every cookbook recipe'''
+    for fname in glob.glob("doc/source/cookbook/*.py"):
+        module_name = os.path.splitext(os.path.basename(fname))[0]
+        yield check_recipe, module_name
+
+
+def check_recipe(module_name):
+    '''Run a single recipe'''
+    __import__(module_name)
+    assert True

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/cookbook/thin_slice_projection.py
--- a/doc/source/cookbook/thin_slice_projection.py
+++ b/doc/source/cookbook/thin_slice_projection.py
@@ -4,7 +4,7 @@
 ds = yt.load("Enzo_64/DD0030/data0030")
 
 # Make a projection that is the full width of the domain,
-# but only 10 Mpc in depth.  This is done by creating a
+# but only 5 Mpc in depth.  This is done by creating a
 # region object with this exact geometry and providing it
 # as a data_source for the projection.
 
@@ -17,12 +17,12 @@
 right_corner = ds.domain_right_edge
 
 # Now adjust the size of the region along the line of sight (x axis).
-depth = ds.quan(10.0,'Mpc')
+depth = ds.quan(5.0,'Mpc')
 left_corner[0] = center[0] - 0.5 * depth
-left_corner[0] = center[0] + 0.5 * depth
+right_corner[0] = center[0] + 0.5 * depth
 
 # Create the region
-region = ds.region(center, left_corner, right_corner)
+region = ds.box(left_corner, right_corner)
 
 # Create a density projection and supply the region we have just created.
 # Only cells within the region will be included in the projection.

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/examining/Loading_Generic_Particle_Data.ipynb
--- a/doc/source/examining/Loading_Generic_Particle_Data.ipynb
+++ b/doc/source/examining/Loading_Generic_Particle_Data.ipynb
@@ -74,7 +74,7 @@
       "import yt\n",
       "from yt.units import parsec, Msun\n",
       "\n",
-      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppy), max(ppy)]])\n",
+      "bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])\n",
       "\n",
       "ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun, n_ref=256, bbox=bbox)"
      ],
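
For context, a minimal sketch of the setup this cell assumes; the random
arrays here are illustrative, but the field names and the corrected bbox
follow the load_particles call shown in the hunk:

    import numpy as np
    import yt
    from yt.units import parsec, Msun

    n = 10000
    ppx, ppy, ppz = (np.random.normal(size=n) for _ in range(3))
    data = {'particle_position_x': ppx,
            'particle_position_y': ppy,
            'particle_position_z': ppz,
            'particle_mass': np.ones(n)}
    # the bounding box must span all three coordinate arrays -- this is
    # exactly the ppy/ppz mix-up the hunk above corrects
    bbox = 1.1*np.array([[min(ppx), max(ppx)],
                         [min(ppy), max(ppy)],
                         [min(ppz), max(ppz)]])
    ds = yt.load_particles(data, length_unit=parsec, mass_unit=1e8*Msun,
                           n_ref=256, bbox=bbox)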

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -151,19 +151,28 @@
 Overplot Halo Annotations
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-.. function:: annotate_halos(self, halo_catalog, col='white', alpha=1, \
-                             width=None):
+.. function:: annotate_halos(self, halo_catalog, circle_kwargs=None, width=None, \ 
+                             annotate_field=False, font_kwargs=None, factor=1.0):
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
    Accepts a :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
-   and plots a circle at the location of each
-   halo with the radius of the circle corresponding to the virial radius of the
-   halo.  If ``width`` is set to None (default) all halos are plotted.
-   Otherwise, only halos that fall within a slab with width ``width`` centered
-   on the center of the plot data. The color and transparency of the circles can
-   be controlled with ``col`` and ``alpha`` respectively.
+   and plots a circle at the location of each halo with the radius of the 
+   circle corresponding to the virial radius of the halo.  If ``width`` is set 
+   to None (default) all halos are plotted, otherwise it accepts a tuple in 
+   the form (1.0, 'Mpc') to only display halos that fall within a slab with
+   width ``width`` centered on the center of the plot data.  The appearance of 
+   the circles can be changed with the circle_kwargs dictionary, which is 
+   supplied to the Matplotlib patch Circle.  One can label each of the halos 
+   with the annotate_field, which accepts a field contained in the halo catalog 
+   to add text to the plot near the halo (example: annotate_field = 
+   ``particle_mass`` will write the halo mass next to each halo, whereas 
+   ``particle_identifier`` shows the halo number).  font_kwargs contains the 
+   arguments controlling the text appearance of the annotated field.
+   ``factor`` is the multiple of the virial radius used when plotting the
+   circles; e.g., ``factor=2.0`` draws each circle at twice the halo's
+   virial radius.
 
 .. python-script::
 
@@ -177,7 +186,7 @@
    hc.create()
 
    prj = yt.ProjectionPlot(data_ds, 'z', 'density')
-   prj.annotate_halos(hc)
+   prj.annotate_halos(hc, annotate_field='particle_identifier')
    prj.save()
 
 Overplot a Straight Line

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -6,12 +6,17 @@
 There are several colormaps available for yt.  yt includes all of the 
 matplotlib colormaps as well for nearly all functions.  Individual visualization
 functions usually allow you to specify a colormap with the ``cmap`` flag.
-There are a small number of functions (mostly contained in the image_writer 
-module; e.g. write_bitmap, write_image, write_projection, etc.), which do 
-not load the matplotlib infrastructure and can only access the colormaps 
-native to yt.  
 
-Here is a chart of all of the colormaps available.  In addition to each 
+If you have installed ``brewer2mpl``
+(``pip install brewer2mpl``; see https://github.com/jiffyclub/brewer2mpl),
+you can also access the discrete colormaps available at
+http://colorbrewer2.org. Instead of supplying the colormap name, specify
+a tuple of the form (name, type, number), for example ``('RdBu', 'Diverging', 9)``.
+These discrete colormaps will not be interpolated, and can be useful for
+creating colorblind/printer/grayscale-friendly plots. For more information,
+visit http://colorbrewer2.org.
+
+Here is a chart of all of the yt and matplotlib colormaps available.  In addition to each 
 colormap displayed here, you can access its "reverse" by simply appending a 
 ``"_r"`` to the end of the colormap name.
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -90,6 +90,7 @@
     kwargs = dict()
 
 ip.ex("from yt.mods import *")
+ip.ex("import yt")
 
 # Now we add some tab completers, in the vein of:
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -195,7 +195,6 @@
     ## tau_0
     tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
-    tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -328,7 +328,7 @@
                                                         output["redshift"])
                 proper_box_size = self.simulation.box_size / \
                   (1.0 + output["redshift"])
-                pixel_xarea = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
+                pixel_area = (proper_box_size.in_cgs() / pixels)**2 #in proper cm^2
                 factor = pixel_area / (4.0 * np.pi * dL.in_cgs()**2)
                 mylog.info("Distance to slice = %s" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer"s image plane.

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/halo_analysis/fields.py
--- a/yt/analysis_modules/halo_analysis/fields.py
+++ b/yt/analysis_modules/halo_analysis/fields.py
@@ -30,7 +30,7 @@
         sl_right = slice(2, None, None)
         div_fac = 2.0
     else:
-        sl_left, sl_right, div_face = slice_info
+        sl_left, sl_right, div_fac = slice_info
 
     def _virial_radius(field, data):
         virial_radius = data.get_field_parameter("virial_radius")

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -80,7 +80,6 @@
     """
 
     dds = halo.halo_catalog.data_ds
-    hds = halo.halo_catalog.halos_ds
     center = dds.arr([halo.quantities["particle_position_%s" % axis] \
                       for axis in "xyz"])
     radius = factor * halo.quantities[radius_field]

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -788,7 +788,7 @@
     
         # Now compute the CDM+HDM+baryon transfer functions
         tf_cb = self.tf_master*self.growth_cb/self.growth_k0;
-        tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
+        #tf_cbnu = self.tf_master*self.growth_cbnu/self.growth_k0;
         return tf_cb
 
 
@@ -832,7 +832,6 @@
     area1 = np.sum(areas)
     # Now we refine until the error is smaller than *error*.
     diff = area1 - area0
-    area_final = area1
     area_last = area1
     one_pow = 3
     while diff > error:

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -32,7 +32,6 @@
     contours = {}
     node_ids = []
     DLE = data_source.ds.domain_left_edge
-    total_vol = None
     selector = getattr(data_source, "base_object", data_source).selector
     masks = dict((g.id, m) for g, m in data_source.blocks)
     for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/photon_simulator/photon_models.py
--- a/yt/analysis_modules/photon_simulator/photon_models.py
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -128,7 +128,6 @@
         energy = self.spectral_model.ebins
     
         cell_em = EM[idxs]*vol_scale
-        cell_vol = vol[idxs]*vol_scale
     
         number_of_photons = np.zeros(dshape, dtype='uint64')
         energies = []
@@ -139,7 +138,6 @@
 
         for i, ikT in enumerate(kT_idxs):
 
-            ncells = int(bcounts[i])
             ibegin = bcell[i]
             iend = ecell[i]
             kT = kT_bins[ikT] + 0.5*dkT

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -490,7 +490,6 @@
         z_hat = orient.unit_vectors[2]
 
         n_ph = self.photons["NumberOfPhotons"]
-        num_cells = len(n_ph)
         n_ph_tot = n_ph.sum()
         
         eff_area = None
@@ -667,7 +666,6 @@
         tblhdu = hdulist["MATRIX"]
         n_de = len(tblhdu.data["ENERG_LO"])
         mylog.info("Number of energy bins in RMF: %d" % (n_de))
-        de = tblhdu.data["ENERG_HI"] - tblhdu.data["ENERG_LO"]
         mylog.info("Energy limits: %g %g" % (min(tblhdu.data["ENERG_LO"]),
                                              max(tblhdu.data["ENERG_HI"])))
 
@@ -682,7 +680,6 @@
         phYY = events["ypix"][eidxs]
 
         detectedChannels = []
-        pindex = 0
 
         # run through all photon energies and find which bin they go in
         k = 0

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -128,7 +128,6 @@
     if fni.endswith('.fits'):
         fni = fni.replace('.fits','')
 
-    ndomains_finished = 0
     for (num_halos, domain, halos) in domains_list:
         dle,dre = domain
         print 'exporting: '
@@ -154,7 +153,6 @@
             fh.write("%6.6e \n"%(halo.Rvir*ds['kpc']))
         fh.close()
         export_to_sunrise(ds, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
-        ndomains_finished +=1
 
 def domains_from_halos(ds,halo_list,frvir=0.15):
     domains = {}
@@ -172,8 +170,6 @@
     domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
     domains_list.sort() 
     domains_list.reverse() #we want the most populated domains first
-    domains_limits = [d[1] for d in domains_list]
-    domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
 def prepare_octree(ds,ile,start_level=0,debug=True,dd=None,center=None):
@@ -245,10 +241,6 @@
     hs       = hilbert_state()
     start_time = time.time()
     if debug:
-        if center is not None: 
-            c = center*ds['kpc']
-        else:
-            c = ile*1.0/ds.domain_dimensions*ds['kpc']
         printing = lambda x: print_oct(x)
     else:
         printing = None
@@ -332,7 +324,7 @@
         #then translate onto the subgrid integer index 
         parent_fle  = grid.left_edges + cell_index*grid.dx
         subgrid_ile = np.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
-        for i, (vertex,hilbert_child) in enumerate(hilbert):
+        for (vertex, hilbert_child) in hilbert:
             #vertex is a combination of three 0s and 1s to 
             #denote each of the 8 octs
             if level < 0:

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -89,8 +89,6 @@
     L = 2 * R * cm_per_kpc
     bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) * L
 
-    dl = L/nz
-
     ds = load_uniform_grid(data, ddims, length_unit='cm', bbox=bbox)
     ds.index
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -418,7 +418,6 @@
         otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
-        from glue.core.coordinates import coordinates_from_header
         from glue.qt.glue_application import GlueApplication
         
         gdata = Data(label=label)
@@ -494,6 +493,18 @@
                     ftype = self._current_fluid_type
                     if (ftype, fname) not in self.ds.field_info:
                         ftype = self.ds._last_freq[0]
+
+                # really ugly check to ensure that this field really does exist somewhere,
+                # in some naming convention, before returning it as a possible field type
+                if (ftype,fname) not in self.ds.field_list and \
+                        fname not in self.ds.field_list and \
+                        (ftype,fname) not in self.ds.derived_field_list and \
+                        fname not in self.ds.derived_field_list and \
+                        (ftype,fname) not in self._container_fields:
+                    raise YTFieldNotFound((ftype,fname),self.ds)
+
+            # these tests are really insufficient as a field type may be valid, and the
+            # field name may be valid, but not the combination (field type, field name)
             if finfo.particle_type and ftype not in self.ds.particle_types:
                 raise YTFieldTypeNotFound(ftype)
             elif not finfo.particle_type and ftype not in self.ds.fluid_types:
@@ -621,7 +632,7 @@
                 fields_to_generate.append(field)
                 continue
             fields_to_get.append(field)
-        if len(fields_to_get) == 0 and fields_to_generate == 0:
+        if len(fields_to_get) == 0 and len(fields_to_generate) == 0:
             return
         elif self._locked == True:
             raise GenerationInProgress(fields)
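
To illustrate the behavior the new existence check above is meant to produce
-- a sketch, assuming the IsolatedGalaxy sample dataset:

    import yt
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    dd = ds.all_data()
    dd["gas", "density"]      # a real field: returns the data as before
    dd["gas", "not_a_field"]  # now raises YTFieldNotFound up front, rather
                              # than failing obscurely during field generation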

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -460,8 +460,6 @@
             self._last_freq = field
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
-        if fname == self._last_freq[1]:
-            return self._last_finfo
         if fname in self.field_info:
             # Sometimes, if guessing_type == True, this will be switched for
             # the type of field it is.  So we look at the field type and

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/data_objects/tests/test_spheres.py
--- a/yt/data_objects/tests/test_spheres.py
+++ b/yt/data_objects/tests/test_spheres.py
@@ -6,10 +6,11 @@
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+_fields_to_compare = ("spherical_r", "cylindrical_r",
+                      "spherical_theta", "cylindrical_theta",
+                      "spherical_phi", "cylindrical_z")
+
 def test_domain_sphere():
-    ds = fake_random_ds(16, fields = ("density"))
-    sp = ds.sphere(ds.domain_center, ds.domain_width[0])
-
     # Now we test that we can get different radial velocities based on field
     # parameters.
 
@@ -51,3 +52,12 @@
     yield assert_equal, np.any(rp0["radial_velocity"][rp0.used] ==
                                rp1["radial_velocity"][rp1.used]), \
                                False
+
+    ref_sp = ds.sphere("c", 0.25)
+    for f in _fields_to_compare:
+        ref_sp[f].sort()
+    for center in periodicity_cases(ds):
+        sp = ds.sphere(center, 0.25)
+        for f in _fields_to_compare:
+            sp[f].sort()
+            yield assert_equal, sp[f], ref_sp[f]

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -322,10 +322,6 @@
     create_magnitude_field(registry, "particle_specific_angular_momentum",
                            "cm**2/s", ftype=ptype, particle_type=True)
     
-    def _particle_angular_momentum(field, data):
-        return data[ptype, "particle_mass"] \
-             * data[ptype, "particle_specific_angular_momentum"]
-
     def _particle_angular_momentum_x(field, data):
         return data[ptype, "particle_mass"] * \
                data[ptype, "particle_specific_angular_momentum_x"]
@@ -350,6 +346,15 @@
              units="g*cm**2/s", particle_type=True,
              validators=[ValidateParameter('center')])
 
+    def _particle_angular_momentum(field, data):
+        return data[ptype, "particle_mass"] \
+            * data[ptype, "particle_specific_angular_momentum"]
+    registry.add_field((ptype, "particle_angular_momentum"),
+              function=_particle_angular_momentum,
+              particle_type=True,
+              units="g*cm**2/s",
+              validators=[ValidateParameter("center")])
+
     create_magnitude_field(registry, "particle_angular_momentum",
                            "g*cm**2/s", ftype=ptype, particle_type=True)
     

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -131,7 +131,7 @@
     registry.add_field((ftype, "radial_%s" % basename),
                        function = _radial, units = field_units)
     registry.add_field((ftype, "radial_%s_absolute" % basename),
-                       function = _radial, units = field_units)
+                       function = _radial_absolute, units = field_units)
     registry.add_field((ftype, "tangential_%s" % basename),
                        function=_tangential, units = field_units)
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -14,11 +14,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file, \
+    assert_equal
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
-    small_patch_amr, \
     big_patch_amr, \
+    PixelizedProjectionValuesTest, \
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
@@ -41,3 +43,8 @@
                     yield PixelizedProjectionValuesTest(
                         d9p, axis, field, weight_field,
                         dobj_name)
+
+
+@requires_file(d9p)
+def test_ARTDataset():
+    assert isinstance(data_dir_load(d9p), ARTDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/artio/tests/test_outputs.py
--- a/yt/frontends/artio/tests/test_outputs.py
+++ b/yt/frontends/artio/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ARTIO frontend tests 
+ARTIO frontend tests
 
 
 
@@ -24,7 +24,7 @@
 from yt.frontends.artio.api import ARTIODataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
-           ("deposit", "all_density"), ("deposit", "all_count")) 
+           ("deposit", "all_density"), ("deposit", "all_count"))
 
 sizmbhloz = "sizmbhloz-clref04SNth-rs9_a0.9011/sizmbhloz-clref04SNth-rs9_a0.9011.art"
 @requires_ds(sizmbhloz)
@@ -45,3 +45,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(sizmbhloz)
+def test_ARTIODataset():
+    assert isinstance(data_dir_load(sizmbhloz), ARTIODataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/athena/tests/test_outputs.py
--- a/yt/frontends/athena/tests/test_outputs.py
+++ b/yt/frontends/athena/tests/test_outputs.py
@@ -57,3 +57,8 @@
     for test in small_patch_amr(stripping, _fields_stripping):
         test_stripping.__name__ = test.description
         yield test
+
+
+@requires_file(cloud)
+def test_AthenaDataset():
+    assert isinstance(data_dir_load(cloud), AthenaDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
 """
-Data structures for Boxlib Codes 
+Data structures for BoxLib Codes
 
 
 
@@ -15,10 +15,8 @@
 
 import os
 import re
-import weakref
 import itertools
 
-from collections import defaultdict
 from stat import ST_CTIME
 
 import numpy as np
@@ -27,53 +25,46 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.geometry.grid_geometry_handler import GridIndex
 from yt.data_objects.static_output import Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
-from yt.geometry.selection_routines import \
-    RegionSelector
 from yt.utilities.io_handler import \
     io_registry
-from yt.utilities.physical_constants import \
-    cm_per_mpc
 
 from .fields import \
     BoxlibFieldInfo, \
     MaestroFieldInfo, \
     CastroFieldInfo
 
-from .io import IOHandlerBoxlib
 # This is what we use to find scientific notation that might include d's
 # instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
 # This is the dimensions in the Cell_H file for each level
 # It is different for different dimensionalities, so we make a list
-_dim_finder = [ \
+_dim_finder = [
     re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
     re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
 # This is the line that prefixes each set of data for a FAB in the FAB file
 # It is different for different dimensionalities, so we make a list
 _endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
-_header_pattern = [ \
-    re.compile(_endian_regex + 
+_header_pattern = [
+    re.compile(_endian_regex +
                r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
-    re.compile(_endian_regex + 
+    re.compile(_endian_regex +
                r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
 
 
-
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
     _offset = -1
 
-    def __init__(self, grid_id, offset, filename = None,
-                 index = None):
+    def __init__(self, grid_id, offset, filename=None,
+                 index=None):
         super(BoxlibGrid, self).__init__(grid_id, filename, index)
         self._base_offset = offset
         self._parent_id = []
@@ -126,7 +117,7 @@
         return coords
 
     # Override this as well, since refine_by can vary
-    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+    def _fill_child_mask(self, child, mask, tofill, dlevel=1):
         rf = self.ds.ref_factors[self.Level]
         if dlevel != 1:
             raise NotImplementedError
@@ -139,8 +130,10 @@
              startIndex[1]:endIndex[1],
              startIndex[2]:endIndex[2]] = tofill
 
+
 class BoxlibHierarchy(GridIndex):
     grid = BoxlibGrid
+
     def __init__(self, ds, dataset_type='boxlib_native'):
         self.dataset_type = dataset_type
         self.header_filename = os.path.join(ds.output_dir, 'Header')
@@ -149,19 +142,17 @@
         GridIndex.__init__(self, ds, dataset_type)
         self._cache_endianness(self.grids[-1])
 
-        #self._read_particles()
-
     def _parse_index(self):
         """
         read the global header file for an Boxlib plotfile output.
         """
         self.max_level = self.dataset._max_level
-        header_file = open(self.header_filename,'r')
+        header_file = open(self.header_filename, 'r')
 
         self.dimensionality = self.dataset.dimensionality
         _our_dim_finder = _dim_finder[self.dimensionality-1]
-        DRE = self.dataset.domain_right_edge # shortcut
-        DLE = self.dataset.domain_left_edge # shortcut
+        DRE = self.dataset.domain_right_edge  # shortcut
+        DLE = self.dataset.domain_left_edge   # shortcut
 
         # We can now skip to the point in the file we want to start parsing.
         header_file.seek(self.dataset._header_mesh_start)
@@ -190,13 +181,13 @@
         if int(header_file.next()) != 0:
             raise RuntimeError("INTERNAL ERROR! This should be a zero.")
 
-        # each level is one group with ngrids on it. 
-        # each grid has self.dimensionality number of lines of 2 reals 
+        # each level is one group with ngrids on it.
+        # each grid has self.dimensionality number of lines of 2 reals
         self.grids = []
         grid_counter = 0
         for level in range(self.max_level + 1):
             vals = header_file.next().split()
-            lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
+            lev, ngrids = int(vals[0]), int(vals[1])
             assert(lev == level)
             nsteps = int(header_file.next())
             for gi in range(ngrids):
@@ -232,10 +223,10 @@
             for gi in range(ngrids):
                 # components within it
                 start, stop = _our_dim_finder.match(level_header_file.next()).groups()
-                # fix for non-3d data 
+                # fix for non-3d data
                 # note we append '0' to both ends b/c of the '+1' in dims below
                 start += ',0'*(3-self.dimensionality)
-                stop  += ',0'*(3-self.dimensionality)
+                stop += ',0'*(3-self.dimensionality)
                 start = np.array(start.split(","), dtype="int64")
                 stop = np.array(stop.split(","), dtype="int64")
                 dims = stop - start + 1
@@ -259,7 +250,7 @@
             # already read the filenames above...
         self.float_type = 'float64'
 
-    def _cache_endianness(self,test_grid):
+    def _cache_endianness(self, test_grid):
         """
         Cache the endianness and bytes perreal of the grids by using a
         test grid and assuming that all grids have the same
@@ -270,7 +261,7 @@
         # open the test file & grab the header
         with open(os.path.expanduser(test_grid.filename), 'rb') as f:
             header = f.readline()
-        
+
         bpr, endian, start, stop, centering, nc = \
             _header_pattern[self.dimensionality-1].search(header).groups()
         # Note that previously we were using a different value for BPR than we
@@ -294,7 +285,8 @@
         self.grids = np.array(self.grids, dtype='object')
         self._reconstruct_parent_child()
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            if (i % 1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i,
+                                           self.num_grids)
             grid._prepare_grid()
             grid._setup_dx()
         mylog.debug("Done creating grid objects")
@@ -308,10 +300,10 @@
                                 self.grid_levels[i] + 1,
                                 self.grid_left_edge, self.grid_right_edge,
                                 self.grid_levels, mask)
-            ids = np.where(mask.astype("bool")) # where is a tuple
-            grid._children_ids = ids[0] + grid._id_offset 
+            ids = np.where(mask.astype("bool"))  # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset
         mylog.debug("Second pass; identifying parents")
-        for i, grid in enumerate(self.grids): # Second pass
+        for i, grid in enumerate(self.grids):  # Second pass
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
@@ -331,10 +323,10 @@
         for line in header_file:
             if len(line.split()) != 3: continue
             self.num_grids += int(line.split()[1])
-        
+
     def _initialize_grid_arrays(self):
         super(BoxlibHierarchy, self)._initialize_grid_arrays()
-        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+        self.grid_start_index = np.zeros((self.num_grids, 3), 'int64')
 
     def _initialize_state_variables(self):
         """override to not re-initialize num_grids in AMRHierarchy.__init__
@@ -349,7 +341,7 @@
         self.field_list = [("boxlib", f) for f in
                            self.dataset._field_list]
         self.field_indexes = dict((f[1], i)
-                                for i, f in enumerate(self.field_list))
+                                  for i, f in enumerate(self.field_list))
         # There are times when field_list may change.  We copy it here to
         # avoid that possibility.
         self.field_order = [f for f in self.field_list]
@@ -357,6 +349,7 @@
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.dataset)
 
+
 class BoxlibDataset(Dataset):
     """
     This class is a stripped down class that simply reads and parses
@@ -370,10 +363,10 @@
     periodicity = (True, True, True)
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='boxlib_native',
-                 storage_filename = None):
+                 storage_filename=None):
         """
         The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -390,14 +383,13 @@
         Dataset.__init__(self, output_dir, dataset_type)
 
         # These are still used in a few places.
-        if not "HydroMethod" in self.parameters.keys():
+        if "HydroMethod" not in self.parameters.keys():
             self.parameters["HydroMethod"] = 'boxlib'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["EOSType"] = -1 # default
+        self.parameters["Time"] = 1.     # default unit is 1...
+        self.parameters["EOSType"] = -1  # default
         self.parameters["gamma"] = self.parameters.get(
             "materials.gamma", 1.6667)
 
-
     def _localize_check(self, fn):
         # If the file exists, use it.  If not, set it to None.
         root_dir = os.path.dirname(self.output_dir)
@@ -410,6 +402,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -418,11 +412,11 @@
         args = inspect.getcallargs(cls.__init__, args, kwargs)
         # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename) and \
            not os.path.exists(jobinfo_filename):
-            return True # We have no parameters to go off of
+            return True  # We have no parameters to go off of
         # If we do have either inputs or jobinfo, we should be deferring to a
         # different frontend.
         return False
@@ -464,7 +458,7 @@
             self.omega_lambda = self.parameters["comoving_OmL"]
             self.omega_matter = self.parameters["comoving_OmM"]
             self.hubble_constant = self.parameters["comoving_h"]
-            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            a_file = open(os.path.join(self.output_dir, 'comoving_a'))
             line = a_file.readline().strip()
             a_file.close()
             self.current_redshift = 1/float(line) - 1
@@ -491,7 +485,7 @@
             # So we'll try to determine this.
             vals = vals.split()
             if any(_scinot_finder.match(v) for v in vals):
-                vals = [float(v.replace("D","e").replace("d","e"))
+                vals = [float(v.replace("D", "e").replace("d", "e"))
                         for v in vals]
             if len(vals) == 1:
                 vals = vals[0]
@@ -509,22 +503,22 @@
         # call readline() if we want to end up with an offset at the very end.
         # Fortunately, elsewhere we don't care about the offset, so we're fine
         # everywhere else using iteration exclusively.
-        header_file = open(os.path.join(self.output_dir,'Header'))
+        header_file = open(os.path.join(self.output_dir, 'Header'))
         self.orion_version = header_file.readline().rstrip()
         n_fields = int(header_file.readline())
 
         self._field_list = [header_file.readline().strip()
-                           for i in range(n_fields)]
+                            for i in range(n_fields)]
 
         self.dimensionality = int(header_file.readline())
         self.current_time = float(header_file.readline())
         # This is traditionally a index attribute, so we will set it, but
         # in a slightly hidden variable.
-        self._max_level = int(header_file.readline()) 
+        self._max_level = int(header_file.readline())
         self.domain_left_edge = np.array(header_file.readline().split(),
                                          dtype="float64")
         self.domain_right_edge = np.array(header_file.readline().split(),
-                                         dtype="float64")
+                                          dtype="float64")
         ref_factors = np.array([int(i) for i in
                                 header_file.readline().split()])
         if ref_factors.size == 0:
@@ -540,26 +534,26 @@
             self.refine_by = min(ref_factors)
             # Check that they're all multiples of the minimum.
             if not all(float(rf)/self.refine_by ==
-                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                       int(float(rf)/self.refine_by) for rf in ref_factors):
                 raise RuntimeError
             base_log = np.log2(self.refine_by)
-            self.level_offsets = [0] # level 0 has to have 0 offset
+            self.level_offsets = [0]  # level 0 has to have 0 offset
             lo = 0
             for lm1, rf in enumerate(self.ref_factors):
                 lo += int(np.log2(rf) / base_log) - 1
                 self.level_offsets.append(lo)
-        #assert(np.unique(ref_factors).size == 1)
+        # assert(np.unique(ref_factors).size == 1)
         else:
             self.refine_by = ref_factors[0]
             self.level_offsets = [0 for l in range(self._max_level + 1)]
-        # Now we read the global index space, to get 
+        # Now we read the global index space, to get the domain dimensions
         index_space = header_file.readline()
         # This will be of the form:
         #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
         # So note that if we split it all up based on spaces, we should be
         # fine, as long as we take the first two entries, which correspond to
         # the root level.  I'm not 100% pleased with this solution.
-        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        root_space = index_space.replace("(", "").replace(")", "").split()[:2]
         start = np.array(root_space[0].split(","), dtype="int64")
         stop = np.array(root_space[1].split(","), dtype="int64")
         self.domain_dimensions = stop - start + 1
@@ -582,9 +576,9 @@
             raise RuntimeError("yt does not yet support spherical geometry")
 
         # overrides for 1/2-dimensional data
-        if self.dimensionality == 1: 
+        if self.dimensionality == 1:
             self._setup1d()
-        elif self.dimensionality == 2: 
+        elif self.dimensionality == 2:
             self._setup2d()
 
     def _set_code_unit_attributes(self):
@@ -594,20 +588,20 @@
         self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _setup1d(self):
-#        self._index_class = BoxlibHierarchy1D
-#        self._fieldinfo_fallback = Orion1DFieldInfo
+        # self._index_class = BoxlibHierarchy1D
+        # self._fieldinfo_fallback = Orion1DFieldInfo
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0, 0.0]])
         self.domain_right_edge = \
             np.concatenate([self.domain_right_edge, [1.0, 1.0]])
         tmp = self.domain_dimensions.tolist()
-        tmp.extend((1,1))
+        tmp.extend((1, 1))
         self.domain_dimensions = np.array(tmp)
         tmp = list(self.periodicity)
         tmp[1] = False
         tmp[2] = False
         self.periodicity = ensure_tuple(tmp)
-        
+
     def _setup2d(self):
         self.domain_left_edge = \
             np.concatenate([self.domain_left_edge, [0.0]])
@@ -636,12 +630,13 @@
         offset = self.level_offsets[l1] - self.level_offsets[l0]
         return self.refine_by**(l1-l0 + offset)
 
+
 class OrionHierarchy(BoxlibHierarchy):
-    
+
     def __init__(self, ds, dataset_type='orion_native'):
         BoxlibHierarchy.__init__(self, ds, dataset_type)
         self._read_particles()
-        #self.io = IOHandlerOrion
+        # self.io = IOHandlerOrion
 
     def _read_particles(self):
         """
@@ -673,7 +668,7 @@
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
+                mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
@@ -688,39 +683,42 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
         return True
-                
+
+
 class OrionDataset(BoxlibDataset):
 
     _index_class = OrionHierarchy
 
     def __init__(self, output_dir,
-                 cparam_filename = "inputs",
-                 fparam_filename = "probin",
+                 cparam_filename="inputs",
+                 fparam_filename="probin",
                  dataset_type='orion_native',
-                 storage_filename = None):
+                 storage_filename=None):
 
         BoxlibDataset.__init__(self, output_dir,
-                 cparam_filename, fparam_filename, dataset_type)
-          
+                               cparam_filename, fparam_filename, dataset_type)
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
-        # fill our args                                                                               
+        # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
-            # We *know* it's not boxlib if Header doesn't exist.                                      
+            # We *know* it's not boxlib if Header doesn't exist.
             return False
         args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow                                                     
+        # This might need to be localized somehow
         inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
+            os.path.dirname(os.path.abspath(output_dir)),
+            args['cparam_filename'])
         if not os.path.exists(inputs_filename):
             return False
         if os.path.exists(jobinfo_filename):
             return False
-        # Now we check for all the others                                                             
+        # Now we check for all the others
         lines = open(inputs_filename).readlines()
         if any(("castro." in line for line in lines)): return False
         if any(("nyx." in line for line in lines)): return False
@@ -728,6 +726,7 @@
         if any(("geometry.prob_lo" in line for line in lines)): return True
         return False
 
+
 class CastroDataset(BoxlibDataset):
 
     _field_info_class = CastroFieldInfo
@@ -736,6 +735,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -748,6 +749,7 @@
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+
 class MaestroDataset(BoxlibDataset):
 
     _field_info_class = MaestroFieldInfo
@@ -756,6 +758,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         output_dir = args[0]
+        # boxlib datasets are always directories
+        if not os.path.isdir(output_dir): return False
         header_filename = os.path.join(output_dir, "Header")
         jobinfo_filename = os.path.join(output_dir, "job_info")
         if not os.path.exists(header_filename):
@@ -765,7 +769,7 @@
             return False
         # Now we check the job_info for the mention of maestro
         lines = open(jobinfo_filename).readlines()
-        if any("maestro" in line.lower() for line in lines): return True
+        if any(line.startswith("MAESTRO   ") for line in lines): return True
         return False
 
     def _parse_parameter_file(self):
@@ -782,7 +786,7 @@
                 line = f.next()
             # get the runtime parameters
             for line in f:
-                p, v = (_.strip() for _ in line[4:].split("=",1))
+                p, v = (_.strip() for _ in line[4:].split("=", 1))
                 if len(v) == 0:
                     self.parameters[p] = ""
                 else:
@@ -827,7 +831,7 @@
         maxlevel = int(header.readline()) # max level
 
         # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel + 1):dummy = header.readline()
+        for i in range(maxlevel + 1): dummy = header.readline()
 
         grid_info = np.fromiter((int(i) for line in header.readlines()
                                  for i in line.split()),
@@ -844,6 +848,7 @@
 
         self.grid_particle_count[:, 0] = grid_info[:, 1]
 
+
 class NyxDataset(BoxlibDataset):
 
     _index_class = NyxHierarchy
@@ -852,6 +857,8 @@
     def _is_valid(cls, *args, **kwargs):
         # fill our args
         pname = args[0].rstrip("/")
+        # boxlib datasets are always directories
+        if not os.path.isdir(pname): return False
         dn = os.path.dirname(pname)
         if len(args) > 1:
             kwargs['paramFilename'] = args[1]
@@ -862,15 +869,13 @@
         # We check for the job_info file's existence because this is currently
         # what distinguishes Nyx data from MAESTRO data.
         pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
+        if not os.path.exists(pfn) or os.path.isdir(pfn): return False
         nyx = any(("nyx." in line for line in open(pfn)))
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not nyx) and (not maestro)
         return nyx
 
     def _parse_parameter_file(self):
         super(NyxDataset, self)._parse_parameter_file()
-        #return
+        # return
         # Nyx is always cosmological.
         self.cosmological_simulation = 1
         self.omega_lambda = self.parameters["comoving_OmL"]
@@ -904,7 +909,7 @@
     v = vals.split()[0] # Just in case there are multiple; we'll go
                         # back afterward to using vals.
     try:
-        float(v.upper().replace("D","E"))
+        float(v.upper().replace("D", "E"))
     except:
         pcast = str
         if v in ("F", "T"):

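For reference, the Header read order used by _parse_parameter_file above can be
condensed into a minimal standalone sketch (assuming a plotfile Header laid out
as the comments describe; real Headers contain further entries after the index
space line, which this sketch ignores):

    import numpy as np

    def read_boxlib_header(path):
        # mirrors the read order in BoxlibDataset._parse_parameter_file
        with open(path) as f:
            version = f.readline().rstrip()
            n_fields = int(f.readline())
            fields = [f.readline().strip() for _ in range(n_fields)]
            dimensionality = int(f.readline())
            current_time = float(f.readline())
            max_level = int(f.readline())
            left_edge = np.array(f.readline().split(), dtype="float64")
            right_edge = np.array(f.readline().split(), dtype="float64")
            ref_factors = [int(i) for i in f.readline().split()]
            index_space = f.readline()
        # keep only the root-level (start) (stop) triples, as above
        root = index_space.replace("(", "").replace(")", "").split()[:2]
        start = np.array(root[0].split(","), dtype="int64")
        stop = np.array(root[1].split(","), dtype="int64")
        return fields, max_level, left_edge, right_edge, stop - start + 1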
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/boxlib/tests/test_orion.py
--- a/yt/frontends/boxlib/tests/test_orion.py
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -42,3 +42,8 @@
     for test in small_patch_amr(rt, _fields):
         test_radtube.__name__ = test.description
         yield test
+
+
+@requires_file(rt)
+def test_OrionDataset():
+    assert isinstance(data_dir_load(rt), OrionDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -13,15 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from yt.testing import *
+from yt.testing import \
+    requires_file, \
+    assert_equal
 from yt.utilities.answer_testing.framework import \
     requires_ds, \
     small_patch_amr, \
-    big_patch_amr, \
     data_dir_load
-from yt.frontends.chombo.api import ChomboDataset
+from yt.frontends.chombo.api import \
+    ChomboDataset, \
+    Orion2Dataset
 
-_fields = ("density", "velocity_magnitude", #"velocity_divergence",
+_fields = ("density", "velocity_magnitude",  # "velocity_divergence",
            "magnetic_field_x")
 
 gc = "GaussianCloud/data.0077.3d.hdf5"
@@ -49,6 +52,22 @@
 def test_zp():
     ds = data_dir_load(zp)
     yield assert_equal, str(ds), "plt32.2d.hdf5"
-    for test in small_patch_amr(zp, _zp_fields, input_center="c", input_weight="rhs"):
+    for test in small_patch_amr(zp, _zp_fields, input_center="c",
+                                input_weight="rhs"):
         test_tb.__name__ = test.description
         yield test
+
+
+@requires_file(zp)
+def test_ChomboDataset():
+    assert isinstance(data_dir_load(zp), ChomboDataset)
+
+
+@requires_file(gc)
+def test_Orion2Dataset():
+    assert isinstance(data_dir_load(gc), Orion2Dataset)
+
+
+#@requires_file(kho)
+#def test_PlutoDataset():
+#    assert isinstance(data_dir_load(kho), PlutoDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -42,3 +42,8 @@
     for test in small_patch_amr(wt, _fields_2d):
         test_wind_tunnel.__name__ = test.description
         yield test
+
+
+@requires_file(wt)
+def test_FLASHDataset():
+    assert isinstance(data_dir_load(wt), FLASHDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/moab/tests/test_c5.py
--- a/yt/frontends/moab/tests/test_c5.py
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -56,3 +56,7 @@
         for dobj_name in dso:
             yield FieldValuesTest(c5, field, dobj_name)
 
+
+@requires_file(c5)
+def test_MoabHex8Dataset():
+    assert isinstance(data_dir_load(c5), MoabHex8Dataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -94,8 +94,9 @@
             return rv
         self.add_field(("gas", "temperature"), function=_temperature,
                         units="K")
+        self.create_cooling_fields()
 
-    def create_cooling_fields(self, filename):
+    def create_cooling_fields(self):
         num = os.path.basename(self.ds.parameter_filename).split("."
                 )[0].split("_")[1]
         filename = "%s/cooling_%05i.out" % (
@@ -104,7 +105,7 @@
         if not os.path.exists(filename): return
         def _create_field(name, interp_object):
             def _func(field, data):
-                shape = data["Temperature"].shape
+                shape = data["temperature"].shape
                 d = {'lognH': np.log10(_X*data["density"]/mh).ravel(),
                      'logT' : np.log10(data["temperature"]).ravel()}
                 rv = 10**interp_object(d).reshape(shape)
@@ -131,4 +132,4 @@
             interp = BilinearFieldInterpolator(tvals[n],
                         (avals["lognH"], avals["logT"]),
                         ["lognH", "logT"], truncate = True)
-            _create_field(n, interp)
+            _create_field(("gas", n), interp)

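With create_cooling_fields now called from the field setup itself, the cooling
fields are registered automatically under the "gas" field type; their names
come from the cooling_NNNNN.out table and so vary by output. A hedged way to
see what was picked up, using the output_00080 dataset referenced in the tests
below:

    import yt
    ds = yt.load("output_00080/info_00080.txt")
    # any cooling fields read from the table appear among the "gas" fields
    print [f for f in ds.derived_field_list if f[0] == "gas"]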
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/ramses/tests/test_outputs.py
--- a/yt/frontends/ramses/tests/test_outputs.py
+++ b/yt/frontends/ramses/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-RAMSES frontend tests 
+RAMSES frontend tests
 
 
 
@@ -21,10 +21,10 @@
     PixelizedProjectionValuesTest, \
     FieldValuesTest, \
     create_obj
-from yt.frontends.artio.api import ARTIODataset
+from yt.frontends.ramses.api import RAMSESDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
-           ("deposit", "all_density"), ("deposit", "all_count")) 
+           ("deposit", "all_density"), ("deposit", "all_count"))
 
 output_00080 = "output_00080/info_00080.txt"
 @requires_ds(output_00080)
@@ -44,3 +44,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(output_00080)
+def test_RAMSESDataset():
+    assert isinstance(data_dir_load(output_00080), RAMSESDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,7 +53,7 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
     _known_ptypes = ghdf5_ptypes
     _var_mass = None
-    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen',
                        'Neon', 'Magnesium', 'Silicon', 'Iron' )
 
 
@@ -81,6 +81,8 @@
             f = _get_h5_handle(data_file.filename)
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
                 x = f["/%s/Coordinates" % ptype][:,0].astype("float64")
                 y = f["/%s/Coordinates" % ptype][:,1].astype("float64")
                 z = f["/%s/Coordinates" % ptype][:,2].astype("float64")
@@ -96,6 +98,8 @@
         for data_file in sorted(data_files):
             f = _get_h5_handle(data_file.filename)
             for ptype, field_list in sorted(ptf.items()):
+                if data_file.total_particles[ptype] == 0:
+                    continue
                 g = f["/%s" % ptype]
                 coords = g["Coordinates"][:].astype("float64")
                 mask = selector.select_points(
@@ -103,11 +107,11 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
-                    
+
                     if field in ("Mass", "Masses") and \
                         ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
-                        ind = self._known_ptypes.index(ptype) 
+                        ind = self._known_ptypes.index(ptype)
                         data[:] = self.ds["Massarr"][ind]
 
                     elif field in self._element_names:
@@ -152,7 +156,7 @@
         f = _get_h5_handle(data_file.filename)
         pcount = f["/Header"].attrs["NumPart_ThisFile"][:]
         f.close()
-        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) 
+        npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount))
         return npart
 
 
@@ -164,7 +168,7 @@
 
         # loop over all keys in OWLS hdf5 file
         #--------------------------------------------------
-        for key in f.keys():   
+        for key in f.keys():
 
             # only want particle data
             #--------------------------------------
@@ -334,7 +338,7 @@
 
     def _count_particles(self, data_file):
         npart = dict((self._ptypes[i], v)
-            for i, v in enumerate(data_file.header["Npart"])) 
+            for i, v in enumerate(data_file.header["Npart"]))
         return npart
 
     # header is 256, but we have 4 at beginning and end for ints
@@ -443,13 +447,13 @@
         dtype = None
         # We need to do some fairly ugly detection to see what format the auxiliary
         # files are in.  They can be either ascii or binary, and the binary files can be
-        # either floats, ints, or doubles.  We're going to use a try-catch cascade to 
+        # either floats, ints, or doubles.  We're going to use a try-catch cascade to
         # determine the format.
         try:#ASCII
             auxdata = np.genfromtxt(filename, skip_header=1)
             if auxdata.size != np.sum(data_file.total_particles.values()):
                 print "Error reading auxiliary tipsy file"
-                raise RuntimeError 
+                raise RuntimeError
         except ValueError:#binary/xdr
             f = open(filename, 'rb')
             l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
@@ -469,7 +473,7 @@
                 except struct.error: # None of the binary attempts to read succeeded
                     print "Error reading auxiliary tipsy file"
                     raise RuntimeError
-            
+
         # Use the mask to slice out the appropriate particle type data
         if mask.size == data_file.total_particles['Gas']:
             return auxdata[:data_file.total_particles['Gas']]
@@ -556,14 +560,14 @@
 
     def _update_domain(self, data_file):
         '''
-        This method is used to determine the size needed for a box that will 
+        This method is used to determine the size needed for a box that will
         bound the particles.  It simply finds the largest position of the
         whole set of particles, and sets the domain to +/- that value.
         '''
         ds = data_file.ds
         ind = 0
         # Check to make sure that the domain hasn't already been set
-        # by the parameter file 
+        # by the parameter file
         if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
             return
         with open(data_file.filename, "rb") as f:
@@ -682,11 +686,11 @@
                 continue
             field_list.append((ptype, field))
         if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
-            field_list += [("Gas",a) for a in self._aux_fields] 
+            field_list += [("Gas",a) for a in self._aux_fields]
         if any(["DarkMatter"==f[0] for f in field_list]):
-            field_list += [("DarkMatter",a) for a in self._aux_fields] 
+            field_list += [("DarkMatter",a) for a in self._aux_fields]
         if any(["Stars"==f[0] for f in field_list]):
-            field_list += [("Stars",a) for a in self._aux_fields] 
+            field_list += [("Stars",a) for a in self._aux_fields]
         self._field_list = field_list
         return self._field_list
 
@@ -706,11 +710,11 @@
 class IOHandlerHTTPStream(BaseIOHandler):
     _dataset_type = "http_particle_stream"
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
-    
+
     def __init__(self, ds):
         if requests is None:
             raise RuntimeError
-        self._url = ds.base_url 
+        self._url = ds.base_url
         # This should eventually manage the IO and cache it
         self.total_bytes = 0
         super(IOHandlerHTTPStream, self).__init__(ds)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/sph/tests/test_owls.py
--- a/yt/frontends/sph/tests/test_owls.py
+++ b/yt/frontends/sph/tests/test_owls.py
@@ -53,3 +53,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(os33)
+def test_OWLSDataset():
+    assert isinstance(data_dir_load(os33), OWLSDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/frontends/sph/tests/test_tipsy.py
--- a/yt/frontends/sph/tests/test_tipsy.py
+++ b/yt/frontends/sph/tests/test_tipsy.py
@@ -92,3 +92,8 @@
         s1 = dobj["ones"].sum()
         s2 = sum(mask.sum() for block, mask in dobj.blocks)
         yield assert_equal, s1, s2
+
+
+@requires_file(pkdgrav)
+def test_TipsyDataset():
+    assert isinstance(data_dir_load(pkdgrav), TipsyDataset)

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -270,7 +270,6 @@
 
     api_version = get_ipython_api_version()
 
-    stack = inspect.stack()
     frame = inspect.stack()[num_up]
     loc = frame[0].f_locals.copy()
     glo = frame[0].f_globals
@@ -537,7 +536,6 @@
     return version_info
 
 def get_script_contents():
-    stack = inspect.stack()
     top_frame = inspect.stack()[-1]
     finfo = inspect.getframeinfo(top_frame[0])
     if finfo[2] != "<module>": return None
@@ -749,6 +747,7 @@
     return _func
     
 def enable_plugins():
+    import yt
     from yt.config import ytcfg
     my_plugin_name = ytcfg.get("yt","pluginfilename")
     # We assume that it is with respect to the $HOME/.yt directory
@@ -758,4 +757,4 @@
         _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
     if os.path.isfile(_fn):
         mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+        execfile(_fn, yt.__dict__)

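Since the plugin file is now executed with yt's own namespace as its globals,
top-level yt names resolve inside it without explicit imports. A minimal sketch
of a plugin file (the filename comes from the "pluginfilename" config entry;
the field defined here is purely illustrative):

    # hypothetical contents of the plugin file under ~/.yt/
    def _double_density(field, data):
        return 2.0*data["density"]

    # add_field resolves because the file is exec'd in yt.__dict__
    add_field("double_density", function=_double_density, units="g/cm**3")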
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -214,40 +214,37 @@
         for ftype, fname in fields:
             if fname in self.field_list or (ftype, fname) in self.field_list:
                 fields_to_read.append((ftype, fname))
+            elif fname in self.ds.derived_field_list or (ftype, fname) in self.ds.derived_field_list:
+                fields_to_generate.append((ftype, fname))
             else:
-                fields_to_generate.append((ftype, fname))
+                raise YTFieldNotFound((ftype,fname), self.ds)
         return fields_to_read, fields_to_generate
 
     def _read_particle_fields(self, fields, dobj, chunk = None):
         if len(fields) == 0: return {}, []
+        fields_to_read, fields_to_generate = self._split_fields(fields)
+        if len(fields_to_read) == 0:
+            return {}, fields_to_generate
         selector = dobj.selector
         if chunk is None:
             self._identify_base_chunk(dobj)
-        fields_to_return = {}
-        fields_to_read, fields_to_generate = self._split_fields(fields)
-        if len(fields_to_read) == 0:
-            return {}, fields_to_generate
         fields_to_return = self.io._read_particle_selection(
             self._chunk_io(dobj, cache = False),
             selector,
             fields_to_read)
-        for field in fields_to_read:
-            ftype, fname = field
-            finfo = self.ds._get_field_info(*field)
         return fields_to_return, fields_to_generate
 
     def _read_fluid_fields(self, fields, dobj, chunk = None):
         if len(fields) == 0: return {}, []
+        fields_to_read, fields_to_generate = self._split_fields(fields)
+        if len(fields_to_read) == 0:
+            return {}, fields_to_generate
         selector = dobj.selector
         if chunk is None:
             self._identify_base_chunk(dobj)
             chunk_size = dobj.size
         else:
             chunk_size = chunk.data_size
-        fields_to_return = {}
-        fields_to_read, fields_to_generate = self._split_fields(fields)
-        if len(fields_to_read) == 0:
-            return {}, fields_to_generate
         fields_to_return = self.io._read_fluid_selection(
             self._chunk_io(dobj),
             selector,

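With _split_fields reorganized, asking for a field that is in neither
field_list nor derived_field_list now raises YTFieldNotFound up front instead
of being silently queued for generation. A small illustration (fake_random_ds
and the field name are just for demonstration):

    from yt.testing import fake_random_ds
    from yt.utilities.exceptions import YTFieldNotFound

    ds = fake_random_ds(16)
    try:
        ds.all_data()["gas", "no_such_field"]
    except YTFieldNotFound:
        # unknown fields now fail fast rather than deep inside field generation
        pass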
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -418,7 +418,7 @@
         cdef np.ndarray[np.uint8_t, ndim=1] coords
         cdef OctVisitorData data
         self.setup_data(&data, domain_id)
-        coords = np.zeros((num_cells*8), dtype="uint8")
+        coords = np.zeros((num_cells*data.nz), dtype="uint8")
         data.array = <void *> coords.data
         self.visit_all_octs(selector, oct_visitors.mask_octs, &data)
         return coords.astype("bool")

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -624,6 +624,18 @@
         return _func
     return compare_results(func)
 
+def periodicity_cases(ds):
+    # This is a generator that yields things near the corners.  It's good for
+    # getting different places to check periodicity.
+    yield (ds.domain_left_edge + ds.domain_right_edge)/2.0
+    dx = ds.domain_width / ds.domain_dimensions
+    # We start one dx in, and only go to one in as well.
+    for i in (1, ds.domain_dimensions[0] - 2):
+        for j in (1, ds.domain_dimensions[1] - 2):
+            for k in (1, ds.domain_dimensions[2] - 2):
+                center = dx * np.array([i,j,k]) + ds.domain_left_edge
+                yield center
+
 def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
     import nose, os, sys, yt
     from yt.funcs import mylog

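The new periodicity_cases helper yields the domain center plus points one cell
in from each corner. A sketch of how a test might consume it (fake_random_ds is
the existing helper in yt.testing; the sphere radius is arbitrary):

    from yt.testing import fake_random_ds, periodicity_cases

    ds = fake_random_ds(16)
    for center in periodicity_cases(ds):
        # exercise selection near the corners, where periodic wrapping matters
        sp = ds.sphere(center, (0.1, "unitary"))
        sp["density"]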
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -81,8 +81,6 @@
         for lvl in lvl_range:
             #grids = self.data_source.select_grids(lvl)
             grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
-            gids = np.array([g.id for g in grids if g.Level == lvl],
-                            dtype="int64")
             if len(grids) == 0: continue
             self.add_grids(grids)
 
@@ -93,7 +91,6 @@
             grid = self.ds.index.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            gre = grid.RightEdge
             nle = self.ds.arr(get_left_edge(node), input_units="code_length")
             nre = self.ds.arr(get_right_edge(node), input_units="code_length")
             li = np.rint((nle-gle)/dds).astype('int32')

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -574,7 +574,7 @@
         for newp, oldp in zip(new_result["parents"], old_result["parents"]):
             assert(newp == oldp)
         for newc, oldc in zip(new_result["children"], old_result["children"]):
-            assert(newp == oldp)
+            assert(newc == oldc)
 
 class SimulatedHaloMassFunctionTest(AnswerTestingTest):
     _type_name = "SimulatedHaloMassFunction"

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/answer_testing/runner.py
--- a/yt/utilities/answer_testing/runner.py
+++ b/yt/utilities/answer_testing/runner.py
@@ -89,8 +89,7 @@
         self.plot_tests = plot_tests
 
     def run_all_tests(self):
-        plot_list = []
-        for i,name in enumerate(sorted(test_registry)):
+        for name in sorted(test_registry):
             self.run_test(name)
         return self.passed_tests
 
@@ -98,7 +97,6 @@
         # We'll also need to call the "compare" operation,
         # but for that we'll need a data store.
         test = test_registry[name]
-        plot_list = []
         if test.output_type == 'single':
             mot = MultipleOutputTest(self.io_log)
             for i,fn in enumerate(mot):

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -34,13 +34,7 @@
     """ Calculate list of product(psize) subarrays of arr, along with their
         left and right edges
     """
-    grid_left_edges = np.empty([np.product(psize), 3], dtype=np.float64)
-    grid_right_edges = np.empty([np.product(psize), 3], dtype=np.float64)
-    n_d = shape
-    d_s = (bbox[:, 1] - bbox[:, 0]) / n_d
-    grid_left_edges, grid_right_edges, shapes, slices = \
-            split_array(bbox[:, 0], bbox[:, 1], shape, psize)
-    return grid_left_edges, grid_right_edges, shapes, slices
+    return split_array(bbox[:, 0], bbox[:, 1], shape, psize)
 
 
 def evaluate_domain_decomposition(n_d, pieces, ldom):

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -147,8 +147,6 @@
         for dim in range(3):
             sig = self.sigs[dim]
             sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
-            grid_ends = np.zeros((sig.size, 2))
-            ng = 0
             center = int((self.flagged.shape[dim] - 1) / 2)
             strength = zero_strength = 0
             for i in range(1, sig.size-2):

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -208,7 +208,6 @@
     >>> skip(f, 3)
     """
     skipped = []
-    pos = f.tell()
     for i in range(n):
         fmt = endian+"I"
         size = f.read(struct.calcsize(fmt))

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -14,7 +14,7 @@
 
 def setup():
     # set up some sample cylindrical grid data, radiating out from center
-    global left_grid, right_grid, amr_levels, center_grid
+    global left_grid, right_grid, amr_levels, center_grid, data
     np.seterr(all='ignore')
     l1, r1, lvl1 = amrspace([0.0, 1.0, 0.0, -1.0, 0.0, 2*np.pi], levels=(7,7,0))
     l2, r2, lvl2 = amrspace([0.0, 1.0, 0.0,  1.0, 0.0, 2*np.pi], levels=(7,7,0))

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -240,7 +240,6 @@
     Return an iterator over EnzoSphere objects generated from the appropriate 
     columns in *filename*.  Optionally specify the *unit* the radius is in.
     """
-    sp_list = []
     for line in open(filename):
         if line.startswith("#"): continue
         vals = line.split()

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -207,7 +207,6 @@
 
     def _generate_post(self):
         metadata = self._attrs
-        chunks = []
         return (metadata, ("chunks", []))
 
 class MinimalNotebook(MinimalRepresentation):

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -57,7 +57,7 @@
         ds = self.ds
         fields = [f for f in ds.field_list
                   if not ds.field_info[f].particle_type]
-        dsields = [f for f in ds.field_list
+        pfields = [f for f in ds.field_list
                    if ds.field_info[f].particle_type]
         # Preload is only defined for Enzo ...
         if ds.index.io._dataset_type == "enzo_packed_3d":

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -4,8 +4,8 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
-    from numpy.distutils.system_info import get_info
-    from distutils.sysconfig import get_python_inc
+#    from numpy.distutils.system_info import get_info
+#    from distutils.sysconfig import get_python_inc
 
     config = Configuration('spatial', parent_package, top_path)
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/utilities/spatial/setupscons.py
--- a/yt/utilities/spatial/setupscons.py
+++ b/yt/utilities/spatial/setupscons.py
@@ -3,7 +3,7 @@
 from os.path import join
 
 def configuration(parent_package = '', top_path = None):
-    from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
+    from numpy.distutils.misc_util import Configuration
     config = Configuration('spatial', parent_package, top_path)
 
     config.add_data_dir('tests')

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -19,6 +19,12 @@
 from yt.funcs import \
     get_image_suffix, mylog, iterable
 import numpy as np
+try:
+    import brewer2mpl
+    has_brewer = True
+except:
+    has_brewer = False
+
 
 class CallbackWrapper(object):
     def __init__(self, viewer, window_plot, frb, field):
@@ -110,6 +116,13 @@
         elif (cbnorm == 'linear'):
             norm = matplotlib.colors.Normalize()
         extent = [float(e) for e in extent]
+        if isinstance(cmap, tuple):
+            if has_brewer:
+                bmap = brewer2mpl.get_map(*cmap)
+                cmap = bmap.get_mpl_colormap(N=cmap[2])
+            else:
+                raise RuntimeError("Please install brewer2mpl to use colorbrewer colormaps")
+
         self.image = self.axes.imshow(data.to_ndarray(), origin='lower',
                                       extent=extent, norm=norm, vmin=self.zmin,
                                       aspect=aspect, vmax=self.zmax, cmap=cmap)

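With the tuple form accepted above, a colorbrewer colormap is requested as
(name, map_type, number), matching what brewer2mpl.get_map expects. A hedged
usage sketch (whether set_cmap passes the tuple through unchanged is an
assumption here):

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    slc = yt.SlicePlot(ds, "z", "density")
    # requires brewer2mpl; ("RdYlBu", "diverging", 9) names a colorbrewer map
    slc.set_cmap("density", ("RdYlBu", "diverging", 9))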
diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -1013,7 +1013,6 @@
             for i in range(npanels): ylabels.append("")
 
     d = DualEPS(figsize=figsize)
-    count = 0
     for j in range(nrow):
         invj = nrow - j - 1
         ypos = invj*(figsize[1] + margins[1])

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -215,8 +215,8 @@
         dy = (self.ylim[1] - self.ylim[0])/self.size[1]
         my_lim = (self.xlim[0] + dx*self.start_indices[0],
                   self.xlim[0] + dx*(self.start_indices[0] + self.my_size[0]),
-                  self.ylim[0] + dx*self.start_indices[1],
-                  self.ylim[0] + dx*(self.start_indices[1] + self.my_size[1]))
+                  self.ylim[0] + dy*self.start_indices[1],
+                  self.ylim[0] + dy*(self.start_indices[1] + self.my_size[1]))
         new_buffer = FixedResolutionBuffer(self.source, my_lim, self.my_size)
         self._buffer = new_buffer
 

diff -r 1364dd1c2fab6735316275334c66f4c1b3880c97 -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -23,6 +23,12 @@
 import yt.utilities.lib.image_utilities as au
 import yt.utilities.png_writer as pw
 from yt.extern.six.moves import builtins
+try:
+    import brewer2mpl
+    has_brewer = True
+except:
+    has_brewer = False
+
 
 def scale_image(image, mi=None, ma=None):
     r"""Scale an image ([NxNxM] where M = 1-4) to be uint8 and values scaled 
@@ -248,7 +254,14 @@
         lut = cmd.color_map_luts[cmap_name]
     except KeyError:
         try:
-            cmap = mcm.get_cmap(cmap_name)
+            if isinstance(cmap_name, tuple):
+                if has_brewer:
+                    bmap = brewer2mpl.get_map(*cmap_name)
+                    cmap = bmap.get_mpl_colormap(N=cmap_name[2])
+                else:
+                    raise RuntimeError("Please install brewer2mpl to use colorbrewer colormaps")
+            else:
+                cmap = mcm.get_cmap(cmap_name)
             dummy = cmap(0.0)
             lut = cmap._lut.T
         except ValueError:
@@ -256,10 +269,19 @@
                 " colormap file or matplotlib colormaps"
             raise KeyError(cmap_name)
 
-    x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
-    shape = buff.shape
-    mapped = np.dstack(
-            [(np.interp(buff, x, v)*255) for v in lut ]).astype("uint8")
+    if isinstance(cmap_name, tuple) and has_brewer:
+        # If we are using the colorbrewer maps, don't interpolate
+        shape = buff.shape
+        # We add float_eps so that digitize doesn't go out of bounds
+        x = np.mgrid[0.0:1.0+np.finfo(np.float32).eps:lut[0].shape[0]*1j]
+        inds = np.digitize(buff.ravel(), x)
+        inds.shape = (shape[0], shape[1])
+        mapped = np.dstack([(v[inds]*255).astype('uint8') for v in lut])
+        del inds
+    else:
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        mapped = np.dstack(
+                [(np.interp(buff, x, v)*255).astype('uint8') for v in lut ])
     return mapped.copy("C")
 
 def strip_colormap_data(fn = "color_map_data.py",

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0c5846bbb80d/
Changeset:   0c5846bbb80d
Branch:      yt
User:        mzingale
Date:        2014-10-01 01:27:23+00:00
Summary:     merge
Affected #:  77 files

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -986,8 +986,11 @@
 
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
-    echo "Installing pure-python readline"
-    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
+    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    then
+        echo "Installing pure-python readline"
+        ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
+    fi
 fi
 
 if [ $INST_ENZO -eq 1 ]

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -291,7 +291,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], method=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -304,4 +304,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

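The proj_style keyword is being renamed to method throughout these documents;
"sum" simply adds cell values along the line of sight, while the default
"integrate" weights by path length. A quick sketch of the two (fake_random_ds
stands in for a real dataset):

    import yt
    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    p_int = yt.ProjectionPlot(ds, "z", "density", method="integrate")
    p_sum = yt.ProjectionPlot(ds, "z", "density", method="sum")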
diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -72,6 +72,8 @@
 * Quantities
 * Callbacks
 
+A list of all available filters, quantities, and callbacks can be found in 
+:ref:`halo_analysis_ref`.  
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
@@ -161,6 +163,18 @@
    # ... Later on in your script
    hc.add_quantity("my_quantity") 
 
+This quantity will then be accessible for functions called later via the 
+*quantities* dictionary that is associated with the halo object.
+
+.. code-block:: python
+
+   def my_new_function(halo):
+       print halo.quantities["my_quantity"]
+   add_callback("print_quantity", my_new_function)
+
+   # ... Anywhere after "my_quantity" has been called
+   hc.add_callback("print_quantity")
+
 Callbacks
 ^^^^^^^^^
 
@@ -178,10 +192,10 @@
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``.  New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
-halo_callbacks.py and issue a pull request
+halo_callbacks.py and issue a pull request.
 
 An example of defining your own callback:
 

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -225,12 +225,12 @@
 
 **Projection** 
     | Class :class:`~yt.data_objects.construction_data_containers.YTQuadTreeProjBase`
-    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, method="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
       By default, this is a line integral through the entire simulation volume 
       (although it can be a subset of that volume specified by a data object
       with the ``data_source`` keyword).  Alternatively, one can specify 
-      a weight_field and different ``style`` values to change the nature
+      a weight_field and different ``method`` values to change the nature
       of the projection outcome.  See :ref:`projection-types` for more information.
 
 **Streamline** 
@@ -263,7 +263,7 @@
 
    ds = load("my_data")
    sp = ds.sphere('c', (10, 'kpc'))
-   print ad.quantities.angular_momentum_vector()
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/cookbook/fits_radio_cubes.ipynb
--- a/doc/source/cookbook/fits_radio_cubes.ipynb
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -183,14 +183,14 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `proj_style = \"sum\"`:"
+      "We can also make a projection of all the emission along the line of sight. Since we're not doing an integration along a path length, we needed to specify `method = \"sum\"`:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], proj_style=\"sum\", origin=\"native\")\n",
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"intensity\"], method=\"sum\", origin=\"native\")\n",
       "prj.show()"
      ],
      "language": "python",

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/cookbook/fits_xray_images.ipynb
--- a/doc/source/cookbook/fits_xray_images.ipynb
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -264,7 +264,7 @@
       "                   [\"flux\",\"projected_temperature\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
       "                   origin=\"native\", field_parameters={\"exposure_time\":exposure_time},\n",
       "                   data_source=circle_reg,\n",
-      "                   proj_style=\"sum\")\n",
+      "                   method=\"sum\")\n",
       "prj.set_log(\"flux\",True)\n",
       "prj.set_log(\"pseudo_pressure\",False)\n",
       "prj.set_log(\"pseudo_entropy\",False)\n",

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/cookbook/halo_profiler.py
--- a/doc/source/cookbook/halo_profiler.py
+++ b/doc/source/cookbook/halo_profiler.py
@@ -18,8 +18,8 @@
 
 # use the sphere to calculate radial profiles of gas density
 # weighted by cell volume in terms of the virial radius
-hc.add_callback("profile", x_field="radius",
-                y_fields=[("gas", "overdensity")],
+hc.add_callback("profile", ["radius"],
+                [("gas", "overdensity")],
                 weight_field="cell_volume",
                 accumulation=False,
                 storage="virial_quantities_profiles")
@@ -32,7 +32,8 @@
 field_params = dict(virial_radius=('quantity', 'radius_200'))
 hc.add_callback('sphere', radius_field='radius_200', factor=5,
                 field_parameters=field_params)
-hc.add_callback('profile', 'virial_radius', [('gas', 'temperature')],
+hc.add_callback('profile', ['virial_radius'], 
+                [('gas', 'temperature')],
                 storage='virial_profiles',
                 weight_field='cell_mass',
                 accumulation=False, output_dir='profiles')

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/developing/building_the_docs.rst
--- a/doc/source/developing/building_the_docs.rst
+++ b/doc/source/developing/building_the_docs.rst
@@ -105,7 +105,10 @@
 
 You will need to have the yt repository available on your computer, which
 is done by default if you have yt installed.  In addition, you need a 
-current version of Sphinx_ (1.1.3) documentation software installed.
+current version of Sphinx_ (1.1.3) documentation software installed, as
+well as the Sphinx
+`Bootstrap theme <https://pypi.python.org/pypi/sphinx-bootstrap-theme/>`_,
+which can be installed via ``pip install sphinx_bootstrap_theme``.
 
 In order to tell sphinx not to do all of the dynamical building, you must
 set the ``$READTHEDOCS`` environment variable to be True by typing this at 

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -173,7 +173,7 @@
 If you plan to develop yt on Windows, it is necessary to use the `MinGW
 <http://www.mingw.org/>`_ gcc compiler that can be installed using the `Anaconda
 Python Distribution <https://store.continuum.io/cshop/anaconda/>`_. The libpython package must be
- installed from Anaconda as well. These can both be installed with a single command:
+installed from Anaconda as well. These can both be installed with a single command:
 
 .. code-block:: bash
 
@@ -229,6 +229,19 @@
  
    If you end up doing considerable development, you can set an alias in the
    file ``.hg/hgrc`` to point to this path.
+
+   .. note::
+     Note that the above approach uses HTTPS as the transfer protocol
+     between your machine and BitBucket.  If you prefer to use SSH - or
+     perhaps you're behind a proxy that doesn't play well with SSL via
+     HTTPS - you may want to set up an `SSH key`_ on BitBucket.  Then, you use
+     the syntax ``ssh://hg@bitbucket.org/YourUsername/yt``, or equivalent, in
+     place of ``https://bitbucket.org/YourUsername/yt`` in Mercurial commands.
+     For consistency, all commands we list in this document will use the HTTPS
+     protocol.
+
+     .. _SSH key: https://confluence.atlassian.com/display/BITBUCKET/Set+up+SSH+for+Mercurial
+
 #. Issue a pull request at
    https://bitbucket.org/YourUsername/yt/pull-request/new
 
@@ -246,6 +259,88 @@
 
 #. Your pull request will be automatically updated.
 
+.. _multiple-PRs:
+
+Working with Multiple BitBucket Pull Requests
++++++++++++++++++++++++++++++++++++++++++++++
+
+Once you become active developing for yt, you may be working on
+various aspects of the code or bugfixes at the same time.  Currently,
+BitBucket's *modus operandi* for pull requests automatically updates
+your active pull request with every ``hg push`` of commits that are a
+descendant of the head of your pull request.  In a normal workflow,
+this means that if you have an active pull request, make some changes
+locally for, say, an unrelated bugfix, and then push those changes back
+to your fork in the hope of creating a *new* pull request, you'll
+actually end up updating your current pull request!
+
+There are a few ways around this feature of BitBucket that will allow
+for multiple pull requests to coexist; we outline one such method
+below.  We assume that you have a fork of yt at
+``https://bitbucket.org/YourUsername/Your_yt`` (see
+:ref:`sharing-changes` for instructions on creating a fork) and that
+you have an active pull request to the main repository.
+
+The main issue when starting another pull request is making sure that
+your push to BitBucket doesn't go to the same head as your
+existing pull request and trigger BitBucket's auto-update feature.
+Here's how to get your local repository away from your current pull
+request head using `revsets <http://www.selenic.com/hg/help/revsets>`_
+and your ``hgrc`` file:
+   
+#. Set up a Mercurial path for the main yt repository (note this is a convenience
+   step and only needs to be done once).  Add the following to your
+   ``Your_yt/.hg/hgrc``::
+
+     [paths]
+     upstream = https://bitbucket.org/yt_analysis/yt
+
+   This will create a path called ``upstream`` that is aliased to the URL of the
+   main yt repository.
+#. Now we'll use revsets_ to update your local repository to the tip of the
+   ``upstream`` path:
+
+   .. code-block:: bash
+
+      $ hg pull upstream
+      $ hg update -r "remote(tip,'upstream')"
+
+After the above steps, your local repository should be at the tip of
+the main yt repository.  If you find yourself doing this a lot, it may
+be worth aliasing this task in your ``hgrc`` file by adding something like::
+
+  [alias]
+  myupdate = update -r "remote(tip,'upstream')"
+
+Then you can simply issue ``hg myupdate`` to update to the tip of the yt
+branch of the main yt repository.
+
+You can then make changes and ``hg commit`` them.  If you prefer
+working with `bookmarks <http://mercurial.selenic.com/wiki/Bookmarks>`_, you may
+want to make a bookmark before committing your changes, such as
+``hg bookmark mybookmark``.
+
+If you didn't use a bookmark, push to your fork on BitBucket by issuing the following:
+
+.. code-block:: bash
+
+  $ hg push -r . -f https://bitbucket.org/YourUsername/Your_yt
+
+The ``-r .`` means "push only the commit I'm standing on and any ancestors."  The
+``-f`` is to force Mercurial to do the push since we are creating a new remote head.
+
+Note that if you *did* use a bookmark, you don't have to force the push, but you do
+need to push the bookmark; in other words, do the following instead of the above:
+
+.. code-block:: bash
+		
+   $ hg push -B mybookmark https://bitbucket.org/YourUsername/Your_yt
+
+The ``-B`` means "publish my bookmark and any relevant changesets to the remote server."
+		
+You can then go to the BitBucket interface and issue a new pull request based on
+your last changes, as usual.
+
 How To Get The Source Code For Editing
 --------------------------------------
 

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/examining/Loading_Spherical_Data.ipynb
--- /dev/null
+++ b/doc/source/examining/Loading_Spherical_Data.ipynb
@@ -0,0 +1,188 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:88ed88ce8d8f4a359052f287aea17a7cbed435ff960e195097b440191ce6c2ab"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "# Loading Spherical Data\n",
+      "\n",
+      "With version 3.0 of yt, it has gained the ability to load data from non-Cartesian systems.  This support is still being extended, but here is an example of how to load spherical data from a regularly-spaced grid.  For irregularly spaced grids, a similar setup can be used, but the `load_hexahedral_mesh` method will have to be used instead.\n",
+      "\n",
+      "Note that in yt, \"spherical\" means that it is ordered $r$, $\\theta$, $\\phi$, where $\\theta$ is the declination from the azimuth (running from $0$ to $\\pi$ and $\\phi$ is the angle around the zenith (running from $0$ to $2\\pi$).\n",
+      "\n",
+      "We first start out by loading yt."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "import yt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we create a few derived fields.  The first three are just straight translations of the Cartesian coordinates, so that we can see where we are located in the data, and understand what we're seeing.  The final one is just a fun field that is some combination of the three coordinates, and will vary in all dimensions."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "@yt.derived_field(name = \"sphx\", units = \"cm\", take_log=False)\n",
+      "def sphx(field, data):\n",
+      "    return np.cos(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphy\", units = \"cm\", take_log=False)\n",
+      "def sphy(field, data):\n",
+      "    return np.sin(data[\"phi\"]) * np.sin(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"sphz\", units = \"cm\", take_log=False)\n",
+      "def sphz(field, data):\n",
+      "    return np.cos(data[\"theta\"])*data[\"r\"]\n",
+      "@yt.derived_field(name = \"funfield\", units=\"cm\", take_log=False)\n",
+      "def funfield(field, data):\n",
+      "    return (np.sin(data[\"phi\"])**2 + np.cos(data[\"theta\"])**2) * (1.0*data[\"r\"].uq+data[\"r\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Loading Data\n",
+      "\n",
+      "Now we can actually load our data.  We use the `load_uniform_grid` function here.  Normally, the first argument would be a dictionary of field data, where the keys were the field names and the values the field data arrays.  Here, we're just going to look at derived fields, so we supply an empty one.\n",
+      "\n",
+      "The next few arguments are the number of dimensions, the bounds, and we then specify the geometry as spherical."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load_uniform_grid({}, [128, 128, 128],\n",
+      "                          bbox=np.array([[0.0, 1.0], [0.0, np.pi], [0.0, 2*np.pi]]),\n",
+      "                          geometry=\"spherical\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "## Looking at Data\n",
+      "\n",
+      "Now we can take slices.  The first thing we will try is making a slice of data along the \"phi\" axis, here $\\pi/2$, which will be along the y axis in the positive direction.  We use the `.slice` attribute, which creates a slice, and then we convert this into a plot window.  Note that here 2 is used to indicate the third axis (0-indexed) which for spherical data is $\\phi$.\n",
+      "\n",
+      "This is the manual way of creating a plot -- below, we'll use the standard, automatic ways.  Note that the coordinates run from $-r$ to $r$ along the $z$ axis and from $0$ to $r$ along the $R$ axis.  We use the capital $R$ to indicate that it's the $R$ along the $x-y$ plane."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = ds.slice(2, np.pi/2)\n",
+      "p = s.to_pw(\"funfield\", origin=\"native\")\n",
+      "p.set_zlim(\"all\", 0.0, 4.0)\n",
+      "p.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice along $r$.  For now, this creates a regular grid with *incorrect* units for phi and theta.  We are currently exploring two other options -- a simple aitoff projection, and fixing it to use the correct units as-is."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"r\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also slice at constant $\\theta$.  But, this is a weird thing!  We're slicing at a constant declination from the azimuth.  What this means is that when thought of in a Cartesian domain, this slice is actually a cone.  The axes have been labeled appropriately, to indicate that these are not exactly the $x$ and $y$ axes, but instead differ by a factor of $\\sin(\\theta))$."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", \"funfield\")\n",
+      "s.set_zlim(\"all\", 0.0, 4.0)\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We've seen lots of the `funfield` plots, but we can also look at the Cartesian axes.  This next plot plots the Cartesian $x$, $y$ and $z$ values on a $\\theta$ slice.  Because we're not supplying an argument to the `center` parameter, yt will place it at the center of the $\\theta$ axis, which will be at $\\pi/2$, where it will be aligned with the $x-y$ plane.  The slight change in `sphz` results from the cells themselves migrating, and plotting the center of those cells."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "s = yt.SlicePlot(ds, \"theta\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can do the same with the $\\phi$ axis."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "s = yt.SlicePlot(ds, \"phi\", [\"sphx\", \"sphy\", \"sphz\"])\n",
+      "s.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
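
Because the notebook above is stored as JSON, the same workflow is easier to read as a plain script; this sketch uses only calls that appear in the notebook, plus a plot of the ``"r"`` coordinate field:

    import numpy as np
    import yt

    # Empty field dict: we only inspect coordinate-derived fields here.
    ds = yt.load_uniform_grid({}, [128, 128, 128],
                              bbox=np.array([[0.0, 1.0],
                                             [0.0, np.pi],
                                             [0.0, 2 * np.pi]]),
                              geometry="spherical")

    # Slice along the phi axis (index 2 in yt's (r, theta, phi) ordering)
    # and convert the slice into a plot window.
    s = ds.slice(2, np.pi / 2)
    p = s.to_pw("r", origin="native")
    p.save()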

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/examining/index.rst
--- a/doc/source/examining/index.rst
+++ b/doc/source/examining/index.rst
@@ -9,4 +9,5 @@
    loading_data
    generic_array_data
    generic_particle_data
+   spherical_data
    low_level_inspection

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/examining/spherical_data.rst
--- /dev/null
+++ b/doc/source/examining/spherical_data.rst
@@ -0,0 +1,6 @@
+.. _loading-spherical-data:
+
+Loading Spherical Data
+======================
+
+.. notebook:: Loading_Spherical_Data.ipynb

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/index.rst
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -44,6 +44,16 @@
      <tr valign="top"><td width="25%"><p>
+           <a href="examining/index.html">Loading and Examining Data</a>
+         </p>
+       </td>
+       <td width="75%">
+         <p class="linkdescr">How to load and examine all dataset types in yt</p>
+       </td>
+     </tr>
+     <tr valign="top">
+       <td width="25%">
+         <p><a href="yt3differences.html">yt 3.0</a></p></td>
@@ -84,16 +94,6 @@
      <tr valign="top"><td width="25%"><p>
-           <a href="examining/index.html">Examining Data</a>
-         </p>
-       </td>
-       <td width="75%">
-         <p class="linkdescr">Load data and directly access raw values for low-level analysis</p>
-       </td>
-     </tr>
-     <tr valign="top">
-       <td width="25%">
-         <p><a href="developing/index.html">Developing in yt</a></p></td>

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -407,6 +407,8 @@
    ~yt.data_objects.profiles.Profile3D
    ~yt.data_objects.profiles.create_profile
 
+.. _halo_analysis_ref:
+
 Halo Analysis
 ^^^^^^^^^^^^^
 
@@ -419,21 +421,22 @@
    ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog
    ~yt.analysis_modules.halo_analysis.halo_finding_methods.HaloFindingMethod
    ~yt.analysis_modules.halo_analysis.halo_callbacks.HaloCallback
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute
    ~yt.analysis_modules.halo_analysis.halo_callbacks.halo_sphere
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.iterative_center_of_mass
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot
    ~yt.analysis_modules.halo_analysis.halo_callbacks.profile
    ~yt.analysis_modules.halo_analysis.halo_callbacks.save_profiles
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.load_profiles
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_bulk_velocity
+   ~yt.analysis_modules.halo_analysis.halo_callbacks.sphere_field_max_recenter
    ~yt.analysis_modules.halo_analysis.halo_callbacks.virial_quantities
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.phase_plot
-   ~yt.analysis_modules.halo_analysis.halo_callbacks.delete_attribute
    ~yt.analysis_modules.halo_analysis.halo_filters.HaloFilter
+   ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo
    ~yt.analysis_modules.halo_analysis.halo_filters.quantity_value
-   ~yt.analysis_modules.halo_analysis.halo_filters.not_subhalo
    ~yt.analysis_modules.halo_analysis.halo_quantities.HaloQuantity
+   ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
    ~yt.analysis_modules.halo_analysis.halo_quantities.center_of_mass
-   ~yt.analysis_modules.halo_analysis.halo_quantities.bulk_velocity
 
 Halo Finding
 ^^^^^^^^^^^^

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/reference/code_support.rst
--- a/doc/source/reference/code_support.rst
+++ b/doc/source/reference/code_support.rst
@@ -24,7 +24,7 @@
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Castro                |     Y      |     Y     |   Partial  |   Y   |    Y     |    Y     |     N      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
-| Chombo                |     Y      |     N     |      Y     |   Y   |    Y     |    Y     |     Y      | Partial  |
+| Chombo                |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 
 | Enzo                  |     Y      |     Y     |      Y     |   Y   |    Y     |    Y     |     Y      |   Full   |
 +-----------------------+------------+-----------+------------+-------+----------+----------+------------+----------+ 

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -152,7 +152,7 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. function:: annotate_halos(self, halo_catalog, circle_kwargs=None, width=None, \ 
-                             annotate_field=False, font_kwargs=None, factor=1.0):
+                             annotate_field=None, font_kwargs=None, factor=1.0):
 
    (This is a proxy for
    :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
@@ -166,12 +166,12 @@
    the circles can be changed with the circle_kwargs dictionary, which is 
    supplied to the Matplotlib patch Circle.  One can label each of the halos 
    with the annotate_field, which accepts a field contained in the halo catalog 
-   to add text to the plot near the halo (example: annotate_field = 
-   ``particle_mass`` will write the halo mass next to each halo, whereas 
-   ``particle_identifier`` shows the halo number).  font_kwargs contains the 
+   to add text to the plot near the halo (example: ``annotate_field=
+   'particle_mass'`` will write the halo mass next to each halo, whereas 
+   ``'particle_identifier'`` shows the halo number).  font_kwargs contains the 
    arguments controlling the text appearance of the annotated field.
    Factor is the number the virial radius is multiplied by for plotting the 
-   circles. Ex: factor = 2.0 will plot circles with twice the radius of each 
+   circles. Ex: ``factor=2.0`` will plot circles with twice the radius of each 
    halo virial radius.
 
 .. python-script::
@@ -186,7 +186,7 @@
    hc.create()
 
    prj = yt.ProjectionPlot(data_ds, 'z', 'density')
-   prj.annotate_halos(hc, annotate_field=particle_identifier)
+   prj.annotate_halos(hc, annotate_field='particle_identifier')
    prj.save()
 
 Overplot a Straight Line

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/visualizing/colormaps/index.rst
--- a/doc/source/visualizing/colormaps/index.rst
+++ b/doc/source/visualizing/colormaps/index.rst
@@ -7,18 +7,19 @@
 matplotlib colormaps as well for nearly all functions.  Individual visualization
 functions usually allow you to specify a colormap with the ``cmap`` flag.
 
-If you have installed `brewer2mpl`
-(`pip install brewer2mpl` or see `https://github.com/jiffyclub/brewer2mpl`_),
-you can also access the discrete colormaps available on
-`http://colorbrewer2.org`_. Instead of supplying the colormap name, specify
-a tuple of the form (name, type, number), for example `('RdBu', 'Diverging', 9)`.
-These discrete colormaps will not be interpolated, and can be useful for
-creating colorblind/printer/grayscale-friendly plots. For more information,
-visit `http://colorbrewer2.org`_.
+If you have installed brewer2mpl (``pip install brewer2mpl`` or see
+`https://github.com/jiffyclub/brewer2mpl
+<https://github.com/jiffyclub/brewer2mpl>`_), you can also access the discrete
+colormaps available on `http://colorbrewer2.org <http://colorbrewer2.org>`_.
+Instead of supplying the colormap name, specify a tuple of the form (name, type,
+number), for example ``('RdBu', 'Diverging', 9)``.  These discrete colormaps will
+not be interpolated, and can be useful for creating
+colorblind/printer/grayscale-friendly plots. For more information, visit
+`http://colorbrewer2.org <http://colorbrewer2.org>`_.
 
-Here is a chart of all of the yt and matplotlib colormaps available.  In addition to each 
-colormap displayed here, you can access its "reverse" by simply appending a 
-``"_r"`` to the end of the colormap name.
+Here is a chart of all of the yt and matplotlib colormaps available.  In
+addition to each colormap displayed here, you can access its "reverse" by simply
+appending a ``"_r"`` to the end of the colormap name.
 
 All Colormaps (including matplotlib)
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -239,13 +239,13 @@
 Types of Projections
 """"""""""""""""""""
 
-There are several different styles of projections that can be made either 
+There are several different methods of projections that can be made either 
 when creating a projection with ds.proj() or when making a ProjectionPlot.  
-In either construction method, set the ``style`` keyword to be one of the 
+In either construction method, set the ``method`` keyword to be one of the 
 following:
 
 ``integrate`` (unweighted)
-    This is the default projection style. It simply integrates the 
+    This is the default projection method. It simply integrates the 
     requested field  :math:`f(x)` along a line of sight  :math:`\hat{n}` , 
     given by the axis parameter (e.g. :math:`\hat{i},\hat{j},` or 
     :math:`\hat{k}`).  The units of the projected field  
@@ -258,7 +258,7 @@
     g(X) = {\int\ {f(x)\hat{n}\cdot{dx}}}
 
 ``integrate`` (weighted)
-    When using the ``integrate``  style, a ``weight_field`` argument may also 
+    When using the ``integrate``  method, a ``weight_field`` argument may also 
     be specified, which will produce a weighted projection.  :math:`w(x)` 
     is the field used as a weight. One common example would 
     be to weight the "temperature" field by the "density" field. In this case, 
@@ -269,15 +269,15 @@
     g(X) = \frac{\int\ {f(x)w(x)\hat{n}\cdot{dx}}}{\int\ {w(x)\hat{n}\cdot{dx}}}
 
 ``mip`` 
-    This style picks out the maximum value of a field along the line of 
+    This method picks out the maximum value of a field along the line of 
     sight given by the axis parameter.
 
 ``sum``
-    This style is the same as ``integrate``, except that it does not 
+    This method is the same as ``integrate``, except that it does not 
     multiply by a path length when performing the integration, and is just a 
     straight summation of the field along the given axis. The units of the 
     projected field will be the same as those of the unprojected field. This 
-    style is typically only useful for datasets such as 3D FITS cubes where 
+    method is typically only useful for datasets such as 3D FITS cubes where 
     the third axis of the dataset is something like velocity or frequency.
 
 .. _off-axis-projections:
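
As a concrete companion to the method descriptions above, here is a short sketch; the dataset path is a placeholder and ``method`` is the keyword this changeset introduces:

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder path

    # Unweighted line integral (the default):
    p1 = yt.ProjectionPlot(ds, "z", "density", method="integrate")

    # Density-weighted average temperature along the line of sight:
    p2 = yt.ProjectionPlot(ds, "z", "temperature",
                           weight_field="density", method="integrate")

    # Maximum value along the line of sight:
    p3 = yt.ProjectionPlot(ds, "z", "density", method="mip")

    # Straight sum with no path-length factor, e.g. for 3D FITS cubes:
    p4 = yt.ProjectionPlot(ds, "z", "density", method="sum")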

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a setup.py
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@
         options = Cython.Compiler.Main.CompilationOptions(
             defaults=Cython.Compiler.Main.default_options,
             include_path=extension.include_dirs,
-            language=extension.language, cplus=cplus,
+            cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
                                                      options=options)

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -18,7 +18,7 @@
 import os
 
 from yt.data_objects.profiles import \
-     Profile1D
+     create_profile
 from yt.units.yt_array import \
      YTArray, YTQuantity
 from yt.utilities.exceptions import \
@@ -147,11 +147,11 @@
 
 add_callback("sphere_bulk_velocity", sphere_bulk_velocity)
 
-def profile(halo, x_field, y_fields, x_bins=32, x_range=None, x_log=True,
-            weight_field="cell_mass", accumulation=False, storage="profiles",
-            output_dir="."):
+def profile(halo, bin_fields, profile_fields, n_bins=32, extrema=None, logs=None,
+            weight_field="cell_mass", accumulation=False, fractional=False,
+            storage="profiles", output_dir="."):
     r"""
-    Create 1d profiles.
+    Create 1, 2, or 3D profiles of a halo.
 
     Store profile data in a dictionary associated with the halo object.
 
@@ -159,26 +159,37 @@
     ----------
     halo : Halo object
         The Halo object to be provided by the HaloCatalog.
-    x_field : string
-        The binning field for the profile.
-    y_fields : string or list of strings
+    bin_fields : list of strings
+        The binning fields for the profile.
+    profile_fields : string or list of strings
         The fields to be profiled.
-    x_bins : int
-        The number of bins in the profile.
-        Default: 32
-    x_range : (float, float)
-        The range of the x_field.  If None, the extrema are used.
-        Default: None
-    x_log : bool
-        Flag for logarithmmic binning.
-        Default: True
+    n_bins : int or list of ints
+        The number of bins in each dimension.  If None, 32 bins
+        are used for each bin field.
+        Default: 32.
+    extrema : dict of min, max tuples
+        Minimum and maximum values of the bin_fields for the profiles.
+        The keys correspond to the field names. Defaults to the extrema
+        of the bin_fields of the dataset. If a units dict is provided, extrema
+        are understood to be in the units specified in the dictionary.
+    logs : dict of boolean values
+        Whether or not to log the bin_fields for the profiles.
+        The keys correspond to the field names. Defaults to the take_log
+        attribute of the field.
     weight_field : string
         Weight field for profiling.
         Default : "cell_mass"
-    accumulation : bool
-        If True, profile data is a cumulative sum.
-        Default : False
+    accumulation : bool or list of bools
+        If True, the profile values for a bin n are the cumulative sum of
+        all the values from bin 0 to n.  If -True, the sum is reversed so
+        that the value for bin n is the cumulative sum from bin N (total bins)
+        to n.  If the profile is 2D or 3D, a list of values can be given to
+        control the summation in each dimension independently.
+        Default: False.
+    fractional : bool
+        If True, the profile values are divided by the sum of all the
+        profile data such that the profile represents a probability
+        distribution function.
+        Default: False.
     storage : string
         Name of the dictionary to store profiles.
         Default: "profiles"
@@ -209,37 +220,18 @@
         output_dir = storage
     output_dir = os.path.join(halo.halo_catalog.output_dir, output_dir)
     
-    if x_range is None:
-        x_range = list(halo.data_object.quantities.extrema(x_field, non_zero=True))
-
-    my_profile = Profile1D(halo.data_object, x_field, x_bins, 
-                           x_range[0], x_range[1], x_log, 
-                           weight_field=weight_field)
-    my_profile.add_fields(ensure_list(y_fields))
-
-    # temporary fix since profiles do not have units at the moment
-    for field in my_profile.field_data:
-        my_profile.field_data[field] = dds.arr(my_profile[field],
-                                               dds.field_info[field].units)
-
-    # accumulate, if necessary
-    if accumulation:
-        used = my_profile.used
-        for field in my_profile.field_data:
-            if weight_field is None:
-                my_profile.field_data[field][used] = \
-                    np.cumsum(my_profile.field_data[field][used])
-            else:
-                my_weight = my_profile.weight
-                my_profile.field_data[field][used] = \
-                  np.cumsum(my_profile.field_data[field][used] * my_weight[used]) / \
-                  np.cumsum(my_weight[used])
+    bin_fields = ensure_list(bin_fields)
+    my_profile = create_profile(halo.data_object, bin_fields, profile_fields, n_bins=n_bins,
+                                extrema=extrema, logs=logs, weight_field=weight_field,
+                                accumulation=accumulation, fractional=fractional)
                   
-    # create storage dictionary
     prof_store = dict([(field, my_profile[field]) \
                        for field in my_profile.field_data])
     prof_store[my_profile.x_field] = my_profile.x
-
+    if len(bin_fields) > 1:
+        prof_store[my_profile.y_field] = my_profile.y
+    if len(bin_fields) > 2:
+        prof_store[my_profile.z_field] = my_profile.z
     if hasattr(halo, storage):
         halo_store = getattr(halo, storage)
         if "used" in halo_store:
@@ -249,6 +241,17 @@
         setattr(halo, storage, halo_store)
     halo_store.update(prof_store)
 
+    if hasattr(my_profile, "variance"):
+        variance_store = dict([(field, my_profile.variance[field]) \
+                           for field in my_profile.variance])
+        variance_storage = "%s_variance" % storage
+        if hasattr(halo, variance_storage):
+            halo_variance_store = getattr(halo, variance_storage)
+        else:
+            halo_variance_store = {}
+            setattr(halo, variance_storage, halo_variance_store)
+        halo_variance_store.update(variance_store)
+
 add_callback("profile", profile)
 
 @parallel_root_only
@@ -287,18 +290,24 @@
     mylog.info("Saving halo %d profile data to %s." %
                (halo.quantities["particle_identifier"], output_file))
 
-    out_file = h5py.File(output_file, "w")
+    fh = h5py.File(output_file, "w")
     my_profile = getattr(halo, storage)
+    profile_group = fh.create_group("profiles")
     for field in my_profile:
         # Don't write code units because we might not know those later.
         if isinstance(my_profile[field], YTArray):
             my_profile[field].convert_to_cgs()
-        dataset = out_file.create_dataset(str(field), data=my_profile[field])
-        units = ""
-        if isinstance(my_profile[field], YTArray):
-            units = str(my_profile[field].units)
-        dataset.attrs["units"] = units
-    out_file.close()
+        _yt_array_hdf5(profile_group, str(field), my_profile[field])
+    variance_storage = "%s_variance" % storage
+    if hasattr(halo, variance_storage):
+        my_profile = getattr(halo, variance_storage)
+        variance_group = fh.create_group("variance")
+        for field in my_profile:
+            # Don't write code units because we might not know those later.
+            if isinstance(my_profile[field], YTArray):
+                my_profile[field].convert_to_cgs()
+            _yt_array_hdf5(variance_group, str(field), my_profile[field])
+    fh.close()
 
 add_callback("save_profiles", save_profiles)
 
@@ -339,20 +348,33 @@
     mylog.info("Loading halo %d profile data from %s." %
                (halo.quantities["particle_identifier"], output_file))
 
-    out_file = h5py.File(output_file, "r")
+    fh = h5py.File(output_file, "r")
+    if fields is None:
+        profile_fields = fh["profiles"].keys()
+    else:
+        profile_fields = fields
     my_profile = {}
-    if fields is None:
-        fields = out_file.keys()
-    for field in fields:
-        if field not in out_file:
+    my_group = fh["profiles"]
+    for field in profile_fields:
+        if field not in my_group:
             raise RuntimeError("%s field not present in %s." % (field, output_file))
-        units = ""
-        if "units" in out_file[field].attrs:
-            units = out_file[field].attrs["units"]
-        if units == "dimensionless": units = ""
-        my_profile[field] = halo.halo_catalog.halos_ds.arr(out_file[field].value, units)
-    out_file.close()
+        my_profile[field] = _hdf5_yt_array(my_group, field,
+                                           ds=halo.halo_catalog.halos_ds)
     setattr(halo, storage, my_profile)
+    
+    if "variance" in fh:
+        my_variance = {}
+        my_group = fh["variance"]
+        if fields is None:
+            profile_fields = my_group.keys()
+        for field in profile_fields:
+            if field not in my_group:
+                raise RuntimeError("%s field not present in %s." % (field, output_file))
+            my_variance[field] = _hdf5_yt_array(my_group, field,
+                                                ds=halo.halo_catalog.halos_ds)
+        setattr(halo, "%s_variance" % storage, my_variance)
+        
+    fh.close()
 
 add_callback("load_profiles", load_profiles)
 
@@ -382,6 +404,8 @@
                halo.quantities["particle_identifier"])
 
     fields = ensure_list(fields)
+    fields = [halo.halo_catalog.data_source._determine_fields(field)[0]
+              for field in fields]
     
     dds = halo.halo_catalog.data_ds
     profile_data = getattr(halo, profile_storage)
@@ -497,3 +521,74 @@
         delattr(halo, attribute)
 
 add_callback("delete_attribute", delete_attribute)
+
+def iterative_center_of_mass(halo, radius_field="virial_radius", inner_ratio=0.1, step_ratio=0.9,
+                             units="pc"):
+    r"""
+    Adjust halo position by iteratively recalculating the center of mass while 
+    decreasing the radius.
+
+    Parameters
+    ----------
+    halo : Halo object
+        The Halo object to be provided by the HaloCatalog.
+    radius_field : string
+        The halo quantity to be used as the radius for the sphere.
+        Default: "virial_radius"
+    inner_ratio : float
+        The ratio of the smallest sphere radius used for calculating the center of 
+        mass to the initial radius.  The sphere radius is reduced and center of mass 
+        recalculated until the sphere has reached this size.
+        Default: 0.1
+    step_ratio : float
+        The multiplicative factor used to reduce the radius of the sphere after the 
+        center of mass is calculated.
+        Default: 0.9
+    units : str
+        The units for printing out the distance between the initial and final centers.
+        Default : "pc"
+        
+    """
+    if inner_ratio <= 0.0 or inner_ratio >= 1.0:
+        raise RuntimeError("iterative_center_of_mass: inner_ratio must be between 0 and 1.")
+    if step_ratio <= 0.0 or step_ratio >= 1.0:
+        raise RuntimeError("iterative_center_of_mass: step_ratio must be between 0 and 1.")
+
+    center_orig = halo.halo_catalog.data_ds.arr([halo.quantities["particle_position_%s" % axis]
+                                                 for axis in "xyz"])
+    sphere = halo.halo_catalog.data_ds.sphere(center_orig, halo.quantities[radius_field])
+
+    while sphere.radius > inner_ratio * halo.quantities[radius_field]:
+        new_center = sphere.quantities.center_of_mass(use_gas=True, use_particles=True)
+        sphere = sphere.ds.sphere(new_center, step_ratio * sphere.radius)
+
+    distance = periodic_distance(center_orig.in_units("code_length").to_ndarray(),
+                                 new_center.in_units("code_length").to_ndarray())
+    distance = halo.halo_catalog.data_ds.quan(distance, "code_length")
+    mylog.info("Recentering halo %d %f %s away." %
+               (halo.quantities["particle_identifier"],
+                distance.in_units(units), units))
+
+    for i, axis in enumerate("xyz"):
+        halo.quantities["particle_position_%s" % axis] = sphere.center[i]
+    del sphere
+    
+add_callback("iterative_center_of_mass", iterative_center_of_mass)
+
+def _yt_array_hdf5(fh, fieldname, data):
+    dataset = fh.create_dataset(fieldname, data=data)
+    units = ""
+    if isinstance(data, YTArray):
+        units = str(data.units)
+    dataset.attrs["units"] = units
+
+def _hdf5_yt_array(fh, fieldname, ds=None):
+    if ds is None:
+        new_arr = YTArray
+    else:
+        new_arr = ds.arr
+    units = ""
+    if "units" in fh[fieldname].attrs:
+        units = fh[fieldname].attrs["units"]
+    if units == "dimensionless": units = ""
+    return new_arr(fh[fieldname].value, units)
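
The ``_yt_array_hdf5``/``_hdf5_yt_array`` helpers added above implement a small units round trip; the same pattern in isolation, with a hypothetical file name:

    import h5py
    import numpy as np
    from yt.units.yt_array import YTArray

    data = YTArray(np.arange(5, dtype="float64"), "g/cm**3")

    # Write: store the unit string as a dataset attribute.
    with h5py.File("profiles_demo.h5", "w") as fh:
        dset = fh.create_dataset("density", data=data)
        dset.attrs["units"] = str(data.units)

    # Read: rebuild a YTArray from the raw data plus the stored units.
    with h5py.File("profiles_demo.h5", "r") as fh:
        units = fh["density"].attrs["units"]
        if units == "dimensionless":
            units = ""
        restored = YTArray(fh["density"][:], units)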

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -364,7 +364,6 @@
         if self.halos_ds is None:
             # Find the halos and make a dataset of them
             self.halos_ds = self.finder_method(self.data_ds)
-            self.halos_ds.index
             if self.halos_ds is None:
                 mylog.warning('No halos were found for {0}'.format(\
                         self.data_ds.basename))
@@ -373,6 +372,7 @@
                     self.save_catalog()
                     self.halos_ds = None
                 return
+            self.halos_ds.index
 
             # Assign ds and data sources appropriately
             self.data_source = self.halos_ds.all_data()

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -186,11 +186,14 @@
     data_source : `yt.data_objects.api.AMRData`, optional
         If specified, this will be the data source used for selecting
         regions to project.
-    style : string, optional
-        The style of projection to be performed.
+    method : string, optional
+        The method of projection to be performed.
         "integrate" : integration along the axis
         "mip" : maximum intensity projection
         "sum" : same as "integrate", except that we don't multiply by the path length
+    style : string, optional
+        The same as the method keyword.  Deprecated as of version 3.0.2.  
+        Please use method keyword instead.
     field_parameters : dict of items
         Values to be passed as field parameters that can be
         accessed by generated fields.
@@ -208,20 +211,26 @@
     _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, ds = None, data_source = None,
-                 style = "integrate", field_parameters = None):
+                 style = None, method = "integrate", 
+                 field_parameters = None):
         YTSelectionContainer2D.__init__(self, axis, ds, field_parameters)
-        if style == "sum":
-            self.proj_style = "integrate"
+        # Style is deprecated, but if it is set, then it trumps method
+        # keyword.  TODO: Remove this keyword and this check at some point in 
+        # the future.
+        if style is not None:
+            method = style
+        if method == "sum":
+            self.method = "integrate"
             self._sum_only = True
         else:
-            self.proj_style = style
+            self.method = method
             self._sum_only = False
-        if style == "mip":
+        if self.method == "mip":
             self.func = np.max
-        elif style == "integrate" or style == "sum":
+        elif self.method == "integrate":
             self.func = np.sum # for the future
         else:
-            raise NotImplementedError(style)
+            raise NotImplementedError(self.method)
         self._set_center(center)
         if data_source is None: data_source = self.ds.all_data()
         for k, v in data_source.field_parameters.items():
@@ -260,7 +269,7 @@
                   self.ds.domain_left_edge[xax],
                   self.ds.domain_right_edge[yax])
         return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
-                        bounds, style = self.proj_style)
+                        bounds, method = self.method)
 
     def get_data(self, fields = None):
         fields = fields or []
@@ -282,10 +291,10 @@
                     get_memory_usage()/1024.)
                 self._handle_chunk(chunk, fields, tree)
         # Note that this will briefly double RAM usage
-        if self.proj_style == "mip":
+        if self.method == "mip":
             merge_style = -1
             op = "max"
-        elif self.proj_style == "integrate":
+        elif self.method == "integrate":
             merge_style = 1
             op = "sum"
         else:
@@ -324,7 +333,10 @@
             finfo = self.ds._get_field_info(*field)
             mylog.debug("Setting field %s", field)
             units = finfo.units
-            if self.weight_field is None and not self._sum_only:
+            # add length units to "projected units" if non-weighted 
+            # integral projection
+            if self.weight_field is None and not self._sum_only and \
+               self.method == 'integrate':
                 # See _handle_chunk where we mandate cm
                 if units == '':
                     input_units = "cm"
@@ -336,7 +348,9 @@
             self[field] = YTArray(field_data[fi].ravel(),
                                   input_units=input_units,
                                   registry=self.ds.unit_registry)
-            if self.weight_field is None and not self._sum_only:
+            # convert units if non-weighted integral projection
+            if self.weight_field is None and not self._sum_only and \
+               self.method == 'integrate':
                 u_obj = Unit(units, registry=self.ds.unit_registry)
                 if ((u_obj.is_code_unit or self.ds.no_cgs_equiv_length) and
                     not u_obj.is_dimensionless) and input_units != units:
@@ -355,7 +369,7 @@
         tree.initialize_chunk(i1, i2, ilevel)
 
     def _handle_chunk(self, chunk, fields, tree):
-        if self.proj_style == "mip" or self._sum_only:
+        if self.method == "mip" or self._sum_only:
             dl = 1.0
         else:
             # This gets explicitly converted to cm
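
The shim above keeps the deprecated ``style`` keyword working by mapping it onto ``method``; a quick sketch of both spellings (placeholder dataset path):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder path

    proj_new = ds.proj("density", "z", method="sum")  # preferred as of 3.0.2
    proj_old = ds.proj("density", "z", style="sum")   # deprecated, still honored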

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -496,7 +496,8 @@
 
                 # really ugly check to ensure that this field really does exist somewhere,
                 # in some naming convention, before returning it as a possible field type
-                if (ftype,fname) not in self.ds.field_list and \
+                if (ftype,fname) not in self.ds.field_info and \
+                        (ftype,fname) not in self.ds.field_list and \
                         fname not in self.ds.field_list and \
                         (ftype,fname) not in self.ds.derived_field_list and \
                         fname not in self.ds.derived_field_list and \
@@ -798,13 +799,16 @@
     def _get_pw(self, fields, center, width, origin, plot_type):
         from yt.visualization.plot_window import \
             get_window_parameters, PWViewerMPL
-        from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb
+        from yt.visualization.fixed_resolution import \
+            FixedResolutionBuffer as frb
         axis = self.axis
         skip = self._key_fields
         skip += list(set(frb._exclude_fields).difference(set(self._key_fields)))
-        self.fields = ensure_list(fields) + \
-            [k for k in self.field_data if k not in skip]
-        (bounds, center) = get_window_parameters(axis, center, width, self.ds)
+        self.fields = [k for k in self.field_data if k not in skip]
+        if fields is not None:
+            self.fields = ensure_list(fields) + self.fields
+        (bounds, center, display_center) = \
+            get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,
                          frb_generator=frb, plot_type=plot_type)
         pw._setup_plots()
@@ -1188,16 +1192,15 @@
         return self._particle_handler
 
 
-    def volume(self, unit = "unitary"):
+    def volume(self):
         """
-        Return the volume of the data container in units *unit*.
+        Return the volume of the data container.
         This is found by adding up the volume of the cells with centers
         in the container, rather than using the geometric shape of
         the container, so this may vary very slightly
         from what might be expected from the geometric volume.
         """
-        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
-            (self.ds[unit] / self.ds['cm']) ** 3.0
+        return self.quantities.total_quantity(("index", "cell_volume"))
 
 # Many of these items are set up specifically to ensure that
 # we are not breaking old pickle files.  This means we must only call the
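
Note that ``volume()`` above now returns a ``YTQuantity`` carrying its own units instead of a float in a caller-chosen unit; a minimal sketch (placeholder dataset path):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # placeholder path
    sp = ds.sphere("c", (10.0, "kpc"))

    vol = sp.volume()              # a YTQuantity; no unit argument needed
    print(vol.in_units("kpc**3"))  # convert explicitly instead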

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -177,7 +177,7 @@
                 self.ds.domain_left_edge,
                 self.ds.domain_right_edge,
                 over_refine = self._oref)
-            particle_octree.n_ref = nneighbors / 2
+            particle_octree.n_ref = nneighbors
             particle_octree.add(morton)
             particle_octree.finalize()
             pdom_ind = particle_octree.domain_ind(self.selector)

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -955,7 +955,7 @@
     x_min : float
         The minimum value of the x profile field.
     x_max : float
-        The maximum value of hte x profile field.
+        The maximum value of the x profile field.
     x_log : boolean
         Controls whether or not the bins for the x field are evenly
         spaced in linear (False) or log (True) space.

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -45,17 +45,12 @@
     YTArray, \
     YTQuantity
 
-from yt.geometry.cartesian_coordinates import \
-    CartesianCoordinateHandler
-from yt.geometry.polar_coordinates import \
-    PolarCoordinateHandler
-from yt.geometry.cylindrical_coordinates import \
-    CylindricalCoordinateHandler
-from yt.geometry.spherical_coordinates import \
-    SphericalCoordinateHandler
-from yt.geometry.geographic_coordinates import \
-    GeographicCoordinateHandler
-from yt.geometry.spec_cube_coordinates import \
+from yt.geometry.coordinates.api import \
+    CartesianCoordinateHandler, \
+    PolarCoordinateHandler, \
+    CylindricalCoordinateHandler, \
+    SphericalCoordinateHandler, \
+    GeographicCoordinateHandler, \
     SpectralCubeCoordinateHandler
 
 # We want to support the movie format in the future.

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -54,12 +54,13 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
-                pw = proj.to_pw(fields='density')
-                for p in pw.plots.values():
-                    tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
-                    os.close(tmpfd)
-                    p.save(name=tmpname)
-                    fns.append(tmpname)
+                plots = [proj.to_pw(fields='density'), proj.to_pw()]
+                for pw in plots:
+                    for p in pw.plots.values():
+                        tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
+                        os.close(tmpfd)
+                        p.save(name=tmpname)
+                        fns.append(tmpname)
                 frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density']:
                     fi = ds._get_field_info(proj_field)

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/fields/derived_field.py
--- a/yt/fields/derived_field.py
+++ b/yt/fields/derived_field.py
@@ -195,7 +195,7 @@
 
     def get_label(self, projected=False):
         """
-        Return a data label for the given field, inluding units.
+        Return a data label for the given field, including units.
         """
         name = self.name[1]
         if self.display_name is not None:

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -314,6 +314,7 @@
                 if type(e) != YTFieldNotFound:
                     mylog.debug("Raises %s during field %s detection.",
                                 str(type(e)), field)
+                self.pop(field)
                 continue
             # This next bit checks that we can't somehow generate everything.
             # We also manually update the 'requested' attribute

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -43,18 +43,14 @@
                            ftype = "gas", slice_info = None,
                            validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _magnitude(field, data):
-        mag  = data[xn] * data[xn]
-        mag += data[yn] * data[yn]
-        mag += data[zn] * data[zn]
+        fn = field_components[0]
+        mag = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            mag += data[fn] * data[fn]
         return np.sqrt(mag)
 
     registry.add_field((ftype, "%s_magnitude" % basename),
@@ -65,18 +61,14 @@
                          ftype = "gas", slice_info = None,
                          validators = None, particle_type=False):
 
-    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
-
-    # Is this safe?
-    if registry.ds.dimensionality < 3:
-        zn = ("index", "zeros")
-    if registry.ds.dimensionality < 2:
-        yn = ("index", "zeros")
+    field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
 
     def _squared(field, data):
-        squared  = data[xn] * data[xn]
-        squared += data[yn] * data[yn]
-        squared += data[zn] * data[zn]
+        fn = field_components[0]
+        squared  = data[fn] * data[fn]
+        for idim in range(1, registry.ds.dimensionality):
+            fn = field_components[idim]
+            squared += data[fn] * data[fn]
         return squared
 
     registry.add_field((ftype, "%s_squared" % basename),
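
The rewrite above accumulates component squares only up to the dataset's dimensionality instead of padding missing axes with zero fields; the same pattern in plain NumPy, with made-up component arrays:

    import numpy as np

    # Hypothetical vector components on a grid; for a 2D dataset only the
    # first two carry meaningful data.
    components = [np.random.rand(16, 16) for _ in range(3)]
    dimensionality = 2

    mag_sq = components[0] * components[0]
    for idim in range(1, dimensionality):
        mag_sq += components[idim] * components[idim]
    magnitude = np.sqrt(mag_sq)  # components beyond ndim are never touched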

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -638,6 +638,29 @@
         self._read_particles()
         # self.io = IOHandlerOrion
 
+    def _detect_output_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = [("boxlib", f) for f in
+                           self.dataset._field_list]
+        self.field_indexes = dict((f[1], i)
+                                  for i, f in enumerate(self.field_list))
+        # There are times when field_list may change.  We copy it here to
+        # avoid that possibility.
+        self.field_order = [f for f in self.field_list]
+
+        # look for particle fields
+        self.particle_filename = None
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.ds.output_dir, particle_filename)
+            if os.path.exists(fn):
+                self.particle_filename = fn
+
+        if self.particle_filename is None:
+            return
+
+        pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
+        self.field_list.extend(pfield_list)
+
     def _read_particles(self):
         """
         reads in particles and assigns them to grids. Will search for
@@ -649,9 +672,8 @@
         """
         self.grid_particle_count = np.zeros(len(self.grids))
 
-        for particle_filename in ["StarParticles", "SinkParticles"]:
-            fn = os.path.join(self.ds.output_dir, particle_filename)
-            if os.path.exists(fn): self._read_particle_file(fn)
+        if self.particle_filename is not None:
+            self._read_particle_file(self.particle_filename)
 
     def _read_particle_file(self, fn):
         """actually reads the orion particle data file itself.
@@ -670,8 +692,8 @@
                 # copied from object_finding_mixin.py
                 mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                    np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
                 ind = np.where(mask == 1)
                 selected_grids = self.grids[ind]
                 # in orion, particles always live on the finest level.
@@ -915,7 +937,7 @@
         if v in ("F", "T"):
             pcast = bool
     else:
-        syms = (".", "D+", "D-", "E+", "E-")
+        syms = (".", "D+", "D-", "E+", "E-", "E", "D")
         if any(sym in v.upper() for sym in syms for v in vals.split()):
             pcast = float
         else:

diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -21,6 +21,7 @@
 from yt.utilities.io_handler import \
            BaseIOHandler
 from yt.funcs import mylog, defaultdict
+from yt.frontends.chombo.io import parse_orion_sinks
 
 class IOHandlerBoxlib(BaseIOHandler):
 
@@ -82,69 +83,59 @@
 class IOHandlerOrion(IOHandlerBoxlib):
     _dataset_type = "orion_native"
 
+    _particle_filename = None
+    @property
+    def particle_filename(self):
+        fn = self.ds.output_dir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = self.ds.output_dir + "/SinkParticles"
+        self._particle_filename = fn
+        return self._particle_filename
+
+    _particle_field_index = None
+    @property
+    def particle_field_index(self):
+
+        index = parse_orion_sinks(self.particle_filename)
+
+        self._particle_field_index = index
+        return self._particle_field_index
+
+    def _read_particle_selection(self, chunks, selector, fields):
+        rv = {}
+        chunks = list(chunks)
+
+        if selector.__class__.__name__ == "GridSelector":
+
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+
+            grid = chunks[0].objs[0]
+
+            for ftype, fname in fields:
+                rv[ftype, fname] = self._read_particles(grid, fname)
+
+            return rv
+
+        rv = {f: np.array([]) for f in fields}
+        for chunk in chunks:
+            for grid in chunk.objs:
+                for ftype, fname in fields:
+                    data = self._read_particles(grid, fname)
+                    rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
+        return rv
+
     def _read_particles(self, grid, field): 
         """
         parses the Orion Star Particle text files
-        
+
         """
 
-        fn = grid.ds.fullplotdir + "/StarParticles"
-        if not os.path.exists(fn):
-            fn = grid.ds.fullplotdir + "/SinkParticles"
-
-        # Figure out the format of the particle file
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-        line = lines[1]
-        
-        # The basic fields that all sink particles have
-        index = {'particle_mass': 0,
-                 'particle_position_x': 1,
-                 'particle_position_y': 2,
-                 'particle_position_z': 3,
-                 'particle_momentum_x': 4,
-                 'particle_momentum_y': 5,
-                 'particle_momentum_z': 6,
-                 'particle_angmomen_x': 7,
-                 'particle_angmomen_y': 8,
-                 'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
-
-        if len(line.strip().split()) == 11:
-            # these are vanilla sinks, do nothing
-            pass  
-
-        elif len(line.strip().split()) == 17:
-            # these are old-style stars, add stellar model parameters
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15
-
-        elif len(line.strip().split()) == 18:
-            # these are the newer style, add luminosity as well
-            index['particle_mlast']     = 10
-            index['particle_r']         = 11
-            index['particle_mdeut']     = 12
-            index['particle_n']         = 13
-            index['particle_mdot']      = 14,
-            index['particle_burnstate'] = 15,
-            index['particle_luminosity']= 16
-
-        else:
-            # give a warning if none of the above apply:
-            mylog.warning('Warning - could not figure out particle output file')
-            mylog.warning('These results could be nonsense!')
+        fn = self.particle_filename
 
         def read(line, field):
-            return float(line.strip().split(' ')[index[field]])
+            entry = line.strip().split(' ')[self.particle_field_index[field]]
+            return np.float(entry)
 
         with open(fn, 'r') as f:
             lines = f.readlines()
@@ -154,11 +145,12 @@
                     coord = read(line, "particle_position_x"), \
                             read(line, "particle_position_y"), \
                             read(line, "particle_position_z") 
-                    if ( (grid.LeftEdge < coord).all() and 
+                    if ( (grid.LeftEdge <= coord).all() and 
                          (coord <= grid.RightEdge).all() ):
                         particles.append(read(line, field))
         return np.array(particles)
 
+
 class IOHandlerCastro(IOHandlerBoxlib):
     _dataset_type = "castro_native"
 

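As a summary of the io.py refactor above: the particle file path and the
field-to-column map are now computed once (via parse_orion_sinks) and shared
by every read. A rough, hypothetical sketch of the resulting read path:

    import numpy as np

    def read_sink_field(fn, field, field_index):
        # field_index maps field names to columns, e.g.
        # {"particle_mass": 0, "particle_position_x": 1, ...};
        # the first line of the file holds the particle count.
        vals = []
        with open(fn, 'r') as f:
            for line in f.readlines()[1:]:
                vals.append(float(line.strip().split(' ')[field_index[field]]))
        return np.array(vals)
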
diff -r 7e77f0ff4731d6a5f04c9436fbd3503b6e3fad2f -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -14,14 +14,24 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboDataset, \
-      Orion2Hierarchy, \
-      Orion2Dataset
+    ChomboGrid, \
+    ChomboHierarchy, \
+    ChomboDataset, \
+    Orion2Hierarchy, \
+    Orion2Dataset, \
+    ChomboPICHierarchy, \
+    ChomboPICDataset, \
+    PlutoHierarchy, \
+    PlutoDataset
 
 from .fields import \
-      ChomboFieldInfo
+    ChomboFieldInfo, \
+    Orion2FieldInfo, \
+    ChomboPICFieldInfo1D, \
+    ChomboPICFieldInfo2D, \
+    ChomboPICFieldInfo3D, \
+    PlutoFieldInfo
 
 from .io import \
-      IOHandlerChomboHDF5
+    IOHandlerChomboHDF5,\
+    IOHandlerPlutoHDF5

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a462b61eb3e5/
Changeset:   a462b61eb3e5
Branch:      yt
User:        mzingale
Date:        2014-10-21 23:10:44+00:00
Summary:     merge
Affected #:  46 files

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -987,6 +987,7 @@
 if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}
     fi

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -52,7 +52,7 @@
    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.create()
-   ad = hc.all_data()
+   ad = hc.halos_ds.all_data()
    masses = ad['particle_mass'][:]
 
 

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/free_free_field.py
--- a/doc/source/cookbook/free_free_field.py
+++ b/doc/source/cookbook/free_free_field.py
@@ -6,6 +6,7 @@
 # Need to grab the proton mass from the constants database
 from yt.utilities.physical_constants import mp
 
+exit()
 # Define the emission field
 
 keVtoerg = 1.602e-9  # Convert energy in keV to energy in erg

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/gadget_notebook.rst
--- /dev/null
+++ b/doc/source/cookbook/gadget_notebook.rst
@@ -0,0 +1,7 @@
+.. _gadget-notebook:
+
+Using yt to view and analyze Gadget outputs
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. notebook:: yt_gadget_analysis.ipynb
+

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/hse_field.py
--- a/doc/source/cookbook/hse_field.py
+++ b/doc/source/cookbook/hse_field.py
@@ -1,163 +1,35 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import numpy as np
 import yt
 
+from yt.fields.field_plugin_registry import \
+    register_field_plugin
+from yt.fields.fluid_fields import \
+    setup_gradient_fields
+
+
 # Define the components of the gravitational acceleration vector field by
 # taking the gradient of the gravitational potential
-
-@yt.derived_field(name='gravitational_acceleration_x',
-                  units='cm/s**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
-def gravitational_acceleration_x(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dx = div_fac * data['dx'][0]
-
-    gx = data["gravitational_potential"][sl_right, 1:-1, 1:-1]/dx
-    gx -= data["gravitational_potential"][sl_left, 1:-1, 1:-1]/dx
-
-    new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gx.uq
-    new_field[1:-1, 1:-1, 1:-1] = -gx
-
-    return new_field
-
-
-@yt.derived_field(name='gravitational_acceleration_y',
-                  units='cm/s**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
-def gravitational_acceleration_y(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dy = div_fac * data['dy'].flatten()[0]
-
-    gy = data["gravitational_potential"][1:-1, sl_right, 1:-1]/dy
-    gy -= data["gravitational_potential"][1:-1, sl_left, 1:-1]/dy
-
-    new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gy.uq
-
-    new_field[1:-1, 1:-1, 1:-1] = -gy
-
-    return new_field
-
-
-@yt.derived_field(name='gravitational_acceleration_z',
-                  units='cm/s**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["gravitational_potential"])])
-def gravitational_acceleration_z(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dz = div_fac * data['dz'].flatten()[0]
-
-    gz = data["gravitational_potential"][1:-1, 1:-1, sl_right]/dz
-    gz -= data["gravitational_potential"][1:-1, 1:-1, sl_left]/dz
-
-    new_field = np.zeros(data["gravitational_potential"].shape,
-                         dtype='float64')*gz.uq
-    new_field[1:-1, 1:-1, 1:-1] = -gz
-
-    return new_field
-
-
-# Define the components of the pressure gradient field
-
-
-@yt.derived_field(name='grad_pressure_x', units='g/(cm*s)**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["pressure"])])
-def grad_pressure_x(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dx = div_fac * data['dx'].flatten()[0]
-
-    px = data["pressure"][sl_right, 1:-1, 1:-1]/dx
-    px -= data["pressure"][sl_left, 1:-1, 1:-1]/dx
-
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*px.uq
-    new_field[1:-1, 1:-1, 1:-1] = px
-
-    return new_field
-
-
-@yt.derived_field(name='grad_pressure_y', units='g/(cm*s)**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["pressure"])])
-def grad_pressure_y(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dy = div_fac * data['dy'].flatten()[0]
-
-    py = data["pressure"][1:-1, sl_right, 1:-1]/dy
-    py -= data["pressure"][1:-1, sl_left, 1:-1]/dy
-
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*py.uq
-    new_field[1:-1, 1:-1, 1:-1] = py
-
-    return new_field
-
-
-@yt.derived_field(name='grad_pressure_z', units='g/(cm*s)**2', take_log=False,
-                  validators=[yt.ValidateSpatial(1,["pressure"])])
-def grad_pressure_z(field, data):
-
-    # We need to set up stencils
-
-    sl_left = slice(None, -2, None)
-    sl_right = slice(2, None, None)
-    div_fac = 2.0
-
-    dz = div_fac * data['dz'].flatten()[0]
-
-    pz = data["pressure"][1:-1, 1:-1, sl_right]/dz
-    pz -= data["pressure"][1:-1, 1:-1, sl_left]/dz
-
-    new_field = np.zeros(data["pressure"].shape, dtype='float64')*pz.uq
-    new_field[1:-1, 1:-1, 1:-1] = pz
-
-    return new_field
-
+@register_field_plugin
+def setup_my_fields(registry, ftype="gas", slice_info=None):
+    setup_gradient_fields(registry, (ftype, "gravitational_potential"),
+                          "cm ** 2 / s ** 2", slice_info)
 
 # Define the "degree of hydrostatic equilibrium" field
 
+
 @yt.derived_field(name='HSE', units=None, take_log=False,
                   display_name='Hydrostatic Equilibrium')
 def HSE(field, data):
 
-    gx = data["density"]*data["gravitational_acceleration_x"]
-    gy = data["density"]*data["gravitational_acceleration_y"]
-    gz = data["density"]*data["gravitational_acceleration_z"]
+    gx = data["density"] * data["gravitational_potential_gradient_x"]
+    gy = data["density"] * data["gravitational_potential_gradient_y"]
+    gz = data["density"] * data["gravitational_potential_gradient_z"]
 
-    hx = data["grad_pressure_x"] - gx
-    hy = data["grad_pressure_y"] - gy
-    hz = data["grad_pressure_z"] - gz
+    hx = data["pressure_gradient_x"] - gx
+    hy = data["pressure_gradient_y"] - gy
+    hz = data["pressure_gradient_z"] - gz
 
-    h = np.sqrt((hx*hx+hy*hy+hz*hz)/(gx*gx+gy*gy+gz*gz))
+    h = np.sqrt((hx * hx + hy * hy + hz * hz) / (gx * gx + gy * gy + gz * gz))
 
     return h
 
@@ -166,6 +38,10 @@
 
 ds = yt.load("GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0350")
 
+# gradient operator requires periodic boundaries.  This dataset has
+# open boundary conditions.  We need to hack it for now (this will be fixed
+# in future version of yt)
+ds.periodicity = (True, True, True)
 
 # Take a slice through the center of the domain
 slc = yt.SlicePlot(ds, 2, ["density", "HSE"], width=(1, 'Mpc'))

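Note on the rewrite above: setup_gradient_fields creates derived fields named
"<field>_gradient_x/y/z", which is why the recipe switched from the hand-rolled
"grad_pressure_*" stencils to "pressure_gradient_*". Continuing from the
recipe's ds, a hedged sanity check:

    ad = ds.all_data()
    print ad["gravitational_potential_gradient_x"].units   # cm/s**2
    print ad["HSE"].max()
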
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/tests/test_cookbook.py
--- a/doc/source/cookbook/tests/test_cookbook.py
+++ b/doc/source/cookbook/tests/test_cookbook.py
@@ -11,19 +11,30 @@
 """
 import glob
 import os
-import sys
+import subprocess
 
-sys.path.append(os.path.join(os.getcwd(), "doc/source/cookbook"))
+
+PARALLEL_TEST = {"rockstar_nest.py": "3"}
 
 
 def test_recipe():
     '''Dummy test grabbing all cookbook's recipes'''
     for fname in glob.glob("doc/source/cookbook/*.py"):
-        module_name = os.path.splitext(os.path.basename(fname))[0]
-        yield check_recipe, module_name
+        recipe = os.path.basename(fname)
+        check_recipe.description = "Testing recipe: %s" % recipe
+        if recipe in PARALLEL_TEST:
+            yield check_recipe, \
+                ["mpiexec", "-n", PARALLEL_TEST[recipe], "python", fname]
+        else:
+            yield check_recipe, ["python", fname]
 
 
-def check_recipe(module_name):
+def check_recipe(cmd):
     '''Run single recipe'''
-    __import__(module_name)
-    assert True
+    try:
+        subprocess.check_call(cmd)
+        result = True
+    except subprocess.CalledProcessError, e:
+        print("Stdout output:\n", e.output)
+        result = False
+    assert result

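The harness now runs each recipe in a subprocess instead of importing it, so a
crashing script cannot take down the test runner, and MPI recipes get their own
mpiexec launch. A self-contained sketch of the pattern:

    import subprocess

    def run_recipe(cmd):
        # success means a clean exit code, mirroring check_recipe above
        try:
            subprocess.check_call(cmd)
            return True
        except subprocess.CalledProcessError:
            return False

    print(run_recipe(["python", "-c", "print('ok')"]))          # True
    print(run_recipe(["python", "-c", "raise SystemExit(1)"]))  # False
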
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/tipsy_and_yt.ipynb
--- a/doc/source/cookbook/tipsy_and_yt.ipynb
+++ b/doc/source/cookbook/tipsy_and_yt.ipynb
@@ -1,7 +1,16 @@
 {
  "metadata": {
+  "kernelspec": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "display_name": "IPython (Python 2)",
+   "language": "python",
+   "name": "python2"
+  },
   "name": "",
-  "signature": "sha256:2ae8b1599fa35495fa1bb8deb1c67094e3529e70093b30e20354122cd9403d9d"
+  "signature": "sha256:1f6e5cf50123ad75676f035a2a36cd60f4987832462907b9cb78cb25548d8afd"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -10,14 +19,6 @@
    "cells": [
     {
      "cell_type": "heading",
-     "level": 1,
-     "metadata": {},
-     "source": [
-      "Using yt to view and analyze Tipsy outputs from Gasoline"
-     ]
-    },
-    {
-     "cell_type": "heading",
      "level": 2,
      "metadata": {},
      "source": [
@@ -193,4 +194,4 @@
    "metadata": {}
   }
  ]
-}
+}
\ No newline at end of file

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/cookbook/yt_gadget_analysis.ipynb
--- /dev/null
+++ b/doc/source/cookbook/yt_gadget_analysis.ipynb
@@ -0,0 +1,263 @@
+{
+ "metadata": {
+  "kernelspec": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 2
+   },
+   "display_name": "IPython (Python 2)",
+   "language": "python",
+   "name": "python2"
+  },
+  "name": "",
+  "signature": "sha256:42e2b7cc4c70a501432f24bc0d62d0723605d50196399148dd365d28387dd55d"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Loading the data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First we set up our imports:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "import yt.units as units\n",
+      "import pylab"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First we load the data set, specifying both the unit length/mass/velocity, as well as the size of the bounding box (which should encapsulate all the particles in the data set)\n",
+      "\n",
+      "At the end, we flatten the data into \"ad\" in case we want access to the raw simulation data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      ">This dataset is available for download at http://yt-project.org/data/GadgetDiskGalaxy.tar.gz (430 MB)."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fname = 'GadgetDiskGalaxy/snapshot_200.hdf5'\n",
+      "\n",
+      "unit_base = {'UnitLength_in_cm'         : 3.08568e+21,\n",
+      "             'UnitMass_in_g'            :   1.989e+43,\n",
+      "             'UnitVelocity_in_cm_per_s' :      100000}\n",
+      "\n",
+      "bbox_lim = 1e5 #kpc\n",
+      "\n",
+      "bbox = [[-bbox_lim,bbox_lim],\n",
+      "        [-bbox_lim,bbox_lim],\n",
+      "        [-bbox_lim,bbox_lim]]\n",
+      " \n",
+      "ds = yt.load(fname,unit_base=unit_base,bounding_box=bbox)\n",
+      "ds.index\n",
+      "ad= ds.all_data()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's make a projection plot to look at the entire volume"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "px = yt.ProjectionPlot(ds, 'x', ('gas', 'density'))\n",
+      "px.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's print some quantities about the domain, as well as the physical properties of the simulation\n"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print 'left edge: ',ds.domain_left_edge\n",
+      "print 'right edge: ',ds.domain_right_edge\n",
+      "print 'center: ',ds.domain_center"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also see the fields that are available to query in the dataset"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "sorted(ds.field_list)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Let's create a data object that represents the full simulation domain, and find the total mass in gas and dark matter particles contained in it:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad = ds.all_data()\n",
+      "\n",
+      "# total_mass returns a list, representing the total gas and dark matter + stellar mass, respectively\n",
+      "print [tm.in_units('Msun') for tm in ad.quantities.total_mass()]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now let's say we want to zoom in on the box (since clearly the bounding we chose initially is much larger than the volume containing the gas particles!), and center on wherever the highest gas density peak is.  First, let's find this peak:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "density = ad[(\"PartType0\",\"density\")]\n",
+      "wdens = np.where(density == np.max(density))\n",
+      "coordinates = ad[(\"PartType0\",\"Coordinates\")]\n",
+      "center = coordinates[wdens][0]\n",
+      "print 'center = ',center"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Set up the box to zoom into"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_box_size = ds.quan(250,'code_length')\n",
+      "\n",
+      "left_edge = center - new_box_size/2\n",
+      "right_edge = center + new_box_size/2\n",
+      "\n",
+      "print new_box_size.in_units('Mpc')\n",
+      "print left_edge.in_units('Mpc')\n",
+      "print right_edge.in_units('Mpc')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ad2= ds.region(center=center, left_edge=left_edge, right_edge=right_edge)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Using this new data object, let's confirm that we're only looking at a subset of the domain by first calculating thte total mass in gas and particles contained in the subvolume:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print [tm.in_units('Msun') for tm in ad.quantities.total_mass()]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "And then by visualizing what the new zoomed region looks like"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "px = yt.ProjectionPlot(ds, 'x', ('gas', 'density'), center=center, width=new_box_size)\n",
+      "px.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Cool - there's a disk galaxy there!"
+     ]
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

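One aside on the notebook's peak lookup: the np.where-on-equality spelling
works, but np.argmax is the more direct form (a sketch, reusing the notebook's
ad object):

    density = ad[("PartType0", "density")]
    coordinates = ad[("PartType0", "Coordinates")]
    center = coordinates[np.argmax(density)]
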
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/developing/creating_frontend.rst
--- a/doc/source/developing/creating_frontend.rst
+++ b/doc/source/developing/creating_frontend.rst
@@ -7,14 +7,14 @@
             have a question about making a custom derived quantity, please
             contact the mailing list.
 
-yt is designed to support analysis and visualization of data from multiple
-different simulation codes, although it has so far been most successfully
-applied to Adaptive Mesh Refinement (AMR) data. For a list of codes and the
-level of support they enjoy, see :ref:`code-support`.
+yt is designed to support analysis and visualization of data from
+multiple different simulation codes. For a list of codes and the level
+of support they enjoy, see :ref:`code-support`.
 
-We'd like to support a broad range of codes, both AMR-based and otherwise. To
-add support for a new code, a few things need to be put into place. These
-necessary structures can be classified into a couple categories:
+We'd like to support a broad range of codes, both Adaptive Mesh
+Refinement (AMR)-based and otherwise. To add support for a new code, a
+few things need to be put into place. These necessary structures can
+be classified into a couple categories:
 
  * Data meaning: This is the set of parameters that convert the data into
    physically relevant units; things like spatial and mass conversions, time
@@ -33,73 +33,147 @@
 If you are interested in adding a new code, be sure to drop us a line on
 `yt-dev <http://lists.spacepope.org/listinfo.cgi/yt-dev-spacepope.org>`_!
 
-To get started, make a new directory in ``yt/frontends`` with the name of your
-code -- you can start by copying into it the contents of the ``stream``
-directory, which is a pretty empty format. You'll then have to create a subclass
-of ``Dataset``. This subclass will need to handle conversion between the
-different physical units and the code units; for the most part, the examples of
-``OrionDataset`` and ``EnzoDataset`` should be followed, but
-``ChomboDataset``, as a slightly newer addition, can also be used as an
-instructive example -- be sure to add an ``_is_valid`` classmethod that will
-verify if a filename is valid for that output type, as that is how "load" works.
+To get started, make a new directory in ``yt/frontends`` with the name
+of your code.  Copying the contents of the ``yt/frontends/_skeleton``
+directory will add a lot of boilerplate for the required classes and
+methods that are needed.  In particular, you'll have to create a
+subclass of ``Dataset`` in the data_structures.py file. This subclass
+will need to handle conversion between the different physical units
+and the code units (typically in the ``_set_code_unit_attributes()``
+method), read in metadata describing the overall data on disk (via the
+``_parse_parameter_file()`` method), and provide a ``classmethod``
+called ``_is_valid()`` that lets the ``yt.load`` method help identify an
+input file as belonging to *this* particular ``Dataset`` subclass.
+For the most part, the examples of
+``yt.frontends.boxlib.data_structures.OrionDataset`` and
+``yt.frontends.enzo.data_structures.EnzoDataset`` should be followed,
+but ``yt.frontends.chombo.data_structures.ChomboDataset``, as a
+slightly newer addition, can also be used as an instructive example.
 
-A new set of fields must be added in the file ``fields.py`` in that directory.
-For the most part this means subclassing ``CodeFieldInfoContainer`` and adding
-the necessary fields specific to that code. Here is the Chombo field container:
+A new set of fields must be added in the file ``fields.py`` in your
+new directory.  For the most part this means subclassing 
+``FieldInfoContainer`` and adding the necessary fields specific to
+your code. Here is a snippet from the base BoxLib field container:
 
 .. code-block:: python
 
-    from UniversalFields import *
-    class ChomboFieldContainer(CodeFieldInfoContainer):
-        _shared_state = {}
-        _field_list = {}
-    ChomboFieldInfo = ChomboFieldContainer()
-    add_chombo_field = ChomboFieldInfo.add_field
+    from yt.fields.field_info_container import FieldInfoContainer
+    class BoxlibFieldInfo(FieldInfoContainer):
+        known_other_fields = (
+            ("density", (rho_units, ["density"], None)),
+            ("eden", (eden_units, ["energy_density"], None)),
+            ("xmom", (mom_units, ["momentum_x"], None)),
+            ("ymom", (mom_units, ["momentum_y"], None)),
+            ("zmom", (mom_units, ["momentum_z"], None)),
+            ("temperature", ("K", ["temperature"], None)),
+            ("Temp", ("K", ["temperature"], None)),
+            ("x_velocity", ("cm/s", ["velocity_x"], None)),
+            ("y_velocity", ("cm/s", ["velocity_y"], None)),
+            ("z_velocity", ("cm/s", ["velocity_z"], None)),
+            ("xvel", ("cm/s", ["velocity_x"], None)),
+            ("yvel", ("cm/s", ["velocity_y"], None)),
+            ("zvel", ("cm/s", ["velocity_z"], None)),
+        )
 
-The field container is a shared state object, which is why we explicitly set
-``_shared_state`` equal to a mutable.
+        known_particle_fields = (
+            ("particle_mass", ("code_mass", [], None)),
+            ("particle_position_x", ("code_length", [], None)),
+            ("particle_position_y", ("code_length", [], None)),
+            ("particle_position_z", ("code_length", [], None)),
+            ("particle_momentum_x", (mom_units, [], None)),
+            ("particle_momentum_y", (mom_units, [], None)),
+            ("particle_momentum_z", (mom_units, [], None)),
+            ("particle_angmomen_x", ("code_length**2/code_time", [], None)),
+            ("particle_angmomen_y", ("code_length**2/code_time", [], None)),
+            ("particle_angmomen_z", ("code_length**2/code_time", [], None)),
+            ("particle_id", ("", ["particle_index"], None)),
+            ("particle_mdot", ("code_mass/code_time", [], None)),
+        )
+
+The tuples ``known_other_fields`` and ``known_particle_fields``
+contain entries that are themselves tuples of the form ``("name",
+("units", ["fields", "to", "alias"], "display_name"))``.  ``"name"`` is the name
+of a field stored on-disk in the dataset. ``"units"`` corresponds to
+the units of that field.  The list ``["fields", "to", "alias"]``
+allows you to specify additional aliases to this particular field; for
+example, if your on-disk field for the x-direction velocity were
+``"x-direction-velocity"``, maybe you'd prefer to alias to the more
+terse name of ``"xvel"``.  ``"display_name"`` is an optional parameter
+that can be used to specify how you want the field to be displayed on
+a plot; this can be LaTeX code, for example the density field could
+have a display name of ``r"\rho"``.  Omitting the ``"display_name"``
+will result in using a capitalized version of the ``"name"``.
 
 Data Localization Structures
 ----------------------------
 
-As of right now, the "grid patch" mechanism is going to remain in yt, however in
-the future that may change. As such, some other output formats -- like Gadget --
-may be shoe-horned in, slightly.
+These functions and classes let yt know about how the arrangement of
+data on disk corresponds to the physical arrangement of data within
+the simulation.  yt has grid datastructures for handling both
+patch-based and octree-based AMR codes.  The terms 'patch-based'
+and 'octree-based' are used somewhat loosely here.  For example,
+traditionally, the FLASH code used the paramesh AMR library, which is
+based on a tree structure, but the FLASH frontend in yt utilizes yt's
+patch-based datastructures.  It is up to the frontend developer to
+determine which yt datastructures best match the datastructures of
+their simulation code.
 
-Hierarchy
-^^^^^^^^^
+Both approaches -- patch-based and octree-based -- have a concept of a
+*Hierarchy* or *Index* (used somewhat interchangeably in the code) of
+datastructures and something that describes the elements that make up
+the Hierarchy or Index.  For patch-based codes, the Index is a
+collection of ``AMRGridPatch`` objects that describe a block of zones.
+For octree-based codes, the Index contains datastructures that hold
+information about the individual octs, namely an ``OctreeContainer``.
 
-To set up data localization, an ``AMRHierarchy`` subclass must be added in the
-file ``data_structures.py``. The index object must override the following
-methods:
+Hierarchy or Index
+^^^^^^^^^^^^^^^^^^
 
- * ``_detect_fields``: ``self.field_list`` must be populated as a list of
-   strings corresponding to "native" fields in the data files.
- * ``_setup_classes``: it's probably safe to crib this from one of the other
-   ``AMRHierarchy`` subclasses.
- * ``_count_grids``: this must set self.num_grids to be the total number of
-   grids in the simulation.
- * ``_parse_index``: this must fill in ``grid_left_edge``,
+To set up data localization, a ``GridIndex`` subclass for patch-based
+codes or an ``OctreeIndex`` subclass for octree-based codes must be
+added in the file ``data_structures.py``. Examples of these different
+types of ``Index`` can be found in, for example, the
+``yt.frontends.chombo.data_structures.ChomboHierarchy`` for patch-based
+codes and ``yt.frontends.ramses.data_structures.RAMSESIndex`` for
+octree-based codes.  
+
+For the most part, the ``GridIndex`` subclass must override (at a
+minimum) the following methods:
+
+ * ``_detect_output_fields()``: ``self.field_list`` must be populated as a list
+   of strings corresponding to "native" fields in the data files.
+ * ``_count_grids()``: this must set ``self.num_grids`` to be the total number
+   of grids (equivalently ``AMRGridPatch``'es) in the simulation.
+ * ``_parse_index()``: this must fill in ``grid_left_edge``,
    ``grid_right_edge``, ``grid_particle_count``, ``grid_dimensions`` and
-   ``grid_levels`` with the appropriate information. Additionally, ``grids``
-   must be an array of grid objects that already know their IDs.
- * ``_populate_grid_objects``: this initializes the grids by calling
-   ``_prepare_grid`` and ``_setup_dx`` on all of them.  Additionally, it should
-   set up ``Children`` and ``Parent`` lists on each grid object.
- * ``_setup_unknown_fields``: If a field is in the data file that yt doesn't
-   already know, this is where you make a guess at it.
- * ``_setup_derived_fields``: ``self.derived_field_list`` needs to be made a
-   list of strings that correspond to all derived fields valid for this
-   index.
+   ``grid_levels`` with the appropriate information.  Each of these variables 
+   is an array, with an entry for each of the ``self.num_grids`` grids.  
+   Additionally, ``grids``  must be an array of ``AMRGridPatch`` objects that 
+   already know their IDs.
+ * ``_populate_grid_objects()``: this initializes the grids by calling
+   ``_prepare_grid()`` and ``_setup_dx()`` on all of them.  Additionally, it 
+   should set up ``Children`` and ``Parent`` lists on each grid object.
 
-For the most part, the ``ChomboHierarchy`` should be the first place to look for
-hints on how to do this; ``EnzoHierarchy`` is also instructive.
+The ``OctreeIndex`` has somewhat analogous methods, but often with
+different names; both ``OctreeIndex`` and ``GridIndex`` are subclasses
+of the ``Index`` class.  In particular, for the ``OctreeIndex``, the
+method ``_initialize_oct_handler()`` sets up much of the oct
+metadata that is analogous to the grid metadata created in the
+``GridIndex`` methods ``_count_grids()``, ``_parse_index()``, and
+``_populate_grid_objects()``.
 
 Grids
 ^^^^^
 
-A new grid object, subclassing ``AMRGridPatch``, will also have to be added.
-This should go in ``data_structures.py``. For the most part, this may be all
+.. note:: This section only applies to the approach using yt's patch-based
+	  datastructures.  For the octree-based approach, one does not create
+	  a grid object, but rather an ``OctreeSubset``, which has methods
+	  for filling out portions of the octree structure.  Again, see the
+	  code in ``yt.frontends.ramses.data_structures`` for an example of
+	  the octree approach.
+
+A new grid object, subclassing ``AMRGridPatch``, will also have to be added in
+``data_structures.py``. For the most part, this may be all
 that is needed:
 
 .. code-block:: python
@@ -115,32 +189,46 @@
             self.Level = level
 
 
-Even the most complex grid object, ``OrionGrid``, is still relatively simple.
+Even one of the more complex grid objects,
+``yt.frontends.boxlib.BoxlibGrid``, is still relatively simple.
 
 Data Reading Functions
 ----------------------
 
-In ``io.py``, there are a number of IO handlers that handle the mechanisms by
-which data is read off disk.  To implement a new data reader, you must subclass
-``BaseIOHandler`` and override the following methods:
+In ``io.py``, there are a number of IO handlers that handle the
+mechanisms by which data is read off disk.  To implement a new data
+reader, you must subclass ``BaseIOHandler``.  The various frontend IO
+handlers are stored in an IO registry - essentially a dictionary that
+uses the name of the frontend as a key, and the specific IO handler as
+a value.  It is important, therefore, to set the ``dataset_type``
+attribute of your subclass, which is what is used as the key in the IO
+registry.  For example:
 
- * ``_read_field_names``: this routine accepts a grid object and must return all
-   the fields in the data file affiliated with that grid. It is used at the
-   initialization of the ``AMRHierarchy`` but likely not later.
- * ``modify``: This accepts a field from a data file and returns it ready to be
-   used by yt. This is used in Enzo data for preloading.
- * ``_read_data_set``: This accepts a grid object and a field name and must
-   return that field, ready to be used by yt as a NumPy array. Note that this
-   presupposes that any actions done in ``modify`` (above) have been executed.
- * ``_read_data_slice``: This accepts a grid object, a field name, an axis and
-   an (integer) coordinate, and it must return a slice through the array at that
-   value.
- * ``preload``: (optional) This accepts a list of grids and a list of datasets
-   and it populates ``self.queue`` (a dict keyed by grid id) with dicts of
-   datasets.
- * ``_read_exception``: (property) This is a tuple of exceptions that can be
-   raised by the data reading to indicate a field does not exist in the file.
+.. code-block:: python
 
+    class IOHandlerBoxlib(BaseIOHandler):
+        _dataset_type = "boxlib_native"
+        ...
+
+At a minimum, one should also override the following methods:
+
+* ``_read_fluid_selection()``: this receives a collection of data "chunks", a 
+  selector describing which "chunks" you are concerned with, a list of fields,
+  and the size of the data to read.  It should create and return a dictionary 
+  whose keys are the fields, and whose values are numpy arrays containing the 
+  data.  The data should actually be read via the ``_read_chunk_data()`` 
+  method.
+* ``_read_chunk_data()``: this method receives a "chunk" of data along with a 
+  list of fields we want to read.  It loops over all the grid objects within 
+  the "chunk" of data and reads from disk the specific fields, returning a 
+  dictionary whose keys are the fields and whose values are numpy arrays of
+  the data.
+
+If your dataset has particle information, you'll want to override the
+``_read_particle_coords()`` and ``_read_particle_fields()`` methods as
+well.  Each code is going to read data from disk in a different
+fashion, but the ``yt.frontends.boxlib.io.IOHandlerBoxlib`` is a
+decent place to start.
 
 And that just about covers it. Please feel free to email
 `yt-users <http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org>`_ or

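To make the field-tuple format described in the hunk above concrete, here is a
hedged, purely hypothetical entry: an on-disk field "rho" carrying CGS density
units, aliased to yt's "density", displayed with LaTeX:

    known_other_fields = (
        # ("name", ("units",    ["aliases"],  "display_name"))
        ("rho",    ("g/cm**3",  ["density"],  r"\rho")),
    )
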
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -302,7 +302,7 @@
 
    .. code-block:: bash
 
-      $ hg pull
+      $ hg pull upstream
       $ hg update -r "remote(tip,'upstream')"
 
 After the above steps, your local repository should be at the tip of
@@ -312,13 +312,13 @@
   [alias]
   myupdate = update -r "remote(tip,'upstream')"
 
-And then you can just issue ``hg myupdate`` to get at the tip of the yt
-branch of the main yt repository.
+And then you can just issue ``hg myupdate`` to get at the tip of the main yt repository.
 
-You can then make changes and ``hg commit`` them.  If you prefer
-working with `bookmarks <http://mercurial.selenic.com/wiki/Bookmarks>`_, you may
-want to make a bookmark before committing your changes, such as
-``hg bookmark mybookmark``.
+Make sure you are on the branch you want to be on, and then you can
+make changes and ``hg commit`` them.  If you prefer working with
+`bookmarks <http://mercurial.selenic.com/wiki/Bookmarks>`_, you may
+want to make a bookmark before committing your changes, such as
+``hg bookmark mybookmark``.
 
 To push to your fork on BitBucket if you didn't use a bookmark, you issue the following:
 

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -983,6 +983,8 @@
 onto the grid, you can also effectively mimic what your data would look like at
 lower resolution.
 
+See :ref:`gadget-notebook` for an example.
+
 .. _loading-tipsy-data:
 
 Tipsy Data

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -469,6 +469,21 @@
    slc.set_log('density', False)
    slc.save()
 
+Specifically, a field containing both positive and negative values can be plotted
+with a symlog scale by setting the boolean to ``True`` and providing an extra
+parameter, ``linthresh``. In the region around zero (where the log scale diverges),
+a linear scale is applied over ``(-linthresh, linthresh)`` and stretched relative
+to the logarithmic range. You can also plot a positive field on a symlog scale
+with the linear range ``(0, linthresh)``.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'x-velocity', width=(30,'kpc'))
+   slc.set_log('x-velocity', True, linthresh=1.e1)
+   slc.save()
+
 Lastly, the :meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_zlim`
 function makes it possible to set a custom colormap range.
 
@@ -531,6 +546,26 @@
    slc.set_buff_size(1600)
    slc.save()
 
+Turning off minorticks
+~~~~~~~~~~~~~~~~~~~~~~
+
+By default, minorticks for the x and y axes are turned on.
+The minorticks may be removed using the
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_minorticks`
+function, which accepts a specific field name (or the 'all' alias) together
+with the desired state for the plot, either 'on' or 'off'. There is also an analogous
+:meth:`~yt.visualization.plot_window.AxisAlignedSlicePlot.set_cbar_minorticks`
+function for the colorbar axis.
+
+.. python-script::
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = yt.SlicePlot(ds, 'z', 'density', width=(10,'kpc'))
+   slc.set_minorticks('all', 'off')
+   slc.set_cbar_minorticks('all', 'off')
+   slc.save()
+
 .. _matplotlib-customization:
 
 Further customization via matplotlib
@@ -743,7 +778,7 @@
 Adjusting the plot units does not require recreating the histogram, so adjusting
 units will always be inexpensive, requiring only an in-place unit conversion.
 
-In the following example we create a a plot of the average density in solar
+In the following example we create a plot of the average density in solar
 masses per cubic parsec as a function of radius in kiloparsecs.
 
 .. python-script::
@@ -892,7 +927,7 @@
 ``fractional`` keyword to ``True``.  When set to ``True``, the value in each bin
 is divided by the sum total from all bins.  These can be turned into cumulative
 distribution functions (CDFs) by setting the ``accumulation`` keyword to
-``True``.  This will make is so that the value in any bin N is the cumulative
+``True``.  This will make it so that the value in any bin N is the cumulative
 sum of all bins from 0 to N.  The direction of the summation can be reversed by
 setting ``accumulation`` to ``-True``.  For ``PhasePlot``, the accumulation can
 be set independently for each axis by setting ``accumulation`` to a list of

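A hedged sketch of the fractional/accumulation combination described above,
turning a 1D mass profile into a CDF (same sample dataset the docs use;
keyword names per the text in this hunk):

    import yt
    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    ad = ds.all_data()
    # each bin is divided by the total, then summed cumulatively from bin 0 to N
    plot = yt.ProfilePlot(ad, "radius", "cell_mass", weight_field=None,
                          accumulation=True, fractional=True)
    plot.save()
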
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,6 +6,12 @@
 import subprocess
 import shutil
 import glob
+
+if sys.version_info < (2, 7):
+    print("yt currently requires Python version 2.7")
+    print("certain features may fail unexpectedly and silently with older versions.")
+    sys.exit(1)
+
 import setuptools
 from distutils.version import StrictVersion
 if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -186,13 +186,13 @@
             # Simple error check to make sure more than 100% of box depth
             # is never required.
             if self.light_cone_solution[q]["box_depth_fraction"] > 1.0:
-                mylog.debug(("Warning: box fraction required to go from " +
+                mylog.error(("Warning: box fraction required to go from " +
                              "z = %f to %f is %f") %
                             (self.light_cone_solution[q]["redshift"], z_next,
                              self.light_cone_solution[q]["box_depth_fraction"]))
-                mylog.debug(("Full box delta z is %f, but it is %f to the " +
+                mylog.error(("Full box delta z is %f, but it is %f to the " +
                              "next data dump.") %
-                            (self.light_cone_solution[q]["deltazMax"],
+                            (self.light_cone_solution[q]["dz_max"],
                              self.light_cone_solution[q]["redshift"]-z_next))
 
             # Get projection axis and center.

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -169,7 +169,7 @@
                                 (self.light_ray_solution[q]['redshift'], z_next,
                                  self.light_ray_solution[q]['traversal_box_fraction']))
                     mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                                (self.light_ray_solution[q]['deltazMax'],
+                                (self.light_ray_solution[q]['dz_max'],
                                  self.light_ray_solution[q]['redshift']-z_next))
 
                 # Get dataset axis and center.

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -729,6 +729,7 @@
         events[tblhdu.header["CHANTYPE"]] = dchannel.astype(int)
 
         info = {"ChannelType" : tblhdu.header["CHANTYPE"],
+                "Mission" : tblhdu.header["MISSION"],
                 "Telescope" : tblhdu.header["TELESCOP"],
                 "Instrument" : tblhdu.header["INSTRUME"]}
         
@@ -789,6 +790,8 @@
             parameters["ARF"] = f["/arf"].value
         if "channel_type" in f:
             parameters["ChannelType"] = f["/channel_type"].value
+        if "mission" in f:
+            parameters["Mission"] = f["/mission"].value
         if "telescope" in f:
             parameters["Telescope"] = f["/telescope"].value
         if "instrument" in f:
@@ -831,6 +834,8 @@
             parameters["ARF"] = tblhdu["ARF"]
         if "CHANTYPE" in tblhdu.header:
             parameters["ChannelType"] = tblhdu["CHANTYPE"]
+        if "MISSION" in tblhdu.header:
+            parameters["Mission"] = tblhdu["MISSION"]
         if "TELESCOP" in tblhdu.header:
             parameters["Telescope"] = tblhdu["TELESCOP"]
         if "INSTRUME" in tblhdu.header:
@@ -920,11 +925,13 @@
         tbhdu.header["RADECSYS"] = "FK5"
         tbhdu.header["EQUINOX"] = 2000.0
         if "RMF" in self.parameters:
-            tbhdu.header["RMF"] = self.parameters["RMF"]
+            tbhdu.header["RESPFILE"] = self.parameters["RMF"]
         if "ARF" in self.parameters:
-            tbhdu.header["ARF"] = self.parameters["ARF"]
+            tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
         if "ChannelType" in self.parameters:
             tbhdu.header["CHANTYPE"] = self.parameters["ChannelType"]
+        if "Mission" in self.parameters:
+            tbhdu.header["MISSION"] = self.parameters["Mission"]
         if "Telescope" in self.parameters:
             tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
         if "Instrument" in self.parameters:
@@ -1041,6 +1048,8 @@
             f.create_dataset("/rmf", data=self.parameters["RMF"])
         if "ChannelType" in self.parameters:
             f.create_dataset("/channel_type", data=self.parameters["ChannelType"])
+        if "Mission" in self.parameters:
+            f.create_dataset("/mission", data=self.parameters["Mission"]) 
         if "Telescope" in self.parameters:
             f.create_dataset("/telescope", data=self.parameters["Telescope"])
         if "Instrument" in self.parameters:
@@ -1209,6 +1218,10 @@
                 tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
             else:        
                 tbhdu.header["ANCRFILE"] = "none"
+            if self.parameters.has_key("Mission"):
+                tbhdu.header["MISSION"] = self.parameters["Mission"]
+            else:
+                tbhdu.header["MISSION"] = "none"
             if self.parameters.has_key("Telescope"):
                 tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
             else:

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -807,6 +807,8 @@
         self.fields = [k for k in self.field_data if k not in skip]
         if fields is not None:
             self.fields = ensure_list(fields) + self.fields
+        if len(self.fields) == 0:
+            raise ValueError("No fields found to plot in get_pw")
         (bounds, center, display_center) = \
             get_window_parameters(axis, center, width, self.ds)
         pw = PWViewerMPL(self, bounds, fields=self.fields, origin=origin,

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -333,61 +333,6 @@
     def normal(self):
         return self._norm_vec
 
-    def to_frb(self, width, resolution, height=None,
-               periodic=False):
-        r"""This function returns an ObliqueFixedResolutionBuffer generated
-        from this object.
-
-        An ObliqueFixedResolutionBuffer is an object that accepts a
-        variable-resolution 2D object and transforms it into an NxM bitmap that
-        can be plotted, examined or processed.  This is a convenience function
-        to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other YTSelectionContainer2D objects, 
-        this does not accept a 'center' parameter as it is assumed to be 
-        centered at the center of the cutting plane.
-
-        Parameters
-        ----------
-        width : width specifier
-            This can either be a floating point value, in the native domain
-            units of the simulation, or a tuple of the (value, unit) style.
-            This will be the width of the FRB.
-        height : height specifier, optional
-            This will be the height of the FRB, by default it is equal to width.
-        resolution : int or tuple of ints
-            The number of pixels on a side of the final FRB.
-
-        Returns
-        -------
-        frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
-            A fixed resolution buffer, which can be queried for fields.
-
-        Examples
-        --------
-
-        >>> v, c = ds.find_max("density")
-        >>> sp = ds.sphere(c, (100.0, 'au'))
-        >>> L = sp.quantities.angular_momentum_vector()
-        >>> cutting = ds.cutting(L, c)
-        >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
-        >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
-        """
-        if iterable(width):
-            w, u = width
-            width = self.ds.quan(w, input_units = u)
-        if height is None:
-            height = width
-        elif iterable(height):
-            h, u = height
-            height = self.ds.quan(w, input_units = u)
-        if not iterable(resolution):
-            resolution = (resolution, resolution)
-        from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
-                    periodic = periodic)
-        return frb
-
     def _generate_container_field(self, field):
         if self._current_chunk is None:
             self.index._identify_base_chunk(self)
@@ -455,7 +400,7 @@
         pw._setup_plots()
         return pw
 
-    def to_frb(self, width, resolution, height=None):
+    def to_frb(self, width, resolution, height=None, periodic=False):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
 
@@ -477,6 +422,9 @@
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
+        periodic : boolean
+            This can be true or false, and governs whether the pixelization
+            will span the domain boundaries.
 
         Returns
         -------
@@ -505,7 +453,8 @@
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
         bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
-        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
+        frb = ObliqueFixedResolutionBuffer(self, bounds, resolution,
+                                           periodic=periodic)
         return frb
 
 class YTDiskBase(YTSelectionContainer3D):
@@ -593,7 +542,7 @@
     """
     _type_name = "data_collection"
     _con_args = ("_obj_list",)
-    def __init__(self, center, obj_list, ds = None, field_parameters = None):
+    def __init__(self, obj_list, ds=None, field_parameters=None, center=None):
         YTSelectionContainer3D.__init__(self, center, ds, field_parameters)
         self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
                                 dtype="int64")

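With the duplicate cutting-plane to_frb() removed, the one surviving
implementation now also accepts the periodic keyword. Reusing the docstring's
own example:

    v, c = ds.find_max("density")
    sp = ds.sphere(c, (100.0, 'au'))
    L = sp.quantities.angular_momentum_vector()
    cutting = ds.cutting(L, c)
    frb = cutting.to_frb((1.0, 'pc'), 1024, periodic=True)
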
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -328,10 +328,10 @@
             mylog.debug("Creating Particle Union 'all'")
             pu = ParticleUnion("all", list(self.particle_types_raw))
             self.add_particle_union(pu)
+        mylog.info("Loading field plugins.")
+        self.field_info.load_all_plugins()
         deps, unloaded = self.field_info.check_derived_fields()
         self.field_dependencies.update(deps)
-        mylog.info("Loading field plugins.")
-        self.field_info.load_all_plugins()
 
     def setup_deprecated_fields(self):
         from yt.fields.field_aliases import _field_name_aliases

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/data_objects/tests/test_data_collection.py
--- a/yt/data_objects/tests/test_data_collection.py
+++ b/yt/data_objects/tests/test_data_collection.py
@@ -8,7 +8,7 @@
     # We decompose in different ways
     for nprocs in [1, 2, 4, 8]:
         ds = fake_random_ds(16, nprocs = nprocs)
-        coll = ds.data_collection(ds.domain_center, ds.index.grids)
+        coll = ds.data_collection(ds.index.grids)
         crho = coll["density"].sum(dtype="float64").to_ndarray()
         grho = np.sum([g["density"].sum(dtype="float64") for g in ds.index.grids],
                       dtype="float64")
@@ -16,7 +16,7 @@
         yield assert_equal, coll.size, ds.domain_dimensions.prod()
         for gi in range(ds.index.num_grids):
             grids = ds.index.grids[:gi+1]
-            coll = ds.data_collection(ds.domain_center, grids)
+            coll = ds.data_collection(grids)
             crho = coll["density"].sum(dtype="float64")
             grho = np.sum([g["density"].sum(dtype="float64") for g in grids],
                           dtype="float64")

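The signature change exercised above makes center optional, with obj_list
promoted to the first positional argument. A sketch using the same testing
helper as the test file:

    from yt.testing import fake_random_ds

    ds = fake_random_ds(16)
    coll = ds.data_collection(ds.index.grids)                            # new style
    coll = ds.data_collection(ds.index.grids, center=ds.domain_center)   # still valid
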
diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -178,3 +178,4 @@
             # Skip it
             continue
         func(registry, ftype, species, particle_type)
+    add_nuclei_density_fields(registry, ftype, particle_type=particle_type)

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -19,8 +19,7 @@
       SkeletonDataset
 
 from .fields import \
-      SkeletonFieldInfo, \
-      add_skeleton_field
+      SkeletonFieldInfo
 
 from .io import \
       IOHandlerSkeleton

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,18 +13,13 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.lib.misc_utilities import \
-    get_box_grids_level
+from .fields import SkeletonFieldInfo
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
@@ -41,20 +36,15 @@
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
-class SkeletonHierarchy(AMRHierarchy):
-
+class SkeletonHierarchy(GridIndex):
     grid = SkeletonGrid
     
     def __init__(self, ds, dataset_type='skeleton'):
         self.dataset_type = dataset_type
-        self.dataset = weakref.proxy(ds)
         # for now, the index file is the dataset!
         self.index_filename = self.dataset.parameter_filename
         self.directory = os.path.dirname(self.index_filename)
-        AMRHierarchy.__init__(self, ds, dataset_type)
-
-    def _initialize_data_storage(self):
-        pass
+        GridIndex.__init__(self, ds, dataset_type)
 
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -13,9 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import numpy as np
-import h5py
-
 from yt.utilities.io_handler import \
     BaseIOHandler
 

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -683,7 +683,7 @@
         with open(fn, 'r') as f:
             lines = f.readlines()
             self.num_stars = int(lines[0].strip()[0])
-            for line in lines[1:]:
+            for num, line in enumerate(lines[1:]):
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])
                 particle_position_z = float(line.split(' ')[3])
@@ -704,6 +704,12 @@
                     ind = np.where(self.grids == grid)[0][0]
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
+
+                    # store the position in the particle file for fast access.
+                    try:
+                        self.grids[ind]._particle_line_numbers.append(num + 1)
+                    except AttributeError:
+                        self.grids[ind]._particle_line_numbers = [num + 1]
         return True
 
 

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -15,6 +15,7 @@
 
 import numpy as np
 import string
+import re
 
 from yt.utilities.physical_constants import \
     mh, boltzmann_constant_cgs, amu_cgs
@@ -25,6 +26,9 @@
 mom_units = "code_mass / (code_time * code_length**2)"
 eden_units = "code_mass / (code_time**2 * code_length)" # erg / cm^3
 
+spec_finder = re.compile(r'.*\((\D*)(\d*)\).*')
+
+
 def _thermal_energy_density(field, data):
     # What we've got here is UEINT:
     # u here is velocity
@@ -35,18 +39,21 @@
                + data["momentum_z"]**2) / data["density"]
     return data["eden"] - ke
 
+
 def _thermal_energy(field, data):
     # This is little e, so we take thermal_energy_density and divide by density
     return data["thermal_energy_density"] / data["density"]
 
-def _temperature(field,data):
+
+def _temperature(field, data):
     mu = data.ds.parameters["mu"]
     gamma = data.ds.parameters["gamma"]
-    tr  = data["thermal_energy_density"] / data["density"]
+    tr = data["thermal_energy_density"] / data["density"]
     tr *= mu * amu_cgs / boltzmann_constant_cgs
     tr *= (gamma - 1.0)
     return tr
 
+
 class BoxlibFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("density", (rho_units, ["density"], None)),
@@ -91,11 +98,11 @@
         if any(f[1] == "xmom" for f in self.field_list):
             self.setup_momentum_to_velocity()
         self.add_field(("gas", "thermal_energy"),
-                       function = _thermal_energy,
-                       units = "erg/g")
+                       function=_thermal_energy,
+                       units="erg/g")
         self.add_field(("gas", "thermal_energy_density"),
-                       function = _thermal_energy_density,
-                       units = "erg/cm**3")
+                       function=_thermal_energy_density,
+                       units="erg/cm**3")
         if ("gas", "temperature") not in self.field_aliases:
             self.add_field(("gas", "temperature"),
                            function=_temperature,
@@ -107,8 +114,9 @@
                 return data["%smom" % axis]/data["density"]
         for ax in 'xyz':
             self.add_field(("gas", "velocity_%s" % ax),
-                           function = _get_vel(ax),
-                           units = "cm/s")
+                           function=_get_vel(ax),
+                           units="cm/s")
+
 
 class CastroFieldInfo(FieldInfoContainer):
 
@@ -125,22 +133,27 @@
         # internal energy density (not just thermal)
         ("rho_e", ("erg/cm**3", [], r"\rho e")),
         ("Temp", ("K", ["temperature"], r"T")),
-        ("grav_x", ("cm/s**2", [], r"g\cdot e_x")),
-        ("grav_y", ("cm/s**2", [], r"g\cdot e_y")),
-        ("grav_z", ("cm/s**2", [], r"g\cdot e_z")),
+        ("grav_x", ("cm/s**2", [],
+                    r"\left(\mathbf{g} \cdot \mathbf{e}\right)_x")),
+        ("grav_y", ("cm/s**2", [],
+                    r"\left(\mathbf{g} \cdot \mathbf{e}\right)_y")),
+        ("grav_z", ("cm/s**2", [],
+                    r"\left(\mathbf{g} \cdot \mathbf{e}\right)_z")),
         ("pressure", ("dyne/cm**2", [], r"p")),
-        ("kineng", ("erg/cm**3", [], r"\frac{1}{2}\rho|U|**2")),
-        ("soundspeed", ("cm/s", ["sound_speed"], None)),
-        ("Machnumber", ("", ["mach_number"], None)),
+        ("kineng", ("erg/cm**3", [], r"\frac{1}{2}\rho|\mathbf{U}|**2")),
+        ("soundspeed", ("cm/s", ["sound_speed"], "Sound Speed")),
+        ("Machnumber", ("", ["mach_number"], "Mach Number")),
         ("entropy", ("erg/(g*K)", ["entropy"], r"s")),
-        ("magvort", ("1/s", ["vorticity_magnitude"], r"|\nabla \times U|")),
-        ("divu", ("1/s", [], r"\nabla \cdot U")),
+        ("magvort", ("1/s", ["vorticity_magnitude"],
+                     r"|\nabla \times \mathbf{U}|")),
+        ("divu", ("1/s", [], r"\nabla \cdot \mathbf{U}")),
         ("eint_E", ("erg/g", [], r"e(E,U)")),
         ("eint_e", ("erg/g", [], r"e")),
-        ("magvel", ("cm/s", ["velocity_magnitude"], r"|U|")),
-        ("radvel", ("cm/s", [], r"U\cdot e_r")),
-        ("magmom", ("g*cm/s", ["momentum_magnitude"], r"|\rho U|")),
-        ("maggrav", ("cm/s**2", [], r"|g|")),
+        ("magvel", ("cm/s", ["velocity_magnitude"], r"|\mathbf{U}|")),
+        ("radvel", ("cm/s", [],
+                    r"\left(\mathbf{U} \cdot \mathbf{e}\right)_r")),
+        ("magmom", ("g*cm/s", ["momentum_magnitude"], r"|\rho \mathbf{U}|")),
+        ("maggrav", ("cm/s**2", [], r"|\mathbf{g}|")),
         ("phiGrav", ("erg/g", [], r"|\Phi|")),
     )
 
@@ -149,15 +162,12 @@
         for _, field in self.ds.field_list:
             if field.startswith("X("):
                 # We have a fraction
-                nice_name = field[2:-1]
-                self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
-                           units = "")
-                def _create_density_func(field_name):
-                    def _func(field, data):
-                        return data[field_name] * data["gas", "density"]
-                    return _func
+                nice_name, tex_label = _nice_species_name(field)
+                self.alias(("gas", "%s_fraction" % nice_name),
+                           ("boxlib", field),
+                           units="")
                 func = _create_density_func(("gas", "%s_fraction" % nice_name))
-                self.add_field(name = ("gas", "%s_density" % nice_name),
+                self.add_field(name=("gas", "%s_density" % nice_name),
                                function = func,
                                units = "g/cm**3")
                 # We know this will either have one letter, or two.
@@ -168,6 +178,7 @@
                 weight = int(weight)
                 # Here we can, later, add number density.
 
+
 class MaestroFieldInfo(FieldInfoContainer):
 
     known_other_fields = (
@@ -175,32 +186,35 @@
         ("x_vel", ("cm/s", ["velocity_x"], r"\tilde{u}")),
         ("y_vel", ("cm/s", ["velocity_y"], r"\tilde{v}")),
         ("z_vel", ("cm/s", ["velocity_z"], r"\tilde{w}")),
-        ("magvel", ("cm/s", ["velocity_magnitude"], r"|\tilde{U} + w_0 e_r|")),
+        ("magvel", ("cm/s", ["velocity_magnitude"],
+                    r"|\tilde{\mathbf{U}} + w_0 \mathbf{e}_r|")),
         ("radial_velocity", ("cm/s", [], r"U\cdot e_r")),
-        ("tfromp", ("K", [], None)),
-        ("tfromh", ("K", [], None)),
-        ("Machnumber", ("", ["mach_number"], None)),
+        ("tfromp", ("K", [], "T(\\rho,p,X)")),
+        ("tfromh", ("K", [], "T(\\rho,h,X)")),
+        ("Machnumber", ("", ["mach_number"], "Mach Number")),
         ("S", ("1/s", [], None)),
         ("ad_excess", ("", [], "Adiabatic Excess")),
-        ("deltaT", ("", [], None)),
-        ("deltagamma", ("", [], None)),
-        ("deltap", ("", [], None)),
-        ("divw0", ("1/s", [], None)),
+        ("deltaT", ("", [], "[T(\\rho,h,X) - T(\\rho,p,X)]/T(\\rho,h,X)")),
+        ("deltagamma", ("", [], "\Gamma_1 - \overline{\Gamma_1}")),
+        ("deltap", ("", [], "[p(\\rho,h,X) - p_0] / p_0")),
+        ("divw0", ("1/s", [], "\nabla \cdot \mathbf{w}_0")),
         # Specific entropy
-        ("entropy", ("erg/(g*K)", ["entropy"], None)),
-        ("entropypert", ("", [], None)),
-        ("enucdot", ("erg/(g*s)", [], None)),
-        ("gpi_x", ("dyne/cm**3", [], None)), # Perturbational pressure grad
-        ("gpi_y", ("dyne/cm**3", [], None)),
-        ("gpi_z", ("dyne/cm**3", [], None)),
-        ("h", ("erg/g", [], "Specific Enthalpy")),
-        ("h0", ("erg/g", [], "Base State Specific Enthalpy")),
+        ("entropy", ("erg/(g*K)", ["entropy"], "s")),
+        ("entropypert", ("", [], "[s - \overline{s}] / \overline{s}")),
+        ("enucdot", ("erg/(g*s)", [], "\dot{\epsilon_{nuc}}")),
+        ("Hext", ("erg/(g*s)", [], "H_{ext}")),
+        # Perturbational pressure grad
+        ("gpi_x", ("dyne/cm**3", [], "\left(\nabla\pi\right)_x")),
+        ("gpi_y", ("dyne/cm**3", [], "\left(\nabla\pi\right)_y")),
+        ("gpi_z", ("dyne/cm**3", [], "\left(\nabla\pi\right)_z")),
+        ("h", ("erg/g", [], "h")),
+        ("h0", ("erg/g", [], "h_0")),
         # Momentum cannot be computed because we need to include base and
         # full state.
         ("momentum", ("g*cm/s", ["momentum_magnitude"], None)),
         ("p0", ("erg/cm**3", [], "p_0")),
         ("p0pluspi", ("erg/cm**3", [], "p_0 + \pi")),
-        ("pi", ("erg/cm**3", [], None)),
+        ("pi", ("erg/cm**3", [], "\pi")),
         ("pioverp0", ("", [], "\pi/p_0")),
         # Base state density
         ("rho0", ("g/cm**3", [], "\\rho_0")),
@@ -211,39 +225,42 @@
         ("rhopert", ("g/cm**3", [], "\\rho^\prime")),
         ("soundspeed", ("cm/s", ["sound_speed"], None)),
         ("sponge", ("", [], None)),
-        ("tpert", ("K", [], None)),
+        ("tpert", ("K", [], "T - \overline{T}")),
         # Again, base state -- so we can't compute ourselves.
-        ("vort", ("1/s", ["vorticity_magnitude"], None)),
+        ("vort", ("1/s", ["vorticity_magnitude"], "|\nabla\times\tilde{U}|")),
         # Base state
-        ("w0_x", ("cm/s", [], None)),
-        ("w0_y", ("cm/s", [], None)),
-        ("w0_z", ("cm/s", [], None)),
+        ("w0_x", ("cm/s", [], "(w_0)_x")),
+        ("w0_y", ("cm/s", [], "(w_0)_y")),
+        ("w0_z", ("cm/s", [], "(w_0)_z")),
     )
 
     def setup_fluid_fields(self):
         # pick the correct temperature field
         if self.ds.parameters["use_tfromp"]:
             self.alias(("gas", "temperature"), ("boxlib", "tfromp"),
-                       units = "K")
+                       units="K")
         else:
             self.alias(("gas", "temperature"), ("boxlib", "tfromh"),
-                       units = "K")
+                       units="K")
 
         # Add X's and omegadots, units of 1/s
         for _, field in self.ds.field_list:
             if field.startswith("X("):
-                # We have a fraction
-                nice_name = field[2:-1]
-                self.alias(("gas", "%s_fraction" % nice_name), ("boxlib", field),
-                           units = "")
-                def _create_density_func(field_name):
-                    def _func(field, data):
-                        return data[field_name] * data["gas", "density"]
-                    return _func
+                # We have a mass fraction
+                nice_name, tex_label = _nice_species_name(field)
+                # Overwrite field to use nicer tex_label display_name
+                self.add_output_field(("boxlib", field),
+                                      units="",
+                                      display_name=tex_label)
+                self.alias(("gas", "%s_fraction" % nice_name),
+                           ("boxlib", field),
+                           units="")
                 func = _create_density_func(("gas", "%s_fraction" % nice_name))
-                self.add_field(name = ("gas", "%s_density" % nice_name),
-                               function = func,
-                               units = "g/cm**3")
+                self.add_field(name=("gas", "%s_density" % nice_name),
+                               function=func,
+                               units="g/cm**3",
+                               display_name=r'\rho %s' % tex_label)
+
                 # Most of the time our species will be of the form
                 # element name + atomic weight (e.g. C12), but
                 # sometimes we make up descriptive names (e.g. ash)
@@ -256,8 +273,30 @@
                     weight = int(weight)
 
                 # Here we can, later, add number density.
-            if field.startswith("omegadot("):
-                nice_name = field[9:-1]
-                self.add_output_field(("boxlib", field), units = "1/s")
+            elif field.startswith("omegadot("):
+                nice_name, tex_label = _nice_species_name(field)
+                display_name = r'\dot{\omega}\left[%s\right]' % tex_label
+                # Overwrite field to use nicer tex_label'ed display_name
+                self.add_output_field(("boxlib", field), units="1/s",
+                                      display_name=display_name)
                 self.alias(("gas", "%s_creation_rate" % nice_name),
-                           ("boxlib", field), units = "1/s")
+                           ("boxlib", field), units="1/s")
+
+
+def _nice_species_name(field):
+    spec_match = spec_finder.search(field)
+    nice_name = ''.join(spec_match.groups())
+    # if the species field is a descriptive name, then the match
+    # on the integer will be blank
+    # modify the tex string in this case to remove spurious tex spacing
+    lab = r"X\left(^{%s}%s\right)"
+    if spec_match.groups()[-1] == "":
+        lab = r"X\left(%s%s\right)"
+    tex_label = lab % spec_match.groups()[::-1]
+    return nice_name, tex_label
+
+
+def _create_density_func(field_name):
+    def _func(field, data):
+        return data[field_name] * data["gas", "density"]
+    return _func
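
To make the species-name parsing above concrete, here is a small standalone check of the _nice_species_name logic; the expected outputs follow directly from the code in this diff:

import re

spec_finder = re.compile(r'.*\((\D*)(\d*)\).*')

def nice_species_name(field):
    # Mirrors _nice_species_name: split "X(C12)" into ("C", "12"), then
    # build a TeX label with the atomic weight as a leading superscript.
    m = spec_finder.search(field)
    nice_name = ''.join(m.groups())
    lab = r"X\left(^{%s}%s\right)"
    if m.groups()[-1] == "":
        # Descriptive names like "ash" have no weight; drop the empty
        # superscript to avoid spurious TeX spacing.
        lab = r"X\left(%s%s\right)"
    return nice_name, lab % m.groups()[::-1]

print(nice_species_name("X(C12)"))  # ('C12', 'X\\left(^{12}C\\right)')
print(nice_species_name("X(ash)"))  # ('ash', 'X\\left(ash\\right)')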

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -125,30 +125,36 @@
                     rv[ftype, fname] = np.concatenate((data, rv[ftype, fname]))
         return rv
 
-    def _read_particles(self, grid, field): 
+    def _read_particles(self, grid, field):
         """
         parses the Orion Star Particle text files
 
         """
 
-        fn = self.particle_filename
+        particles = []
+
+        if grid.NumberOfParticles == 0:
+            return np.array(particles)
 
         def read(line, field):
             entry = line.strip().split(' ')[self.particle_field_index[field]]
             return np.float(entry)
 
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-            particles = []
-            for line in lines[1:]:
-                if grid.NumberOfParticles > 0:
-                    coord = read(line, "particle_position_x"), \
-                            read(line, "particle_position_y"), \
-                            read(line, "particle_position_z") 
-                    if ( (grid.LeftEdge <= coord).all() and 
-                         (coord <= grid.RightEdge).all() ):
-                        particles.append(read(line, field))
-        return np.array(particles)
+        try:
+            lines = self._cached_lines
+            for num in grid._particle_line_numbers:
+                line = lines[num]
+                particles.append(read(line, field))
+            return np.array(particles)
+        except AttributeError:
+            fn = self.particle_filename
+            with open(fn, 'r') as f:
+                lines = f.readlines()
+                self._cached_lines = lines
+                for num in grid._particle_line_numbers:
+                    line = lines[num]
+                    particles.append(read(line, field))
+            return np.array(particles)
 
 
 class IOHandlerCastro(IOHandlerBoxlib):
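
The two halves of this optimization -- record 1-based line numbers per grid while indexing (the data_structures.py change above), then lazily cache the particle file's lines on the IO handler -- can be sketched in isolation. A hypothetical standalone class, not yt's actual handler:

import numpy as np

class ParticleLineCache:
    """Sketch of the lazy line-cache pattern from the diff above."""

    def __init__(self, filename):
        self.filename = filename

    def _lines(self):
        # First call reads and caches the whole file; later calls hit the
        # cache. The try/except AttributeError mirrors the diff's pattern.
        try:
            return self._cached_lines
        except AttributeError:
            with open(self.filename, 'r') as f:
                self._cached_lines = f.readlines()
            return self._cached_lines

    def read_field(self, line_numbers, column):
        # line_numbers plays the role of grid._particle_line_numbers:
        # 1-based because line 0 of the particle file is a header.
        if not line_numbers:
            return np.array([])
        lines = self._lines()
        return np.array([float(lines[n].split()[column])
                         for n in line_numbers])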

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -113,7 +113,8 @@
         self.directory = ds.fullpath
         self._handle = ds._handle
 
-        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        tr = self._handle['Chombo_global'].attrs.get("testReal", "float32")
+            
         self._levels = [key for key in self._handle.keys() if key.startswith('level')]
         GridIndex.__init__(self, ds, dataset_type)
 
@@ -156,12 +157,18 @@
         for key, val in self._handle.attrs.items():
             if key.startswith("particle"):
                 particle_fields.append(val)
-        self.field_list.extend([("io", c) for c in particle_fields])        
+        self.field_list.extend([("io", c) for c in particle_fields])
 
     def _count_grids(self):
         self.num_grids = 0
         for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
+            d = self._handle[lev]
+            if 'Processors' in d:
+                self.num_grids += d['Processors'].len()
+            elif 'boxes' in d:
+                self.num_grids += d['boxes'].len()
+            else:
+                raise RuntimeError("Uknown file specification")
 
     def _parse_index(self):
         f = self._handle # shortcut
@@ -255,18 +262,6 @@
         if D == 2:
             self.dataset_type = 'chombo2d_hdf5'
 
-        # some datasets will not be time-dependent, and to make
-        # matters worse, the simulation time is not always
-        # stored in the same place in the hdf file! Make
-        # sure we handle that here.
-        try:
-            self.current_time = self._handle.attrs['time']
-        except KeyError:
-            try:
-                self.current_time = self._handle['/level_0'].attrs['time']
-            except KeyError:
-                self.current_time = 0.0
-
         self.geometry = "cartesian"
         self.ini_filename = ini_filename
         self.fullplotdir = os.path.abspath(filename)
@@ -315,6 +310,20 @@
 
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self._determine_periodic()
+        self._determine_current_time()
+
+    def _determine_current_time(self):
+        # some datasets will not be time-dependent, and to make
+        # matters worse, the simulation time is not always
+        # stored in the same place in the hdf file! Make
+        # sure we handle that here.
+        try:
+            self.current_time = self._handle.attrs['time']
+        except KeyError:
+            try:
+                self.current_time = self._handle['/level_0'].attrs['time']
+            except KeyError:
+                self.current_time = 0.0
 
     def _determine_periodic(self):
         # we default to true unless the HDF5 file says otherwise
@@ -498,6 +507,7 @@
             self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
             self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
 
+        self._determine_current_time()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -531,7 +541,8 @@
 
         # look for particle fields
         self.particle_filename = self.index_filename[:-4] + 'sink'
-        if not os.path.exists(self.particle_filename): return
+        if not os.path.exists(self.particle_filename):
+            return
         pfield_list = [("io", c) for c in self.io.particle_field_index.keys()]
         self.field_list.extend(pfield_list)
 
@@ -540,14 +551,14 @@
         with open(self.particle_filename, 'r') as f:
             lines = f.readlines()
             self.num_stars = int(lines[0].strip().split(' ')[0])
-            for line in lines[1:]:
+            for num, line in enumerate(lines[1:]):
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])
                 particle_position_z = float(line.split(' ')[3])
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
                 # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
+                mask = np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge.d[:,i],coord[i]), (mask,0), mask)
                     np.choose(np.greater(self.grid_right_edge.d[:,i],coord[i]), (0,mask), mask)
@@ -562,6 +573,12 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
+                    # store the position in the *.sink file for fast access.
+                    try:
+                        self.grids[ind]._particle_line_numbers.append(num + 1)
+                    except AttributeError:
+                        self.grids[ind]._particle_line_numbers = [num + 1]
+
 
 class Orion2Dataset(ChomboDataset):
 
@@ -595,6 +612,7 @@
         self.domain_dimensions = self._calc_domain_dimensions()
         self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self._determine_periodic()
+        self._determine_current_time()
 
     def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
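
The _determine_current_time helper factored out above (so Orion2Dataset and the 2D code path can reuse it) is a nested try/except fallback. The same lookup order in isolation, assuming an h5py-style handle:

def determine_current_time(handle):
    # Try the file-level 'time' attribute first, then level_0's, and
    # fall back to 0.0 for time-independent datasets.
    try:
        return handle.attrs['time']
    except KeyError:
        try:
            return handle['/level_0'].attrs['time']
        except KeyError:
            return 0.0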

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,6 +25,7 @@
     _dataset_type = "chombo_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
+    _offsets = None
 
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
@@ -32,6 +33,29 @@
         self._handle = ds._handle
         self.dim = self._handle['Chombo_global/'].attrs['SpaceDim']
         self._read_ghost_info()
+        if self._offset_string not in self._handle['level_0']:
+            self._calculate_offsets()
+
+    def _calculate_offsets(self):
+        def box_size(corners):
+            size = 1
+            for idim in range(self.dim):
+                size *= (corners[idim+self.dim] - corners[idim] + 1)
+            return size
+
+        self._offsets = {}
+        num_comp = self._handle.attrs['num_components']
+        level = 0
+        while 1:
+            lname = 'level_%i' % level
+            if lname not in self._handle: break
+            boxes = self._handle['level_0']['boxes'].value
+            box_sizes = np.array([box_size(box) for box in boxes])
+
+            offsets = np.cumsum(box_sizes*num_comp, dtype='int64') 
+            offsets -= offsets[0]
+            self._offsets[level] = offsets
+            level += 1
 
     def _read_ghost_info(self):
         try:
@@ -41,7 +65,7 @@
             self.ghost = np.array(self.ghost)
         except KeyError:
             # assume zero ghosts if outputGhosts not present
-            self.ghost = np.zeros(self.dim)
+            self.ghost = np.zeros(self.dim, 'int64')
 
     _field_dict = None
     @property
@@ -69,18 +93,21 @@
         self._particle_field_index = field_dict
         return self._particle_field_index
 
-    def _read_field_names(self,grid):
+    def _read_field_names(self, grid):
         ncomp = int(self._handle.attrs['num_components'])
         fns = [c[1] for c in f.attrs.items()[-ncomp-1:-1]]
 
-    def _read_data(self,grid,field):
+    def _read_data(self, grid, field):
         lstring = 'level_%i' % grid.Level
         lev = self._handle[lstring]
         dims = grid.ActiveDimensions
         shape = grid.ActiveDimensions + 2*self.ghost
         boxsize = shape.prod()
 
-        grid_offset = lev[self._offset_string][grid._level_id]
+        if self._offsets is not None:
+            grid_offset = self._offsets[grid.Level][grid._level_id]
+        else:
+            grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
         data = lev[self._data_string][start:stop]
@@ -223,7 +250,14 @@
     # Figure out the format of the particle file
     with open(fn, 'r') as f:
         lines = f.readlines()
-    line = lines[1]
+
+    try:
+        line = lines[1]
+    except IndexError:
+        # a particle file exists, but there is only one line,
+        # so no sinks have been created yet.
+        index = {}
+        return index
 
     # The basic fields that all sink particles have
     index = {'particle_mass': 0,
@@ -289,20 +323,27 @@
 
         """
 
+        particles = []
+
+        if grid.NumberOfParticles == 0:
+            return np.array(particles)
+
         def read(line, field):
             entry = line.strip().split(' ')[self.particle_field_index[field]]
             return np.float(entry)
 
-        fn = grid.ds.fullplotdir[:-4] + "sink"
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-            particles = []
-            for line in lines[1:]:
-                if grid.NumberOfParticles > 0:
-                    coord = read(line, "particle_position_x"), \
-                        read(line, "particle_position_y"), \
-                        read(line, "particle_position_z")
-                    if ((grid.LeftEdge <= coord).all() and
-                       (coord <= grid.RightEdge).all() ):
-                        particles.append(read(line, field))
-        return np.array(particles)
+        try:
+            lines = self._cached_lines
+            for num in grid._particle_line_numbers:
+                line = lines[num]
+                particles.append(read(line, field))
+            return np.array(particles)
+        except AttributeError:
+            fn = grid.ds.fullplotdir[:-4] + "sink"
+            with open(fn, 'r') as f:
+                lines = f.readlines()
+                self._cached_lines = lines
+                for num in grid._particle_line_numbers:
+                    line = lines[num]
+                    particles.append(read(line, field))
+            return np.array(particles)
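
A standalone sketch of the offset computation introduced in _calculate_offsets above: each box's corner tuple gives its cell count, and a prefix sum over (cells * num_components) gives where each box starts in the flat level data. This version uses the conventional exclusive prefix sum; the diff's cumsum-then-subtract-first variant agrees with it when all boxes on a level share one size:

import numpy as np

def box_size(corners, dim):
    # corners = (lo_0, ..., lo_{dim-1}, hi_0, ..., hi_{dim-1}), inclusive.
    size = 1
    for idim in range(dim):
        size *= corners[idim + dim] - corners[idim] + 1
    return size

def data_offsets(boxes, dim, num_comp):
    # Offset of box i into the flat data array: total cells of all
    # earlier boxes, times the number of components stored per cell.
    sizes = np.array([box_size(b, dim) for b in boxes], dtype='int64')
    offsets = np.zeros(len(sizes), dtype='int64')
    offsets[1:] = np.cumsum(sizes[:-1] * num_comp)
    return offsets

print(data_offsets([(0, 0, 3, 3), (4, 0, 7, 3)], dim=2, num_comp=2))
# [ 0 32]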

diff -r 0c5846bbb80d640564dbf84af61c2ba33c108a0a -r a462b61eb3e5851484b0d50a81bf11049ca29366 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -20,9 +20,6 @@
     FieldInfoContainer
 from yt.units.yt_array import \
     YTArray
-from yt.fields.species_fields import \
-    add_nuclei_density_fields, \
-    add_species_field_by_density
 from yt.utilities.physical_constants import \
     mh, me, mp, \
     mass_sun_cgs
@@ -152,7 +149,6 @@
         for sp in species_names:
             self.add_species_field(sp)
             self.species_names.append(known_species_names[sp])
-        add_nuclei_density_fields(self, "gas")
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0913c1288c05/
Changeset:   0913c1288c05
Branch:      yt
User:        mzingale
Date:        2014-10-21 23:12:48+00:00
Summary:     merge
Affected #:  1 file

diff -r a462b61eb3e5851484b0d50a81bf11049ca29366 -r 0913c1288c05077ec8987b221fe33eea89b6c173 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -316,8 +316,13 @@
         nim = im.rescale(inline=False)
         enhance_rgba(nim)
         nim.add_background_color('black', inline=True)
-       
+
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]       
         lines(nim, px, py, colors, 24)
+        im = im[:,::-1,:]
+
         return nim
 
     def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
@@ -370,11 +375,15 @@
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
 
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
                   np.array([color, color]), 1, thickness)
+        im = im[:,::-1,:]
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -415,7 +424,12 @@
         py1 = int(self.resolution[0]*(dx1/self.width[0]))
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
+
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]))
+        im = im[:,::-1,:]
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -497,7 +511,11 @@
 
         px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
        
+        # we flipped it in snapshot to get the orientation correct when
+        # it is output -- temporarily undo that here
+        im = im[:,::-1,:]
         lines(im, px, py, color.reshape(1,4), 24)
+        im = im[:,::-1,:]
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.


https://bitbucket.org/yt_analysis/yt/commits/247a40f94341/
Changeset:   247a40f94341
Branch:      yt
User:        mzingale
Date:        2014-10-22 01:42:00+00:00
Summary:     instead of flipping the image, drawing the lines, then flipping it back,
have the lines() function know about orientation and optionally flip
when drawing the lines
Affected #:  2 files

diff -r 0913c1288c05077ec8987b221fe33eea89b6c173 -r 247a40f943419e69d6c06b1c74f250d0dbb8e3e3 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -217,7 +217,8 @@
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
           int points_per_color=1,
-          int thick=1):
+          int thick=1,
+	  int flip=0):
 
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
@@ -256,18 +257,23 @@
             if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
                 for xi in range(x0-thick/2, x0+(1+thick)/2):
                     for yi in range(y0-thick/2, y0+(1+thick)/2):
+                        if flip: 
+                            yi0 = ny - yi
+                        else:
+                            yi0 = yi
+
                         if has_alpha:
-                            image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
+                            image[xi, yi0, 3] = outa = alpha[3] + image[xi, yi0, 3]*(1-alpha[3])
                             if outa != 0.0:
                                 outa = 1.0/outa
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3]
+                                image[xi, yi0, i] = \
+                                        ((1.-alpha[3])*image[xi, yi0, i]*image[xi, yi0, 3]
                                          + alpha[3]*alpha[i])*outa
                         else:
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        (1.-alpha[i])*image[xi,yi,i] + alpha[i]
+                                image[xi, yi0, i] = \
+                                        (1.-alpha[i])*image[xi,yi0,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break

diff -r 0913c1288c05077ec8987b221fe33eea89b6c173 -r 247a40f943419e69d6c06b1c74f250d0dbb8e3e3 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -317,11 +317,9 @@
         enhance_rgba(nim)
         nim.add_background_color('black', inline=True)
 
-        # we flipped it in snapshot to get the orientation correct when
-        # it is output -- temporarily undo that here
-        im = im[:,::-1,:]       
-        lines(nim, px, py, colors, 24)
-        im = im[:,::-1,:]
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(nim, px, py, colors, 24, flip=1)
 
         return nim
 
@@ -375,15 +373,13 @@
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
 
-        # we flipped it in snapshot to get the orientation correct when
-        # it is output -- temporarily undo that here
-        im = im[:,::-1,:]
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
-                  np.array([color, color]), 1, thickness)
-        im = im[:,::-1,:]
+                  np.array([color, color]), 1, thickness, flip=1)
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -425,11 +421,9 @@
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
 
-        # we flipped it in snapshot to get the orientation correct when
-        # it is output -- temporarily undo that here
-        im = im[:,::-1,:]
-        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]))
-        im = im[:,::-1,:]
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]),flip=1)
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -511,11 +505,9 @@
 
         px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
        
-        # we flipped it in snapshot to get the orientation correct when
-        # it is output -- temporarily undo that here
-        im = im[:,::-1,:]
-        lines(im, px, py, color.reshape(1,4), 24)
-        im = im[:,::-1,:]
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, px, py, color.reshape(1,4), 24, flip=1)
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
@@ -766,7 +758,7 @@
                                         image, sampler),
                            info=self.get_information())
 
-        # flip it up/down to handle how the png orientation is donetest.png
+        # flip it up/down to handle how the png orientation is done
         image = image[:,::-1,:]
         self.save_image(image, fn=fn, clip_ratio=clip_ratio, 
                        transparent=transparent)
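
The idea behind this changeset -- remap the row index inside the pixel loop instead of flipping the whole image before and after drawing -- can be illustrated with a pure-NumPy sketch. A hypothetical helper (it uses ny - 1 - yi so the remapped index stays in bounds):

import numpy as np

def put_pixel(image, xi, yi, color, flip=False):
    # With flip=True the pixel lands where it would after an up/down
    # flip of the image, so callers need not copy and re-flip the array.
    ny = image.shape[1]
    yi0 = ny - 1 - yi if flip else yi
    image[xi, yi0, :3] = color

im = np.zeros((8, 8, 4))
put_pixel(im, 2, 1, [1.0, 0.0, 0.0], flip=True)

# Equivalent to drawing without flip and then flipping the image:
im2 = np.zeros((8, 8, 4))
put_pixel(im2, 2, 1, [1.0, 0.0, 0.0])
assert np.array_equal(im, im2[:, ::-1, :])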


https://bitbucket.org/yt_analysis/yt/commits/4ac89a837391/
Changeset:   4ac89a837391
Branch:      yt
User:        jzuhone
Date:        2014-10-22 14:43:15+00:00
Summary:     Merged in mzingale/yt-new (pull request #1229)

this change addresses issue #914 -- the orientation of draw_domain()
Affected #:  2 files

diff -r 1fb34fd879d0f912e531eb69253b82df9b28ffc8 -r 4ac89a8373911095eab03cbd009ca1bbf10c940e yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -217,7 +217,8 @@
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
           int points_per_color=1,
-          int thick=1):
+          int thick=1,
+	  int flip=0):
 
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
@@ -256,18 +257,23 @@
             if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
                 for xi in range(x0-thick/2, x0+(1+thick)/2):
                     for yi in range(y0-thick/2, y0+(1+thick)/2):
+                        if flip: 
+                            yi0 = ny - yi
+                        else:
+                            yi0 = yi
+
                         if has_alpha:
-                            image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
+                            image[xi, yi0, 3] = outa = alpha[3] + image[xi, yi0, 3]*(1-alpha[3])
                             if outa != 0.0:
                                 outa = 1.0/outa
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3]
+                                image[xi, yi0, i] = \
+                                        ((1.-alpha[3])*image[xi, yi0, i]*image[xi, yi0, 3]
                                          + alpha[3]*alpha[i])*outa
                         else:
                             for i in range(3):
-                                image[xi, yi, i] = \
-                                        (1.-alpha[i])*image[xi,yi,i] + alpha[i]
+                                image[xi, yi0, i] = \
+                                        (1.-alpha[i])*image[xi,yi0,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break

diff -r 1fb34fd879d0f912e531eb69253b82df9b28ffc8 -r 4ac89a8373911095eab03cbd009ca1bbf10c940e yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -316,8 +316,11 @@
         nim = im.rescale(inline=False)
         enhance_rgba(nim)
         nim.add_background_color('black', inline=True)
-       
-        lines(nim, px, py, colors, 24)
+
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(nim, px, py, colors, 24, flip=1)
+
         return nim
 
     def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
@@ -370,11 +373,13 @@
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
 
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
-                  np.array([color, color]), 1, thickness)
+                  np.array([color, color]), 1, thickness, flip=1)
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
@@ -415,7 +420,10 @@
         py1 = int(self.resolution[0]*(dx1/self.width[0]))
         px0 = int(self.resolution[1]*(dy0/self.width[1]))
         px1 = int(self.resolution[1]*(dy1/self.width[1]))
-        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]))
+
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, np.array([px0,px1]), np.array([py0,py1]), color=np.array([color,color]),flip=1)
 
     def draw_domain(self,im,alpha=0.3):
         r"""Draws domain edges on an existing volume rendering.
@@ -497,7 +505,9 @@
 
         px, py, dz = self.project_to_plane(vertices, res=im.shape[:2])
        
-        lines(im, px, py, color.reshape(1,4), 24)
+        # we flipped it in snapshot to get the orientation correct, so
+        # flip the lines
+        lines(im, px, py, color.reshape(1,4), 24, flip=1)
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
@@ -748,7 +758,7 @@
                                         image, sampler),
                            info=self.get_information())
 
-        # flip it up/down to handle how the png orientation is donetest.png
+        # flip it up/down to handle how the png orientation is done
         image = image[:,::-1,:]
         self.save_image(image, fn=fn, clip_ratio=clip_ratio, 
                        transparent=transparent)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

