[yt-svn] commit/yt: 5 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Mar 16 12:22:41 PDT 2012


5 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/bebe2f1eefad/
changeset:   bebe2f1eefad
branch:      yt
user:        ngoldbaum
date:        2012-03-16 20:05:06
summary:     Added some boilerplate to the install script about xcode.
affected #:  1 file

diff -r 782b3dd8bb92b1697124d6b05416b943679096a7 -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -17,7 +17,7 @@
 
 DEST_SUFFIX="yt-`uname -p`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt" # This is the branch to which we will forcibly update.
+BRANCH="stable" # This is the branch to which we will forcibly update.
 
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
@@ -41,8 +41,6 @@
 INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_FORTHON=1  # Install Forthon?
-INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
-                # working TeX installation.
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -133,9 +131,9 @@
         echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
         echo "These commands should take care of that for you:"
         echo
-        echo "   $ module unload mvapich2"
+        echo "   $ module unload mvapich-devel"
         echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich2"
+        echo "   $ module load mvapich-devel"
         echo
     fi
     if [ "${MYHOST##honest}" != "${MYHOST}" ]
@@ -159,8 +157,20 @@
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
+	echo
+	echo "NOTE: you must have the Xcode command line tools installed."
         echo
-        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
+	echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
+	echo "website"
+        echo
+	echo "OS X 10.6: download Xcode 3.2 from the mac developer tools" 
+	echo "website"
+        echo
+	echo "OS X 10.7: download Xcode 4.0 from the mac app store or" 
+	echo "alternatively download the Xcode command line tools from" 
+	echo "the mac developer tools website"
+        echo
+	echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
         echo "Leopard) or newer.  If you do, please set the following"
         echo "environment variables, remove any broken installation tree, and"
         echo "re-run this script verbatim."
@@ -216,10 +226,6 @@
 get_willwont ${INST_ENZO}
 echo "be checking out Enzo"
 
-printf "%-15s = %s so I " "INST_PYX" "${INST_PYX}"
-get_willwont ${INST_PYX}
-echo "be installing PyX"
-
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -283,30 +289,14 @@
     export GETFILE="curl -sSO"
 fi
 
-if type -P sha512sum &> /dev/null
-then
-    echo "Using sha512sum"
-    export SHASUM="sha512sum"
-elif type -P shasum &> /dev/null
-then
-    echo "Using shasum -a 512"
-    export SHASUM="shasum -a 512"
-else
-    echo
-    echo "I am unable to locate any shasum-like utility."
-    echo "ALL FILE INTEGRITY IS NOT VERIFIABLE."
-    echo "THIS IS PROBABLY A BIG DEAL."
-    echo
-    echo "(I'll hang out for a minute for you to consider this.)"
-    sleep 60
-fi
-
 function get_enzotools
 {
     echo "Downloading $1 from yt-project.org"
     [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
+    ( which md5sum &> /dev/null ) || return # return if we don't have md5sum
+    ( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
 ORIG_PWD=`pwd`
@@ -320,27 +310,6 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-# Now we dump all our SHA512 files out.
-
-echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
-echo '2564011f64cd7ea24d49c6103603ced857bcb79a3837032b959005b64f9da226a08c95d920ae59034ca2c5957a45c99949811649de9e5e73cdbb23396e11f756  Forthon-0.8.5.tar.gz' > Forthon-0.8.5.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
-echo 'de73b14727c2a6623c19896d4c034ad0f705bf5ccbb8501c786a9d074cce97a7760db9246ae7da3db47dd2de29a1707a8a0ee17ab41a6d9140f2a7dbf455af0f  ext-3.3.2.zip' > ext-3.3.2.zip.sha512
-echo '6d65dcbb77978d4f4a9711062f11ae9d61133ca086f9207a8c1ecea8807dc9612cc8c3b2428157d2fb00dea8e0958f61e35cce4e07987c80bc808bbda3608a6c  ext-slate-110328.zip' > ext-slate-110328.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
-echo '2c883d64886e5d595775dde497f101ff2ecec0786eabcdc69861c20e7d081e67b5e97551194236933b78f1ff7b119fcba0a9ce3aa4851440fc58f84d2094177b  ipython-0.10.tar.gz' > ipython-0.10.tar.gz.sha512
-echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-
 # Individual processes
 if [ -z "$HDF5_DIR" ]
 then
@@ -353,7 +322,6 @@
 [ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
 [ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_enzotools PyX-0.11.1.tar.gz
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
@@ -578,7 +546,6 @@
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
 [ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
@@ -594,12 +561,6 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
-then
-    echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
-fi
-
 if [ $INST_ENZO -eq 1 ]
 then
     echo "Cloning a copy of Enzo."



https://bitbucket.org/yt_analysis/yt/changeset/b0ac030e0d97/
changeset:   b0ac030e0d97
branch:      yt
user:        ngoldbaum
date:        2012-03-16 20:06:58
summary:     Merging
affected #:  41 files

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,9 @@
                                 JC Passy (jcpassy at gmail.com)
                                 Eve Lee (elee at cita.utoronto.ca)
                                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                                Kacper Kowalik (xarthisius.kk at gmail.com)
+                                Nathan Goldbaum (goldbaum at ucolick.org)
+                                Anna Rosen (rosen at ucolick.org)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -15,7 +15,7 @@
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
-DEST_SUFFIX="yt-`uname -p`"
+DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="stable" # This is the branch to which we will forcibly update.
 
@@ -40,7 +40,6 @@
 INST_FTYPE=1    # Install FreeType2 locally?
 INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
-INST_FORTHON=1  # Install Forthon?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -214,10 +213,6 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
-printf "%-15s = %s so I " "INST_FORTHON" "${INST_FORTHON}"
-get_willwont ${INST_FORTHON}
-echo "be installing Forthon (for Halo Finding, etc)"
-
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -329,7 +324,6 @@
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.15.1.tar.gz
-get_enzotools Forthon-0.8.5.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -370,6 +364,7 @@
         cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -387,6 +382,7 @@
         cd libpng-1.2.43
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -404,6 +400,7 @@
         cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -421,6 +418,7 @@
         cd hdf5-1.8.7
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -439,6 +437,7 @@
         cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -454,6 +453,7 @@
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+    ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 fi
@@ -545,7 +545,6 @@
 do_setup_py ipython-0.10
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
-[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
@@ -556,7 +555,6 @@
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-[ $INST_FORTHON -eq 1 ] && ( ( cd yt/utilities/kdtree && FORTHON_EXE=${DEST_DIR}/bin/Forthon make 2>&1 ) 1>> ${LOG_FILE} )
 ( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -28,8 +28,10 @@
                 field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "density_%s" % field,
-                field_x="Density", field_y=field)
-    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                field_x="Density", field_y="x-velocity", field_z=field,
-                weight="CellMassMsun")
+    if field != "Density":
+        create_test(TestGasDistribution, "density_%s" % field,
+                    field_x="Density", field_y=field)
+    if field not in ("x-velocity", "Density"):
+        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                    field_x="Density", field_y="x-velocity", field_z=field,
+                    weight="CellMassMsun")
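
The guards above keep create_test from emitting degenerate profiles in which the dependent field duplicates one of the binning fields. A minimal sketch of the same filtering, with a hypothetical field_list:

    field_list = ["Density", "Temperature", "x-velocity"]
    # Skip pairs that would profile Density against itself.
    pairs = [("Density", f) for f in field_list if f != "Density"]
    # -> [("Density", "Temperature"), ("Density", "x-velocity")]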


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -105,11 +105,19 @@
         --------
         >>> com = halos[0].center_of_mass()
         """
-        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
+        if self.CoM is not None:
+            return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = (self["particle_position_x"] - c_vec[0])
-        cy = (self["particle_position_y"] - c_vec[1])
-        cz = (self["particle_position_z"] - c_vec[2])
+        cx = self["particle_position_x"]
+        cy = self["particle_position_y"]
+        cz = self["particle_position_z"]
+        if isinstance(self, FOFHalo):
+            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+        else:
+            c_vec = self.maximum_density_location() - self.pf.domain_center
+        cx = (cx - c_vec[0])
+        cy = (cy - c_vec[1])
+        cz = (cz - c_vec[2])
         com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
@@ -123,6 +131,8 @@
         --------
         >>> max_dens = halos[0].maximum_density()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[0]
         return self._max_dens[self.id][0]
 
     def maximum_density_location(self):
@@ -135,6 +145,8 @@
         --------
         >>> max_dens_loc = halos[0].maximum_density_location()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[1:]
         return na.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
@@ -150,6 +162,8 @@
         --------
         >>> halos[0].total_mass()
         """
+        if self.group_total_mass is not None:
+            return self.group_total_mass
         return self["ParticleMassMsun"].sum()
 
     def bulk_velocity(self):
@@ -162,6 +176,8 @@
         --------
         >>> bv = halos[0].bulk_velocity()
         """
+        if self.bulk_vel is not None:
+            return self.bulk_vel
         pm = self["ParticleMassMsun"]
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
@@ -180,6 +196,8 @@
         --------
         >>> rms_vel = halos[0].rms_velocity()
         """
+        if self.rms_vel is not None:
+            return self.rms_vel
         bv = self.bulk_velocity()
         pm = self["ParticleMassMsun"]
         sm = pm.sum()
@@ -211,6 +229,8 @@
         --------
         >>> radius = halos[0].maximum_radius()
         """
+        if self.max_radius is not None:
+            return self.max_radius
         if center_of_mass:
             center = self.center_of_mass()
         else:
@@ -266,6 +286,8 @@
         return sphere
 
     def get_size(self):
+        if self.size is not None:
+            return self.size
         return self.indices.size
 
     def write_particle_list(self, handle):
@@ -418,6 +440,7 @@
 
 
 class HOPHalo(Halo):
+    _name = "HOPHalo"
     pass
 
 
@@ -428,292 +451,6 @@
         "virial_info", "virial_bin", "virial_mass", "virial_radius",
         "rms_velocity"]
 
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[0]
-        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
-        return max
-
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[1:]
-        # If I own the maximum density, my location is globally correct.
-        max_dens = self.maximum_density()
-        if self._max_dens[self.id][0] == max_dens:
-            value = na.array([
-                self._max_dens[self.id][1],
-                self._max_dens[self.id][2],
-                self._max_dens[self.id][3]])
-        else:
-            value = na.array([0, 0, 0])
-        # This works, and isn't appropriate but for now will be fine...
-        value = self.comm.mpi_allreduce(value, op='sum')
-        return value
-
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        # If it's precomputed, we save time!
-        if self.CoM is not None:
-            return self.CoM
-        # This need to be called by all tasks, but not all will end up using
-        # it.
-        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            cx = (self["particle_position_x"] - c_vec[0])
-            cy = (self["particle_position_y"] - c_vec[1])
-            cz = (self["particle_position_z"] - c_vec[2])
-            com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
-            my_mass = pm.sum()
-            my_com = ((com * pm).sum(axis=1) / my_mass + c_vec) * my_mass
-        else:
-            my_mass = 0.
-            my_com = na.array([0., 0., 0.])
-        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
-        global_com = self.comm.mpi_allreduce(my_com, op='sum')
-        return global_com / global_mass
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        if self.group_total_mass is not None:
-            return self.group_total_mass
-        if self.indices is not None:
-            my_mass = self["ParticleMassMsun"].sum()
-        else:
-            my_mass = 0.
-        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
-        return global_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        if self.bulk_vel is not None:
-            return self.bulk_vel
-        # Unf. this cannot be reasonably computed inside of parallelHOP because
-        # we don't pass velocities in.
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            vx = (self["particle_velocity_x"] * pm).sum()
-            vy = (self["particle_velocity_y"] * pm).sum()
-            vz = (self["particle_velocity_z"] * pm).sum()
-            pm = pm.sum()
-        else:
-            pm = 0.
-            vx = 0.
-            vy = 0.
-            vz = 0.
-        bv = na.array([vx, vy, vz, pm])
-        global_bv = self.comm.mpi_allreduce(bv, op='sum')
-        return global_bv[:3] / global_bv[3]
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        if self.rms_vel is not None:
-            return self.rms_vel
-        bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
-        sm = pm.sum()
-        if self.indices is not None:
-            vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
-            vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
-            vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
-            s = vx ** 2 + vy ** 2 + vz ** 2
-            s = na.sum(s)
-            size = vx.size
-            ss = na.array([s, float(size)])
-        else:
-            ss = na.array([0., 0.])
-        global_ss = self.comm.mpi_allreduce(ss, op='sum')
-        ms = global_ss[0] / global_ss[1]
-        return na.sqrt(ms) * global_ss[1]
-
-    def maximum_radius(self, center_of_mass=True):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when
-            calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        if self.max_radius is not None:
-            return self.max_radius
-        if center_of_mass:
-            center = self.center_of_mass()
-        else:
-            center = self.maximum_density_location()
-        DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        if self.indices is not None:
-            rx = na.abs(self["particle_position_x"] - center[0])
-            ry = na.abs(self["particle_position_y"] - center[1])
-            rz = na.abs(self["particle_position_z"] - center[2])
-            r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                    + na.minimum(ry, DW[1] - ry) ** 2.0
-                    + na.minimum(rz, DW[2] - rz) ** 2.0)
-            my_max = r.max()
-
-        else:
-            my_max = 0.
-        return self.comm.mpi_allreduce(my_max, op='max')
-
-    def get_size(self):
-        if self.size is not None:
-            return self.size
-        if self.indices is not None:
-            my_size = self.indices.size
-        else:
-            my_size = 0
-        global_size = self.comm.mpi_allreduce(my_size, op='sum')
-        return global_size
-
-    def __getitem__(self, key):
-        if ytcfg.getboolean("yt", "inline") == False:
-            return self.data[key][self.indices]
-        else:
-            return self.data[key][self.indices]
-
-    def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo
-        in Msun, using only the particles
-        in the halo (no baryonic information used).
-
-        The virial mass is calculated, using the built in `Halo.virial_info`
-        functionality.  The mass is then returned.
-
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial mass. Default = 200.
-        bins : int
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        mass : float
-            The virial mass in solar masses of the particles in the halo.  -1
-            if not virialized.
-
-        Examples
-        --------
-        >>> vm = halos[0].virial_mass()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
-            bins=bins)
-        if vir_bin != -1:
-            return self.mass_bins[vir_bin]
-        else:
-            return -1
-
-    def virial_radius(self, virial_overdensity=200., bins=300):
-        r"""Return the virial radius of the halo in code units.
-
-        The virial radius of the halo is calculated, using only the particles
-        in the halo (no baryonic information used). Returns -1 if the halo is
-        not virialized.
-
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial radius. Default = 200.
-        bins : integer
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        radius : float
-            The virial raius in code units of the particles in the halo.  -1
-            if not virialized.
-
-        Examples
-        --------
-        >>> vr = halos[0].virial_radius()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
-            bins=bins)
-        if vir_bin != -1:
-            return self.radial_bins[vir_bin]
-        else:
-            return -1
-
-    def virial_bin(self, virial_overdensity=200., bins=300):
-        r"""Returns the bin index of the virial radius of the halo. Generally,
-        it is better to call virial_radius instead, which calls this function
-        automatically.
-        """
-        self.virial_info(bins=bins)
-        over = (self.overdensity > virial_overdensity)
-        if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
-            return vir_bin
-        else:
-            return -1
-
     def virial_info(self, bins=300):
         r"""Calculates the virial information for the halo. Generally, it is
         better to call virial_radius or virial_mass instead, which calls this
@@ -781,27 +518,6 @@
 
 class FOFHalo(Halo):
 
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        c_vec = na.array([cx[0], cy[0], cz[0]]) - na.array([0.5, 0.5, 0.5])
-        cx = cx - c_vec[0]
-        cy = cy - c_vec[1]
-        cz = cz - c_vec[2]
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
-        com = (pm * com).sum(axis=1) / pm.sum() + c_vec
-        return com
-
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -905,100 +621,6 @@
             del f
         return field_data
 
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        return self.CoM
-
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        return self.max_dens_point[1:]
-
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        return self.max_dens_point[0]
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        return self.group_total_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        return self.bulk_vel
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        return self.rms_vel
-
-    def maximum_radius(self):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when
-            calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        return self.max_radius
-
     def get_sphere(self):
         r"""Returns a sphere source.
 
@@ -2243,26 +1865,31 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+        else:
+            self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
-        # reflect particles around the periodic boundary
-        #self._reposition_particles((LE, RE))
-        if dm_only:
+        # sub_mass can be skipped if subvolume is not used and this is not
+        # parallel.
+        if subvolume is None and \
+                ytcfg.getint("yt", "__topcomm_parallel_size") == 1:
+            sub_mass = total_mass
+        elif dm_only:
             select = self._get_dm_indices()
             sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source["ParticleMassMsun"].sum(dtype='float64')
+                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)
@@ -2341,6 +1968,8 @@
         if subvolume is not None:
             self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
                 ds_RE)
+        else:
+            self._data_source = pf.h.all_data()
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
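
Most of this changeset follows a single pattern: each Halo accessor now returns the value the halo finder precomputed, falling back to a full particle reduction only when that attribute is absent. A stripped-down illustration of the guard, with hypothetical names:

    import numpy as np

    class PrecomputedHalo:
        def __init__(self, masses, group_total_mass=None):
            self.masses = np.asarray(masses)
            # Filled in by the finder, or left as None.
            self.group_total_mass = group_total_mass

        def total_mass(self):
            if self.group_total_mass is not None:
                return self.group_total_mass  # precomputed: O(1), no particle I/O
            return self.masses.sum()          # fallback: reduce over the particles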


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -388,7 +388,7 @@
             self.pos[self.psize:, 2] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            self.kdtree = cKDTree(self.pos, leafsize = 32)
+            self.kdtree = cKDTree(self.pos, leafsize = 64)
         self.__max_memory()
         yt_counters("init kd tree")
 
@@ -613,66 +613,73 @@
         chain_map = defaultdict(set)
         for i in xrange(max(self.chainID)+1):
             chain_map[i].add(i)
-        if self.tree == 'F':
+        yt_counters("preconnect kd tree search.")
+        if self.tree == 'C':
+            nn = self.nMerge + 2
+            rv = self.kdtree.chainHOP_preconnect(
+                self.chainID, self.density, self.densest_in_chain,
+                self.is_inside, self.search_again,
+                self.peakthresh, self.saddlethresh, nn, self.nMerge,
+                chain_map)
+            self.search_again = rv.astype("bool")
+            yt_counters("preconnect kd tree search.")
+        elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
             fKD.dist = na.empty(self.nMerge+2, dtype='float64')
             fKD.tags = na.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
-        elif self.tree == 'C':
-            nn = self.nMerge + 2
-        yt_counters("preconnect kd tree search.")
-        for i in xrange(self.size):
-            # Don't consider this particle if it's not part of a chain.
-            if self.chainID[i] < 0: continue
-            chainID_i = self.chainID[i]
-            # If this particle is in the padding, don't make a connection.
-            if not self.is_inside[i]: continue
-            # Find this particle's chain max_dens.
-            part_max_dens = self.densest_in_chain[chainID_i]
-            # We're only connecting >= peakthresh chains now.
-            if part_max_dens < self.peakthresh: continue
-            # Loop over nMerge closest nearest neighbors.
-            if self.tree == 'F':
-                fKD.qv = fKD.pos[:, i]
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
-            elif self.tree == 'C':
-                qv = self.pos[i, :]
-                NNtags = self.kdtree.query(qv, nn)[1]
-            same_count = 0
-            for j in xrange(int(self.nMerge+1)):
-                thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
-                thisNN_chainID = self.chainID[thisNN]
-                # If our neighbor is in the same chain, move on.
-                # Move on if these chains are already connected:
-                if chainID_i == thisNN_chainID or \
-                        thisNN_chainID in chain_map[chainID_i]:
-                    same_count += 1
-                    continue
-                # Everything immediately below is for
-                # neighboring particles with a chainID. 
-                if thisNN_chainID >= 0:
-                    # Find thisNN's chain's max_dens.
-                    thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
-                    # We're only linking peakthresh chains
-                    if thisNN_max_dens < self.peakthresh: continue
-                    # Calculate the two groups boundary density.
-                    boundary_density = (self.density[thisNN] + self.density[i]) / 2.
-                    # Don't connect if the boundary is too low.
-                    if boundary_density < self.saddlethresh: continue
-                    # Mark these chains as related.
-                    chain_map[thisNN_chainID].add(chainID_i)
-                    chain_map[chainID_i].add(thisNN_chainID)
-            if same_count == self.nMerge + 1:
-                # All our neighbors are in the same chain already, so 
-                # we don't need to search again.
-                self.search_again[i] = False
-        try:
-            del NNtags
-        except UnboundLocalError:
-            pass
+            for i in xrange(self.size):
+                # Don't consider this particle if it's not part of a chain.
+                if self.chainID[i] < 0: continue
+                chainID_i = self.chainID[i]
+                # If this particle is in the padding, don't make a connection.
+                if not self.is_inside[i]: continue
+                # Find this particle's chain max_dens.
+                part_max_dens = self.densest_in_chain[chainID_i]
+                # We're only connecting >= peakthresh chains now.
+                if part_max_dens < self.peakthresh: continue
+                # Loop over nMerge closest nearest neighbors.
+                if self.tree == 'F':
+                    fKD.qv = fKD.pos[:, i]
+                    find_nn_nearest_neighbors()
+                    NNtags = fKD.tags[:] - 1
+                elif self.tree == 'C':
+                    qv = self.pos[i, :]
+                    NNtags = self.kdtree.query(qv, nn)[1]
+                same_count = 0
+                for j in xrange(int(self.nMerge+1)):
+                    thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
+                    thisNN_chainID = self.chainID[thisNN]
+                    # If our neighbor is in the same chain, move on.
+                    # Move on if these chains are already connected:
+                    if chainID_i == thisNN_chainID or \
+                            thisNN_chainID in chain_map[chainID_i]:
+                        same_count += 1
+                        continue
+                    # Everything immediately below is for
+                    # neighboring particles with a chainID. 
+                    if thisNN_chainID >= 0:
+                        # Find thisNN's chain's max_dens.
+                        thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
+                        # We're only linking peakthresh chains
+                        if thisNN_max_dens < self.peakthresh: continue
+                        # Calculate the two groups boundary density.
+                        boundary_density = (self.density[thisNN] + self.density[i]) / 2.
+                        # Don't connect if the boundary is too low.
+                        if boundary_density < self.saddlethresh: continue
+                        # Mark these chains as related.
+                        chain_map[thisNN_chainID].add(chainID_i)
+                        chain_map[chainID_i].add(thisNN_chainID)
+                if same_count == self.nMerge + 1:
+                    # All our neighbors are in the same chain already, so 
+                    # we don't need to search again.
+                    self.search_again[i] = False
+            try:
+                del NNtags
+            except UnboundLocalError:
+                pass
         yt_counters("preconnect kd tree search.")
         # Recursively jump links until we get to a chain whose densest
         # link is to itself. At that point we've found the densest chain
@@ -680,7 +687,7 @@
         yt_counters("preconnect pregrouping.")
         final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
-        for i in xrange(max(self.chainID)+1):
+        for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
             densest_link = self._recurse_preconnected_links(chain_map, j)
             final_chain_map[j] = densest_link
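
Two things change in this file: the C kd-tree's leafsize goes from 32 to 64, and the per-particle Python loop is kept only for the Fortran tree, with the C tree now handled by a single chainHOP_preconnect call. For orientation, the query pattern the Python loop uses maps onto SciPy's cKDTree like this (a sketch with made-up positions, not yt's forked tree):

    import numpy as np
    from scipy.spatial import cKDTree

    pos = np.random.random((1000, 3))         # particle positions in the unit box
    kdtree = cKDTree(pos, leafsize=64)        # larger leaves: fewer nodes, cheaper build
    nn = 6                                    # i.e. nMerge + 2, as in the loop above
    dist, NNtags = kdtree.query(pos[0], k=nn) # the query point itself lands at NNtags[0]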


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = star_mass
-        self.star_creation_time = star_creation_time
+        self.star_mass = na.array(star_mass)
+        self.star_creation_time = na.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of informations.
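
Coercing star_mass and star_creation_time through na.array lets callers pass plain Python lists; without it, the elementwise arithmetic later in the class fails. A two-line illustration:

    import numpy as np
    star_mass = [1.0e5, 2.5e5]        # a plain list, as a caller might pass
    print(np.array(star_mass) * 2.0)  # -> [200000. 500000.]; a bare list * 2.0 raises TypeError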


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -290,7 +290,7 @@
 
 def generate_levels_octree(pf, fields):
     fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(fields)
+    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
     o_length = na.sum(levels_finest.values())
     r_length = na.sum(levels_all.values())
     output = na.zeros((r_length,len(fields)), dtype='float64')


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -2413,9 +2413,6 @@
         for field in fields_to_get:
             if self.field_data.has_key(field):
                 continue
-            if field not in self.hierarchy.field_list and not in_grids:
-                if self._generate_field(field):
-                    continue # True means we already assigned it
             # There are a lot of 'ands' here, but I think they are all
             # necessary.
             if force_particle_read == False and \
@@ -2426,6 +2423,10 @@
                 self.particles.get_data(field)
                 if field not in self.field_data:
                     if self._generate_field(field): continue
+                continue
+            if field not in self.hierarchy.field_list and not in_grids:
+                if self._generate_field(field):
+                    continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
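
The reordering matters for particle fields that are also derivable: the particle-IO path now runs before the generate-a-derived-field branch, so readable particle fields are no longer synthesized. A pseudocode summary of the new ordering, with is_particle_field and read_from_grids as hypothetical stand-ins rather than yt API:

    def get_field(container, field):
        # 1) fast particle-IO path first, with derived-field fallback
        if is_particle_field(container, field):
            container.particles.get_data(field)
            if field not in container.field_data:
                container._generate_field(field)
            return container[field]
        # 2) then derived fields absent from the on-disk field list
        if field not in container.hierarchy.field_list:
            if container._generate_field(field):
                return container[field]
        # 3) grid-by-grid read as the last resort
        return read_from_grids(container, field)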


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -367,7 +367,10 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by
+        try:
+            rf = self.pf.refine_by[child.Level-1]
+        except TypeError:
+            rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi / rf - gi)
         endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
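
The try/except handles frontends where refine_by is a per-level sequence (as in the Chombo changes later in this changeset) as well as the common scalar case. The same dispatch as a standalone sketch; refine_factor is a hypothetical helper:

    def refine_factor(refine_by, level):
        try:
            return refine_by[level - 1]  # per-level sequence, e.g. Chombo
        except TypeError:
            return refine_by             # plain integer: uniform refinement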




diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -29,6 +29,17 @@
 
 particle_handler_registry = defaultdict()
 
+def particle_converter(func):
+    from .data_containers import YTFieldData
+    def save_state(grid):
+        old_params = grid.field_parameters
+        old_keys = grid.field_data.keys()
+        tr = func(grid)
+        grid.field_parameters = old_params
+        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
+        return tr
+    return save_state
+
 class ParticleIOHandler(object):
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -82,6 +93,7 @@
                 func = f._convert_function
             else:
                 func = f.particle_convert
+            func = particle_converter(func)
             conv_factors.append(
               na.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
@@ -90,7 +102,7 @@
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
             conv_factors)
-        for [n, v] in zip(fields_to_read, rvs):
+        for [n, v] in zip(fields, rvs):
             self.source.field_data[n] = v
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):
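
particle_converter wraps each grid-level conversion function so that the field_parameters and field_data it touches are restored after it returns. A more general save/restore decorator in the same spirit (this rolls back all instance attributes, which is broader than what particle_converter actually does):

    def preserve_state(func):
        def wrapper(obj):
            saved = dict(obj.__dict__)      # snapshot the instance state
            try:
                return func(obj)
            finally:
                obj.__dict__.clear()        # roll back any mutations func made
                obj.__dict__.update(saved)
        return wrapper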


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -31,8 +31,7 @@
 from yt.funcs import *
 
 from yt.data_objects.data_containers import YTFieldData
-from yt.utilities.data_point_utilities import \
-    Bin1DProfile, Bin2DProfile, Bin3DProfile
+from yt.utilities.amr_utils import bin_profile1d, bin_profile2d, bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 
@@ -88,9 +87,11 @@
         self._ngrids = 0
         self.__data = {}         # final results will go here
         self.__weight_data = {}  # we need to track the weights as we go
+        self.__std_data = {}
         for field in fields:
             self.__data[field] = self._get_empty_field()
             self.__weight_data[field] = self._get_empty_field()
+            self.__std_data[field] = self._get_empty_field()
         self.__used = self._get_empty_field().astype('bool')
         #pbar = get_pbar('Binning grids', len(self._data_source._grids))
         for gi,grid in enumerate(self._get_grids(fields)):
@@ -103,10 +104,13 @@
                 continue
             for field in fields:
                 # We get back field values, weight values, used bins
-                f, w, u = self._bin_field(grid, field, weight, accumulation,
+                f, w, q, u = self._bin_field(grid, field, weight, accumulation,
                                           args=args, check_cut=True)
                 self.__data[field] += f        # running total
                 self.__weight_data[field] += w # running total
+                self.__std_data[field][u] += w[u] * (q[u]/w[u] + \
+                    (f[u]/w[u] -
+                     self.__data[field][u]/self.__weight_data[field][u])**2) # running total
                 self.__used = (self.__used | u)       # running 'or'
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
@@ -115,24 +119,41 @@
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
+                self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
+            self["%s_std" % field] = na.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
-        del self.__data, self.__weight_data, self.__used
+        del self.__data, self.__std_data, self.__weight_data, self.__used
 
     def _finalize_parallel(self):
+        my_mean = {}
+        my_weight = {}
+        for key in self.__data:
+            my_mean[key] = self._get_empty_field()
+            my_weight[key] = self._get_empty_field()
+        ub = na.where(self.__used)
+        for key in self.__data:
+            my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
+            my_weight[key][ub] = self.__weight_data[key][ub]
         for key in self.__data:
             self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
             self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        for key in self.__std_data:
+            self.__std_data[key][ub] = my_weight[key][ub] * (self.__std_data[key][ub] / my_weight[key][ub] + \
+                (my_mean[key][ub] - self.__data[key][ub]/self.__weight_data[key][ub])**2)
+            self.__std_data[key] = self.comm.mpi_allreduce(self.__std_data[key], op='sum')
         self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
-            f, w, u = self._bin_field(self._data_source, field, weight,
-                                      accumulation, self._args, check_cut = False)
+            f, w, q, u = self._bin_field(self._data_source, field, weight,
+                                         accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
+                q[u] = na.sqrt(q[u] / w[u])
             self[field] = f
+            self["%s_std" % field] = q
         self["UsedBins"] = u
 
     def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):
@@ -246,20 +267,24 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin1DProfile(bin_indices_x, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile1d(bin_indices_x, weight_data, source_data,
+                      weight_field, binned_field,
+                      m_field, q_field, used_field)
         # Fix for laziness, because at the *end* we will be
         # summing up all of the histograms and dividing by the
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
             binned_field = na.add.accumulate(binned_field)
-        return binned_field, weight_field, used_field.astype("bool")
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
@@ -415,6 +440,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -423,8 +450,8 @@
         weight_data = weight_data[mi]
         nx = bin_indices_x.size
         #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+                      weight_field, binned_field, m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -432,7 +459,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
                 binned_field = na.add.accumulate(binned_field, axis=1)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
@@ -667,6 +695,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -674,10 +704,9 @@
         bin_indices_z = args[3].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin3DProfile(
-            bin_indices_x, bin_indices_y, bin_indices_z,
-            weight_data, source_data,
-            weight_field, binned_field, used_field)
+        bin_profile3d(bin_indices_x, bin_indices_y, bin_indices_z,
+                      weight_data, source_data, weight_field, binned_field,
+                      m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -687,7 +716,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
                 binned_field = na.add.accumulate(binned_field, axis=2)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
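
The new m/q fields thread a running weighted variance through the binning: each chunk contributes its within-chunk sum of weighted squared deviations (q) plus a w*(chunk mean - global mean)**2 correction, which is the standard rule for merging weighted variances across partitions. A scalar sketch of that merge, under the assumption that q is sum(w * (x - chunk_mean)**2) and that every chunk carries nonzero weight:

    import numpy as np

    def combined_weighted_std(chunks):
        # chunks: iterable of (weight_sum, weighted_value_sum, q) per chunk
        W = sum(w for w, f, q in chunks)
        mean = sum(f for w, f, q in chunks) / W
        m2 = sum(q + w * (f / w - mean) ** 2 for w, f, q in chunks)
        return np.sqrt(m2 / W)  # matches the divide-and-sqrt taken above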


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -68,16 +68,32 @@
         self.Parent = []
         self.Children = []
         self.Level = level
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
         self.ActiveDimensions = stop - start + 1
 
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+        
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return na.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by[self.Level-1]).astype('int64').ravel()
+        return self.start_index
+
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
+            self.dds = self.Parent[0].dds / self.pf.refine_by[self.Level-1]
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
@@ -91,8 +107,8 @@
     grid = ChomboGrid
     
     def __init__(self,pf,data_style='chombo_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
-        self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
@@ -100,12 +116,11 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = [fn for fn in self._fhandle if fn != "Chombo_global"]
+        self._levels = self._fhandle.keys()[1:]
         AMRHierarchy.__init__(self,pf,data_style)
-
         self._fhandle.close()
 
     def _initialize_data_storage(self):
@@ -113,7 +128,7 @@
 
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+        self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -130,8 +145,8 @@
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
-        levels = [fn for fn in f if fn != "Chombo_global"]
-        self.grids = []
+        levels = f.keys()[1:]
+        grids = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
@@ -140,17 +155,18 @@
             for level_id, box in enumerate(boxes):
                 si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(self.grids),self,level=level_number,
+                pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
-                self.grids.append(pg)
-                self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*si.astype(self.float_type)
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
         self.grids = na.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
+#        self.grids = na.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -179,8 +195,8 @@
     
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
-        # hardcoded for now 
-        self.current_time = 0.0
+        fileh = h5py.File(filename,'r')
+        self.current_time = fileh.attrs['time']
         self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
@@ -230,15 +246,18 @@
         """
         if os.path.isfile('pluto.ini'):
             self._parse_pluto_file('pluto.ini')
-        elif os.path.isfile('orion2.ini'):
-            self._parse_pluto_file('orion2.ini')
         else:
+            if os.path.isfile('orion2.ini'): self._parse_pluto_file('orion2.ini')
             self.unique_identifier = \
-                                   int(os.stat(self.parameter_filename)[ST_CTIME])
-            self.domain_left_edge = na.array([0.,0.,0.])
+                int(os.stat(self.parameter_filename)[ST_CTIME])
+            self.domain_left_edge = self.__calc_left_edge()
             self.domain_right_edge = self.__calc_right_edge()
+            self.domain_dimensions = self.__calc_domain_dimensions()
             self.dimensionality = 3
-            self.refine_by = 2
+            self.refine_by = []
+            fileh = h5py.File(self.parameter_filename,'r')
+            for level in range(0,fileh.attrs['num_levels']):
+                self.refine_by.append(fileh['/level_'+str(level)].attrs['ref_ratio'])
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -268,36 +287,26 @@
                     else:
                         self.parameters[paramName] = t
 
-            # assumes 3D for now
-            elif param.startswith("X1-grid"):
-                t = vals.split()
-                low1 = float(t[1])
-                high1 = float(t[4])
-                N1 = int(t[2])
-            elif param.startswith("X2-grid"):
-                t = vals.split()
-                low2 = float(t[1])
-                high2 = float(t[4])
-                N2 = int(t[2])
-            elif param.startswith("X3-grid"):
-                t = vals.split()
-                low3 = float(t[1])
-                high3 = float(t[4])
-                N3 = int(t[2])
+    def __calc_left_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        fileh.close()
+        return LE
 
-        self.dimensionality = 3
-        self.domain_left_edge = na.array([low1,low2,low3])
-        self.domain_right_edge = na.array([high1,high2,high3])
-        self.domain_dimensions = na.array([N1,N2,N3])
-        self.refine_by = self.parameters["RefineBy"]
-            
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(fileh['/level_0'].attrs['prob_domain']))[3:] + 1)
+        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                   
+                  
+    def __calc_domain_dimensions(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return R_index - L_index
+ 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -309,7 +318,6 @@
             pass
         return False
 
-
     @parallel_root_only
     def print_key_parameters(self):
         for a in ["current_time", "domain_dimensions", "domain_left_edge",

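As a worked illustration of the index arithmetic in get_global_startindex() above, here is a short sketch with made-up numbers (the edges, cell width, and refinement ratio below are hypothetical, not taken from any real dataset):

    import numpy as np

    # A child grid whose parent starts at global index (4, 4, 4) on the
    # coarser level, with parent cell width 0.25 and refinement ratio 2.
    parent_start = np.array([4, 4, 4])
    parent_dds = np.array([0.25, 0.25, 0.25])
    ref_ratio = 2
    parent_left_edge = np.array([1.0, 1.0, 1.0])
    child_left_edge = np.array([1.5, 1.5, 1.5])

    # Offset of the child inside the parent, measured in parent cells.
    offset = np.rint((child_left_edge - parent_left_edge) / parent_dds)
    # Promote to the child's level by multiplying by the refinement ratio.
    start_index = ((parent_start + offset) * ref_ratio).astype('int64')
    print(start_index)  # [12 12 12]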

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -38,45 +38,64 @@
 add_chombo_field = KnownChomboFields.add_field
 
 ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_chombo_field = ChomboFieldInfo.add_field
+add_field = ChomboFieldInfo.add_field
 
-add_field = add_chombo_field
+add_chombo_field("density", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("density")],
+                 units=r"\rm{g}/\rm{cm}^3")
 
-add_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
+KnownChomboFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_chombo_field("X-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Momentum")],
+                 units=r"",display_name=r"M_x")
+KnownChomboFields["X-momentum"]._projected_units=r""
 
-add_field("X-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Momentum")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_chombo_field("Y-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Momentum")],
+                 units=r"",display_name=r"M_y")
+KnownChomboFields["Y-momentum"]._projected_units=r""
 
-add_field("Y-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Momentum")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_chombo_field("Z-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Z-Momentum")],
+                 units=r"",display_name=r"M_z")
+KnownChomboFields["Z-momentum"]._projected_units=r""
 
-add_field("Z-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Momentum")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_chombo_field("X-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Magnfield")],
+                 units=r"",display_name=r"B_x")
+KnownChomboFields["X-magnfield"]._projected_units=r""
 
-add_field("X-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Magnfield")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_chombo_field("Y-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Magnfield")],
+                 units=r"",display_name=r"B_y")
+KnownChomboFields["Y-magnfield"]._projected_units=r""
 
-add_field("Y-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Magnfield")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_chombo_field("Z-magnfield", function=NullFunc, take_log=False,
+                  validators = [ValidateDataField("Z-Magnfield")],
+                  units=r"",display_name=r"B_z")
+KnownChomboFields["Z-magnfield"]._projected_units=r""
 
-add_field("Z-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Magnfield")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["energy-density"]._projected_units =r""
+
+add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("radiation-energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["radiation-energy-density"]._projected_units =r""
+
+def _Density(field,data):
+    """A duplicate of the density field. This is needed because when you try 
+    to instantiate a PlotCollection without passing in a center, the code
+    will try to generate one for you using the "Density" field, which gives an error 
+    if it isn't defined.
+
+    """
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
 
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -36,7 +36,7 @@
 
     def _field_dict(self,fhandle):
         ncomp = int(fhandle['/'].attrs['num_components'])
-        temp =  fhandle['/'].attrs.listitems()[-ncomp:]
+        temp =  fhandle['/'].attrs.items()[-ncomp:]
         val, keys = zip(*temp)
         val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
@@ -45,7 +45,7 @@
         fhandle = h5py.File(grid.filename,'r')
         ncomp = int(fhandle['/'].attrs['num_components'])
 
-        fns = [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+        fns = [c[1] for c in fhandle['/'].attrs.items()[-ncomp-1:-1]]
         fhandle.close()
     
     def _read_data_set(self,grid,field):
@@ -64,7 +64,6 @@
 
         fhandle.close()
         return data.reshape(dims, order='F')
-                                          
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -317,16 +317,19 @@
             nxb = self._find_parameter("integer", "nxb", scalar = True)
             nyb = self._find_parameter("integer", "nyb", scalar = True)
             nzb = self._find_parameter("integer", "nzb", scalar = True)
-            dimensionality = self._find_parameter("integer", "dimensionality",
-                                    scalar = True)
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
-                              for ax in 'xyz']
+                              for ax in 'xyz'] # FLASH2 only!
+        try:
+            dimensionality = self._find_parameter("integer", "dimensionality",
+                                                  scalar = True)
+        except KeyError:
             dimensionality = 3
             if nzb == 1: dimensionality = 2
             if nyb == 1: dimensionality = 1
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
+
         nblockx = self._find_parameter("integer", "nblockx")
         nblocky = self._find_parameter("integer", "nblocky")
         nblockz = self._find_parameter("integer", "nblockz")
@@ -334,7 +337,10 @@
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 
-        self.parameters['Gamma'] = self._find_parameter("real", "gamma")
+        try:
+            self.parameters['Gamma'] = self._find_parameter("real", "gamma")
+        except KeyError:
+            pass
 
         if self._flash_version == 7:
             self.current_time = float(


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -131,11 +131,24 @@
 
     def _read_particles(self):
         """
-        reads in particles and assigns them to grids
+        Reads in particles and assigns them to grids. Searches for
+        Star particles and Sink particles, reading each particle file
+        that exists, and simply finds no particles if neither file is
+        present. To add a new Orion particle type, add its filename to
+        the list that the loop below iterates over.
 
         """
         self.grid_particle_count = na.zeros(len(self.grids))
-        fn = self.pf.fullplotdir + "/StarParticles"
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.fullplotdir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
         with open(fn, 'r') as f:
             lines = f.readlines()
             self.num_stars = int(lines[0].strip())
@@ -151,16 +164,17 @@
                     na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
                     ind = na.where(mask == 1)
-                selected_grids = self.grids[ind]
-                # in orion, particles always live on the finest level.
-                # so, we want to assign the particle to the finest of
-                # the grids we just found
-                if len(selected_grids) != 0:
-                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = na.where(self.grids == grid)[0][0]
-                    self.grid_particle_count[ind] += 1
-                    self.grids[ind].NumberOfParticles += 1
-
+                    selected_grids = self.grids[ind]
+                    # in orion, particles always live on the finest level.
+                    # so, we want to assign the particle to the finest of
+                    # the grids we just found
+                    if len(selected_grids) != 0:
+                        grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                        ind = na.where(self.grids == grid)[0][0]
+                        self.grid_particle_count[ind] += 1
+                        self.grids[ind].NumberOfParticles += 1
+        return True
+                
     def readGlobalHeader(self,filename,paranoid_read):
         """
         read the global header file for an Orion plotfile output.
@@ -488,7 +502,8 @@
         castro = any(("castro." in line for line in open(pfn)))
         nyx = any(("nyx." in line for line in open(pfn)))
         maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not castro) and (not maestro) and (not nyx)
+        really_orion = any(("geometry.prob_lo" in line for line in open(pfn)))
+        orion = (not castro) and (not maestro) and (not nyx) and really_orion
         return orion
         
     def _parse_parameter_file(self):

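The containment test and finest-grid selection above can be exercised on their own; the sketch below uses made-up grid edges and levels (none of these values come from a real plotfile):

    import numpy as np

    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[1.0, 1.0, 1.0], [0.75, 0.75, 0.75]])
    levels = np.array([0, 1])
    coord = np.array([0.6, 0.6, 0.6])

    # mask[g] stays 1 only if the particle lies inside grid g on every axis.
    mask = np.ones(len(grid_left_edge), dtype='int32')
    for i in range(3):
        np.choose(np.greater(grid_left_edge[:, i], coord[i]), (mask, 0), mask)
        np.choose(np.greater(grid_right_edge[:, i], coord[i]), (0, mask), mask)
    ind = np.where(mask == 1)[0]
    # Particles live on the finest level, so take the containing grid
    # with the highest level.
    finest = ind[np.argmax(levels[ind])]
    print(finest)  # -> 1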

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -44,26 +44,26 @@
 OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = OrionFieldInfo.add_field
 
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-OrionFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_orion_field("density", function=lambda a,b: None, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_field("eden", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("eden")],
-          units=r"\rm{erg}/\rm{cm}^3")
+add_orion_field("eden", function=lambda a,b: None, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
 
-add_field("xmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("xmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("xmom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_field("ymom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("ymom")],
-          units=r"\rm{gm}/\rm{cm^2\ s}")
+add_orion_field("ymom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
 
-add_field("zmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("zmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("zmom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
 translation_dict = {"x-velocity": "xvel",
                     "y-velocity": "yvel",
@@ -88,11 +88,11 @@
 
 def _xVelocity(field, data):
     """generate x-velocity from x-momentum and density
-
+    
     """
     return data["xmom"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _yVelocity(field,data):
     """generate y-velocity from y-momentum and density
@@ -102,16 +102,16 @@
     #    return data["xvel"]
     #except KeyError:
     return data["ymom"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _zVelocity(field,data):
     """generate z-velocity from z-momentum and density
-
+    
     """
     return data["zmom"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _ThermalEnergy(field, data):
     """generate thermal (gas energy). Dual Energy Formalism was
@@ -125,19 +125,19 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
+add_orion_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
     """M{(Gamma-1.0)*e, where e is thermal energy density
        NB: this will need to be modified for radiation
     """
     return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+add_orion_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field,data):
     return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+add_orion_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
 
 # particle fields
 
@@ -170,6 +170,6 @@
 
 for pf in _particle_field_list:
     pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
+    add_orion_field("particle_%s" % pf, function=pfunc,
+                    validators = [ValidateSpatial(0)],
+                    particle_type=True)




diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -93,6 +93,22 @@
 except ImportError:
     pass
 
+def __memory_fallback(pid):
+    """
+    Get process memory by shelling out to ps.
+    """
+    value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
+    if len(value) == 1: return float(value[0])
+    value.pop(0)
+    for line in value:
+        online = line.split()
+        if online[0] != str(pid): continue
+        try:
+            return float(online[2])
+        except ValueError:
+            return 0.0
+    return 0.0
+
 def get_memory_usage():
     """
     Returning resident size in megabytes
@@ -101,10 +117,10 @@
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs

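A quick way to see what the fallback above parses, assuming a POSIX ps is on the PATH: with an empty header (rss=), the command normally prints a single number, the resident set size in kilobytes, which get_memory_usage() then converts to megabytes.

    import os

    pid = os.getpid()
    out = os.popen('ps -o rss= -p %d' % pid).read().strip()
    # The common single-line case; __memory_fallback also handles
    # multi-line output from less cooperative ps implementations.
    rss_kb = float(out.split('\n')[0])
    print("resident size: %.1f MB" % (rss_kb / 1024))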

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -130,7 +130,6 @@
 
     def execute_one(self, code, hide):
         self.repl.executed_cell_texts.append(code)
-
         result = ProgrammaticREPL.execute(self.repl, code)
         if self.repl.debug:
             print "==================== Cell Execution ===================="
@@ -562,10 +561,12 @@
         _tfield_list = list(set(_tpf.h.field_list + _tpf.h.derived_field_list))
         _tfield_list.sort()
         _tcb = _tpw._get_cbar_image()
+        _ttrans = _tpw._field_transform[_tpw._current_field].name
         _twidget_data = {'fields': _tfield_list,
                          'initial_field': _tfield,
                          'title': "%%s Slice" %% (_tpf),
-                         'colorbar': _tcb}
+                         'colorbar': _tcb,
+                         'initial_transform' : _ttrans}
         """ % dict(pfname = pfname,
                    center_string = center_string,
                    axis = inv_axis_names[axis],


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/gui/reason/html/js/functions.js
--- a/yt/gui/reason/html/js/functions.js
+++ b/yt/gui/reason/html/js/functions.js
@@ -65,7 +65,6 @@
                 repl_input.get("input_line").setValue("");
             }
             if (OutputContainer.items.length > 1) {
-                examine = cell;
                 OutputContainer.body.dom.scrollTop = 
                 OutputContainer.body.dom.scrollHeight -
                 cell.body.dom.scrollHeight - 20;
@@ -142,7 +141,6 @@
             iconCls: 'pf_icon'}));
         this_pf = treePanel.root.lastChild
         Ext.each(pf.objects, function(obj, obj_index) {
-            examine = this_pf;
             this_pf.appendChild(new Ext.tree.TreeNode(
                 {text: obj.name,
                  leaf: true,


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -43,7 +43,78 @@
     }
 
     this.widget_keys = new Ext.KeyMap(document, [
-        {key: 'z', fn: function(){control_panel.get("zoom10x").handler();}}
+        {key: 'z',
+         shift: false,
+         fn: function(){
+               control_panel.get("zoom2x").handler();
+            }
+        },
+        {key: 'Z',
+         shift: true,
+         fn: function(){
+               control_panel.get("zoom10x").handler();
+            }
+        },
+        {key: 'x',
+         shift: false,
+         fn: function(){
+               control_panel.get("zoomout2x").handler();
+            }
+        },
+        {key: 'X',
+         shift: true,
+         fn: function(){
+               control_panel.get("zoomout10x").handler();
+            }
+        },
+        {key: 'k',
+         shift: false,
+         fn: function(){
+               control_panel.get("singleuparrow").handler();
+            }
+        },
+        {key: 'j',
+         shift: false,
+         fn: function(){
+               control_panel.get("singledownarrow").handler();
+            }
+        },
+        {key: 'h',
+         shift: false,
+         fn: function(){
+               control_panel.get("singleleftarrow").handler();
+            }
+        },
+        {key: 'l',
+         shift: false,
+         fn: function(){
+               control_panel.get("singlerightarrow").handler();
+            }
+        },
+        {key: 'K',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubleuparrow").handler();
+            }
+        },
+        {key: 'J',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubledownarrow").handler();
+            }
+        },
+        {key: 'H',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubleleftarrow").handler();
+            }
+        },
+        {key: 'L',
+         shift: true,
+         fn: function(){
+               control_panel.get("doublerightarrow").handler();
+            }
+        },
     ]);
     var widget_keys = this.widget_keys;
     widget_keys.disable();
@@ -74,11 +145,13 @@
                         id: "img_" + this.id,
                         width: 400,
                         height: 400,
+                        draggable: false,
                     },
                     x: 100,
                     y: 10,
                     width: 400,
                     height: 400,
+                    draggable: false,
                     listeners: {
                         afterrender: function(c){
                             c.el.on('click', function(e){
@@ -92,6 +165,25 @@
                                 yt_rpc.ExtDirectREPL.execute(
                                 {code:cc, hide:true}, cell_finished); 
                             });
+                            c.el.on('mousedown', function(e){
+                                c.drag_start = true;
+                                c.drag_start_pos = e.getXY();
+                            });
+                            c.el.on('mouseup', function(e){
+                                c.drag_start = false;
+                                drag_stop = e.getXY();
+                                delta_x = drag_stop[0] - c.drag_start_pos[0];
+                                delta_y = drag_stop[1] - c.drag_start_pos[1];
+                                if (((delta_x < -10) || (delta_x > 10)) ||
+                                    ((delta_y < -10) || (delta_y > 10))) {
+                                    rel_x = -delta_x / 400;
+                                    rel_y = -delta_y / 400;
+                                    cc = python_varname + '.pan_rel((' + 
+                                        rel_x + ',' + rel_y + '))';
+                                    yt_rpc.ExtDirectREPL.execute(
+                                    {code:cc, hide:true}, cell_finished); 
+                                }
+                            });
                         }
                     }
                 }, {
@@ -159,6 +251,7 @@
                 /* the single buttons for 10% pan*/
                     xtype:'button',
                     iconCls: 'singleuparrow',
+                    id: 'singleuparrow',
                     //text: 'North',
                     x: 40,
                     y: 10,
@@ -170,6 +263,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'singlerightarrow',
+                    id: 'singlerightarrow',
                     //text:'East',
                     x : 60,
                     y : 30,
@@ -182,6 +276,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'singledownarrow',
+                    id: 'singledownarrow',
                     //text: 'South',
                     x: 40,
                     y: 50,
@@ -194,6 +289,7 @@
                 }, {
                     xtype: 'button',
                     iconCls: 'singleleftarrow',
+                    id: 'singleleftarrow',
                     //text: 'West',
                     x: 20,
                     y: 30,
@@ -208,6 +304,7 @@
                 {
                     xtype:'button',
                     iconCls: 'doubleuparrow',
+                    id:'doubleuparrow',
                     //text: 'North',
                     x: 40,
                     y: 80,
@@ -219,6 +316,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'doublerightarrow',
+                    id:'doublerightarrow',
                     //text:'East',
                     x : 60,
                     y : 100,
@@ -232,6 +330,7 @@
                     xtype:'button',
                     iconCls: 'doubledownarrow',
                     //text: 'South',
+                    id: 'doubledownarrow',
                     x: 40,
                     y: 120,
                     handler: function(b,e) {
@@ -243,6 +342,7 @@
                 }, {
                     xtype: 'button',
                     iconCls: 'doubleleftarrow',
+                    id: 'doubleleftarrow',
                     //text: 'West',
                     x: 20,
                     y: 100,
@@ -270,6 +370,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom In 2x',
+                    id: "zoom2x",
                     x: 10,
                     y: 185,
                     width: 80,
@@ -282,6 +383,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom Out 2x',
+                    id:'zoomout2x',
                     x: 10,
                     y: 210,
                     width: 80,
@@ -294,6 +396,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom Out 10x',
+                    id:'zoomout10x',
                     x: 10,
                     y: 235,
                     width: 80,
@@ -365,11 +468,168 @@
                           html: 'Welcome to the Plot Window.',
                           height: 200,
                         }, {
+                          xtype: 'tabpanel',
+                          id: 'editor_panel',
+                          flex: 1,
+                          activeTab: 0,
+                          items: [
+                        {
                           xtype: 'panel',
                           title: 'Plot Editor',
                           id: 'plot_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
                           flex: 1,
-                        }]
+                          items : [
+                             {
+                               x: 10,
+                               y: 20,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Display',
+                             },
+                             {
+                               x: 80,
+                               y: 20,
+                               width : 80,
+                               xtype: 'combo',
+                               editable: false,
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               store: ['log10', 'linear'],
+                               value: widget_data['initial_transform'],
+                               listeners: {select: function(combo, record, index){ 
+                                   var newValue = '"' + record.data['field1'] + '"';
+                                   yt_rpc.ExtDirectREPL.execute(
+                                       {code:python_varname + '.set_transform('
+                                         + python_varname + '._current_field, '
+                                         + newValue + ')', hide:false},
+                                         cell_finished);
+                               }}
+                             },
+                             {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Colormap',
+                             },
+                             {
+                               x: 80,
+                               y: 60,
+                               width : 140,
+                               xtype: 'combo',
+                               editable: false,
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               store: ['algae', 'RdBu', 'gist_stern',  
+                                       'hot', 'jet', 'kamae', 
+                                        'B-W LINEAR', 'BLUE',
+                                        'GRN-RED-BLU-WHT', 'RED TEMPERATURE',
+                                        'BLUE', 'STD GAMMA-II', 'PRISM',
+                                        'RED-PURPLE', 'GREEN', 'GRN',
+                                        'GREEN-PINK', 'BLUE-RED', '16 LEVEL',
+                                        'RAINBOW', 'STEPS', 'STERN SPECIAL',
+                                        'Haze', 'Blue - Pastel - Red',
+                                        'Pastels', 'Hue Sat Lightness 1',
+                                        'Hue Sat Lightness 2', 'Hue Sat Value 1',
+                                        'Hue Sat Value 2', 'Purple-Red + Stripes',
+                                        'Beach', 'Mac Style', 'Eos A', 'Eos B',
+                                        'Hardcandy', 'Nature', 'Ocean', 'Peppermint',
+                                        'Plasma', 'Blue-Red', 'Rainbow', 'Blue Waves',
+                                        'Volcano', 'Waves', 'Rainbow18',
+                                        'Rainbow + white', 'Rainbow + black'],
+                               value: 'algae',
+                               listeners: {select: function(combo, record, index){ 
+                                   var newValue = '"' + record.data['field1'] + '"';
+                                   yt_rpc.ExtDirectREPL.execute(
+                                       {code:python_varname + '.set_cmap('
+                                         + python_varname + '._current_field, '
+                                         + newValue + ')', hide:false},
+                                         cell_finished);
+                               }}
+                             }
+                          ]
+                        }, {
+                          xtype: 'panel',
+                          title: 'Contours',
+                          id: 'contour_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
+                          flex: 1,
+                          items : [
+                             {
+                               x: 10,
+                               y: 20,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Field',
+                             },
+                             {
+                               x: 80,
+                               y: 20,
+                               width : 160,
+                               xtype: 'combo',
+                               editable: false,
+                               id: 'field',
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               value:widget_data['initial_field'],
+                               store: widget_data['fields'],
+                             }, {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Levels',
+                             }, {
+                               x: 80,
+                               y: 60,
+                               width : 160,
+                               xtype: 'slider',
+                               id: 'ncont',
+                               minValue: 0,
+                               maxValue: 10,
+                               value: 5,
+                               increment: 1,
+                               plugins: new Ext.slider.Tip(),
+                             }, {
+                               x: 10,
+                               y: 100,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Logspaced',
+                             }, {
+                               x: 80,
+                               y: 100,
+                               width : 160,
+                               xtype: 'checkbox',
+                               id: 'logit',
+                               checked: true,
+                             }, {
+                               x: 10,
+                               y: 180,
+                               width: 80,
+                               xtype: 'button',
+                               text: 'Apply',
+                               handler: function(b, e) {
+                                  field = contour_window.get('field').getValue();
+                                  ncont = contour_window.get('ncont').getValue();
+                                  logit = contour_window.get('logit').getValue();
+                                  if (logit == false) logit = 'False';
+                                  else if (logit == true) logit = 'True';
+                                  yt_rpc.ExtDirectREPL.execute(
+                                      {code:python_varname
+                                       + '.set_contour_info("' + field + '", '
+                                       + ncont + ', ' + logit + ')',
+                                        hide:false},
+                                      cell_finished);
+                               }
+                             }
+                          ]
+                        }
+                        ] } /* tabpanel items and entry */
+                        ]
                 }
             ]
         }
@@ -384,8 +644,12 @@
     this.image_panel = this.panel.get("image_panel_"+python_varname);
     this.ticks = this.panel.get("ticks_"+python_varname);
     var ticks = this.ticks;
+    var colorbar = this.panel.get("colorbar_"+python_varname);
     this.metadata_panel = this.panel.get("rhs_panel_" + python_varname).get("metadata_" + python_varname);
     this.zoom_scroll = this.panel.get("slider_" + python_varname);
+    var contour_window = this.panel.get("rhs_panel_" + python_varname);
+    contour_window = contour_window.get("editor_panel");
+    contour_window = contour_window.get("contour_edit");
     var image_dom = this.image_panel.el.dom;
     var control_panel = this.panel;
     var metadata_string;
@@ -393,12 +657,10 @@
     this.accept_results = function(payload) {
         this.image_panel.el.dom.src = "data:image/png;base64," + payload['image_data'];
         this.zoom_scroll.setValue(0, payload['zoom'], true);
-        examine = this.metadata_panel;
         this.metadata_panel.update(payload['metadata_string']);
         metadata_string = payload['metadata_string'];
         ticks.removeAll();
         Ext.each(payload['ticks'], function(tick, index) {
-            console.log(tick);
             ticks.add({xtype:'panel',
                        width: 10, height:1,
                        style: 'background-color: #000000;',
@@ -411,9 +673,11 @@
                               'font-size: 12px;',
                        html: '' + tick[2] + '',
                        x:12, y: 4 + tick[0]});
-            examine = tick;
         });
-        examine = payload['ticks'];
+        if (payload['colorbar_image'] != null) {
+            colorbar.el.dom.src = "data:image/png;base64," +
+                payload['colorbar_image'];
+        }
         ticks.doLayout();
     }
 


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -116,7 +116,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
-    apply_colormap, scale_image, write_projection
+    apply_colormap, scale_image, write_projection, write_fits
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -130,10 +130,10 @@
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
     cdef int cp
+    cdef int s = 0
     for i_off in range(i_f):
         i = i_off + i_i
         cx = (leftedges[0] + i*dx)
-        if i_f > 2: print k, cz
         for j_off in range(j_f):
             j = j_off + j_i
             cy = (leftedges[1] + j*dx)
@@ -160,9 +160,8 @@
                     genealogy[cp, 0] = curpos[level+1] 
                     # set next parent id to id of current cell
                     genealogy[curpos[level+1]:curpos[level+1]+8, 1] = cp
-                    s = RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
+                    RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
                                               curpos, ci, output, genealogy,
                                               corners, grids)
                 curpos[level] += 1
-    return s
-
+    return


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -34,6 +34,87 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=1] wresult,
+                  np.ndarray[np.float64_t, ndim=1] bresult,
+                  np.ndarray[np.float64_t, ndim=1] mresult,
+                  np.ndarray[np.float64_t, ndim=1] qresult,
+                  np.ndarray[np.float64_t, ndim=1] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bin = bins_x[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bin] += (wresult[bin] * wval * (bval - mresult[bin])**2) / \
+            (wresult[bin] + wval)
+        wresult[bin] += wval
+        bresult[bin] += wval*bval
+        mresult[bin] += wval * (bval - mresult[bin]) / wresult[bin]
+        used[bin] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def bin_profile2d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=2] wresult,
+                  np.ndarray[np.float64_t, ndim=2] bresult,
+                  np.ndarray[np.float64_t, ndim=2] mresult,
+                  np.ndarray[np.float64_t, ndim=2] qresult,
+                  np.ndarray[np.float64_t, ndim=2] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj] += (wresult[bini, binj] * wval * (bval - mresult[bini, binj])**2) / \
+            (wresult[bini, binj] + wval)
+        wresult[bini, binj] += wval
+        bresult[bini, binj] += wval*bval
+        mresult[bini, binj] += wval * (bval - mresult[bini, binj]) / wresult[bini, binj]
+        used[bini, binj] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def bin_profile3d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.int64_t, ndim=1] bins_z,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=3] wresult,
+                  np.ndarray[np.float64_t, ndim=3] bresult,
+                  np.ndarray[np.float64_t, ndim=3] mresult,
+                  np.ndarray[np.float64_t, ndim=3] qresult,
+                  np.ndarray[np.float64_t, ndim=3] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bink = bins_z[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj, bink] += (wresult[bini, binj, bink] * wval * (bval - mresult[bini, binj, bink])**2) / \
+            (wresult[bini, binj, bink] + wval)
+        wresult[bini, binj, bink] += wval
+        bresult[bini, binj, bink] += wval*bval
+        mresult[bini, binj, bink] += wval * (bval - mresult[bini, binj, bink]) / wresult[bini, binj, bink]
+        used[bini, binj, bink] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_color_bounds(np.ndarray[np.float64_t, ndim=1] px,
                      np.ndarray[np.float64_t, ndim=1] py,
                      np.ndarray[np.float64_t, ndim=1] pdx,
@@ -228,7 +309,7 @@
         if n_unique > my_max:
             best_dim = dim
             my_max = n_unique
-            my_split = (n_unique-1)/2
+            my_split = (n_unique)/2
     # I recognize how lame this is.
     cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
     for i in range(my_max):

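All three bin_profile routines above apply the same update rule: a weighted incremental mean (mresult) plus a running sum of weighted squared deviations (qresult), essentially West's 1979 online variance algorithm. Here is a pure-Python sketch of the one-dimensional case, for reference only (the names mirror the Cython arguments; the shipped code is the Cython above):

    def bin_profile1d_sketch(bins_x, wsource, bsource,
                             wresult, bresult, mresult, qresult, used):
        for n in range(bins_x.shape[0]):
            b = bins_x[n]
            wval = wsource[n]
            bval = bsource[n]
            # Update the squared-deviation accumulator using the *old*
            # weight total and mean; the term is zero for an empty bin.
            qresult[b] += (wresult[b] * wval * (bval - mresult[b])**2) \
                / (wresult[b] + wval)
            wresult[b] += wval
            bresult[b] += wval * bval
            # Update the weighted mean against the *new* weight total.
            mresult[b] += wval * (bval - mresult[b]) / wresult[b]
            used[b] = 1

With this rule, the weighted variance of any used bin is qresult / wresult, so a profile's standard deviation can be recovered as sqrt(qresult / wresult).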

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -47,10 +47,11 @@
 def _add_arg(sc, arg):
     if isinstance(arg, types.StringTypes):
         arg = _common_options[arg].copy()
+    argc = dict(arg.items())
     argnames = []
-    if "short" in arg: argnames.append(arg.pop('short'))
-    if "long" in arg: argnames.append(arg.pop('long'))
-    sc.add_argument(*argnames, **arg)
+    if "short" in argc: argnames.append(argc.pop('short'))
+    if "long" in argc: argnames.append(argc.pop('long'))
+    sc.add_argument(*argnames, **argc)
 
 class YTCommand(object):
     args = ()
@@ -63,12 +64,14 @@
         def __init__(cls, name, b, d):
             type.__init__(cls, name, b, d)
             if cls.name is not None:
-                sc = subparsers.add_parser(cls.name,
-                    description = cls.description,
-                    help = cls.description)
-                sc.set_defaults(func=cls.run)
-                for arg in cls.args:
-                    _add_arg(sc, arg)
+                names = ensure_list(cls.name)
+                for name in names:
+                    sc = subparsers.add_parser(name,
+                        description = cls.description,
+                        help = cls.description)
+                    sc.set_defaults(func=cls.run)
+                    for arg in cls.args:
+                        _add_arg(sc, arg)
 
     @classmethod
     def run(cls, args):
@@ -1140,76 +1143,6 @@
         if not os.path.isdir(args.output): os.makedirs(args.output)
         pc.save(os.path.join(args.output,"%s" % (pf)))
 
-class YTReasonCmd(YTCommand):
-    name = "reason"
-    args = (
-            dict(short="-o", long="--open-browser", action="store_true",
-                 default = False, dest='open_browser',
-                 help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
-                 default = 0, dest='port',
-                 help="Port to listen on"),
-            dict(short="-f", long="--find", action="store_true",
-                 default = False, dest="find",
-                 help="At startup, find all *.hierarchy files in the CWD"),
-            dict(short="-d", long="--debug", action="store_true",
-                 default = False, dest="debug",
-                 help="Add a debugging mode for cell execution")
-            )
-    description = \
-        """
-        Run the Web GUI Reason
-        """
-
-    def __call__(self, args):
-        # We have to do a couple things.
-        # First, we check that YT_DEST is set.
-        if "YT_DEST" not in os.environ:
-            print
-            print "*** You must set the environment variable YT_DEST ***"
-            print "*** to point to the installation location!        ***"
-            print
-            sys.exit(1)
-        if args.port == 0:
-            # This means, choose one at random.  We do this by binding to a
-            # socket and allowing the OS to choose the port for that socket.
-            import socket
-            sock = socket.socket()
-            sock.bind(('', 0))
-            args.port = sock.getsockname()[-1]
-            del sock
-        elif args.port == '-1':
-            port = raw_input("Desired yt port? ")
-            try:
-                args.port = int(port)
-            except ValueError:
-                print "Please try a number next time."
-                return 1
-        base_extjs_path = os.path.join(os.environ["YT_DEST"], "src")
-        if not os.path.isfile(os.path.join(base_extjs_path, "ext-resources", "ext-all.js")):
-            print
-            print "*** You are missing the ExtJS support files. You  ***"
-            print "*** You can get these by either rerunning the     ***"
-            print "*** install script installing, or downloading     ***"
-            print "*** them manually.                                ***"
-            print
-            sys.exit(1)
-        from yt.config import ytcfg;ytcfg["yt","__withinreason"]="True"
-        import yt.utilities.bottle as bottle
-        from yt.gui.reason.extdirect_repl import ExtDirectREPL
-        from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
-        hr = ExtDirectREPL(base_extjs_path)
-        hr.debug = PayloadHandler.debug = args.debug
-        if args.find:
-            # We just have to find them and store references to them.
-            command_line = ["pfs = []"]
-            for fn in sorted(glob.glob("*/*.hierarchy")):
-                command_line.append("pfs.append(load('%s'))" % fn[:-10])
-            hr.execute("\n".join(command_line))
-        bottle.debug()
-        uuid_serve_functions(open_browser=args.open_browser,
-                    port=int(args.port), repl=hr)
-
 class YTRenderCmd(YTCommand):
         
     args = ("width", "unit", "center","enhance",'outputfn',
@@ -1307,13 +1240,18 @@
         uncaught exceptions.
 
         """
+    args = (
+            dict(short="-t", long="--task", action="store",
+                 default = 0, dest='task',
+                 help="Open a web browser."),
+           )
 
     def __call__(self, args):
         import rpdb
-        rpdb.run_rpdb(int(task))
+        rpdb.run_rpdb(int(args.task))
 
-class YTServeCmd(YTCommand):
-    name = "serve"
+class YTGUICmd(YTCommand):
+    name = ["serve", "reason"]
     args = (
             dict(short="-o", long="--open-browser", action="store_true",
                  default = False, dest='open_browser',

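The name = ["serve", "reason"] change works by registering the same command under several subparser names; below is a standalone argparse sketch of the pattern (the command names and option here are only illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog="yt")
    subparsers = parser.add_subparsers()

    def run(args):
        print("starting the GUI on port %s" % args.port)

    # Register one command under several aliases, as the metaclass above does.
    for name in ["serve", "reason"]:
        sc = subparsers.add_parser(name, help="Run the Web GUI Reason")
        sc.add_argument("-p", "--port", action="store", default=0, dest="port")
        sc.set_defaults(func=run)

    args = parser.parse_args(["reason", "-p", "8080"])
    args.func(args)  # prints: starting the GUI on port 8080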

diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -273,359 +273,6 @@
 
 }
 
-static PyObject *_profile1DError;
-
-static PyObject *Py_Bin1DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOO",
-                &obins_x, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile1DError,
-                "Bin1DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR1(wresult, i) += wval;
-      *(npy_float64*)PyArray_GETPTR1(bresult, i) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR1(used, i) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile2DError;
-
-static PyObject *Py_Bin2DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *obins_y, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = bins_y = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOO",
-                &obins_x, &obins_y, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile2DError,
-                "Bin2DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR2(wresult, i, j) += wval;
-      *(npy_float64*)PyArray_GETPTR2(bresult, i, j) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR2(used, i, j) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile3DError;
-
-static PyObject *Py_Bin3DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j, k;
-    PyObject *obins_x, *obins_y, *obins_z, *owsource, *obsource, *owresult,
-             *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *bins_z, *wsource, *bsource, *wresult,
-                  *bresult, *used;
-    bins_x = bins_y = bins_z = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOOO",
-                &obins_x, &obins_y, &obins_z, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile3DError,
-                "Bin3DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bins_z = (PyArrayObject *) PyArray_FromAny(obins_z,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_z==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_z))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_z, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      k = *(npy_int64*)PyArray_GETPTR1(bins_z, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR3(wresult, i, j, k) += wval;
-      *(npy_float64*)PyArray_GETPTR3(bresult, i, j, k) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR3(used, i, j, k) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(bins_z); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(bins_z); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
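The three removed binning kernels share one pattern: scatter weighted sums
into result arrays at precomputed bin indices, marking touched bins in
"used".  A rough NumPy restatement of the 2-D case (an illustrative sketch,
not the replacement yt actually uses):

    import numpy as np

    def bin2d_profile(bins_x, bins_y, wsource, bsource, shape):
        # bins_x and bins_y hold one integer bin index per data point.
        wresult = np.zeros(shape)
        bresult = np.zeros(shape)
        used = np.zeros(shape)
        np.add.at(wresult, (bins_x, bins_y), wsource)            # sum of weights
        np.add.at(bresult, (bins_x, bins_y), wsource * bsource)  # weighted sums
        used[bins_x, bins_y] = 1.0
        return wresult, bresult, used

np.add.at (available in modern NumPy) accumulates correctly at repeated
indices, which a fancy-indexed += would not.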
 static PyObject *_dataCubeError;
 
 static PyObject *DataCubeGeneric(PyObject *obj, PyObject *args,
@@ -1430,7 +1077,7 @@
                     0, NULL);
     if(xi==NULL) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for xi.");
+             "FindContours: One dimension required for xi.");
     goto _fail;
     }
     
@@ -1439,7 +1086,7 @@
                     0, NULL);
     if((yi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(yi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for yi, same size as xi.");
+             "FindContours: One dimension required for yi, same size as xi.");
     goto _fail;
     }
     
@@ -1448,7 +1095,7 @@
                     0, NULL);
     if((zi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(zi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for zi, same size as xi.");
+             "FindContours: One dimension required for zi, same size as xi.");
     goto _fail;
     }
     
@@ -1789,9 +1436,6 @@
     {"Interpolate", Py_Interpolate, METH_VARARGS},
     {"DataCubeRefine", Py_DataCubeRefine, METH_VARARGS},
     {"DataCubeReplace", Py_DataCubeReplace, METH_VARARGS},
-    {"Bin1DProfile", Py_Bin1DProfile, METH_VARARGS},
-    {"Bin2DProfile", Py_Bin2DProfile, METH_VARARGS},
-    {"Bin3DProfile", Py_Bin3DProfile, METH_VARARGS},
     {"FindContours", Py_FindContours, METH_VARARGS},
     {"FindBindingEnergy", Py_FindBindingEnergy, METH_VARARGS},
     {"OutputFloatsToFile", Py_OutputFloatsToFile, METH_VARARGS},
@@ -1816,10 +1460,6 @@
     PyDict_SetItemString(d, "error", _interpolateError);
     _dataCubeError = PyErr_NewException("data_point_utilities.DataCubeError", NULL, NULL);
     PyDict_SetItemString(d, "error", _dataCubeError);
-    _profile2DError = PyErr_NewException("data_point_utilities.Profile2DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile2DError);
-    _profile3DError = PyErr_NewException("data_point_utilities.Profile3DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile3DError);
     _findContoursError = PyErr_NewException("data_point_utilities.FindContoursError", NULL, NULL);
     PyDict_SetItemString(d, "error", _findContoursError);
     _outputFloatsToFileError = PyErr_NewException("data_point_utilities.OutputFloatsToFileError", NULL, NULL);


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -48,3 +48,13 @@
 
     def __str__(self):
         return "The supplied axes are not orthogonal.  %s" % (self.axes)
+
+class YTNoDataInObjectError(YTException):
+    def __init__(self, obj):
+        self.obj_type = getattr(obj, "_type_name", "")
+
+    def __str__(self):
+        s = "The object requested has no data included in it."
+        if self.obj_type == "slice":
+            s += "  It may lie on a grid face.  Try offsetting slightly."
+        return s
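A short usage sketch for the new exception; the routine being called is
hypothetical:

    from yt.utilities.exceptions import YTNoDataInObjectError

    try:
        process_slice(sl)  # hypothetical routine acting on a slice object
    except YTNoDataInObjectError, e:
        print str(e)  # suggests offsetting the slice off the grid face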


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -154,6 +154,9 @@
     @wraps(func)
     def single_proc_results(self, *args, **kwargs):
         retval = None
+        if hasattr(self, "dont_wrap"):
+            if func.func_name in self.dont_wrap:
+                return func(self, *args, **kwargs)
         if self._processing or not self._distributed:
             return func(self, *args, **kwargs)
         comm = _get_comm((self,))
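The new check lets a class exempt particular methods from the
single-processor wrapping by listing their names in a "dont_wrap"
attribute.  A self-contained sketch of the pattern (the broadcast logic
the real wrapper performs is elided):

    from functools import wraps

    def single_proc(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if func.__name__ in getattr(self, "dont_wrap", ()):
                return func(self, *args, **kwargs)  # opted out: run as-is
            return func(self, *args, **kwargs)      # (broadcast step elided)
        return wrapper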
@@ -349,7 +352,7 @@
         else:
             yield obj
     if parallel_capable:
-        communication_system.communicators.pop()
+        communication_system.pop()
     if storage is not None:
         # Now we have to broadcast it
         new_storage = my_communicator.par_combine_object(


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/utilities/spatial/ckdtree.pyx
--- a/yt/utilities/spatial/ckdtree.pyx
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -2,23 +2,26 @@
 # Released under the scipy license
 import numpy as np
 cimport numpy as np
-cimport stdlib
+cimport libc.stdlib as stdlib
 cimport cython
 
 import kdtree
 
-cdef double infinity = np.inf
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+cdef np.float64_t infinity = np.inf
 
 __all__ = ['cKDTree']
 
-
 # priority queue
 cdef union heapcontents:
     int intdata
     char* ptrdata
 
 cdef struct heapitem:
-    double priority
+    np.float64_t priority
     heapcontents contents
 
 cdef struct heap:
@@ -97,23 +100,23 @@
 
 
 # utility functions
-cdef inline double dmax(double x, double y):
+cdef inline np.float64_t dmax(np.float64_t x, np.float64_t y):
     if x>y:
         return x
     else:
         return y
-cdef inline double dabs(double x):
+cdef inline np.float64_t dabs(np.float64_t x):
     if x>0:
         return x
     else:
         return -x
-cdef inline double dmin(double x, double y):
+cdef inline np.float64_t dmin(np.float64_t x, np.float64_t y):
     if x<y:
         return x
     else:
         return y
-cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound,
-    double*period):
+cdef inline np.float64_t _distance_p(np.float64_t*x,np.float64_t*y,np.float64_t p,int k,np.float64_t upperbound,
+    np.float64_t*period):
     """Compute the distance between x and y
 
     Computes the Minkowski p-distance to the power p between two points.
@@ -123,7 +126,7 @@
     Periodicity added by S. Skory.
     """
     cdef int i
-    cdef double r, m
+    cdef np.float64_t r, m
     r = 0
     if p==infinity:
         for i in range(k):
@@ -137,6 +140,12 @@
             r += m
             if r>upperbound:
                 return r
+    elif p==2:
+        for i in range(k):
+            m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+            r += m*m
+            if r>upperbound:
+                return r
     else:
         for i in range(k):
             m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
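The new p==2 branch specializes the periodic "minimum image" distance: in
each dimension take the shorter of the direct and wrap-around separations,
then accumulate squares.  The same computation in NumPy, as an
illustrative sketch:

    import numpy as np

    def periodic_distance2(x, y, period):
        d = np.abs(x - y)
        d = np.minimum(d, period - d)  # wrap across the box if shorter
        return np.sum(d * d)           # squared distance, as the p==2 path keeps it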
@@ -151,9 +160,9 @@
 cdef struct innernode:
     int split_dim
     int n_points
-    double split
-    double* maxes
-    double* mins
+    np.float64_t split
+    np.float64_t* maxes
+    np.float64_t* mins
     innernode* less
     innernode* greater
 cdef struct leafnode:
@@ -161,14 +170,14 @@
     int n_points
     int start_idx
     int end_idx
-    double* maxes
-    double* mins
+    np.float64_t* maxes
+    np.float64_t* mins
 
 # this is the standard trick for variable-size arrays:
-# malloc sizeof(nodeinfo)+self.m*sizeof(double) bytes.
+# malloc sizeof(nodeinfo)+self.m*sizeof(np.float64_t) bytes.
 cdef struct nodeinfo:
     innernode* node
-    double side_distances[0]
+    np.float64_t side_distances[0]
 
 cdef class cKDTree:
     """kd-tree for quick nearest-neighbor lookup
@@ -201,7 +210,7 @@
     data : array-like, shape (n,m)
         The n data points of dimension m to be indexed. This array is 
         not copied unless this is necessary to produce a contiguous 
-        array of doubles, and so modifying this data will result in 
+        array of np.float64, and so modifying this data will result in 
         bogus results.
     leafsize : positive integer
         The number of points at which the algorithm switches over to
@@ -211,21 +220,21 @@
 
     cdef innernode* tree 
     cdef readonly object data
-    cdef double* raw_data
+    cdef np.float64_t* raw_data
     cdef readonly int n, m
     cdef readonly int leafsize
     cdef readonly object maxes
-    cdef double* raw_maxes
+    cdef np.float64_t* raw_maxes
     cdef readonly object mins
-    cdef double* raw_mins
+    cdef np.float64_t* raw_mins
     cdef object indices
     cdef np.int64_t* raw_indices
     def __init__(cKDTree self, data, int leafsize=10):
-        cdef np.ndarray[double, ndim=2] inner_data
-        cdef np.ndarray[double, ndim=1] inner_maxes
-        cdef np.ndarray[double, ndim=1] inner_mins
+        cdef np.ndarray[np.float64_t, ndim=2] inner_data
+        cdef np.ndarray[np.float64_t, ndim=1] inner_maxes
+        cdef np.ndarray[np.float64_t, ndim=1] inner_mins
         cdef np.ndarray[np.int64_t, ndim=1] inner_indices
-        self.data = np.ascontiguousarray(data,dtype=np.double)
+        self.data = np.ascontiguousarray(data,dtype="float64")
         self.n, self.m = np.shape(self.data)
         self.leafsize = leafsize
         if self.leafsize<1:
@@ -235,27 +244,27 @@
         self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int64))
 
         inner_data = self.data
-        self.raw_data = <double*>inner_data.data
+        self.raw_data = <np.float64_t*>inner_data.data
         inner_maxes = self.maxes
-        self.raw_maxes = <double*>inner_maxes.data
+        self.raw_maxes = <np.float64_t*>inner_maxes.data
         inner_mins = self.mins
-        self.raw_mins = <double*>inner_mins.data
+        self.raw_mins = <np.float64_t*>inner_mins.data
         inner_indices = self.indices
         self.raw_indices = <np.int64_t*>inner_indices.data
 
         self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins)
 
-    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins):
+    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, np.float64_t* maxes, np.float64_t* mins):
         cdef leafnode* n
         cdef innernode* ni
         cdef int i, j, t, p, q, d
-        cdef double size, split, minval, maxval
-        cdef double*mids
+        cdef np.float64_t size, split, minval, maxval
+        cdef np.float64_t*mids
         if end_idx-start_idx<=self.leafsize:
             n = <leafnode*>stdlib.malloc(sizeof(leafnode))
             # Skory
-            n.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            n.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            n.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            n.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 n.maxes[i] = maxes[i]
                 n.mins[i] = mins[i]
@@ -327,7 +336,7 @@
             # construct new node representation
             ni = <innernode*>stdlib.malloc(sizeof(innernode))
 
-            mids = <double*>stdlib.malloc(sizeof(double)*self.m)
+            mids = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 mids[i] = maxes[i]
             mids[d] = split
@@ -343,8 +352,8 @@
             ni.split_dim = d
             ni.split = split
             # Skory
-            ni.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            ni.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            ni.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            ni.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 ni.maxes[i] = maxes[i]
                 ni.mins[i] = mins[i]
@@ -366,32 +375,35 @@
         self.__free_tree(self.tree)
 
     cdef void __query(cKDTree self, 
-            double*result_distances, 
-            long*result_indices, 
-            double*x, 
+            np.float64_t*result_distances, 
+            np.int64_t*result_indices, 
+            np.float64_t*x, 
             int k, 
-            double eps, 
-            double p, 
-            double distance_upper_bound,
-            double*period):
+            np.float64_t eps, 
+            np.float64_t p, 
+            np.float64_t distance_upper_bound,
+            np.float64_t*period):
+        assert(p == 2)
+        assert(eps == 0.0)
+        assert(distance_upper_bound == infinity)
         cdef heap q
         cdef heap neighbors
 
-        cdef int i, j
-        cdef double t
+        cdef int i, j, i2, j2
+        cdef np.float64_t t, y
         cdef nodeinfo* inf
         cdef nodeinfo* inf2
-        cdef double d
-        cdef double m_left, m_right, m
-        cdef double epsfac
-        cdef double min_distance
-        cdef double far_min_distance
+        cdef np.float64_t d, di
+        cdef np.float64_t m_left, m_right, m
+        cdef np.float64_t epsfac
+        cdef np.float64_t min_distance
+        cdef np.float64_t far_min_distance
         cdef heapitem it, it2, neighbor
         cdef leafnode* node
         cdef innernode* inode
         cdef innernode* near
         cdef innernode* far
-        cdef double* side_distances
+        cdef np.float64_t* side_distances
 
         # priority queue for chasing nodes
         # entries are:
@@ -406,7 +418,7 @@
         heapcreate(&neighbors,k)
 
         # set up first nodeinfo
-        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
         inf.node = self.tree
         for i in range(self.m):
             inf.side_distances[i] = 0
@@ -417,28 +429,15 @@
                 t = self.raw_mins[i]-x[i]
                 if t>inf.side_distances[i]:
                     inf.side_distances[i] = t
-            if p!=1 and p!=infinity:
-                inf.side_distances[i]=inf.side_distances[i]**p
+            inf.side_distances[i]=inf.side_distances[i]*inf.side_distances[i]
 
         # compute first distance
         min_distance = 0.
         for i in range(self.m):
-            if p==infinity:
-                min_distance = dmax(min_distance,inf.side_distances[i])
-            else:
-                min_distance += inf.side_distances[i]
+            min_distance += inf.side_distances[i]
 
         # fiddle approximation factor
-        if eps==0:
-            epsfac=1
-        elif p==infinity:
-            epsfac = 1/(1+eps)
-        else:
-            epsfac = 1/(1+eps)**p
-
-        # internally we represent all distances as distance**p
-        if p!=infinity and distance_upper_bound!=infinity:
-            distance_upper_bound = distance_upper_bound**p
+        epsfac=1
 
         while True:
             if inf.node.split_dim==-1:
@@ -446,10 +445,11 @@
 
                 # brute-force
                 for i in range(node.start_idx,node.end_idx):
-                    d = _distance_p(
-                            self.raw_data+self.raw_indices[i]*self.m,
-                            x,p,self.m,distance_upper_bound,period)
-                        
+                    d = 0.0
+                    for i2 in range(self.m):
+                        y = self.raw_data[self.raw_indices[i]*self.m + i2]
+                        di = dmin(dabs(x[i2] - y), period[i2] - dabs(x[i2] - y))
+                        d += di*di
                     if d<distance_upper_bound:
                         # replace furthest neighbor
                         if neighbors.n==k:
@@ -500,7 +500,7 @@
                 # far child is further by an amount depending only
                 # on the split value; compute its distance and side_distances
                 # and push it on the queue if it's near enough
-                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
                 it2.contents.ptrdata = <char*> inf2
                 inf2.node = far
 
@@ -517,17 +517,9 @@
 
                 # one side distance changes
                 # we can adjust the minimum distance without recomputing
-                if p == infinity:
-                    # we never use side_distances in the l_infinity case
-                    # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])
-                    far_min_distance = dmax(min_distance, m)
-                elif p == 1:
-                    inf2.side_distances[inode.split_dim] = m
-                    far_min_distance = dmax(min_distance, m)
-                else:
-                    inf2.side_distances[inode.split_dim] = m**p
-                    #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
-                    far_min_distance = m**p
+                inf2.side_distances[inode.split_dim] = m*m
+                #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
+                far_min_distance = m*m
 
                 it2.priority = far_min_distance
 
@@ -544,16 +536,13 @@
         for i in range(neighbors.n-1,-1,-1):
             neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced
             result_indices[i] = neighbor.contents.intdata
-            if p==1 or p==infinity:
-                result_distances[i] = -neighbor.priority
-            else:
-                result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
+            result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
 
         heapdestroy(&q)
         heapdestroy(&neighbors)
 
-    def query(cKDTree self, object x, int k=1, double eps=0, double p=2, 
-            double distance_upper_bound=infinity, object period=None):
+    def query(cKDTree self, object x, int k=1, np.float64_t eps=0, np.float64_t p=2, 
+            np.float64_t distance_upper_bound=infinity, object period=None):
         """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf,
            period=None)
         
@@ -592,16 +581,16 @@
             Missing neighbors are indicated with self.n.
 
         """
-        cdef np.ndarray[long, ndim=2] ii
-        cdef np.ndarray[double, ndim=2] dd
-        cdef np.ndarray[double, ndim=2] xx
-        cdef np.ndarray[double, ndim=1] cperiod
+        cdef np.ndarray[np.int64_t, ndim=2] ii
+        cdef np.ndarray[np.float64_t, ndim=2] dd
+        cdef np.ndarray[np.float64_t, ndim=2] xx
+        cdef np.ndarray[np.float64_t, ndim=1] cperiod
         cdef int c
-        x = np.asarray(x).astype(np.double)
+        x = np.asarray(x).astype("float64")
         if period is None:
             period = np.array([np.inf]*self.m)
         else:
-            period = np.asarray(period).astype(np.double)
+            period = np.asarray(period).astype("float64")
         cperiod = np.ascontiguousarray(period)
         if np.shape(x)[-1] != self.m:
             raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
@@ -616,20 +605,20 @@
         n = np.prod(retshape)
         xx = np.reshape(x,(n,self.m))
         xx = np.ascontiguousarray(xx)
-        dd = np.empty((n,k),dtype=np.double)
+        dd = np.empty((n,k),dtype="float64")
         dd.fill(infinity)
-        ii = np.empty((n,k),dtype=np.long)
+        ii = np.empty((n,k),dtype="int64")
         ii.fill(self.n)
         for c in range(n):
             self.__query(
-                    (<double*>dd.data)+c*k,
-                    (<long*>ii.data)+c*k,
-                    (<double*>xx.data)+c*self.m, 
+                    (<np.float64_t*>dd.data)+c*k,
+                    (<np.int64_t*>ii.data)+c*k,
+                    (<np.float64_t*>xx.data)+c*self.m, 
                     k, 
                     eps,
                     p, 
                     distance_upper_bound,
-                    <double*>cperiod.data)
+                    <np.float64_t*>cperiod.data)
         if single:
             if k==1:
                 return dd[0,0], ii[0,0]
@@ -641,7 +630,10 @@
             else:
                 return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,))
 
-    def chainHOP_get_dens(cKDTree self, object mass, int num_neighbors=65, \
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def chainHOP_get_dens(cKDTree self, object omass, int num_neighbors=65, \
             int nMerge=6):
         """ query the tree for the nearest neighbors, to get the density
             of particles for chainHOP.
@@ -669,38 +661,46 @@
         
         """
         
-        # We're no longer returning all the tags in this step.
+        # We're no longer returning all the tags in this step.
         # We do it chunked, in find_chunk_nearest_neighbors.
-        #cdef np.ndarray[long, ndim=2] tags
-        cdef np.ndarray[double, ndim=1] dens
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
+        #cdef np.ndarray[np.int64_t, ndim=2] tags
+        cdef np.ndarray[np.float64_t, ndim=1] dens
         cdef int i, pj, j
-        cdef double ih2, fNorm, r2, rs
+        cdef np.float64_t ih2, fNorm, r2, rs
         
-        #tags = np.empty((self.n, nMerge), dtype=np.long)
-        dens = np.empty(self.n, dtype=np.double)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        # Need to start out with zeros before we start adding to it.
-        dens.fill(0.0)
+        #tags = np.empty((self.n, nMerge), dtype="int64")
+        dens = np.zeros(self.n, dtype="float64")
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
 
-        mass = np.array(mass).astype(np.double)
-        mass = np.ascontiguousarray(mass)
+        cdef np.ndarray[np.float64_t, ndim=1] mass = np.array(omass).astype("float64")
+        cdef np.float64_t ipi = 1.0/np.pi
         
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(self.n):
-            query = self.data[i]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
+            for j in range(self.m):
+                query[j] = local_data[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
             
             #calculate the density for this particle
-            ih2 = 4.0/np.max(dist_temp)
-            fNorm = 0.5*np.sqrt(ih2)*ih2/np.pi
+            ih2 = -1
+            for j in range(num_neighbors):
+                ih2 = dmax(ih2, dist_temp[j])
+            ih2 = 4.0/ih2
+            fNorm = 0.5*(ih2**1.5)*ipi
             for j in range(num_neighbors):
                 pj = tags_temp[j]
                 r2 = dist_temp[j] * ih2
-                rs = 2.0 - np.sqrt(r2)
+                rs = 2.0 - (r2**0.5)
                 if (r2 < 1.0):
                     rs = (1.0 - 0.75*rs*r2)
                 else:
@@ -715,6 +715,8 @@
         #return (dens, tags)
         return dens
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def find_chunk_nearest_neighbors(cKDTree self, int start, int finish, \
         int num_neighbors=65):
         """ query the tree in chunks, between start and finish, recording the
@@ -738,21 +740,99 @@
         
         """
         
-        cdef np.ndarray[long, ndim=2] chunk_tags
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
-        cdef int i
+        cdef np.ndarray[np.int64_t, ndim=2] chunk_tags
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
+        cdef int i, j
         
-        chunk_tags = np.empty((finish-start, num_neighbors), dtype=np.long)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        
+        chunk_tags = np.empty((finish-start, num_neighbors), dtype="int64")
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(finish-start):
-            query = self.data[i+start]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
-            chunk_tags[i,:] = tags_temp[:]
+            for j in range(self.m):
+                query[j] = local_data[i+start,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
+            for j in range(num_neighbors):
+                chunk_tags[i,j] = tags_temp[j]
         
         return chunk_tags
 
+    def chainHOP_preconnect(self, np.ndarray[np.int64_t, ndim=1] chainID,
+                                  np.ndarray[np.float64_t, ndim=1] density,
+                                  np.ndarray[np.float64_t, ndim=1] densest_in_chain,
+                                  np.ndarray bis_inside,
+                                  np.ndarray bsearch_again,
+                                  np.float64_t peakthresh,
+                                  np.float64_t saddlethresh,
+                                  int nn, int nMerge,
+                                  object chain_map):
+        cdef np.ndarray[np.int32_t, ndim=1] is_inside
+        cdef np.ndarray[np.int32_t, ndim=1] search_again
+        cdef np.ndarray[np.float64_t, ndim=2] pos 
+        cdef np.int64_t thisNN, thisNN_chainID, same_count
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * nn)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * nn)
+        cdef np.float64_t period[3], thisNN_max_dens, boundary_density
+        cdef int i, j, npart, chainID_i
+        cdef np.float64_t part_max_dens
+        is_inside = bis_inside.astype("int32")
+        search_again = bsearch_again.astype("int32")
+        pos = self.data
+        npart = pos.shape[0]
+        for i in range(3): period[i] = 1.0
+        for i in xrange(npart):
+            # Don't consider this particle if it's not part of a chain.
+            if chainID[i] < 0: continue
+            chainID_i = chainID[i]
+            # If this particle is in the padding, don't make a connection.
+            if not is_inside[i]: continue
+            # Find this particle's chain max_dens.
+            part_max_dens = densest_in_chain[chainID_i]
+            # We're only connecting >= peakthresh chains now.
+            if part_max_dens < peakthresh: continue
+            # Loop over nMerge closest nearest neighbors.
+            for j in range(self.m):
+                query[j] = pos[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, nn, 0.0, 
+                         2, infinity, period)
+            same_count = 0
+            for j in xrange(int(nMerge+1)):
+                thisNN = tags_temp[j+1] # Don't consider ourselves at tags_temp[0]
+                thisNN_chainID = chainID[thisNN]
+                # If our neighbor is in the same chain, move on.
+                # Move on if these chains are already connected:
+                if chainID_i == thisNN_chainID or \
+                        thisNN_chainID in chain_map[chainID_i]:
+                    same_count += 1
+                    continue
+                # Everything immediately below is for
+                # neighboring particles with a chainID. 
+                if thisNN_chainID >= 0:
+                    # Find thisNN's chain's max_dens.
+                    thisNN_max_dens = densest_in_chain[thisNN_chainID]
+                    # We're only linking peakthresh chains
+                    if thisNN_max_dens < peakthresh: continue
+                    # Calculate the two groups' boundary density.
+                    boundary_density = (density[thisNN] + density[i]) / 2.
+                    # Don't connect if the boundary is too low.
+                    if boundary_density < saddlethresh: continue
+                    # Mark these chains as related.
+                    chain_map[thisNN_chainID].add(chainID_i)
+                    chain_map[chainID_i].add(thisNN_chainID)
+            if same_count == nMerge + 1:
+                # All our neighbors are in the same chain already, so 
+                # we don't need to search again.
+                search_again[i] = 0
+        return search_again
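Here chain_map is assumed to map each chain ID to a set of connected chain
IDs, updated symmetrically so the membership test above works from either
side.  The core bookkeeping, reduced to a sketch:

    from collections import defaultdict

    chain_map = defaultdict(set)

    def connect(a, b):
        # Record the link in both directions so lookups are symmetric.
        chain_map[a].add(b)
        chain_map[b].add(a)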


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7757,3 +7757,44 @@
          1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
 
+color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
+color_map_luts['BLUE'] = color_map_luts['idl01']
+color_map_luts['GRN-RED-BLU-WHT'] = color_map_luts['idl02']
+color_map_luts['RED TEMPERATURE'] = color_map_luts['idl03']
+color_map_luts['BLUE'] = color_map_luts['idl04']
+color_map_luts['STD GAMMA-II'] = color_map_luts['idl05']
+color_map_luts['PRISM'] = color_map_luts['idl06']
+color_map_luts['RED-PURPLE'] = color_map_luts['idl07']
+color_map_luts['GREEN'] = color_map_luts['idl08']
+color_map_luts['GRN'] = color_map_luts['idl09']
+color_map_luts['GREEN-PINK'] = color_map_luts['idl10']
+color_map_luts['BLUE-RED'] = color_map_luts['idl11']
+color_map_luts['16 LEVEL'] = color_map_luts['idl12']
+color_map_luts['RAINBOW'] = color_map_luts['idl13']
+color_map_luts['STEPS'] = color_map_luts['idl14']
+color_map_luts['STERN SPECIAL'] = color_map_luts['idl15']
+color_map_luts['Haze'] = color_map_luts['idl16']
+color_map_luts['Blue - Pastel - Red'] = color_map_luts['idl17']
+color_map_luts['Pastels'] = color_map_luts['idl18']
+color_map_luts['Hue Sat Lightness 1'] = color_map_luts['idl19']
+color_map_luts['Hue Sat Lightness 2'] = color_map_luts['idl20']
+color_map_luts['Hue Sat Value 1'] = color_map_luts['idl21']
+color_map_luts['Hue Sat Value 2'] = color_map_luts['idl22']
+color_map_luts['Purple-Red + Stripes'] = color_map_luts['idl23']
+color_map_luts['Beach'] = color_map_luts['idl24']
+color_map_luts['Mac Style'] = color_map_luts['idl25']
+color_map_luts['Eos A'] = color_map_luts['idl26']
+color_map_luts['Eos B'] = color_map_luts['idl27']
+color_map_luts['Hardcandy'] = color_map_luts['idl28']
+color_map_luts['Nature'] = color_map_luts['idl29']
+color_map_luts['Ocean'] = color_map_luts['idl30']
+color_map_luts['Peppermint'] = color_map_luts['idl31']
+color_map_luts['Plasma'] = color_map_luts['idl32']
+color_map_luts['Blue-Red'] = color_map_luts['idl33']
+color_map_luts['Rainbow'] = color_map_luts['idl34']
+color_map_luts['Blue Waves'] = color_map_luts['idl35']
+color_map_luts['Volcano'] = color_map_luts['idl36']
+color_map_luts['Waves'] = color_map_luts['idl37']
+color_map_luts['Rainbow18'] = color_map_luts['idl38']
+color_map_luts['Rainbow + white'] = color_map_luts['idl39']
+color_map_luts['Rainbow + black'] = color_map_luts['idl40']
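These aliases copy no lookup-table data; each new name binds to the same
object as its idlNN counterpart (note 'BLUE' is assigned twice, so it ends
up pointing at idl04).  A quick check:

    from yt.visualization._colormap_data import color_map_luts

    assert color_map_luts['RAINBOW'] is color_map_luts['idl13']  # shared object
    assert color_map_luts['BLUE'] is color_map_luts['idl04']     # later binding wins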


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -50,7 +50,8 @@
     annotate_image, \
     apply_colormap, \
     scale_image, \
-    write_projection
+    write_projection, \
+    write_fits
 
 from plot_modifications import \
     PlotCallback, \


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -251,7 +251,8 @@
 
 #=============================================================================
 
-    def axis_box_yt(self, plot, units=None, bare_axes=False, **kwargs):
+    def axis_box_yt(self, plot, units=None, bare_axes=False,
+                    tickcolor=None, **kwargs):
         r"""Wrapper around DualEPS.axis_box to automatically fill in the
         axis ranges and labels from a yt plot.
 
@@ -296,7 +297,8 @@
                 units = units.replace('mpc', 'Mpc')
                 _xlabel = '%s (%s)' % (x_names[plot.data.axis], units)
                 _ylabel = '%s (%s)' % (y_names[plot.data.axis], units)
-            _tickcolor = pyx.color.cmyk.white
+            if tickcolor == None:
+                _tickcolor = pyx.color.cmyk.white
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -308,7 +310,10 @@
             else:
                 _xlabel = plot._x_label
                 _ylabel = plot._y_label
-            _tickcolor = None
+            if tickcolor == None:
+                _tickcolor = None
+        if tickcolor != None:
+            _tickcolor = tickcolor
         self.axis_box(xrange=_xrange, yrange=_yrange, xlabel=_xlabel,
                       ylabel=_ylabel, tickcolor=_tickcolor, xlog=_xlog,
                       ylog=_ylog, bare_axes=bare_axes, **kwargs)
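With the new keyword a caller can force a tick color rather than relying
on the automatic choice; a usage sketch, where the plot object comes from
elsewhere:

    import pyx

    d = DualEPS()
    d.insert_image_yt(plot)
    d.axis_box_yt(plot, tickcolor=pyx.color.cmyk.black)  # override the default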
@@ -350,7 +355,7 @@
 
 #=============================================================================
 
-    def insert_image_yt(self, plot, pos=(0,0)):
+    def insert_image_yt(self, plot, pos=(0,0), scale=1.0):
         r"""Inserts a bitmap taken from a yt plot.
 
         Parameters
@@ -398,8 +403,8 @@
                                  figure_canvas.tostring_rgb())
         #figure_canvas.print_png('test.png')
         self.canvas.insert(pyx.bitmap.bitmap(pos[0], pos[1], image,
-                                             width=self.figsize[0],
-                                             height=self.figsize[1]))
+                                             width=scale*self.figsize[0],
+                                             height=scale*self.figsize[1]))
 
 #=============================================================================
 
@@ -871,44 +876,43 @@
                 if cb_flags != None:
                     if cb_flags[index] == False:
                         continue
-                if _yt or colorbars[index] != None:
-                    if ncol == 1:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif i == 0:
-                        orientation = "left"
-                        xpos = bbox[0]
-                        ypos = ypos0
-                    elif i+1 == ncol:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif j == 0:
-                        orientation = "bottom"
-                        ypos = bbox[2]
-                        xpos = xpos0
-                    elif j+1 == nrow:
-                        orientation = "top"
-                        ypos = bbox[3]
-                        xpos = xpos0
+                if ncol == 1:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j == 0:
+                    orientation = "bottom"
+                    ypos = bbox[2]
+                    xpos = xpos0
+                elif i == 0:
+                    orientation = "left"
+                    xpos = bbox[0]
+                    ypos = ypos0
+                elif i+1 == ncol:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j+1 == nrow:
+                    orientation = "top"
+                    ypos = bbox[3]
+                    xpos = xpos0
+                else:
+                    orientation = None  # Marker for interior plot
+
+                if orientation != None:
+                    if _yt:
+                        d.colorbar_yt(yt_plots[index],
+                                      pos=[xpos,ypos],
+                                      shrink=shrink_cb,
+                                      orientation=orientation)
                     else:
-                        orientation = None  # Marker for interior plot
-
-                    if orientation != None:
-                        if _yt:
-                            d.colorbar_yt(yt_plots[index],
-                                          pos=[xpos,ypos],
-                                          shrink=shrink_cb,
-                                          orientation=orientation)
-                        else:
-                            d.colorbar(colorbars[index]["cmap"],
-                                       zrange=colorbars[index]["range"],
-                                       label=colorbars[index]["name"],
-                                       log=colorbars[index]["log"],
-                                       orientation=orientation,
-                                       pos=[xpos,ypos],
-                                       shrink=shrink_cb)
+                        d.colorbar(colorbars[index]["cmap"],
+                                   zrange=colorbars[index]["range"],
+                                   label=colorbars[index]["name"],
+                                   log=colorbars[index]["log"],
+                                   orientation=orientation,
+                                   pos=[xpos,ypos],
+                                   shrink=shrink_cb)
 
     if savefig != None:
         d.save_fig(savefig, format=format)
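The restructured block first decides where a panel's colorbar belongs from
its grid position alone, then draws it for either the yt or raw-colormap
case.  The placement rule, restated as a standalone function:

    def colorbar_orientation(i, j, ncol, nrow):
        # Mirrors the branch order above: single-column grids always go
        # right; edge panels get the matching edge; interior panels, none.
        if ncol == 1:
            return "right"
        if j == 0:
            return "bottom"
        if i == 0:
            return "left"
        if i + 1 == ncol:
            return "right"
        if j + 1 == nrow:
            return "top"
        return None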
@@ -958,7 +962,7 @@
 #=============================================================================
 
 def single_plot(plot, figsize=(12,12), cb_orient="right", bare_axes=False,
-                savefig=None, file_format='eps'):
+                savefig=None, colorbar=True, file_format='eps', **kwargs):
     r"""Wrapper for DualEPS routines to create a figure directy from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
@@ -975,6 +979,8 @@
         Set to true to have no annotations or tick marks on all of the axes.
     savefig : string
         Name of the saved file without the extension.
+    colorbar : boolean
+        Set to true to include a colorbar
     file_format : string
         Format type.  Can be "eps" or "pdf"
 
@@ -986,8 +992,9 @@
     """
     d = DualEPS(figsize=figsize)
     d.insert_image_yt(plot)
-    d.axis_box_yt(plot, bare_axes=bare_axes)
-    d.colorbar_yt(plot, orientation=cb_orient)
+    d.axis_box_yt(plot, bare_axes=bare_axes, **kwargs)
+    if colorbar:
+        d.colorbar_yt(plot, orientation=cb_orient)
     if savefig != None:
         d.save_fig(savefig, format=file_format)
     return d


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -214,21 +214,23 @@
             output.create_dataset(field,data=self[field])
         output.close()
 
-    def export_fits(self, filename_prefix, fields = None, clobber=False):
+    def export_fits(self, filename_prefix, fields = None, clobber=False,
+                    other_keys=None, gzip_file=False, units="1"):
+
         """
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filenames are
-        *filename_prefix* plus an underscore plus the name of the field. If 
-        clobber is set to True, this will overwrite any existing FITS file.
+        or all the fields already in the object.  The output filename is
+        *filename_prefix*. If clobber is set to True, this will overwrite any
+        existing FITS file.
 
         This requires the *pyfits* module, which is a standalone module
         provided by STSci to interface with FITS-format files.
         """
-        r"""Export a set of pixelized fields to a set of fits files.
+        r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filenames are
-        the specified prefix plus an underscore plus the name of the field.
+        or all the fields already in the object.  The output filename is
+        the specified prefix.
 
         Parameters
         ----------
@@ -238,21 +240,90 @@
             These fields will be pixelized and output.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
+        other_keys : dictionary, optional
+            A set of header keys and values to write into the FITS header.
+        gzip_file : boolean, optional
+            gzip the file after writing, default False
+        units : string, optional
+            the length units that the coordinates are written in, default '1'
         """
+        
         import pyfits
+        from os import system
+        
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
+
+        nx, ny = self.buff_size
+        dx = (self.bounds[1]-self.bounds[0])/nx*self.pf[units]
+        dy = (self.bounds[3]-self.bounds[2])/ny*self.pf[units]
+        xmin = self.bounds[0]*self.pf[units]
+        ymin = self.bounds[2]*self.pf[units]
+        simtime = self.pf.current_time
+
+        hdus = []
+
+        first = True
+        
         for field in fields:
-            hdu = pyfits.PrimaryHDU(self[field])
+
+            if (first) :
+                hdu = pyfits.PrimaryHDU(self[field])
+                first = False
+            else :
+                hdu = pyfits.ImageHDU(self[field])
+                
             if self.data_source.has_key('weight_field'):
                 weightname = self.data_source._weight
                 if weightname is None: weightname = 'None'
                 field = field +'_'+weightname
-            hdu.writeto("%s_%s.fits" % (filename_prefix, field),clobber=clobber)
 
+            hdu.header.update("Field", field)
+            hdu.header.update("Time", simtime)
+
+            hdu.header.update('WCSNAMEP', "PHYSICAL")            
+            hdu.header.update('CTYPE1P', "LINEAR")
+            hdu.header.update('CTYPE2P', "LINEAR")
+            hdu.header.update('CRPIX1P', 0.5)
+            hdu.header.update('CRPIX2P', 0.5)
+            hdu.header.update('CRVAL1P', xmin)
+            hdu.header.update('CRVAL2P', ymin)
+            hdu.header.update('CDELT1P', dx)
+            hdu.header.update('CDELT2P', dy)
+                    
+            hdu.header.update('CTYPE1', "LINEAR")
+            hdu.header.update('CTYPE2', "LINEAR")                                
+            hdu.header.update('CUNIT1', units)
+            hdu.header.update('CUNIT2', units)
+            hdu.header.update('CRPIX1', 0.5)
+            hdu.header.update('CRPIX2', 0.5)
+            hdu.header.update('CRVAL1', xmin)
+            hdu.header.update('CRVAL2', ymin)
+            hdu.header.update('CDELT1', dx)
+            hdu.header.update('CDELT2', dy)
+
+            if (other_keys is not None) :
+
+                for k,v in other_keys.items() :
+
+                    hdu.header.update(k,v)
+
+            hdus.append(hdu)
+
+            del hdu
+            
+        hdulist = pyfits.HDUList(hdus)
+
+        hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+        
+        if (gzip_file) :
+            clob = ""
+            if (clobber) : clob = "-f"
+            system("gzip "+clob+" %s.fits" % (filename_prefix))
+        
     def open_in_ds9(self, field, take_log=True):
         """
         This will open a given field in the DS9 viewer.


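For reference, a minimal usage sketch of the reworked export_fits, which now
writes a single multi-HDU file rather than one file per field. `frb` is
assumed to be an existing FixedResolutionBuffer; the field names and the
extra header key are placeholders:

    # Sketch only: "Density"/"Temperature" and the OBSERVER key are made up.
    frb.export_fits("my_output",
                    fields=["Density", "Temperature"],
                    clobber=True,
                    other_keys={"OBSERVER": "yt"},
                    gzip_file=True,
                    units="kpc")
    # Writes (and then gzips) my_output.fits: the first field becomes the
    # PrimaryHDU, each subsequent field an ImageHDU, all carrying the linear
    # WCS keywords set up in the loop above.
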
diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -411,3 +411,70 @@
 
     pl.clf()
     pl.close()
+
+def write_fits(image, filename_prefix, clobber=True, coords=None, gzip_file=False) :
+
+    """
+    This will export a FITS image of a floating point array. The output filename is
+    *filename_prefix*. If clobber is set to True, this will overwrite any existing
+    FITS file.
+    
+    This requires the *pyfits* module, which is a standalone module
+    provided by STSci to interface with FITS-format files.
+    """
+    r"""Write out a floating point array directly to a FITS file, optionally
+    adding coordinates. 
+        
+    Parameters
+    ----------
+    image : array_like
+        This is an (unscaled) array of floating point values, shape (N,N,) to save
+        in a FITS file.
+    filename_prefix : string
+        This prefix will be prepended to every FITS file name.
+    clobber : boolean
+        If the file exists, this governs whether we will overwrite.
+    coords : dictionary, optional
+        A set of header keys and values to write to the FITS header to set up
+        a coordinate system. 
+    gzip_file : boolean, optional
+        gzip the file after writing, default False
+    """
+    
+    import pyfits
+    from os import system
+    
+    if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
+    
+    hdu = pyfits.PrimaryHDU(image)
+
+    if (coords is not None) :
+
+        hdu.header.update('WCSNAMEP', "PHYSICAL")
+        hdu.header.update('CTYPE1P', "LINEAR")
+        hdu.header.update('CTYPE2P', "LINEAR")
+        hdu.header.update('CRPIX1P', 0.5)
+        hdu.header.update('CRPIX2P', 0.5)
+        hdu.header.update('CRVAL1P', coords["xmin"])
+        hdu.header.update('CRVAL2P', coords["ymin"])
+        hdu.header.update('CDELT1P', coords["dx"])
+        hdu.header.update('CDELT2P', coords["dy"])
+        
+        hdu.header.update('CTYPE1', "LINEAR")
+        hdu.header.update('CTYPE2', "LINEAR")
+        hdu.header.update('CUNIT1', coords["units"])
+        hdu.header.update('CUNIT2', coords["units"])
+        hdu.header.update('CRPIX1', 0.5)
+        hdu.header.update('CRPIX2', 0.5)
+        hdu.header.update('CRVAL1', coords["xmin"])
+        hdu.header.update('CRVAL2', coords["ymin"])
+        hdu.header.update('CDELT1', coords["dx"])
+        hdu.header.update('CDELT2', coords["dy"])
+
+    hdu.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+
+    if (gzip_file) :
+        clob = ""
+        if (clobber) : clob="-f"
+        system("gzip "+clob+" %s.fits" % (filename_prefix))
+    


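A hedged sketch of calling the new write_fits helper; the coords keys below
(xmin, ymin, dx, dy, units) are exactly the ones the function reads, while
the image array itself is made up:

    import numpy as np
    from yt.visualization.image_writer import write_fits

    image = np.random.random((256, 256))   # placeholder data
    coords = {"xmin": 0.0, "ymin": 0.0,    # physical origin of the image
              "dx": 0.5, "dy": 0.5,        # pixel scale
              "units": "kpc"}              # length units for CUNIT1/CUNIT2
    write_fits(image, "random_field", clobber=True,
               coords=coords, gzip_file=False)
    # -> random_field.fits, with both the primary and the "P" (physical)
    #    linear coordinate keywords filled in from `coords`.
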
diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -940,7 +940,7 @@
                                   x_bins, fields[0], x_min, x_max, x_log,
                                   lazy_reader)
         if len(fields) > 1:
-            profile.add_fields(fields[1], weight=weight, accumulation=accumulation)
+            profile.add_fields(fields[1:], weight=weight, accumulation=accumulation)
         if id is None: id = self._get_new_id()
         p = self._add_plot(Profile1DPlot(profile, fields, id,
                                                    axes=axes, figure=figure))
@@ -1148,13 +1148,15 @@
                                   x_bins, fields[0], x_min, x_max, x_log,
                                   y_bins, fields[1], y_min, y_max, y_log,
                                   lazy_reader)
+        # This will add all the fields to the profile object
+        if len(fields)>2:
+            profile.add_fields(fields[2:], weight=weight,
+                    accumulation=accumulation, fractional=fractional)
+
         if id is None: id = self._get_new_id()
         p = self._add_plot(PhasePlot(profile, fields, 
                                                id, cmap=cmap,
                                                figure=figure, axes=axes))
-        if len(fields) > 2:
-            # This will add all the fields to the profile object
-            p.switch_z(fields[2], weight=weight, accumulation=accumulation, fractional=fractional)
         return p
 
     def add_phase_sphere(self, radius, unit, fields, center = None, cmap=None,


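The two hunks above make multi-field profiles register every extra field on
the profile object up front (fields[1:] for 1D profiles, fields[2:] for phase
plots) instead of adding a single field or deferring to switch_z. A sketch of
the calling convention, assuming a PlotCollection `pc` and a data object `dd`
with placeholder field names:

    # 1D profile: fields[0] is the bin field, fields[1:] are all profiled.
    p = pc.add_profile_object(dd, ["Radius", "Density", "Temperature"],
                              weight="CellMassMsun")
    # Phase plot: fields[0] and fields[1] bin, fields[2:] are all deposited
    # into the same profile object.
    p = pc.add_phase_object(dd, ["Density", "Temperature", "CellMassMsun"],
                            weight=None)
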
diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -36,6 +36,7 @@
     y_dict, \
     axis_names
 from .color_maps import yt_colormaps, is_colormap
+from yt.utilities.exceptions import YTNoDataInObjectError
 
 class CallbackRegistryHandler(object):
     def __init__(self, plot):
@@ -379,6 +380,8 @@
 
     def _redraw_image(self, *args):
         buff = self._get_buff()
+        if self[self.axis_names["Z"]].size == 0:
+            raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
                     na.nanmin(buff), na.nanmax(buff),
                     self[self.axis_names["Z"]].min(),


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -24,6 +24,7 @@
 """
 import base64
 import matplotlib.pyplot
+import cStringIO
 from functools import wraps
 
 import numpy as na
@@ -36,27 +37,59 @@
 
 from yt.funcs import *
 from yt.utilities.amr_utils import write_png_to_string
+from yt.utilities.definitions import \
+    x_dict, x_names, \
+    y_dict, y_names, \
+    axis_names, \
+    axis_labels
 
 def invalidate_data(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
-        f(*args, **kwargs)
+        rv = f(*args, **kwargs)
         args[0]._data_valid = False
         args[0]._plot_valid = False
         args[0]._recreate_frb()
         if args[0]._initfinished:
             args[0]._setup_plots()
+        return rv
     return newfunc
 
 def invalidate_plot(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
         args[0]._plot_valid = False
         args[0]._setup_plots()
-        return f(*args, **kwargs)
+        return rv
     return newfunc
 
+field_transforms = {}
+
+class FieldTransform(object):
+    def __init__(self, name, func, locator):
+        self.name = name
+        self.func = func
+        self.locator = locator
+        field_transforms[name] = self
+
+    def __call__(self, *args, **kwargs):
+        return self.func(*args, **kwargs)
+
+    def ticks(self, mi, ma):
+        try:
+            ticks = self.locator(mi, ma)
+        except:
+            ticks = []
+        return ticks
+
+log_transform = FieldTransform('log10', na.log10, LogLocator())
+linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
+
 class PlotWindow(object):
+    _plot_valid = False
+    _colorbar_valid = False
+    _contour_info = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, periodic = True):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
@@ -232,6 +265,14 @@
     def set_antialias(self,aa):
         self.antialias = aa
 
+    @invalidate_plot
+    def set_contour_info(self, field_name, n_cont = 8, colors = None,
+                         logit = True):
+        if field_name == "None" or n_cont == 0:
+            self._contour_info = None
+            return
+        self._contour_info = (field_name, n_cont, colors, logit)
+
 class PWViewer(PlotWindow):
     """A viewer for PlotWindows.
 
@@ -240,16 +281,17 @@
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
         self._field_transform = {}
+        self._colormaps = defaultdict(lambda: 'algae')
         for field in self._frb.data.keys():
             if self._frb.pf.field_info[field].take_log:
-                self._field_transform[field] = na.log
+                self._field_transform[field] = log_transform
             else:
-                self._field_transform[field] = lambda x: x
+                self._field_transform[field] = linear_transform
 
         if setup: self._setup_plots()
 
     @invalidate_plot
-    def set_log(self,field,log):
+    def set_log(self, field, log):
         """set a field to log or linear.
         
         Parameters
@@ -261,16 +303,20 @@
 
         """
         if log:
-            self._field_transform[field] = na.log
+            self._field_transform[field] = log_transform
         else:
-            self._field_transform[field] = lambda x: x
-
-    def set_transform(self, field, func):
-        self._field_transform[field] = func
+            self._field_transform[field] = linear_transform
 
     @invalidate_plot
-    def set_cmap(self):
-        pass
+    def set_transform(self, field, name):
+        if name not in field_transforms: 
+            raise KeyError(name)
+        self._field_transform[field] = field_transforms[name]
+
+    @invalidate_plot
+    def set_cmap(self, field, cmap_name):
+        self._colorbar_valid = False
+        self._colormaps[field] = cmap_name
 
     @invalidate_plot
     def set_zlim(self):
@@ -309,7 +355,11 @@
 <br>
 Field of View:  %(x_width)0.3f %(unit)s<br>
 Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s
+Maximum Value:  %(ma)0.3e %(units)s<br>
+Central Point:  (data coords)<br>
+   %(xc)0.14f<br>
+   %(yc)0.14f<br>
+   %(zc)0.14f
 """
 
 class PWViewerExtJS(PWViewer):
@@ -319,7 +369,6 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
-    cmap = 'algae'
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -332,18 +381,21 @@
         else:
             fields = self._frb.data.keys()
             addl_keys = {}
+        if self._colorbar_valid == False:
+            addl_keys['colorbar_image'] = self._get_cbar_image()
+            self._colorbar_valid = True
         min_zoom = 200*self._frb.pf.h.get_smallest_dx() * self._frb.pf['unitary']
         for field in fields:
-            to_plot = apply_colormap(self._frb[field], func = self._field_transform[field])
-            pngs = write_png_to_string(to_plot)
+            to_plot = apply_colormap(self._frb[field],
+                func = self._field_transform[field],
+                cmap_name = self._colormaps[field])
+            pngs = self._apply_modifications(to_plot)
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
             zoom_fac = na.log10(x_width*self._frb.pf['unitary'])/na.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
-            ticks = self.get_ticks(self._frb[field].min(),
-                                   self._frb[field].max(), 
-                                   take_log = self._frb.pf.field_info[field].take_log)
+            ticks = self.get_ticks(field)
             payload = {'type':'png_string',
                        'image_data':img_data,
                        'metadata_string': self.get_metadata(field),
@@ -352,34 +404,64 @@
             payload.update(addl_keys)
             ph.add_payload(payload)
 
-    def get_ticks(self, mi, ma, height = 400, take_log = False):
+    def _apply_modifications(self, img):
+        if self._contour_info is None:
+            return write_png_to_string(img)
+        from matplotlib.figure import Figure
+        from yt.visualization._mpl_imports import \
+            FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+        from yt.utilities.delaunay.triangulate import Triangulation as triang
+        plot_args = {}
+        field, number, colors, logit = self._contour_info
+        if colors is not None: plot_args['colors'] = colors
+
+        vi, vj, vn = img.shape
+
+        # Now we need to get our field values
+        raw_data = self._frb.data_source
+        b = self._frb.bounds
+        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+                          b[2]:b[3]:(vj / 8) * 1j]
+        x = raw_data['px']
+        y = raw_data['py']
+        z = raw_data[field]
+        if logit: z = na.log10(z)
+        fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
+
+        fig = Figure((vi/100.0, vj/100.0), dpi = 100)
+        fig.figimage(img)
+        # Add our contour
+        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
+        ax.patch.set_alpha(0.0)
+
+        # Now contour it
+        ax.contour(fvals, number, colors='w')
+        canvas = FigureCanvasAgg(fig)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = f.read()
+        return img
+        
+    def get_ticks(self, field, height = 400):
         # This will eventually change to work with non-logged fields
         ticks = []
-        if take_log and mi > 0.0 and ma > 0.0:
-            ll = LogLocator() 
-            tick_locs = ll(mi, ma)
-            mi = na.log10(mi)
-            ma = na.log10(ma)
-            for v1,v2 in zip(tick_locs, na.log10(tick_locs)):
-                if v2 < mi or v2 > ma: continue
-                p = height - height * (v2 - mi)/(ma - mi)
-                ticks.append((p,v1,v2))
-                #print v1, v2, mi, ma, height, p
-        else:
-            ll = LinearLocator()
-            tick_locs = ll(mi, ma)
-            for v in tick_locs:
-                p = height - height * (v - mi)/(ma-mi)
-                ticks.append((p,v,"%0.3e" % (v)))
-
+        transform = self._field_transform[field]
+        mi, ma = self._frb[field].min(), self._frb[field].max()
+        tick_locs = transform.ticks(mi, ma)
+        mi, ma = transform((mi, ma))
+        for v1,v2 in zip(tick_locs, transform(tick_locs)):
+            if v2 < mi or v2 > ma: continue
+            p = height - height * (v2 - mi)/(ma - mi)
+            ticks.append((p,v1,v2))
         return ticks
 
-    def _get_cbar_image(self, height = 400, width = 40):
-        # Right now there's just the single 'cmap', but that will eventually
-        # change.  I think?
+    def _get_cbar_image(self, height = 400, width = 40, field = None):
+        if field is None: field = self._current_field
+        cmap_name = self._colormaps[field]
         vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
         vals = vals.transpose()
-        to_plot = apply_colormap(vals)
+        to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
         img_data = base64.b64encode(pngs)
         return img_data
@@ -402,11 +484,21 @@
         y_width = self.ylim[1] - self.ylim[0]
         unit = get_smallest_appropriate_unit(x_width, self._frb.pf)
         units = self.get_field_units(field)
+        center = getattr(self._frb.data_source, "center", None)
+        if center is None or self._frb.axis == 4:
+            xc, yc, zc = -999, -999, -999
+        else:
+            center[x_dict[self._frb.axis]] = 0.5 * (
+                self.xlim[0] + self.xlim[1])
+            center[y_dict[self._frb.axis]] = 0.5 * (
+                self.ylim[0] + self.ylim[1])
+            xc, yc, zc = center
         md = _metadata_template % dict(
                 pf = self._frb.pf,
                 x_width = x_width*self._frb.pf[unit],
                 y_width = y_width*self._frb.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma)
+                unit = unit, units = units, mi = mi, ma = ma,
+                xc = xc, yc = yc, zc = zc)
         return md
 
     def image_recenter(self, img_x, img_y, img_size_x, img_size_y):
@@ -422,9 +514,9 @@
         self._current_field = field
         self._frb[field]
         if self._frb.pf.field_info[field].take_log:
-            self._field_transform[field] = na.log
+            self._field_transform[field] = log_transform
         else:
-            self._field_transform[field] = lambda x: x
+            self._field_transform[field] = linear_transform
 
     def get_field_units(self, field, strip_mathml = True):
         ds = self._frb.data_source
@@ -439,7 +531,6 @@
             units = units.replace(r"\rm{", "").replace("}","")
         return units
 
-
 class YtPlot(object):
     """A base class for all yt plots. It should abstract the actual
     plotting engine completely, allowing plotting even without matplotlib. 
@@ -474,7 +565,6 @@
 class Yt2DPlot(YtPlot):
     zmin = None
     zmax = None
-    cmap = 'algae'
     zlabel = None
 
     # def __init__(self, data):
@@ -485,17 +575,14 @@
         self.zmin = zmin
         self.zmax = zmax
 
-    @invalidate_plot
-    def set_cmap(self,cmap):
-        self.cmap = cmap
-
 class YtWindowPlot(Yt2DPlot):
     def __init__(self, data, size=(10,8)):
         YtPlot.__init__(self, data, size)
         self.__init_image(data)
 
     def __init_image(self, data):
-        self.image = self.axes.imshow(data,cmap=self.cmap)
+        #self.image = self.axes.imshow(data, cmap=self.cmap)
+        pass
 
 class YtProfilePlot(Yt2DPlot):
     def __init__(self):


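The plot_window.py changes replace the bare na.log/identity callables with
named, registered FieldTransform objects and make colormaps and transforms
per-field. A minimal sketch of driving a PWViewer with the new setters (`pw`
is an assumed existing instance; the field names are placeholders):

    pw.set_log("Density", True)           # picks the 'log10' transform
    pw.set_transform("Density", "log10")  # same thing, by registry name
    pw.set_cmap("Density", "hot")         # per-field cmap; also marks the
                                          # cached colorbar image invalid
    pw.set_contour_info("Temperature", n_cont=8, logit=True)
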
diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -340,6 +340,8 @@
             func = na.log10
         else:
             func = lambda a: a
+        raw_data = na.repeat(raw_data, 3, axis=0)
+        raw_data = na.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':


diff -r bebe2f1eefad7d54fa295a06be41f6d7e6547aa4 -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -45,18 +45,12 @@
         f.close()
     if fits:
         try:
-            hdu = pyfits.PrimaryHDU(image[:,:,0])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_r.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,1])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_g.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,2])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_b.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,3])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_a.fits'%fn,clobber=True)
+            hdur = pyfits.PrimaryHDU(image[:,:,0])
+            hdug = pyfits.ImageHDU(image[:,:,1])
+            hdub = pyfits.ImageHDU(image[:,:,2])
+            hdua = pyfits.ImageHDU(image[:,:,3])
+            hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
+            hdulist.writeto('%s.fits'%fn,clobber=True)
         except: print 'You do not have pyfits, install before attempting to use fits exporter'
 
 def import_rgba(name, h5=True):


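export_rgba now emits one multi-extension FITS file (primary HDU = red
channel, then green/blue/alpha ImageHDUs) instead of four separate files.
Reading it back with pyfits would look roughly like this (filename assumed):

    import pyfits

    hdulist = pyfits.open("render.fits")
    r = hdulist[0].data   # PrimaryHDU: red channel
    g = hdulist[1].data   # ImageHDU: green
    b = hdulist[2].data   # ImageHDU: blue
    a = hdulist[3].data   # ImageHDU: alpha
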

https://bitbucket.org/yt_analysis/yt/changeset/7e10bc517866/
changeset:   7e10bc517866
branch:      yt
user:        ngoldbaum
date:        2012-03-16 20:07:24
summary:     Merging
affected #:  14 files



diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/analysis_modules/halo_finding/rockstar/api.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/api.py
@@ -0,0 +1,27 @@
+"""
+API for Rockstar halo finding
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .rockstar import RockstarHaloFinder


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -0,0 +1,105 @@
+"""
+Operations to get Rockstar loaded up
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+from os import environ
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, ProcessorPool, Communicator
+
+import rockstar_interface
+import socket
+import time
+
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
+
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
+
+class RockstarHaloFinder(ParallelAnalysisInterface):
+    def __init__(self, pf, num_readers = 0, num_writers = 0):
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            raise RuntimeError
+        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
+        data_source = None
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(num_readers, name = "readers")
+            self.pool.add_workgroup(num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
+        data_source = self.pf.h.all_data()
+        self.handler = rockstar_interface.RockstarInterface(
+                self.pf, data_source)
+
+    def _get_hosts(self):
+        if self.comm.size == 1 or self.workgroup.name == "server":
+            server_address = socket.gethostname()
+            sock = socket.socket()
+            sock.bind(('', 0))
+            port = sock.getsockname()[-1]
+            del sock
+        else:
+            server_address, port = None, None
+        self.server_address, self.port = self.comm.mpi_bcast_pickled(
+            (server_address, port))
+        self.port = str(self.port)
+
+    def run(self, block_ratio = 1):
+        if block_ratio != 1:
+            raise NotImplementedError
+        self._get_hosts()
+        self.handler.setup_rockstar(self.server_address, self.port,
+                    parallel = self.comm.size > 1,
+                    num_readers = self.num_readers,
+                    num_writers = self.num_writers,
+                    writing_port = -1,
+                    block_ratio = block_ratio)
+        if self.comm.size == 1:
+            self.handler.call_rockstar()
+        else:
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+        self.comm.barrier()


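A hedged sketch of driving the new inline Rockstar interface under MPI; note
that the constructor requires num_readers + num_writers + 1 == comm.size,
since one rank is reserved for the server (the dataset name is a placeholder):

    # Launch with, e.g., mpirun -np 4: 1 server + 1 reader + 2 writers.
    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import \
        RockstarHaloFinder

    pf = load("my_dataset")
    rh = RockstarHaloFinder(pf, num_readers=1, num_writers=2)
    rh.run()
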
diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -0,0 +1,349 @@
+"""
+Particle operations for Lagrangian Volume
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import os, sys
+cimport numpy as np
+cimport cython
+from stdlib cimport malloc
+
+cdef import from "particle.h":
+    struct particle:
+        np.int64_t id
+        float pos[6]
+
+cdef import from "io_generic.h":
+    ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p)
+    void set_load_particles_generic(LPG func)
+
+cdef import from "rockstar.h":
+    void rockstar(float *bounds, np.int64_t manual_subs)
+
+cdef import from "config.h":
+    void setup_config()
+
+cdef import from "server.h":
+    int server()
+
+cdef import from "client.h":
+    void client()
+
+cdef import from "meta_io.h":
+    void read_particles(char *filename)
+    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+			   np.int64_t chunk, float *bounds)
+
+cdef import from "config_vars.h":
+    # Rockstar cleverly puts all of the config variables inside a templated
+    # definition of their variables.
+    char *FILE_FORMAT
+    np.float64_t PARTICLE_MASS
+
+    char *MASS_DEFINITION
+    np.int64_t MIN_HALO_OUTPUT_SIZE
+    np.float64_t FORCE_RES
+
+    np.float64_t SCALE_NOW
+    np.float64_t h0
+    np.float64_t Ol
+    np.float64_t Om
+
+    np.int64_t GADGET_ID_BYTES
+    np.float64_t GADGET_MASS_CONVERSION
+    np.float64_t GADGET_LENGTH_CONVERSION
+    np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+    np.int64_t RESCALE_PARTICLE_MASS
+
+    np.int64_t PARALLEL_IO
+    char *PARALLEL_IO_SERVER_ADDRESS
+    char *PARALLEL_IO_SERVER_PORT
+    np.int64_t PARALLEL_IO_WRITER_PORT
+    char *PARALLEL_IO_SERVER_INTERFACE
+    char *RUN_ON_SUCCESS
+
+    char *INBASE
+    char *FILENAME
+    np.int64_t STARTING_SNAP
+    np.int64_t NUM_SNAPS
+    np.int64_t NUM_BLOCKS
+    np.int64_t NUM_READERS
+    np.int64_t PRELOAD_PARTICLES
+    char *SNAPSHOT_NAMES
+    char *LIGHTCONE_ALT_SNAPS
+    char *BLOCK_NAMES
+
+    char *OUTBASE
+    np.float64_t OVERLAP_LENGTH
+    np.int64_t NUM_WRITERS
+    np.int64_t FORK_READERS_FROM_WRITERS
+    np.int64_t FORK_PROCESSORS_PER_MACHINE
+
+    char *OUTPUT_FORMAT
+    np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    np.int64_t FULL_PARTICLE_CHUNKS
+    char *BGC2_SNAPNAMES
+
+    np.int64_t BOUND_PROPS
+    np.int64_t BOUND_OUT_TO_HALO_EDGE
+    np.int64_t DO_MERGER_TREE_ONLY
+    np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t TRIM_OVERLAP
+    np.float64_t ROUND_AFTER_TRIM
+    np.int64_t LIGHTCONE
+    np.int64_t PERIODIC
+
+    np.float64_t LIGHTCONE_ORIGIN[3]
+    np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+
+    np.float64_t LIMIT_CENTER[3]
+    np.float64_t LIMIT_RADIUS
+
+    np.int64_t SWAP_ENDIANNESS
+    np.int64_t GADGET_VARIANT
+
+    np.float64_t FOF_FRACTION
+    np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    np.int64_t TEMPORAL_HALO_FINDING
+    np.int64_t MIN_HALO_PARTICLES
+    np.float64_t UNBOUND_THRESHOLD
+    np.int64_t ALT_NFW_METRIC
+
+    np.int64_t TOTAL_PARTICLES
+    np.float64_t BOX_SIZE
+    np.int64_t OUTPUT_HMAD
+    np.int64_t OUTPUT_PARTICLES
+    np.int64_t OUTPUT_LEVELS
+    np.float64_t DUMP_PARTICLES[3]
+
+    np.float64_t AVG_PARTICLE_SPACING
+    np.int64_t SINGLE_SNAP
+
+def print_rockstar_settings():
+    # We have to do the config
+    print "FILE_FORMAT =", FILE_FORMAT
+    print "PARTICLE_MASS =", PARTICLE_MASS
+
+    print "MASS_DEFINITION =", MASS_DEFINITION
+    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
+    print "FORCE_RES =", FORCE_RES
+
+    print "SCALE_NOW =", SCALE_NOW
+    print "h0 =", h0
+    print "Ol =", Ol
+    print "Om =", Om
+
+    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
+    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
+    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
+    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
+    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
+
+    print "PARALLEL_IO =", PARALLEL_IO
+    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
+    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
+    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
+    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
+    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
+
+    print "INBASE =", INBASE
+    print "FILENAME =", FILENAME
+    print "STARTING_SNAP =", STARTING_SNAP
+    print "NUM_SNAPS =", NUM_SNAPS
+    print "NUM_BLOCKS =", NUM_BLOCKS
+    print "NUM_READERS =", NUM_READERS
+    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
+    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
+    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
+    print "BLOCK_NAMES =", BLOCK_NAMES
+
+    print "OUTBASE =", OUTBASE
+    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
+    print "NUM_WRITERS =", NUM_WRITERS
+    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
+    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
+
+    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
+    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
+    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
+
+    print "BOUND_PROPS =", BOUND_PROPS
+    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
+    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
+    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
+    print "TRIM_OVERLAP =", TRIM_OVERLAP
+    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
+    print "LIGHTCONE =", LIGHTCONE
+    print "PERIODIC =", PERIODIC
+
+    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
+    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
+    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
+    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
+    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
+    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
+
+    print "LIMIT_CENTER =", LIMIT_CENTER[0]
+    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
+    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
+    print "LIMIT_RADIUS =", LIMIT_RADIUS
+
+    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
+    print "GADGET_VARIANT =", GADGET_VARIANT
+
+    print "FOF_FRACTION =", FOF_FRACTION
+    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
+    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
+    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
+    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
+    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
+    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
+
+    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
+    print "BOX_SIZE =", BOX_SIZE
+    print "OUTPUT_HMAD =", OUTPUT_HMAD
+    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
+    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
+    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
+    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
+    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
+
+    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
+    print "SINGLE_SNAP =", SINGLE_SNAP
+
+cdef class RockstarInterface
+
+cdef RockstarInterface rh
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    cdef int i, fi, npart, tnpart
+    cdef np.float64_t conv[6], left_edge[6]
+    dd = rh.data_source
+    cdef np.ndarray[np.int64_t, ndim=1] arri
+    cdef np.ndarray[np.float64_t, ndim=1] arr
+    block = int(str(filename).rsplit(".")[-1])
+
+    # Now we want to grab data from only a subset of the grids.
+    n = rh.block_ratio
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += dd._get_data_from_grid(g, "particle_index").size
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
+    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    conv[3] = conv[4] = conv[5] = 1e-5
+    left_edge[0] = rh.pf.domain_left_edge[0]
+    left_edge[1] = rh.pf.domain_left_edge[1]
+    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[3] = left_edge[4] = left_edge[5] = 0.0
+    pi = 0
+    for g in grids:
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        npart = arri.size
+        for i in range(npart):
+            p[0][i+pi].id = arri[i]
+        fi = 0
+        for field in ["particle_position_x", "particle_position_y",
+                      "particle_position_z",
+                      "particle_velocity_x", "particle_velocity_y",
+                      "particle_velocity_z"]:
+            arr = dd._get_data_from_grid(g, field).astype("float64")
+            for i in range(npart):
+                p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
+            fi += 1
+        pi += npart
+    num_p[0] = tnpart
+    print "TOTAL", block, pi, tnpart, len(grids)
+
+cdef class RockstarInterface:
+
+    cdef public object pf
+    cdef public object data_source
+    cdef int rank
+    cdef int size
+    cdef int block_ratio
+
+    def __cinit__(self, pf, data_source):
+        self.pf = pf
+        self.data_source = data_source
+
+    def setup_rockstar(self, char *server_address, char *server_port,
+                       np.float64_t particle_mass = -1.0,
+                       int parallel = False, int num_readers = 1,
+                       int num_writers = 1,
+                       int writing_port = -1, int block_ratio = 1):
+        global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
+        global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+        global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+        global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+        global rh
+        if parallel:
+            PARALLEL_IO = 1
+            PARALLEL_IO_SERVER_ADDRESS = server_address
+            PARALLEL_IO_SERVER_PORT = server_port
+            if writing_port > 0:
+                PARALLEL_IO_WRITER_PORT = writing_port
+        else:
+            PARALLEL_IO = 0
+            PARALLEL_IO_SERVER_ADDRESS = server_address
+            PARALLEL_IO_SERVER_PORT = server_port
+        FILENAME = "inline.<block>"
+        FILE_FORMAT = "GENERIC"
+        OUTPUT_FORMAT = "ASCII"
+        NUM_SNAPS = 1
+        NUM_READERS = num_readers
+        NUM_BLOCKS = num_readers * block_ratio
+        NUM_WRITERS = num_writers
+        self.block_ratio = block_ratio
+
+        h0 = self.pf.hubble_constant
+        Ol = self.pf.omega_lambda
+        Om = self.pf.omega_matter
+
+        if particle_mass < 0:
+            print "Assuming single-mass particle."
+            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
+        PARTICLE_MASS = particle_mass
+        PERIODIC = 1
+        BOX_SIZE = (self.pf.domain_right_edge[0] -
+                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        setup_config()
+        rh = self
+        cdef LPG func = rh_read_particles
+        set_load_particles_generic(func)
+
+    def call_rockstar(self):
+        read_particles("generic")
+        rockstar(NULL, 0)
+        output_and_free_halos(0, 0, 0, NULL)
+
+    def start_server(self):
+        server()
+
+    def start_client(self):
+        client()


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/analysis_modules/halo_finding/rockstar/setup.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path
+
+import os.path
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('rockstar',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
+    #config.make_svn_version_py()
+    rd = os.environ["ROCKSTAR_DIR"]
+    config.add_extension("rockstar_interface",
+                         "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
+                         library_dirs=[rd],
+                         libraries=["rockstar"],
+                         include_dirs=[rd,
+                                       os.path.join(rd, "io"),
+                                       os.path.join(rd, "util")])
+    return config
+


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -4,13 +4,14 @@
 import sys
 import os.path
 
-
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
     config.add_subpackage("fof")
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
-    config.make_config_py()  # installs __config__.py
+    if "ROCKSTAR_DIR" in os.environ:
+        config.add_subpackage("rockstar")
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1564,7 +1564,8 @@
     def __init__(self, axis, field, weight_field = None,
                  max_level = None, center = None, pf = None,
                  source=None, node_name = None, field_cuts = None,
-                 preload_style='level', serialize=True,**kwargs):
+                 preload_style='level', serialize=True,
+                 style = "integrate", **kwargs):
         """
         This is a data object corresponding to a line integral through the
         simulation domain.
@@ -1628,6 +1629,13 @@
         >>> print qproj["Density"]
         """
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
+        self.proj_style = style
+        if style == "mip":
+            self.func = na.max
+        elif style == "integrate":
+            self.func = na.sum # for the future
+        else:
+            raise NotImplementedError(style)
         self.weight_field = weight_field
         self._field_cuts = field_cuts
         self.serialize = serialize
@@ -1635,7 +1643,6 @@
         if center is not None: self.set_field_parameter('center',center)
         self._node_name = node_name
         self._initialize_source(source)
-        self.func = na.sum # for the future
         self._grids = self.source._grids
         if max_level == None:
             max_level = self.hierarchy.max_level
@@ -1678,7 +1685,8 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals)
+        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+                        style = self.proj_style)
 
     def _get_dls(self, grid, fields):
         # Place holder for a time when maybe we will not be doing just
@@ -1689,7 +1697,12 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        dls = na.array(dls)
+        convs = na.array(convs)
+        if self.proj_style == "mip":
+            dls[:] = 1.0
+            convs[:] = 1.0
+        return dls, convs
 
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
@@ -1723,7 +1736,13 @@
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
-        tree = self.comm.merge_quadtree_buffers(tree)
+        if self.proj_style == "mip":
+            merge_style = -1
+        elif self.proj_style == "integrate":
+            merge_style = 1
+        else:
+            raise NotImplementedError
+        tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -2613,7 +2632,7 @@
     def _extract_isocontours_from_grid(self, grid, field, value,
                                        sample_values = None):
         mask = self._get_cut_mask(grid) * grid.child_mask
-        vals = grid.get_vertex_centered_data(field)
+        vals = grid.get_vertex_centered_data(field, no_ghost = False)
         if sample_values is not None:
             svals = grid.get_vertex_centered_data(sample_values)
         else:


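The projection object now accepts style="mip" (maximum intensity projection),
which swaps the additive combine for na.max and neutralizes the path-length
and unit-conversion factors. A sketch, assuming the usual hierarchy shortcut
and a placeholder field:

    # Standard line integral along the x-axis:
    proj = pf.h.proj(0, "Density", style="integrate")
    # Maximum value along each ray instead of the integral:
    mip = pf.h.proj(0, "Density", style="mip")
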
diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -289,9 +289,13 @@
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
         if nn not in self._handle: raise KeyError(nn)
-        for tpname, pval in self._handle[nn][:]:
+        for tpname, pval in zip(self._handle[nn][:,'name'],
+                                self._handle[nn][:,'value']):
             if tpname.strip() == pname:
-                return pval
+                if ptype == "string" :
+                    return pval.strip()
+                else :
+                    return pval
         raise KeyError(pname)
 
     def _parse_parameter_file(self):


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,6 +24,7 @@
 """
 
 import time, types, signal, inspect, traceback, sys, pdb, os
+import contextlib
 import warnings, struct, subprocess
 from math import floor, ceil
 
@@ -562,3 +563,15 @@
        isinstance(length[1], types.StringTypes):
        length = length[0]/pf[length[1]]
     return length
+
+ at contextlib.contextmanager
+def parallel_profile(prefix):
+    import cProfile
+    from yt.config import ytcfg
+    fn = "%s_%04i.cprof" % (prefix,
+                ytcfg.getint("yt", "__topcomm_parallel_rank"))
+    p = cProfile.Profile()
+    p.enable()
+    yield
+    p.disable()
+    p.dump_stats(fn)


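The new parallel_profile context manager wraps a block of code in cProfile
and dumps one stats file per (top-level) parallel rank. A usage sketch, with
a placeholder prefix and workload:

    from yt.funcs import parallel_profile

    with parallel_profile("projection"):
        proj = pf.h.proj(0, "Density")
    # Each rank writes projection_NNNN.cprof; inspect with, e.g.,
    #   python -m pstats projection_0000.cprof
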
diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -564,8 +564,7 @@
                                width: 70,
                                xtype: 'label',
                                text: 'Field',
-                             },
-                             {
+                             }, {
                                x: 80,
                                y: 20,
                                width : 160,
@@ -627,6 +626,47 @@
                                }
                              }
                           ]
+                        }, {
+                          xtype: 'panel',
+                          title: 'Velocity Vectors',
+                          id: 'vector_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
+                          flex: 1,
+                          items : [
+                             {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Skip Factor',
+                             }, {
+                               x: 80,
+                               y: 60,
+                               width : 160,
+                               xtype: 'slider',
+                               id: 'skip',
+                               minValue: 1,
+                               maxValue: 64,
+                               value: 32,
+                               increment: 1,
+                               plugins: new Ext.slider.Tip(),
+                             }, {
+                               x: 10,
+                               y: 180,
+                               width: 80,
+                               xtype: 'button',
+                               text: 'Apply',
+                               handler: function(b, e) {
+                                  skip = vector_window.get('skip').getValue();
+                                  yt_rpc.ExtDirectREPL.execute(
+                                      {code:python_varname
+                                       + '.set_vector_info('+skip+')',
+                                        hide:false},
+                                      cell_finished);
+                               }
+                             }
+                          ]
                         }
                         ] } /* tabpanel items and entry */
                         ]
@@ -650,6 +690,9 @@
     var contour_window = this.panel.get("rhs_panel_" + python_varname);
     contour_window = contour_window.get("editor_panel");
     contour_window = contour_window.get("contour_edit");
+    var vector_window = this.panel.get("rhs_panel_" + python_varname);
+    vector_window = vector_window.get("editor_panel");
+    vector_window = vector_window.get("vector_edit");
     var image_dom = this.image_panel.el.dom;
     var control_panel = this.panel;
     var metadata_string;


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/utilities/_amr_utils/FixedInterpolator.c
--- a/yt/utilities/_amr_utils/FixedInterpolator.c
+++ b/yt/utilities/_amr_utils/FixedInterpolator.c
@@ -96,6 +96,11 @@
          {0,0,1}, {1,0,1}, {1,1,1}, {0,1,1}};
 
     npy_float64 mu = ((isovalue - v1) / (v2 - v1));
+
+    if (fabs(1.0 - isovalue/v1) < 0.000001) mu = 0.0;
+    if (fabs(1.0 - isovalue/v2) < 0.000001) mu = 1.0;
+    if (fabs(v1/v2) < 0.000001) mu = 0.0;
+
     vl[0] = x; vl[1] = y; vl[2] = z;
     for (i=0;i<3;i++)
         vl[i] += dds[i] * cverts[vind1][i]


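The FixedInterpolator change clamps the interpolation parameter whenever the
isovalue numerically coincides with one of the two vertex values, so
isocontour vertices snap exactly onto cell corners instead of drifting by
roundoff. In Python the same logic reads:

    def clamped_mu(isovalue, v1, v2, tol=1e-6):
        # Linear interpolation parameter along the edge from v1 to v2.
        mu = (isovalue - v1) / (v2 - v1)
        # Snap to an endpoint when the isovalue matches a vertex value to
        # within tolerance; the last test guards v1 ~ 0 relative to v2.
        if abs(1.0 - isovalue / v1) < tol: mu = 0.0
        if abs(1.0 - isovalue / v2) < tol: mu = 1.0
        if abs(v1 / v2) < tol: mu = 0.0
        return mu
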
diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/utilities/_amr_utils/QuadTree.pyx
--- a/yt/utilities/_amr_utils/QuadTree.pyx
+++ b/yt/utilities/_amr_utils/QuadTree.pyx
@@ -43,6 +43,10 @@
     np.int64_t pos[2]
     QuadTreeNode *children[2][2]
 
+ctypedef void QTN_combine(QuadTreeNode *self,
+        np.float64_t *val, np.float64_t weight_val,
+        int nvals)
+
 cdef void QTN_add_value(QuadTreeNode *self,
         np.float64_t *val, np.float64_t weight_val,
         int nvals):
@@ -51,6 +55,14 @@
         self.val[i] += val[i]
     self.weight_val += weight_val
 
+cdef void QTN_max_value(QuadTreeNode *self,
+        np.float64_t *val, np.float64_t weight_val,
+        int nvals):
+    cdef int i
+    for i in range(nvals):
+        self.val[i] = fmax(val[i], self.val[i])
+    self.weight_val = 1.0
+
 cdef void QTN_refine(QuadTreeNode *self, int nvals):
     cdef int i, j, i1, j1
     cdef np.int64_t npos[2]
@@ -101,9 +113,16 @@
     cdef np.int64_t top_grid_dims[2]
     cdef int merged
     cdef int num_cells
+    cdef QTN_combine *combine
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
-                  int nvals):
+                  int nvals, style = "integrate"):
+        if style == "integrate":
+            self.combine = QTN_add_value
+        elif style == "mip":
+            self.combine = QTN_max_value
+        else:
+            raise NotImplementedError
         self.merged = 1
         cdef int i, j
         cdef QuadTreeNode *node
@@ -190,8 +209,12 @@
     @cython.wraparound(False)
     def frombuffer(self, np.ndarray[np.int32_t, ndim=1] refined,
                          np.ndarray[np.float64_t, ndim=2] values,
-                         np.ndarray[np.float64_t, ndim=1] wval):
-        self.merged = 1 # Just on the safe side
+                         np.ndarray[np.float64_t, ndim=1] wval,
+                         style):
+        if style == "mip" or style == -1:
+            self.merged = -1
+        elif style == "integrate" or style == 1:
+            self.merged = 1
         cdef int curpos = 0
         cdef QuadTreeNode *root
         self.num_cells = wval.shape[0]
@@ -241,7 +264,7 @@
             i = (pos[0] >= fac*(2*node.pos[0]+1))
             j = (pos[1] >= fac*(2*node.pos[1]+1))
             node = node.children[i][j]
-        QTN_add_value(node, val, weight_val, self.nvals)
+        self.combine(node, val, weight_val, self.nvals)
             
     @cython.cdivision(True)
     cdef QuadTreeNode *find_on_root_level(self, np.int64_t pos[2], int level):
@@ -335,12 +358,17 @@
                               np.float64_t *vtoadd,
                               np.float64_t wtoadd,
                               int cur_level):
-        cdef int i, j
+        cdef int i, j, n
         if cur_level == level:
             if node.children[0][0] != NULL: return 0
-            for i in range(self.nvals):
-                vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
-            wdata[curpos] = node.weight_val + wtoadd
+            if self.merged == -1:
+                for i in range(self.nvals):
+                    vdata[self.nvals * curpos + i] = fmax(node.val[i], vtoadd[i])
+                wdata[curpos] = 1.0
+            else:
+                for i in range(self.nvals):
+                    vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
+                wdata[curpos] = node.weight_val + wtoadd
             pdata[curpos * 2] = node.pos[0]
             pdata[curpos * 2 + 1] = node.pos[1]
             return 1
@@ -350,8 +378,14 @@
             for i in range(self.nvals):
                 vtoadd[i] += node.val[i]
             wtoadd += node.weight_val
+        elif self.merged == -1:
+            for i in range(self.nvals):
+                vtoadd[i] = node.val[i]
         for i in range(2):
             for j in range(2):
+                if self.merged == -1:
+                    for n in range(self.nvals):
+                        vtoadd[n] = node.val[n]
                 added += self.fill_from_level(node.children[i][j],
                         level, curpos + added, pdata, vdata, wdata,
                         vtoadd, wtoadd, cur_level + 1)
@@ -369,7 +403,8 @@
             free(self.root_nodes[i])
         free(self.root_nodes)
 
-cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2, int nvals):
+cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2, int nvals,
+                          QTN_combine *func):
     # We have four choices when merging nodes.
     # 1. If both nodes have no refinement, then we add values of n2 to n1.
     # 2. If both have refinement, we call QTN_merge_nodes on all four children.
@@ -378,13 +413,13 @@
     # 4. If n1 has refinement and n2 does not, we add the value of n2 to n1.
     cdef int i, j
 
-    QTN_add_value(n1, n2.val, n2.weight_val, nvals)
+    func(n1, n2.val, n2.weight_val, nvals)
     if n1.children[0][0] == n2.children[0][0] == NULL:
         pass
     elif n1.children[0][0] != NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
-                QTN_merge_nodes(n1.children[i][j], n2.children[i][j], nvals)
+                QTN_merge_nodes(n1.children[i][j], n2.children[i][j], nvals, func)
     elif n1.children[0][0] == NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
@@ -395,14 +430,24 @@
     else:
         raise RuntimeError
 
-def merge_quadtrees(QuadTree qt1, QuadTree qt2):
+def merge_quadtrees(QuadTree qt1, QuadTree qt2, style = 1):
     cdef int i, j
     qt1.num_cells = 0
+    cdef QTN_combine *func
+    if style == 1:
+        qt1.merged = 1
+        func = QTN_add_value
+    elif style == -1:
+        qt1.merged = -1
+        func = QTN_max_value
+    else:
+        raise NotImplementedError
+    if qt1.merged != 0 or qt2.merged != 0:
+        assert(qt1.merged == qt2.merged)
     for i in range(qt1.top_grid_dims[0]):
         for j in range(qt1.top_grid_dims[1]):
             QTN_merge_nodes(qt1.root_nodes[i][j],
                             qt2.root_nodes[i][j],
-                            qt1.nvals)
+                            qt1.nvals, func)
             qt1.num_cells += qt1.count_total_cells(
                                 qt1.root_nodes[i][j])
-    qt1.merged = 1


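merge_quadtrees and frombuffer now carry a style flag: 1 (or "integrate")
keeps the additive combine, while -1 (or "mip") takes the node-wise maximum
and forces unit weights. A sketch, assuming both symbols are exposed through
yt.utilities.amr_utils as the rest of this changeset suggests:

    import numpy as na
    from yt.utilities.amr_utils import QuadTree, merge_quadtrees

    dims = na.array([2, 2], dtype="int64")
    qt1 = QuadTree(dims, 1, style="mip")   # one value per node
    qt2 = QuadTree(dims, 1, style="mip")
    # ... add values to both trees ...
    merge_quadtrees(qt1, qt2, style=-1)    # node-wise max instead of sum
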
diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -643,7 +643,7 @@
         return buf
 
     @parallel_passthrough
-    def merge_quadtree_buffers(self, qt):
+    def merge_quadtree_buffers(self, qt, merge_style):
         # This is a modified version of pairwise reduction from Lisandro Dalcin,
         # in the reductions demo of mpi4py
         size = self.comm.size
@@ -668,8 +668,8 @@
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
                     buf = self.recv_quadtree(target, tgd, args)
                     qto = QuadTree(tgd, args[2])
-                    qto.frombuffer(*buf)
-                    merge_quadtrees(qt, qto)
+                    qto.frombuffer(buf[0], buf[1], buf[2], merge_style)
+                    merge_quadtrees(qt, qto, style = merge_style)
                     del qto
                     #self.send_quadtree(target, qt, tgd, args)
             mask <<= 1
@@ -688,7 +688,7 @@
         self.refined = buf[0]
         if rank != 0:
             qt = QuadTree(tgd, args[2])
-            qt.frombuffer(*buf)
+            qt.frombuffer(buf[0], buf[1], buf[2], merge_style)
         return qt
 
 


diff -r b0ac030e0d978e4997eabe6b87d0542ab26efdfd -r 7e10bc517866f000f28785099b535dc6775803c4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -90,6 +90,7 @@
     _plot_valid = False
     _colorbar_valid = False
     _contour_info = None
+    _vector_info = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, periodic = True):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
@@ -273,6 +274,10 @@
             return
         self._contour_info = (field_name, n_cont, colors, logit)
 
+    @invalidate_plot
+    def set_vector_info(self, skip, scale = 1):
+        self._vector_info = (skip, scale)
+
 class PWViewer(PlotWindow):
     """A viewer for PlotWindows.
 
@@ -405,19 +410,39 @@
             ph.add_payload(payload)
 
     def _apply_modifications(self, img):
-        if self._contour_info is None:
+        if self._contour_info is None and self._vector_info is None:
             return write_png_to_string(img)
         from matplotlib.figure import Figure
         from yt.visualization._mpl_imports import \
             FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
         from yt.utilities.delaunay.triangulate import Triangulation as triang
+
+        vi, vj, vn = img.shape
+
+        # Now we need to get our field values
+        fig = Figure((vi/100.0, vj/100.0), dpi = 100)
+        fig.figimage(img)
+        # Add our contour
+        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
+        ax.patch.set_alpha(0.0)
+
+        # Now apply our modifications
+        self._apply_contours(ax, vi, vj)
+        self._apply_vectors(ax, vi, vj)
+
+        canvas = FigureCanvasAgg(fig)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = f.read()
+        return img
+
+    def _apply_contours(self, ax, vi, vj):
+        if self._contour_info is None: return 
         plot_args = {}
         field, number, colors, logit = self._contour_info
         if colors is not None: plot_args['colors'] = colors
 
-        vi, vj, vn = img.shape
-
-        # Now we need to get our field values
         raw_data = self._frb.data_source
         b = self._frb.bounds
         xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
@@ -428,20 +453,30 @@
         if logit: z = na.log10(z)
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
-        fig = Figure((vi/100.0, vj/100.0), dpi = 100)
-        fig.figimage(img)
-        # Add our contour
-        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
-        ax.patch.set_alpha(0.0)
+        ax.contour(fvals, number, colors='w')
+        
+    def _apply_vectors(self, ax, vi, vj):
+        if self._vector_info is None: return 
+        skip, scale = self._vector_info
 
-        # Now contour it
-        ax.contour(fvals, number, colors='w')
-        canvas = FigureCanvasAgg(fig)
-        f = cStringIO.StringIO()
-        canvas.print_figure(f)
-        f.seek(0)
-        img = f.read()
-        return img
+        nx = self._frb.buff_size[0]/skip
+        ny = self._frb.buff_size[1]/skip
+        new_frb = FixedResolutionBuffer(self._frb.data_source,
+                        self._frb.bounds, (nx,ny))
+
+        axis = self._frb.data_source.axis
+        fx = "%s-velocity" % (axis_names[x_dict[axis]])
+        fy = "%s-velocity" % (axis_names[y_dict[axis]])
+        px = new_frb[fx][::-1,:]
+        py = new_frb[fy][::-1,:]
+        x = na.mgrid[0:vi-1:ny*1j]
+        y = na.mgrid[0:vj-1:nx*1j]
+        # Always normalize, then we scale
+        nn = ((px**2.0 + py**2.0)**0.5).max()
+        px /= nn
+        py /= nn
+        print scale, px.min(), px.max(), py.min(), py.max()
+        ax.quiver(x, y, px, py, scale=float(vi)/skip)
         
     def get_ticks(self, field, height = 400):
         # This will eventually change to work with non-logged fields
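
The reworked _apply_modifications path paints the already-rendered image
buffer into a matplotlib Figure, lays a fully transparent axes over it, and
lets contour and quiver draw on top. A condensed sketch of just the vector
overlay, with illustrative array names and shapes standing in for yt's
FixedResolutionBuffer plumbing:

    import io
    import numpy as np
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg

    def overlay_vectors(img, px, py, skip):
        # img: (vi, vj, 4) RGBA buffer; px, py: downsampled vector components.
        vi, vj = img.shape[0], img.shape[1]
        fig = Figure((vi / 100.0, vj / 100.0), dpi=100)
        fig.figimage(img)                        # paint the rendered buffer
        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
        ax.patch.set_alpha(0.0)                  # let the image show through
        # Pixel coordinates of the vector sample points.
        x = np.mgrid[0:vi - 1:px.shape[1] * 1j]
        y = np.mgrid[0:vj - 1:px.shape[0] * 1j]
        # Normalize to the largest magnitude, then scale the arrows as the
        # diff does with scale=float(vi)/skip.
        nn = np.sqrt(px ** 2 + py ** 2).max()
        ax.quiver(x, y, px / nn, py / nn, scale=float(vi) / skip)
        canvas = FigureCanvasAgg(fig)
        buf = io.BytesIO()
        canvas.print_figure(buf)                 # PNG bytes, like the frontend
        return buf.getvalue()

The transparent axes is the load-bearing trick: it spans the whole figure, so
quiver's pixel coordinates line up with the underlying image without any
extra transform bookkeeping.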



https://bitbucket.org/yt_analysis/yt/changeset/3330bd246214/
changeset:   3330bd246214
branch:      yt
user:        ngoldbaum
date:        2012-03-16 20:11:07
summary:     Going back to the original install script
affected #:  1 file

diff -r 7e10bc517866f000f28785099b535dc6775803c4 -r 3330bd246214e2b562b2b96170df7bf505946caa doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -15,7 +15,7 @@
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
-DEST_SUFFIX="yt-`uname -m`"
+DEST_SUFFIX="yt-`uname -p`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="stable" # This is the branch to which we will forcibly update.
 
@@ -40,6 +40,7 @@
 INST_FTYPE=1    # Install FreeType2 locally?
 INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
+INST_FORTHON=1  # Install Forthon?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -156,20 +157,8 @@
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
-	echo
-	echo "NOTE: you must have the Xcode command line tools installed."
         echo
-	echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
-	echo "website"
-        echo
-	echo "OS X 10.6: download Xcode 3.2 from the mac developer tools" 
-	echo "website"
-        echo
-	echo "OS X 10.7: download Xcode 4.0 from the mac app store or" 
-	echo "alternatively download the Xcode command line tools from" 
-	echo "the mac developer tools website"
-        echo
-	echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
+        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
         echo "Leopard) or newer.  If you do, please set the following"
         echo "environment variables, remove any broken installation tree, and"
         echo "re-run this script verbatim."
@@ -213,6 +202,10 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
+printf "%-15s = %s so I " "INST_FORTHON" "${INST_FORTHON}"
+get_willwont ${INST_FORTHON}
+echo "be installing Forthon (for Halo Finding, etc)"
+
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -324,6 +317,7 @@
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Forthon-0.8.5.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -364,7 +358,6 @@
         cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -382,7 +375,6 @@
         cd libpng-1.2.43
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -400,7 +392,6 @@
         cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -418,7 +409,6 @@
         cd hdf5-1.8.7
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -437,7 +427,6 @@
         cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -453,7 +442,6 @@
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
-    ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 fi
@@ -545,6 +533,7 @@
 do_setup_py ipython-0.10
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
+[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
@@ -555,6 +544,7 @@
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
+[ $INST_FORTHON -eq 1 ] && ( ( cd yt/utilities/kdtree && FORTHON_EXE=${DEST_DIR}/bin/Forthon make 2>&1 ) 1>> ${LOG_FILE} )
 ( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD



https://bitbucket.org/yt_analysis/yt/changeset/860a09f7021a/
changeset:   860a09f7021a
branch:      yt
user:        ngoldbaum
date:        2012-03-16 20:14:08
summary:     Adding xcode boilerplate
affected #:  1 file

diff -r 3330bd246214e2b562b2b96170df7bf505946caa -r 860a09f7021a854922cf9a546d30dc5aba708f90 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -15,9 +15,9 @@
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
-DEST_SUFFIX="yt-`uname -p`"
+DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="stable" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
@@ -40,7 +40,8 @@
 INST_FTYPE=1    # Install FreeType2 locally?
 INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
-INST_FORTHON=1  # Install Forthon?
+INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
+                # working TeX installation.
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -131,9 +132,9 @@
         echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
         echo "These commands should take care of that for you:"
         echo
-        echo "   $ module unload mvapich-devel"
+        echo "   $ module unload mvapich2"
         echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich-devel"
+        echo "   $ module load mvapich2"
         echo
     fi
     if [ "${MYHOST##honest}" != "${MYHOST}" ]
@@ -158,6 +159,18 @@
     then
         echo "Looks like you're running on Mac OSX."
         echo
+        echo "NOTE: you must have the Xcode command line tools installed."
+        echo
+        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
+        echo "website"
+        echo
+        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
+        echo "website"
+        echo
+        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
+        echo "alternatively download the Xcode command line tools from"
+        echo "the mac developer tools website"
+        echo
         echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
         echo "Leopard) or newer.  If you do, please set the following"
         echo "environment variables, remove any broken installation tree, and"
@@ -202,10 +215,6 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
-printf "%-15s = %s so I " "INST_FORTHON" "${INST_FORTHON}"
-get_willwont ${INST_FORTHON}
-echo "be installing Forthon (for Halo Finding, etc)"
-
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -214,6 +223,10 @@
 get_willwont ${INST_ENZO}
 echo "be checking out Enzo"
 
+printf "%-15s = %s so I " "INST_PYX" "${INST_PYX}"
+get_willwont ${INST_PYX}
+echo "be installing PyX"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -277,14 +290,30 @@
     export GETFILE="curl -sSO"
 fi
 
+if type -P sha512sum &> /dev/null
+then
+    echo "Using sha512sum"
+    export SHASUM="sha512sum"
+elif type -P shasum &> /dev/null
+then
+    echo "Using shasum -a 512"
+    export SHASUM="shasum -a 512"
+else
+    echo
+    echo "I am unable to locate any shasum-like utility."
+    echo "ALL FILE INTEGRITY IS NOT VERIFIABLE."
+    echo "THIS IS PROBABLY A BIG DEAL."
+    echo
+    echo "(I'll hang out for a minute for you to consider this.)"
+    sleep 60
+fi
+
 function get_enzotools
 {
     echo "Downloading $1 from yt-project.org"
     [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
-    ( which md5sum &> /dev/null ) || return # return if we don't have md5sum
-    ( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
 ORIG_PWD=`pwd`
@@ -298,6 +327,26 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+# Now we dump all our SHA512 files out.
+
+echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
+echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
+echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'de73b14727c2a6623c19896d4c034ad0f705bf5ccbb8501c786a9d074cce97a7760db9246ae7da3db47dd2de29a1707a8a0ee17ab41a6d9140f2a7dbf455af0f  ext-3.3.2.zip' > ext-3.3.2.zip.sha512
+echo '6d65dcbb77978d4f4a9711062f11ae9d61133ca086f9207a8c1ecea8807dc9612cc8c3b2428157d2fb00dea8e0958f61e35cce4e07987c80bc808bbda3608a6c  ext-slate-110328.zip' > ext-slate-110328.zip.sha512
+echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
+echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
+echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo '2c883d64886e5d595775dde497f101ff2ecec0786eabcdc69861c20e7d081e67b5e97551194236933b78f1ff7b119fcba0a9ce3aa4851440fc58f84d2094177b  ipython-0.10.tar.gz' > ipython-0.10.tar.gz.sha512
+echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
+echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
+echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
+echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
+echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
+echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+
 # Individual processes
 if [ -z "$HDF5_DIR" ]
 then
@@ -310,6 +359,7 @@
 [ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
 [ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
+[ $INST_PYX -eq 1 ] && get_enzotools PyX-0.11.1.tar.gz
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
@@ -317,7 +367,6 @@
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.15.1.tar.gz
-get_enzotools Forthon-0.8.5.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -358,6 +407,7 @@
         cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -375,6 +425,7 @@
         cd libpng-1.2.43
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -392,6 +443,7 @@
         cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -409,6 +461,7 @@
         cd hdf5-1.8.7
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -427,6 +480,7 @@
         cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -442,6 +496,7 @@
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+    ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 fi
@@ -533,7 +588,7 @@
 do_setup_py ipython-0.10
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
-[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
@@ -544,11 +599,16 @@
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-[ $INST_FORTHON -eq 1 ] && ( ( cd yt/utilities/kdtree && FORTHON_EXE=${DEST_DIR}/bin/Forthon make 2>&1 ) 1>> ${LOG_FILE} )
 ( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
+if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+then
+    echo "Installing pure-python readline"
+    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+fi
+
 if [ $INST_ENZO -eq 1 ]
 then
     echo "Cloning a copy of Enzo."

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


