[yt-svn] commit/yt: 12 new changesets

commits-noreply at bitbucket.org
Fri Mar 14 05:25:31 PDT 2014


12 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/8a04b384f9fa/
Changeset:   8a04b384f9fa
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-05 03:31:03
Summary:     Set your own WCS info in FITSImageBuffer
Affected #:  1 file

diff -r c062561ed189cf5493a7e38c79f4652189e32509 -r 8a04b384f9fa23eb5ef5aacd3bc1ecd68ec852ec yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -24,7 +24,7 @@
 class FITSImageBuffer(HDUList):
 
     def __init__(self, data, fields=None, units="cm",
-                 center=None, scale=None):
+                 center=None, scale=None, wcs=None):
         r""" Initialize a FITSImageBuffer object.
 
         FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
@@ -51,6 +51,8 @@
             Pixel scale in unit *units*. Will be ignored if *data* is
             a FixedResolutionBuffer or a YTCoveringGrid. Must be
             specified otherwise, or if *units* is "deg".
+        wcs : `astropy.wcs.WCS` instance, optional
+            Supply an AstroPy WCS instance to override automatic WCS creation.
 
         Examples
         --------
@@ -123,50 +125,48 @@
                 mylog.error("Please specify scale=(dx,dy[,dz]) in %s." % (units))
                 raise ValueError
 
-        w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
-        w.wcs.crpix = 0.5*(np.array(self.shape)+1)
-
-        proj_type = ["linear"]*self.dimensionality
-
-        if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
-            # FRBs are a special case where we have coordinate
-            # information, so we take advantage of this and
-            # construct the WCS object
-            dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
-            dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
-            dx *= img_data.pf.units[units]
-            dy *= img_data.pf.units[units]
-            xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
-            yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
-            xctr *= img_data.pf.units[units]
-            yctr *= img_data.pf.units[units]
-            center = [xctr, yctr]
-        elif isinstance(img_data, YTCoveringGridBase):
-            dx, dy, dz = img_data.dds
-            dx *= img_data.pf.units[units]
-            dy *= img_data.pf.units[units]
-            dz *= img_data.pf.units[units]
-            center = 0.5*(img_data.left_edge+img_data.right_edge)
-            center *= img_data.pf.units[units]
-        elif units == "deg" and self.dimensionality == 2:
-            dx = -scale[0]
-            dy = scale[1]
-            proj_type = ["RA---TAN","DEC--TAN"]
+        if wcs is None:
+            w = pywcs.WCS(header=self[0].header, naxis=self.dimensionality)
+            w.wcs.crpix = 0.5*(np.array(self.shape)+1)
+            proj_type = ["linear"]*self.dimensionality
+            if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
+                # FRBs are a special case where we have coordinate
+                # information, so we take advantage of this and
+                # construct the WCS object
+                dx = (img_data.bounds[1]-img_data.bounds[0])/self.nx
+                dy = (img_data.bounds[3]-img_data.bounds[2])/self.ny
+                dx *= img_data.pf.units[units]
+                dy *= img_data.pf.units[units]
+                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0])
+                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2])
+                xctr *= img_data.pf.units[units]
+                yctr *= img_data.pf.units[units]
+                center = [xctr, yctr]
+            elif isinstance(img_data, YTCoveringGridBase):
+                dx, dy, dz = img_data.dds
+                dx *= img_data.pf.units[units]
+                dy *= img_data.pf.units[units]
+                dz *= img_data.pf.units[units]
+                center = 0.5*(img_data.left_edge+img_data.right_edge)
+                center *= img_data.pf.units[units]
+            elif units == "deg" and self.dimensionality == 2:
+                dx = -scale[0]
+                dy = scale[1]
+                proj_type = ["RA---TAN","DEC--TAN"]
+            else:
+                dx = scale[0]
+                dy = scale[1]
+                if self.dimensionality == 3: dz = scale[2]
+            w.wcs.crval = center
+            w.wcs.cunit = [units]*self.dimensionality
+            w.wcs.ctype = proj_type
+            if self.dimensionality == 2:
+                w.wcs.cdelt = [dx,dy]
+            elif self.dimensionality == 3:
+                w.wcs.cdelt = [dx,dy,dz]
+            self._set_wcs(w)
         else:
-            dx = scale[0]
-            dy = scale[1]
-            if self.dimensionality == 3: dz = scale[2]
-            
-        w.wcs.crval = center
-        w.wcs.cunit = [units]*self.dimensionality
-        w.wcs.ctype = proj_type
-        
-        if self.dimensionality == 2:
-            w.wcs.cdelt = [dx,dy]
-        elif self.dimensionality == 3:
-            w.wcs.cdelt = [dx,dy,dz]
-
-        self._set_wcs(w)
+            self._set_wcs(wcs)
             
     def _set_wcs(self, wcs):
         """


https://bitbucket.org/yt_analysis/yt/commits/0e8f5752efd2/
Changeset:   0e8f5752efd2
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-07 20:59:03
Summary:     Adding a keyword argument to pass in a Glue data_collection.
Affected #:  2 files

diff -r 8a04b384f9fa23eb5ef5aacd3bc1ecd68ec852ec -r 0e8f5752efd27aaea78354de1c0b3ea88dc36bb7 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -353,11 +353,13 @@
         else:
             self.hierarchy.save_object(self, name)
 
-    def to_glue(self, fields, label="yt"):
+    def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
         Glue (http://www.glueviz.org) for interactive
-        analysis. Optionally add a *label*.  
+        analysis. Optionally add a *label*. If you are already within
+        the Glue environment, you can pass a *data_collection* object,
+        otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
@@ -366,11 +368,14 @@
         gdata = Data(label=label)
         for component_name in fields:
             gdata.add_component(self[component_name], component_name)
-        dc = DataCollection([gdata])
 
-        app = GlueApplication(dc)
-        app.start()
-
+        if data_collection is None:
+            dc = DataCollection([gdata])
+            app = GlueApplication(dc)
+            app.start()
+        else:
+            data_collection.append(gdata)
+        
     def __reduce__(self):
         args = tuple([self.pf._hash(), self._type_name] +
                      [getattr(self, n) for n in self._con_args] +

diff -r 8a04b384f9fa23eb5ef5aacd3bc1ecd68ec852ec -r 0e8f5752efd27aaea78354de1c0b3ea88dc36bb7 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -220,11 +220,13 @@
         elif self.dimensionality == 3:
             return self.nx, self.ny, self.nz
 
-    def to_glue(self, label="yt"):
+    def to_glue(self, label="yt", data_collection=None):
         """
         Takes the data in the FITSImageBuffer and exports it to
         Glue (http://www.glueviz.org) for interactive
-        analysis. Optionally add a *label*. 
+        analysis. Optionally add a *label*. If you are already within
+        the Glue environment, you can pass a *data_collection* object,
+        otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
@@ -236,10 +238,12 @@
         image.coords = coordinates_from_header(self.wcs.to_header())
         for k,v in field_dict.items():
             image.add_component(v, k)
-        dc = DataCollection([image])
-
-        app = GlueApplication(dc)
-        app.start()
+        if data_collection is None:
+            dc = DataCollection([image])
+            app = GlueApplication(dc)
+            app.start()
+        else:
+            data_collection.append(image)
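
A sketch of the two intended workflows, assuming Glue is installed; the dataset name and fields are illustrative, and dc stands for the DataCollection of an already-running Glue session:

    from yt.mods import load

    pf = load("DD0046/DD0046")
    c = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
    sp = pf.h.sphere(c, (100.0, "kpc"))

    # Standalone use: builds a DataCollection and starts Glue itself.
    sp.to_glue(["Density", "Temperature"])

    # Inside Glue: append to the session's existing collection instead
    # of launching a second application.
    sp.to_glue(["Density", "Temperature"], data_collection=dc)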
 
         
 


https://bitbucket.org/yt_analysis/yt/commits/d19161f2cb6b/
Changeset:   d19161f2cb6b
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-09 23:45:07
Summary:     Adding purple X-ray colormap
Affected #:  1 file

diff -r 0e8f5752efd27aaea78354de1c0b3ea88dc36bb7 -r d19161f2cb6b9d8f561052b58ee4c443407a657e yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -98,6 +98,32 @@
 
 add_cmap('black_green', cdict)
 
+# This one is a variant of a colormap commonly
+# used for X-ray observations by Maxim Markevitch
+
+cdict = {'red': ((0.0, 0.0, 0.0),
+                 (0.3, 0.0, 0.0),
+                 (0.352, 0.245, 0.245),
+                 (0.42, 0.5, 0.5),
+                 (0.51, 0.706, 0.706),
+                 (0.613, 0.882, 0.882),
+                 (0.742, 1.0, 1.0),
+                 (1.0, 1.0, 1.0)),
+         'green': ((0.0, 0.0, 0.0),
+                   (0.585, 0.0, 0.0),
+                   (0.613, 0.196, 0.196),
+                   (0.693, 0.48, 0.48),
+                   (0.785, 0.696, 0.696),
+                   (0.885, 0.882, 0.882),
+                   (1.0, 1.0, 1.0)),
+         'blue': ((0.0, 0.0, 0.0),
+                  (0.136, 0.0, 0.0),
+                  (0.136, 0.373, 0.373),
+                  (0.391, 1.0, 1.0),
+                  (1.0, 1.0, 1.0))}
+
+add_cmap("purple_mm", cdict)
+
 # This one comes from
 # http://permalink.gmane.org/gmane.comp.python.matplotlib.devel/10518
 # and is an implementation of http://arxiv.org/abs/1108.5083
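
Since add_cmap registers the table with matplotlib, the new map can be selected by name like any other yt colormap; a minimal sketch with an illustrative dataset and field:

    from yt.mods import load, SlicePlot

    pf = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0150")
    slc = SlicePlot(pf, "z", "Density")
    slc.set_cmap("Density", "purple_mm")   # the Markevitch-style X-ray map
    slc.save()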


https://bitbucket.org/yt_analysis/yt/commits/7653d437a58a/
Changeset:   7653d437a58a
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-11 18:15:57
Summary:     Added a to_aplpy method for plotting and added extra coordinate information to the FITS header
Affected #:  1 file

diff -r d19161f2cb6b9d8f561052b58ee4c443407a657e -r 7653d437a58af66d4b55fd02f183735e9ab274fb yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -142,6 +142,7 @@
                 xctr *= img_data.pf.units[units]
                 yctr *= img_data.pf.units[units]
                 center = [xctr, yctr]
+                w.wcs.cname = ["x","y"] 
             elif isinstance(img_data, YTCoveringGridBase):
                 dx, dy, dz = img_data.dds
                 dx *= img_data.pf.units[units]
@@ -149,6 +150,7 @@
                 dz *= img_data.pf.units[units]
                 center = 0.5*(img_data.left_edge+img_data.right_edge)
                 center *= img_data.pf.units[units]
+                w.wcs.cname = ["x","y","z"] 
             elif units == "deg" and self.dimensionality == 2:
                 dx = -scale[0]
                 dy = scale[1]
@@ -156,7 +158,11 @@
             else:
                 dx = scale[0]
                 dy = scale[1]
-                if self.dimensionality == 3: dz = scale[2]
+                if self.dimensionality == 3:
+                    dz = scale[2]
+                    w.wcs.cname = ["x","y","z"] 
+                else:
+                    w.wcs.cname = ["x","y"]
             w.wcs.crval = center
             w.wcs.cunit = [units]*self.dimensionality
             w.wcs.ctype = proj_type
@@ -245,6 +251,14 @@
         else:
             data_collection.append(image)
 
+    def to_aplpy(self, **kwargs):
+        """
+        Use APLpy (http://aplpy.github.io) for plotting. Returns an `aplpy.FITSFigure`
+        instance. All keyword arguments are passed to the
+        `aplpy.FITSFigure` constructor.
+        """
+        import aplpy
+        return aplpy.FITSFigure(self, **kwargs)
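
Because FITSImageBuffer subclasses HDUList, it can be handed straight to APLpy; a sketch assuming APLpy is installed and fib is an existing FITSImageBuffer holding a "Density" image:

    # Keyword arguments are forwarded to the aplpy.FITSFigure constructor.
    fig = fib.to_aplpy(figsize=(8, 8))
    fig.show_colorscale(cmap="purple_mm")  # ordinary APLpy plotting calls
    fig.save("density.png")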
         
 
     


https://bitbucket.org/yt_analysis/yt/commits/ab4ab606116c/
Changeset:   ab4ab606116c
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-11 18:18:59
Summary:     Better handling of FITS files with incomplete or nonexistent WCS information.
Affected #:  2 files

diff -r 7653d437a58af66d4b55fd02f183735e9ab274fb -r ab4ab606116c89bb0894991c8a8539bc44b951d7 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -162,11 +162,11 @@
         self._conversion_override = conversion_override
 
         self.wcs = pywcs.WCS(header=self.primary_header)
-        
+
+        self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
             if unit in all_units:
                 self.file_unit = unit.name
-                idx = i
                 break
         self.new_unit = None
         self.pixel_scale = 1.0

diff -r 7653d437a58af66d4b55fd02f183735e9ab274fb -r ab4ab606116c89bb0894991c8a8539bc44b951d7 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -311,9 +311,3 @@
         v += "mass %0.3e.  Multi-mass particles are not currently supported." % (
             self.ma)
         return v
-
-class YTFITSHeaderNotUnderstood(YTException):
-    def __str__(self):
-        return "This FITS header is not recognizable in its current form.\n" + \
-                "If you would like to force loading, specify: \n" + \
-                "ignore_unit_names = True"


https://bitbucket.org/yt_analysis/yt/commits/57877fd6550e/
Changeset:   57877fd6550e
Branch:      yt-3.0
User:        jzuhone
Date:        2013-12-11 18:27:09
Summary:     Merging
Affected #:  28 files

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -264,28 +264,45 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+	echo "OS X 10.8.4 and 10.9: download Xcode 5.02 from the mac app store."
 	echo "(search for Xcode)."
+    echo
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:"
-	echo "http://stackoverflow.com/questions/9353444"
-	echo "Alternatively, download the Xcode command line tools from"
-	echo "the Apple developer tools website."
+	echo "command line tools."
+    echo
+    echo "For OS X 10.8, see:"
+   	echo "http://stackoverflow.com/questions/9353444"
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so,"
-	echo "please set the following environment variables, remove any"
-	echo "broken installation tree, and re-run this script verbatim."
-        echo
-        echo "$ export CC=gcc"
-        echo "$ export CXX=g++"
-	echo
-        OSX_VERSION=`sw_vers -productVersion`
-        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+    echo "For OS X 10.9, the command line tools can be installed"
+    echo "with the following command:"
+    echo "    xcode-select --install"
+    echo
+    OSX_VERSION=`sw_vers -productVersion`
+    if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/redhat-release ]
+    then
+        echo "Looks like you're on an Redhat-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * openssl-devel"
+        echo "  * uuid-devel"
+        echo "  * readline-devel"
+        echo "  * ncurses-devel"
+        echo "  * zip"
+        echo "  * gcc-{,c++,gfortran}"
+        echo "  * make"
+        echo "  * patch"
+        echo 
+        echo "You can accomplish this by executing:"
+        echo "$ sudo yum install gcc gcc-g++ gcc-gfortran make patch zip"
+        echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
+    fi
     if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
     then
         echo "Looks like you're on an OpenSUSE-compatible machine."
@@ -566,16 +583,16 @@
 CYTHON='Cython-0.19.1'
 FORTHON='Forthon-0.8.11'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.5'
+PYTHON='Python-2.7.6'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.1.3'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.0.0'
+IPYTHON='ipython-1.1.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.7'
+MERCURIAL='mercurial-2.8'
 NOSE='nose-1.3.0'
 NUMPY='numpy-1.7.1'
 PYTHON_HGLIB='python-hglib-1.0'
@@ -585,14 +602,14 @@
 SQLITE='sqlite-autoconf-3071700'
 SYMPY='sympy-0.7.3'
 TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.3'
+ZEROMQ='zeromq-3.2.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
 echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
@@ -600,11 +617,11 @@
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca  ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8  mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
 echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
@@ -614,7 +631,7 @@
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
 echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -1006,10 +1023,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/orientation/"
-    echo
-    echo "or just activate your environment and run 'yt serve' to bring up the"
-    echo "yt GUI."
+    echo "    http://yt-project.org/doc/bootcamp/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -25,7 +25,7 @@
 from yt.convenience import \
     load
 from yt.data_objects.profiles import \
-    BinnedProfile1D, EmptyProfileData
+    BinnedProfile1D, YTEmptyProfileData
 from yt.analysis_modules.halo_finding.api import *
 from .halo_filters import \
     VirialFilter
@@ -588,7 +588,7 @@
                 profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
                                                 r_min, halo['r_max'],
                                                 log_space=True, end_collect=True)
-            except EmptyProfileData:
+            except YTEmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
                 return None
             # Figure out which fields to add simultaneously

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -19,7 +19,10 @@
 import numpy as np
 import os
 
-from yt.funcs import *
+from yt.funcs import \
+     download_file, \
+     mylog, \
+     only_on_root
 
 from yt.data_objects.field_info_container import add_field
 from yt.utilities.exceptions import YTException
@@ -31,6 +34,23 @@
 
 xray_data_version = 1
 
+def _get_data_file():
+    data_file = "xray_emissivity.h5"
+    data_url = "http://yt-project.org/data"
+    if "YT_DEST" in os.environ and \
+      os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
+        data_dir = os.path.join(os.environ["YT_DEST"], "data")
+    else:
+        data_dir = "."
+    data_path = os.path.join(data_dir, data_file)
+    if not os.path.exists(data_path):
+        mylog.info("Attempting to download supplementary data from %s to %s." % 
+                   (data_url, data_dir))
+        fn = download_file(os.path.join(data_url, data_file), data_path)
+        if fn != data_path:
+            raise RuntimeError, "Failed to download supplementary data."
+    return data_path
+
 class EnergyBoundsException(YTException):
     def __init__(self, lower, upper):
         self.lower = lower
@@ -65,8 +85,7 @@
 
         default_filename = False
         if filename is None:
-            filename = os.path.join(os.environ["YT_DEST"], 
-                                    "data", "xray_emissivity.h5")
+            filename = _get_data_file()
             default_filename = True
 
         if not os.path.exists(filename):
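
The net effect is a simple search order for the emissivity table; an equivalent standalone sketch of the lookup implemented by _get_data_file above (paths illustrative):

    import os

    data_file = "xray_emissivity.h5"
    # Prefer $YT_DEST/data when it exists, otherwise fall back to the
    # current directory; the file is downloaded there on first use.
    if "YT_DEST" in os.environ and \
      os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
        data_dir = os.path.join(os.environ["YT_DEST"], "data")
    else:
        data_dir = "."
    data_path = os.path.join(data_dir, data_file)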

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -130,6 +130,20 @@
     if simulation_type not in simulation_time_series_registry:
         raise YTSimulationNotIdentified(simulation_type)
 
+    if os.path.exists(parameter_filename):
+        valid_file = True
+    elif os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"),
+                                     parameter_filename)):
+        parameter_filename = os.path.join(ytcfg.get("yt", "test_data_dir"),
+                                          parameter_filename)
+        valid_file = True
+    else:
+        valid_file = False
+        
+    if not valid_file:
+        raise YTOutputNotIdentified((parameter_filename, simulation_type), 
+                                    dict(find_outputs=find_outputs))
+    
     return simulation_time_series_registry[simulation_type](parameter_filename, 
                                                             find_outputs=find_outputs)
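
With this change a simulation parameter file may be given either as an actual path or as a name relative to the test_data_dir configuration option; a sketch, with an illustrative dataset name:

    # Hypothetical usage: if the bare name does not exist on disk, it is
    # resolved against test_data_dir before YTOutputNotIdentified is raised.
    from yt.mods import simulation

    es = simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo")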
 

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,12 @@
     particle_handler_registry
 
 from profiles import \
-    EmptyProfileData, \
+    YTEmptyProfileData, \
     BinnedProfile, \
     BinnedProfile1D, \
     BinnedProfile2D, \
-    BinnedProfile3D
+    BinnedProfile3D, \
+    create_profile
 
 from time_series import \
     TimeSeriesData, \

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -60,21 +60,21 @@
         
         def _gradx(f, data):
             grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
-            grad /= 2.0*data["dx"].flat[0]
+            grad /= 2.0*data["dx"].flat[0]*data.pf.units["cm"]
             g = np.zeros(data[field].shape, dtype='float64')
             g[1:-1,1:-1,1:-1] = grad
             return g
             
         def _grady(f, data):
             grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
-            grad /= 2.0*data["dy"].flat[0]
+            grad /= 2.0*data["dy"].flat[0]*data.pf.units["cm"]
             g = np.zeros(data[field].shape, dtype='float64')
             g[1:-1,1:-1,1:-1] = grad
             return g
             
         def _gradz(f, data):
             grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
-            grad /= 2.0*data["dz"].flat[0]
+            grad /= 2.0*data["dz"].flat[0]*data.pf.units["cm"]
             g = np.zeros(data[field].shape, dtype='float64')
             g[1:-1,1:-1,1:-1] = grad
             return g

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -112,6 +112,10 @@
 
     _domain_ind = None
 
+    def mask_refinement(self, selector):
+        mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
+        return mask
+
     def select_blocks(self, selector):
         mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
         mask = self._reshape_vals(mask)

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -20,16 +20,10 @@
 
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.lib import bin_profile1d, bin_profile2d, bin_profile3d
+from yt.utilities.lib import new_bin_profile1d, new_bin_profile2d, \
+                             new_bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
-
-_field_mapping = {
-    "total_mass": ("CellMassMsun", "ParticleMassMsun"),
-    "hybrid_radius": ("RadiusCode", "ParticleRadiusCode"),
-                 }
-
-class EmptyProfileData(Exception):
-    pass
+    ParallelAnalysisInterface, parallel_objects
 
 def preserve_source_parameters(func):
     def save_state(*args, **kwargs):
@@ -55,7 +49,6 @@
         self._data_source = data_source
         self.pf = data_source.pf
         self.field_data = YTFieldData()
-        self._pdata = {}
 
     @property
     def hierarchy(self):
@@ -131,14 +124,11 @@
     def __setitem__(self, key, value):
         self.field_data[key] = value
 
-    def _get_field(self, source, this_field, check_cut):
+    def _get_field(self, source, field, check_cut):
         # This is where we will iterate to get all contributions to a field
         # which is how we will implement hybrid particle/cell fields
         # but...  we default to just the field.
-        data = []
-        for field in _field_mapping.get(this_field, (this_field,)):
-            data.append(source[field].astype('float64'))
-        return np.concatenate(data, axis=0)
+        return source[field].astype('float64')
 
     def _fix_pickle(self):
         if isinstance(self._data_source, tuple):
@@ -225,7 +215,7 @@
     def _get_bins(self, source, check_cut=False):
         source_data = self._get_field(source, self.bin_field, check_cut)
         if source_data.size == 0: # Nothing for us here.
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
         # Truncate at boundaries.
         if self.end_collect:
             mi = np.ones_like(source_data).astype('bool')
@@ -234,7 +224,7 @@
                &  (source_data < self._bins.max()))
         sd = source_data[mi]
         if sd.size == 0:
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
         # Stick the bins into our fixed bins, set at initialization
         bin_indices = np.digitize(sd, self._bins)
         if self.end_collect: #limit the range of values to 0 and n_bins-1
@@ -400,7 +390,7 @@
         source_data_x = self._get_field(source, self.x_bin_field, check_cut)
         source_data_y = self._get_field(source, self.y_bin_field, check_cut)
         if source_data_x.size == 0:
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
 
         if self.end_collect:
             mi = np.arange(source_data_x.size)
@@ -412,7 +402,7 @@
         sd_x = source_data_x[mi]
         sd_y = source_data_y[mi]
         if sd_x.size == 0 or sd_y.size == 0:
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
 
         bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
         bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -518,48 +508,6 @@
         return np.log10(upper), np.log10(lower)
     return upper, lower
 
-class BinnedProfile2DInlineCut(BinnedProfile2D):
-    def __init__(self, data_source,
-                 x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
-                 y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
-                 end_collect=False):
-        self.indices = data_source["Ones"].astype("bool")
-        BinnedProfile2D.__init__(self, data_source,
-                 x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
-                 y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
-                 end_collect)
-
-    @preserve_source_parameters
-    def _bin_field(self, source, field, weight, accumulation,
-                   args, check_cut=False):
-        source_data = self._get_field(source, field, check_cut)
-        if weight: weight_data = self._get_field(source, weight, check_cut)
-        else: weight_data = np.ones(source_data.shape, dtype='float64')
-        self.total_stuff = source_data.sum()
-        binned_field = self._get_empty_field()
-        weight_field = self._get_empty_field()
-        m_field = self._get_empty_field()
-        q_field = self._get_empty_field()
-        used_field = self._get_empty_field()
-        mi = args[0]
-        bin_indices_x = args[1][self.indices].ravel().astype('int64')
-        bin_indices_y = args[2][self.indices].ravel().astype('int64')
-        source_data = source_data[mi][self.indices]
-        weight_data = weight_data[mi][self.indices]
-        nx = bin_indices_x.size
-        #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
-                      weight_field, binned_field, m_field, q_field, used_field)
-        if accumulation: # Fix for laziness
-            if not iterable(accumulation):
-                raise SyntaxError("Accumulation needs to have length 2")
-            if accumulation[0]:
-                binned_field = np.add.accumulate(binned_field, axis=0)
-            if accumulation[1]:
-                binned_field = np.add.accumulate(binned_field, axis=1)
-        return binned_field, weight_field, used_field.astype('bool')
-
-        
 class BinnedProfile3D(BinnedProfile):
     """
     A 'Profile' produces either a weighted (or unweighted) average
@@ -658,7 +606,7 @@
         source_data_y = self._get_field(source, self.y_bin_field, check_cut)
         source_data_z = self._get_field(source, self.z_bin_field, check_cut)
         if source_data_x.size == 0:
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
         if self.end_collect:
             mi = np.arange(source_data_x.size)
         else:
@@ -672,7 +620,7 @@
         sd_y = source_data_y[mi]
         sd_z = source_data_z[mi]
         if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
-            raise EmptyProfileData()
+            raise YTEmptyProfileData()
 
         bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
         bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -790,25 +738,280 @@
         self._data_source.hierarchy.save_data(values, "/Profiles", name,
                                               set_attr, force=force)
 
-class StoredBinnedProfile3D(BinnedProfile3D):
-    def __init__(self, pf, name):
-        """
-        Given a *pf* parameterfile and the *name* of a stored profile, retrieve
-        it into a read-only data structure.
-        """
+class ProfileFieldAccumulator(object):
+    def __init__(self, n_fields, size):
+        shape = size + (n_fields,)
+        self.values = np.zeros(shape, dtype="float64")
+        self.mvalues = np.zeros(shape, dtype="float64")
+        self.qvalues = np.zeros(shape, dtype="float64")
+        self.used = np.zeros(size, dtype='bool')
+        self.weight_values = np.zeros(size, dtype="float64")
+
+class ProfileND(ParallelAnalysisInterface):
+    def __init__(self, data_source, weight_field = None):
+        self.data_source = data_source
+        self.pf = data_source.pf
         self.field_data = YTFieldData()
-        prof_arr = pf.h.get_data("/Profiles", name)
-        if prof_arr is None: raise KeyError("No such array")
-        for ax in 'xyz':
-            for base in ['%s_bin_field', '_%s_log']:
-                setattr(self, base % ax, prof_arr.getAttr(base % ax))
-        for ax in 'xyz':
-            fn = getattr(self, '%s_bin_field' % ax)
-            self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
-        shape = prof_arr.getAttr('shape')
-        for fn, fd in zip(prof_arr.getAttr('field_order'),
-                          prof_arr.read().transpose()):
-            self.field_data[fn] = fd.reshape(shape)
+        self.weight_field = weight_field
 
-    def add_fields(self, *args, **kwargs):
-        raise RuntimeError("Sorry, you can't add to a stored profile.")
+    def add_fields(self, fields):
+        fields = ensure_list(fields)
+        temp_storage = ProfileFieldAccumulator(len(fields), self.size)
+        for g in parallel_objects(self.data_source._grids):
+            self._bin_grid(g, fields, temp_storage)
+        self._finalize_storage(fields, temp_storage)
+
+    def _finalize_storage(self, fields, temp_storage):
+        # We use our main comm here
+        # This also will fill _field_data
+        # FIXME: Add parallelism and combining std stuff
+        if self.weight_field is not None:
+            temp_storage.values /= temp_storage.weight_values[...,None]
+        blank = ~temp_storage.used
+        for i, field in enumerate(fields):
+            self.field_data[field] = temp_storage.values[...,i]
+            self.field_data[field][blank] = 0.0
+        
+    def _bin_grid(self, grid, fields, storage):
+        raise NotImplementedError
+
+    def _filter(self, bin_fields, cut_points):
+        # cut_points is initially just the points inside our region
+        # we also want to apply a filtering based on min/max
+        filter = np.zeros(bin_fields[0].shape, dtype='bool')
+        filter[cut_points] = True
+        for (mi, ma), data in zip(self.bounds, bin_fields):
+            filter &= (data > mi)
+            filter &= (data < ma)
+        return filter, [data[filter] for data in bin_fields]
+        
+    def _get_data(self, grid, fields):
+        # Save the values in the grid beforehand.
+        old_params = grid.field_parameters
+        old_keys = grid.field_data.keys()
+        grid.field_parameters = self.data_source.field_parameters
+        # Now we ask our source which values to include
+        pointI = self.data_source._get_point_indices(grid)
+        bin_fields = [grid[bf] for bf in self.bin_fields]
+        # We want to make sure that our fields are within the bounds of the
+        # binning
+        filter, bin_fields = self._filter(bin_fields, pointI)
+        if not np.any(filter): return None
+        arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
+        for i, field in enumerate(fields):
+            arr[:,i] = grid[field][filter]
+        if self.weight_field is not None:
+            weight_data = grid[self.weight_field]
+        else:
+            weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+        weight_data = weight_data[filter]
+        # So that we can pass these into 
+        grid.field_parameters = old_params
+        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
+        return arr, weight_data, bin_fields
+
+    def __getitem__(self, key):
+        return self.field_data[key]
+
+    def __iter__(self):
+        return sorted(self.field_data.items())
+
+    def _get_bins(self, mi, ma, n, take_log):
+        if take_log:
+            return np.logspace(np.log10(mi), np.log10(ma), n+1)
+        else:
+            return np.linspace(mi, ma, n+1)
+
+class Profile1D(ProfileND):
+    def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
+                 weight_field = None):
+        super(Profile1D, self).__init__(data_source, weight_field)
+        self.x_field = x_field
+        self.x_log = x_log
+        self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+
+        self.size = (self.x_bins.size - 1,)
+        self.bin_fields = (self.x_field,)
+        self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
+        self.x = self.x_bins
+
+    def _bin_grid(self, grid, fields, storage):
+        gd = self._get_data(grid, fields)
+        if gd is None: return
+        fdata, wdata, (bf_x,) = gd
+        bin_ind = np.digitize(bf_x, self.x_bins) - 1
+        new_bin_profile1d(bin_ind, wdata, fdata,
+                      storage.weight_values, storage.values,
+                      storage.mvalues, storage.qvalues,
+                      storage.used)
+        # We've binned it!
+
+class Profile2D(ProfileND):
+    def __init__(self, data_source,
+                 x_field, x_n, x_min, x_max, x_log,
+                 y_field, y_n, y_min, y_max, y_log,
+                 weight_field = None):
+        super(Profile2D, self).__init__(data_source, weight_field)
+        self.x_field = x_field
+        self.x_log = x_log
+        self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+        self.y_field = y_field
+        self.y_log = y_log
+        self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+
+        self.size = (self.x_bins.size - 1, self.y_bins.size - 1)
+
+        self.bin_fields = (self.x_field, self.y_field)
+        self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+                       (self.y_bins[0], self.y_bins[-1]))
+        self.x = self.x_bins
+        self.y = self.y_bins
+
+    def _bin_grid(self, grid, fields, storage):
+        rv = self._get_data(grid, fields)
+        if rv is None: return
+        fdata, wdata, (bf_x, bf_y) = rv
+        bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+        bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+        new_bin_profile2d(bin_ind_x, bin_ind_y, wdata, fdata,
+                      storage.weight_values, storage.values,
+                      storage.mvalues, storage.qvalues,
+                      storage.used)
+        # We've binned it!
+
+class Profile3D(ProfileND):
+    def __init__(self, data_source,
+                 x_field, x_n, x_min, x_max, x_log,
+                 y_field, y_n, y_min, y_max, y_log,
+                 z_field, z_n, z_min, z_max, z_log,
+                 weight_field = None):
+        super(Profile3D, self).__init__(data_source, weight_field)
+        # X
+        self.x_field = x_field
+        self.x_log = x_log
+        self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+        # Y
+        self.y_field = y_field
+        self.y_log = y_log
+        self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+        # Z
+        self.z_field = z_field
+        self.z_log = z_log
+        self.z_bins = self._get_bins(z_min, z_max, z_n, z_log)
+
+        self.size = (self.x_bins.size - 1,
+                     self.y_bins.size - 1,
+                     self.z_bins.size - 1)
+
+        self.bin_fields = (self.x_field, self.y_field, self.z_field)
+        self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+                       (self.y_bins[0], self.y_bins[-1]),
+                       (self.z_bins[0], self.z_bins[-1]))
+
+        self.x = self.x_bins
+        self.y = self.y_bins
+        self.z = self.z_bins
+
+    def _bin_grid(self, grid, fields, storage):
+        rv = self._get_data(grid, fields)
+        if rv is None: return
+        fdata, wdata, (bf_x, bf_y, bf_z) = rv
+        bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+        bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+        bin_ind_z = np.digitize(bf_z, self.z_bins) - 1
+        new_bin_profile3d(bin_ind_x, bin_ind_y, bin_ind_z, wdata, fdata,
+                      storage.weight_values, storage.values,
+                      storage.mvalues, storage.qvalues,
+                      storage.used)
+        # We've binned it!
+
+def create_profile(data_source, bin_fields, n = 64, 
+                   weight_field = "CellMass", fields = None,
+                   accumulation = False, fractional = False):
+    r"""
+    Create a 1, 2, or 3D profile object.
+
+    The dimensionality of the profile object is chosen by the number of 
+    fields given in the bin_fields argument.
+
+    Parameters
+    ----------
+    data_source : AMR3DData Object
+        The data object to be profiled.
+    bin_fields : list of strings
+        List of the binning fields for profiling.
+    n : int or list of ints
+        The number of bins in each dimension.  If None, 64 bins for 
+        each bin are used for each bin field.
+        Default: 64.
+    weight_field : str
+        The weight field for computing weighted average for the profile 
+        values.  If None, the profile values are sums of the data in 
+        each bin.
+    fields : list of strings
+        The fields to be profiled.
+    accumulation : bool or list of bools
+        If True, the profile values for a bin n are the cumulative sum of 
+        all the values from bin 0 to n.  If -True, the sum is reversed so 
+        that the value for bin n is the cumulative sum from bin N (total bins) 
+        to n.  If the profile is 2D or 3D, a list of values can be given to 
+        control the summation in each dimension independently.
+        Default: False.
+    fractional : If True the profile values are divided by the sum of all 
+        the profile data such that the profile represents a probability 
+        distribution function.
+
+    Examples
+    --------
+
+    Create a 1d profile.  Access bin field from profile.x and field 
+    data from profile.field_data.
+
+    >>> pf = load("DD0046/DD0046")
+    >>> ad = pf.h.all_data()
+    >>> profile = create_profile(ad, ["Density"],
+    ...                          fields=["Temperature", "x-velocity"]))
+    >>> print profile.x
+    >>> print profile.field_data["Temperature"]
+    
+    """
+    if len(bin_fields) == 1:
+        cls = Profile1D
+    elif len(bin_fields) == 2:
+        cls = Profile2D
+    elif len(bin_fields) == 3:
+        cls = Profile3D
+    else:
+        raise NotImplementedError
+    if not iterable(n):
+        n = [n] * len(bin_fields)
+    if not iterable(accumulation):
+        accumulation = [accumulation] * len(bin_fields)
+    logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+    ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
+          for f, l in zip(bin_fields, logs)]
+    args = [data_source]
+    for f, n, (mi, ma), l in zip(bin_fields, n, ex, logs):
+        args += [f, n, mi, ma, l] 
+    obj = cls(*args, weight_field = weight_field)
+    setattr(obj, "accumulation", accumulation)
+    setattr(obj, "fractional", fractional)
+    if fields is not None:
+        obj.add_fields(fields)
+    for field in fields:
+        if fractional:
+            obj.field_data[field] /= obj.field_data[field].sum()
+        for axis, acc in enumerate(accumulation):
+            if not acc: continue
+            temp = obj.field_data[field]
+            temp = np.rollaxis(temp, axis)
+            if acc < 0:
+                temp = temp[::-1]
+            temp = temp.cumsum(axis=0)
+            if acc < 0:
+                temp = temp[::-1]
+            temp = np.rollaxis(temp, axis)
+            obj.field_data[field] = temp
+            
+    return obj
+
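
The accumulation and fractional keywords are applied after the fields are binned; a sketch extending the docstring example above, where ad is the all_data() container and the field names are illustrative:

    # fractional=True first normalizes the binned values to a probability
    # distribution; accumulation=-True (i.e. -1) then takes a reversed
    # cumulative sum along the bin axis.
    profile = create_profile(ad, ["Density"], fields=["CellMassMsun"],
                             weight_field=None,
                             accumulation=-True, fractional=True)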

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,10 +1,11 @@
 from yt.testing import *
 from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    Profile1D, Profile2D, Profile3D
 
 _fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
 
-def test_profiles():
+def test_binned_profiles():
     pf = fake_random_pf(64, nprocs = 8, fields = _fields)
     nv = pf.domain_dimensions.prod()
     dd = pf.h.all_data()
@@ -71,3 +72,83 @@
         p3d.add_fields(["Ones"], weight="Temperature")
         yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
 
+def test_profiles():
+    pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+    nv = pf.domain_dimensions.prod()
+    dd = pf.h.all_data()
+    (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+        ["Density", "Temperature", "Dinosaurs"])
+    rt, tt, dt = dd.quantities["TotalQuantity"](
+        ["Density", "Temperature", "Dinosaurs"])
+    # First we look at the 
+    e1, e2 = 0.9, 1.1
+    for nb in [8, 16, 32, 64]:
+        # We log all the fields or don't log 'em all.  No need to do them
+        # individually.
+        for lf in [True, False]: 
+            p1d = Profile1D(dd, 
+                "Density",     nb, rmi*e1, rma*e2, lf,
+                weight_field = None)
+            p1d.add_fields(["Ones", "Temperature"])
+            yield assert_equal, p1d["Ones"].sum(), nv
+            yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+            p2d = Profile2D(dd, 
+                "Density",     nb, rmi*e1, rma*e2, lf,
+                "Temperature", nb, tmi*e1, tma*e2, lf,
+                weight_field = None)
+            p2d.add_fields(["Ones", "Temperature"])
+            yield assert_equal, p2d["Ones"].sum(), nv
+            yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+            p3d = Profile3D(dd, 
+                "Density",     nb, rmi*e1, rma*e2, lf,
+                "Temperature", nb, tmi*e1, tma*e2, lf,
+                "Dinosaurs",   nb, dmi*e1, dma*e2, lf,
+                weight_field = None)
+            p3d.add_fields(["Ones", "Temperature"])
+            yield assert_equal, p3d["Ones"].sum(), nv
+            yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+        p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+                        weight_field = None)
+        p1d.add_fields("Ones")
+        av = nv / nb
+        yield assert_equal, p1d["Ones"], np.ones(nb)*av
+
+        # We re-bin ones with a weight now
+        p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+                        weight_field = "Temperature")
+        p1d.add_fields(["Ones"])
+        yield assert_equal, p1d["Ones"], np.ones(nb)
+
+        p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+                            "y", nb, 0.0, 1.0, False,
+                            weight_field = None)
+        p2d.add_fields("Ones")
+        av = nv / nb**2
+        yield assert_equal, p2d["Ones"], np.ones((nb, nb))*av
+
+        # We re-bin ones with a weight now
+        p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+                            "y", nb, 0.0, 1.0, False,
+                            weight_field = "Temperature")
+        p2d.add_fields(["Ones"])
+        yield assert_equal, p2d["Ones"], np.ones((nb, nb))
+
+        p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+                            "y", nb, 0.0, 1.0, False,
+                            "z", nb, 0.0, 1.0, False,
+                            weight_field = None)
+        p3d.add_fields("Ones")
+        av = nv / nb**3
+        yield assert_equal, p3d["Ones"], np.ones((nb, nb, nb))*av
+
+        # We re-bin ones with a weight now
+        p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+                            "y", nb, 0.0, 1.0, False,
+                            "z", nb, 0.0, 1.0, False,
+                            weight_field = "Temperature")
+        p3d.add_fields(["Ones"])
+        yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
+

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -266,7 +266,7 @@
         # know the extent of all the grids. 
         glis = np.round((glis - self.parameter_file.domain_left_edge)/gdds).astype('int')
         new_dre = np.max(gres,axis=0)
-        self.parameter_file.domain_right_edge = np.round(new_dre, decimals=6)
+        self.parameter_file.domain_right_edge = np.round(new_dre, decimals=12)
         self.parameter_file.domain_width = \
                 (self.parameter_file.domain_right_edge - 
                  self.parameter_file.domain_left_edge)
@@ -294,9 +294,9 @@
             dxs.append(dx)
         
         dx = np.array(dxs)
-        self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
+        self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=12)
         self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+        self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=12)
         if self.parameter_file.dimensionality <= 2:
             self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
         if self.parameter_file.dimensionality == 1:

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -78,7 +78,7 @@
 def _convertDensity(data) :
     return data.convert("Density")
 def _density(field, data) :
-    return data["density"]
+    return data["density"].copy()
 add_field("Density", function=_density, take_log=False,
           units=r"\rm{g}/\rm{cm}^3", projected_units=r"\rm{g}/\rm{cm}^2",
           convert_function=_convertDensity)
@@ -87,21 +87,21 @@
     return data.convert("x-velocity")
 def _xvelocity(field, data):
     if "velocity_x" in data.pf.field_info:
-        return data["velocity_x"]
+        return data["velocity_x"].copy()
     else:
         return data["momentum_x"]/data["density"]           
 add_field("x-velocity", function=_xvelocity, take_log=False,
           units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
 def _yvelocity(field, data):
     if "velocity_y" in data.pf.field_info:
-        return data["velocity_y"]
+        return data["velocity_y"].copy()
     else:
         return data["momentum_y"]/data["density"]
 add_field("y-velocity", function=_yvelocity, take_log=False,
           units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
 def _zvelocity(field, data):
     if "velocity_z" in data.pf.field_info:
-        return data["velocity_z"]
+        return data["velocity_z"].copy()
     else:
         return data["momentum_z"]/data["density"]
 add_field("z-velocity", function=_zvelocity, take_log=False,
@@ -128,7 +128,7 @@
     return data.convert("Density")*data.convert("x-velocity")**2
 def _pressure(field, data) :
     if "pressure" in data.pf.field_info:
-        return data["pressure"]
+        return data["pressure"].copy()
     else:
         eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
                                            data["momentum_y"]**2 +
@@ -154,17 +154,17 @@
 def _convertBfield(data):
     return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
 def _Bx(field, data):
-    return data['cell_centered_B_x']
+    return data['cell_centered_B_x'].copy()
 add_field("Bx", function=_Bx, take_log=False,
           units=r"\rm{Gauss}", display_name=r"B_x",
           convert_function=_convertBfield)
 def _By(field, data):
-    return data['cell_centered_B_y']
+    return data['cell_centered_B_y'].copy()
 add_field("By", function=_By, take_log=False,
           units=r"\rm{Gauss}", display_name=r"B_y",
           convert_function=_convertBfield)
 def _Bz(field, data):
-    return data['cell_centered_B_z']
+    return data['cell_centered_B_z'].copy()
 add_field("Bz", function=_Bz, take_log=False,
           units=r"\rm{Gauss}", display_name=r"B_z",
           convert_function=_convertBfield)
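
The added .copy() calls matter because, assuming yt's usual derived-field machinery, the conversion factor is multiplied in place into whatever array the field function returns; returning the cached array directly would re-scale the stored data on every access. An illustrative sketch of the failure mode:

    import numpy as np

    cached = np.ones(4)           # stands in for data["density"]

    def _density_no_copy():
        return cached             # a reference, not a copy

    for _ in range(2):
        rho = _density_no_copy()
        rho *= 2.0                # conversion factor applied in place
    # 'cached' now holds 4.0 everywhere instead of 1.0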

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -538,6 +538,16 @@
         contents = None
     return contents
 
+def download_file(url, filename):
+    import urllib
+    class MyURLopener(urllib.FancyURLopener):
+        def http_error_default(self, url, fp, errcode, errmsg, headers):
+            raise RuntimeError, \
+              "Attempt to download file from %s failed with error %s: %s." % \
+              (url, errcode, errmsg)
+    fn, h = MyURLopener().retrieve(url, filename)
+    return fn
+
 # This code snippet is modified from Georg Brandl
 def bb_apicall(endpoint, data, use_pass = True):
     import urllib, urllib2

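For reference, a minimal sketch of how the new download_file helper
might be called; the URL and filename are placeholders, not part of
the changeset:

    from yt.funcs import download_file

    # Fetch a remote file to a local path.  On an HTTP error the
    # helper raises RuntimeError with the error code and message.
    try:
        fn = download_file("http://example.com/data.fits", "data.fits")
        print "Saved to %s" % fn
    except RuntimeError, err:
        print err
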
diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,7 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter
+    ImageArray, particle_filter, create_profile
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
@@ -83,6 +83,9 @@
 from yt.frontends.flash.api import \
     FLASHStaticOutput, FLASHFieldInfo, add_flash_field
 
+from yt.frontends.art.api import \
+    ARTStaticOutput, ARTFieldInfo, add_art_field
+
 from yt.frontends.artio.api import \
     ARTIOStaticOutput, ARTIOFieldInfo, add_artio_field
 
@@ -138,7 +141,7 @@
     apply_colormap, scale_image, write_projection, \
     SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
     ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps
+    show_colormaps, ProfilePlot, PhasePlot
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \

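With these exports, the new profile machinery is reachable from the
top-level namespace.  A rough usage sketch, with a placeholder dataset
path and the argument order suggested by the create_profile body in
the profiles.py hunk later in this changeset:

    from yt.mods import load, create_profile, ProfilePlot, PhasePlot

    pf = load("DD0010/moving7_0010")  # placeholder dataset
    dd = pf.h.all_data()
    # Bin Temperature against Density; bin bounds are taken from the
    # field extrema inside create_profile.
    profile = create_profile(dd, ["Density"], ["Temperature"])
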
diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+
 # We don't need to import 'exceptions'
 #import exceptions
 import os.path
@@ -311,3 +312,19 @@
         v += "mass %0.3e.  Multi-mass particles are not currently supported." % (
             self.ma)
         return v
+
+class YTEmptyProfileData(Exception):
+    pass
+
+class YTDuplicateFieldInProfile(Exception):
+    def __init__(self, field, new_spec, old_spec):
+        self.field = field
+        self.new_spec = new_spec
+        self.old_spec = old_spec
+
+    def __str__(self):
+        r = """Field %s already exists with field spec:
+               %s
+               But being asked to add it with:
+               %s""" % (self.field, self.old_spec, self.new_spec)
+        return r

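A quick illustration of the new exception type; the field specs here
are invented for the example:

    from yt.utilities.exceptions import YTDuplicateFieldInProfile

    try:
        raise YTDuplicateFieldInProfile("Density", ("gas", "Density"),
                                        ("io", "Density"))
    except YTDuplicateFieldInProfile, exc:
        print exc
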
diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -25,6 +25,108 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def new_bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=2] bsource,
+                  np.ndarray[np.float64_t, ndim=1] wresult,
+                  np.ndarray[np.float64_t, ndim=2] bresult,
+                  np.ndarray[np.float64_t, ndim=2] mresult,
+                  np.ndarray[np.float64_t, ndim=2] qresult,
+                  np.ndarray[np.uint8_t, ndim=1, cast=True] used):
+    cdef int n, fi, bin
+    cdef np.float64_t wval, bval, oldwr
+    cdef int nb = bins_x.shape[0]
+    cdef int nf = bsource.shape[1]
+    for n in range(nb):
+        bin = bins_x[n]
+        wval = wsource[n]
+        oldwr = wresult[bin]
+        wresult[bin] += wval
+        for fi in range(nf):
+            bval = bsource[n,fi]
+            # qresult has to have the previous wresult
+            qresult[bin,fi] += (oldwr * wval * (bval - mresult[bin,fi])**2) / \
+                (oldwr + wval)
+            bresult[bin,fi] += wval*bval
+            # mresult needs the new wresult
+            mresult[bin,fi] += wval * (bval - mresult[bin,fi]) / wresult[bin]
+        used[bin] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def new_bin_profile2d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=2] bsource,
+                  np.ndarray[np.float64_t, ndim=2] wresult,
+                  np.ndarray[np.float64_t, ndim=3] bresult,
+                  np.ndarray[np.float64_t, ndim=3] mresult,
+                  np.ndarray[np.float64_t, ndim=3] qresult,
+                  np.ndarray[np.uint8_t, ndim=2, cast=True] used):
+    cdef int n, fi, bin_x, bin_y
+    cdef np.float64_t wval, bval, oldwr
+    cdef int nb = bins_x.shape[0]
+    cdef int nf = bsource.shape[1]
+    for n in range(nb):
+        bin_x = bins_x[n]
+        bin_y = bins_y[n]
+        wval = wsource[n]
+        oldwr = wresult[bin_x, bin_y]
+        wresult[bin_x,bin_y] += wval
+        for fi in range(nf):
+            bval = bsource[n,fi]
+            # qresult has to have the previous wresult
+            qresult[bin_x,bin_y,fi] += (oldwr * wval * (bval - mresult[bin_x,bin_y,fi])**2) / \
+                (oldwr + wval)
+            bresult[bin_x,bin_y,fi] += wval*bval
+            # mresult needs the new wresult
+            mresult[bin_x,bin_y,fi] += wval * (bval - mresult[bin_x,bin_y,fi]) / wresult[bin_x,bin_y]
+        used[bin_x,bin_y] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def new_bin_profile3d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.int64_t, ndim=1] bins_z,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=2] bsource,
+                  np.ndarray[np.float64_t, ndim=3] wresult,
+                  np.ndarray[np.float64_t, ndim=4] bresult,
+                  np.ndarray[np.float64_t, ndim=4] mresult,
+                  np.ndarray[np.float64_t, ndim=4] qresult,
+                  np.ndarray[np.uint8_t, ndim=3, cast=True] used):
+    cdef int n, fi, bin_x, bin_y, bin_z
+    cdef np.float64_t wval, bval, oldwr
+    cdef int nb = bins_x.shape[0]
+    cdef int nf = bsource.shape[1]
+    for n in range(nb):
+        bin_x = bins_x[n]
+        bin_y = bins_y[n]
+        bin_z = bins_z[n]
+        wval = wsource[n]
+        oldwr = wresult[bin_x, bin_y, bin_z]
+        wresult[bin_x,bin_y,bin_z] += wval
+        for fi in range(nf):
+            bval = bsource[n,fi]
+            # qresult has to have the previous wresult
+            qresult[bin_x,bin_y,bin_z,fi] += \
+                (oldwr * wval * (bval - mresult[bin_x,bin_y,bin_z,fi])**2) / \
+                (oldwr + wval)
+            bresult[bin_x,bin_y,bin_z,fi] += wval*bval
+            # mresult needs the new wresult
+            mresult[bin_x,bin_y,bin_z,fi] += wval * \
+                (bval - mresult[bin_x,bin_y,bin_z,fi]) / \
+                 wresult[bin_x,bin_y,bin_z]
+        used[bin_x,bin_y,bin_z] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
                   np.ndarray[np.float64_t, ndim=1] wsource,
                   np.ndarray[np.float64_t, ndim=1] bsource,

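The qresult/mresult updates above implement a weighted incremental
(West-style) mean and variance accumulator: mresult carries the
running weighted mean per bin and qresult the weighted sum of squared
deviations, so qresult/wresult is the weighted variance.  A minimal
pure-Python check of the same recurrence, not part of the changeset:

    import numpy as np

    def weighted_incremental(vals, wts):
        # One bin's worth of the recurrence in new_bin_profile1d.
        w_sum = mean = q = 0.0
        for v, w in zip(vals, wts):
            old = w_sum
            w_sum += w
            q += (old * w * (v - mean)**2) / w_sum  # mean is still old here
            mean += w * (v - mean) / w_sum          # now uses new w_sum
        return mean, q / w_sum

    v = np.random.random(1000)
    w = np.random.random(1000)
    m, var = weighted_incremental(v, w)
    assert np.allclose(m, np.average(v, weights=w))
    assert np.allclose(var, np.average((v - m)**2, weights=w))
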
diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -53,6 +53,10 @@
     ProjectionPlot, \
     OffAxisProjectionPlot
 
+from profile_plotter import \
+     ProfilePlot, \
+     PhasePlot
+    
 from base_plot_types import \
     get_multi_plot
 

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -17,9 +17,28 @@
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 from yt.funcs import \
-    get_image_suffix, mylog
+    get_image_suffix, mylog, x_dict, y_dict
 
-
+class CallbackWrapper(object):
+    def __init__(self, viewer, window_plot, frb, field):
+        self.frb = frb
+        self.data = frb.data_source
+        self._axes = window_plot.axes
+        self._figure = window_plot.figure
+        if len(self._axes.images) > 0:
+            self.image = self._axes.images[0]
+        if frb.axis < 3:
+            DD = frb.pf.domain_width
+            xax = x_dict[frb.axis]
+            yax = y_dict[frb.axis]
+            self._period = (DD[xax], DD[yax])
+        self.pf = frb.pf
+        self.xlim = viewer.xlim
+        self.ylim = viewer.ylim
+        if 'OffAxisSlice' in viewer._plot_type:
+            self._type_name = "CuttingPlane"
+        else:
+            self._type_name = viewer._plot_type
 class PlotMPL(object):
     """A base class for all yt plots made using matplotlib.
 
@@ -36,6 +55,7 @@
             self.axes = self.figure.add_axes(axrect)
         else:
             axes.cla()
+            axes.set_position(axrect)
             self.axes = axes
         self.canvas = FigureCanvasAgg(self.figure)
 

diff -r ab4ab606116c89bb0894991c8a8539bc44b951d7 -r 57877fd6550ec7d8950815511a187b878f45ad29 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -26,7 +26,9 @@
 from .plot_types import \
     VMPlot, \
     ProfilePlot
+from .plot_collection import PlotCollection
 from .plot_window import PlotWindow
+from .profile_plotter import PhasePlot
 from .plot_modifications import get_smallest_appropriate_unit
 
 class DualEPS(object):
@@ -57,6 +59,14 @@
 
 #=============================================================================
 
+    def return_field(self, plot):
+        if isinstance(plot, PlotWindow) or isinstance(plot, PhasePlot):
+            return plot.plots.keys()[0]
+        else:
+            return None
+
+#=============================================================================
+
     def axis_box(self, xrange=(0,1), yrange=(0,1), xlabel="", ylabel="",
                  xlog=False, ylog=False, xdata=None, ydata=None,
                  tickcolor=None, bare_axes=False,
@@ -275,18 +285,17 @@
         >>> d.axis_box_yt(p)
         >>> d.save_fig()
         """
-        if isinstance(plot, PlotWindow):
+        if isinstance(plot, (PlotWindow, PhasePlot)):
             plot.refresh()
-            width = plot.width[0]
         else:
             plot._redraw_image()
-            if isinstance(plot, VMPlot):
-                width = plot.width
-        if isinstance(plot, VMPlot) or isinstance(plot, PlotWindow):
+        if isinstance(plot, (VMPlot, PlotWindow)):
             if isinstance(plot, PlotWindow):
                 data = plot._frb
+                width = plot.width[0]
             else:
                 data = plot.data
+                width = plot.width
             if units == None:
                 units = get_smallest_appropriate_unit(width, plot.pf)
             _xrange = (0, width * plot.pf[units])
@@ -314,6 +323,26 @@
                         _ylabel = 'Image y (%s)' % (units)
             if tickcolor == None:
                 _tickcolor = pyx.color.cmyk.white
+        elif isinstance(plot, PhasePlot):
+            k = plot.plots.keys()[0]
+            _xrange = plot[k].axes.get_xlim()
+            _yrange = plot[k].axes.get_ylim()
+            _xlog = plot.profile.x_log
+            _ylog = plot.profile.y_log
+            if bare_axes:
+                _xlabel = ""
+                _ylabel = ""
+            else:
+                if xlabel != None:
+                    _xlabel = xlabel
+                else:
+                    _xlabel = plot[k].axes.get_xlabel()
+                if ylabel != None:
+                    _ylabel = ylabel
+                else:
+                    _ylabel = plot[k].axes.get_ylabel()
+            if tickcolor == None:
+                _tickcolor = None
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -412,7 +441,7 @@
                 plot.colorbar = None
             plot._redraw_image()
             _p1 = plot._figure
-        elif isinstance(plot, PlotWindow):
+        elif isinstance(plot, (PlotWindow, PhasePlot)):
             self.field = field
             if self.field == None:
                 self.field = plot.plots.keys()[0]
@@ -424,18 +453,22 @@
             plot.refresh()
             _p1 = plot.plots[self.field].figure
             # hack to account for non-square display ratios (not sure why)
-            shift = 12.0 / 340
+            # only PlotWindow needs the shift; default to zero otherwise
+            shift = 12.0 / 340 if isinstance(plot, PlotWindow) else 0.0
         elif isinstance(plot, ProfilePlot):
             plot._redraw_image()
             # Remove colorbar
             _p1 = plot._figure
             _p1.delaxes(_p1.axes[1])
+        else:
+            raise RuntimeError("Unknown plot type")
+
         _p1.axes[0].set_axis_off()  # remove axes
         _p1.axes[0].set_position([-shift,0,1,1])  # rescale figure
         _p1.set_facecolor('w')  # set background color
         figure_canvas = FigureCanvasAgg(_p1)
         figure_canvas.draw()
-        size = _p1.get_size_inches() * _p1.dpi
+        size = (_p1.get_size_inches() * _p1.dpi).astype('int')
         image = pyx.bitmap.image(size[0], size[1], "RGB",
                                  figure_canvas.tostring_rgb())
         #figure_canvas.print_png('test.png')
@@ -567,7 +600,7 @@
 
 #=============================================================================
 
-    def colorbar_yt(self, plot, **kwargs):
+    def colorbar_yt(self, plot, field=None, **kwargs):
         r"""Wrapper around DualEPS.colorbar to take information from a yt plot.
 
         Accepts all parameters that DualEPS.colorbar takes.
@@ -587,7 +620,9 @@
         >>> d.save_fig()
         """
         _cmap = None
-        if isinstance(plot, PlotWindow):
+        if field != None:
+            self.field = field
+        if isinstance(plot, (PlotWindow, PhasePlot)):
             _cmap = plot._colormaps[self.field]
         else:
             if plot.cmap != None:
@@ -601,10 +636,13 @@
             _zlabel = _zlabel.replace("_","\;")
             _zlog = plot.log_field
             _zrange = (plot.norm.vmin, plot.norm.vmax)
-        elif isinstance(plot, PlotWindow):
+        elif isinstance(plot, (PlotWindow, PhasePlot)):
             proj = plot._plot_type.endswith("Projection") and \
                 plot.data_source.weight_field == None
-            _zlabel = plot.pf.field_info[self.field].get_label(proj)
+            if isinstance(plot, PlotWindow):
+                _zlabel = plot.pf.field_info[self.field].get_label(proj)
+            else:
+                _zlabel = plot.data_source.pf.field_info[self.field].get_label(proj)
             _zlabel = _zlabel.replace("_","\;")
             _zlog = plot.get_log(self.field)[self.field]
             if plot.plots[self.field].zmin == None:
@@ -783,13 +821,13 @@
 #=============================================================================
 #=============================================================================
 
-def multiplot(ncol, nrow, yt_plots=None, images=None, xranges=None,
-              yranges=None, xlabels=None, ylabels=None,
+def multiplot(ncol, nrow, yt_plots=None, fields=None, images=None, 
+              xranges=None, yranges=None, xlabels=None, ylabels=None,
               xdata=None, ydata=None, colorbars=None,
               shrink_cb=0.95, figsize=(8,8), margins=(0,0), titles=None,
               savefig=None, format="eps", yt_nocbar=False, bare_axes=False,
               xaxis_flags=None, yaxis_flags=None,
-              cb_flags=None):
+              cb_flags=None, cb_location=None, plot_collection=False):
     r"""Convenience routine to create a multi-panel figure from yt plots or
     JPEGs.  The images are first placed from the origin, and then
     bottom-to-top and left-to-right.
@@ -834,6 +872,11 @@
         axes.
     cb_flags : list of booleans
         Flags for each plot to have a colorbar or not.
+    cb_location : dict of strings
+        Dictionary, keyed by field, that sets the location of each
+        colorbar ("left", "right", "top", or "bottom").
+    plot_collection : boolean
+        Set to True if yt_plots is a PlotCollection.
 
     Examples
     --------
@@ -858,8 +901,9 @@
     yt plots.
     """
     # Error check
+    npanels = ncol*nrow
     if images != None:
-        if len(images) != ncol*nrow:
+        if len(images) != npanels:
             raise RuntimeError("Number of images (%d) doesn't match nrow(%d)"\
                                " x ncol(%d)." % (len(images), nrow, ncol))
             return
@@ -872,21 +916,23 @@
         _yt = True
     else:
         _yt = False
+    if fields == None:
+        fields = [None] * npanels
 
     # If no ranges or labels given and given only images, fill them in.
     if not _yt:
         if xranges is None:
             xranges = []
-            for i in range(nrow*ncol): xranges.append((0,1))
+            for i in range(npanels): xranges.append((0,1))
         if yranges is None:
             yranges = []
-            for i in range(nrow*ncol): yranges.append((0,1))
+            for i in range(npanels): yranges.append((0,1))
         if xlabels is None:
             xlabels = []
-            for i in range(nrow*ncol): xlabels.append("")
+            for i in range(npanels): xlabels.append("")
         if ylabels is None:
             ylabels = []
-            for i in range(nrow*ncol): ylabels.append("")
+            for i in range(npanels): ylabels.append("")
 
     d = DualEPS(figsize=figsize)
     count = 0
@@ -930,7 +976,8 @@
                     ylabel = ylabels[j]
                 else:
                     ylabel = None
-                d.insert_image_yt(yt_plots[index], pos=(xpos, ypos))
+                d.insert_image_yt(yt_plots[index], pos=(xpos, ypos),
+                                  field=fields[index])
                 d.axis_box_yt(yt_plots[index], pos=(xpos, ypos),
                               bare_axes=bare_axes, xaxis_side=xaxis,
                               yaxis_side=yaxis,
@@ -964,32 +1011,49 @@
                 if cb_flags != None:
                     if cb_flags[index] == False:
                         continue
-                if ncol == 1:
-                    orientation = "right"
+                if cb_location == None:
+                    if ncol == 1:
+                        orientation = "right"
+                    elif i == 0:
+                        orientation = "left"
+                    elif i+1 == ncol:
+                        orientation = "right"
+                    elif j == 0:
+                        orientation = "bottom"
+                    elif j+1 == nrow:
+                        orientation = "top"
+                    else:
+                        orientation = None  # Marker for interior plot
+                else:
+                    if fields[index] not in cb_location.keys():
+                        raise RuntimeError("%s not found in cb_location dict" %
+                                           fields[index])
+                        return
+                    orientation = cb_location[fields[index]]
+                if orientation == "right":
                     xpos = bbox[1]
                     ypos = ypos0
-                elif j == 0:
-                    orientation = "bottom"
+                elif orientation == "left":
+                    xpos = bbox[0]
+                    ypos = ypos0
+                elif orientation == "bottom":
                     ypos = bbox[2]
                     xpos = xpos0
-                elif i == 0:
-                    orientation = "left"
-                    xpos = bbox[0]
-                    ypos = ypos0
-                elif i+1 == ncol:
-                    orientation = "right"
-                    xpos = bbox[1]
-                    ypos = ypos0
-                elif j+1 == nrow:
-                    orientation = "top"
+                elif orientation == "top":
                     ypos = bbox[3]
                     xpos = xpos0
                 else:
+                    mylog.warning("Unknown colorbar location %s. "
+                                  "No colorbar displayed." % orientation)
                     orientation = None  # Marker for interior plot
 
                 if orientation != None:
                     if _yt:
+                        # Set field if undefined
+                        if fields[index] == None:
+                            fields[index] = d.return_field(yt_plots[index])
                         d.colorbar_yt(yt_plots[index],
+                                      field=fields[index],
                                       pos=[xpos,ypos],
                                       shrink=shrink_cb,
                                       orientation=orientation)
@@ -1009,8 +1073,8 @@
 
 #=============================================================================
 
-def multiplot_yt(ncol, nrow, plot_col, **kwargs):
-    r"""Wrapper for multiplot that takes a yt PlotCollection.
+def multiplot_yt(ncol, nrow, plots, fields=None, **kwargs):
+    r"""Wrapper for multiplot that takes a yt PlotWindow or PlotCollection.
 
     Accepts all parameters used in multiplot.
 
@@ -1020,8 +1084,8 @@
         Number of columns in the figure.
     nrow : integer
         Number of rows in the figure.
-    plot_col : `PlotCollection`
-        yt PlotCollection that has the plots to be used.
+    plots : `PlotCollection` or `PlotWindow`
+        yt PlotCollection or PlotWindow that has the plots to be used.
 
     Examples
     --------
@@ -1040,18 +1104,42 @@
     >>> mp = multiplot_yt(2,2,pc,savefig="yt",shrink_cb=0.9, bare_axes=False,
     >>>                   yt_nocbar=False, margins=(0.5,0.5))
     """
-    if len(plot_col.plots) < nrow*ncol:
-        raise RuntimeError("Number of plots in PlotCollection is less "\
-                           "than nrow(%d) x ncol(%d)." % \
-                           (len(plot_col.plots), nrow, ncol))
+    # Determine whether the plots are organized in a PlotCollection,
+    # PlotWindow, or list of PlotWindows
+    if isinstance(plots, PlotCollection):
+        if len(plots.plots) < nrow*ncol:
+            raise RuntimeError("Number of plots in PlotCollection is less "\
+                               "than nrow(%d) x ncol(%d)." % \
+                               (len(plots.plots), nrow, ncol))
+            return
+        figure = multiplot(ncol, nrow, yt_plots=plots.plots, 
+                           plot_collection=True, **kwargs)
+    elif isinstance(plots, PlotWindow):
+        if fields == None:
+            fields = plots.fields
+        if len(fields) < nrow*ncol:
+            raise RuntimeError("Number of plots is less "\
+                               "than nrow(%d) x ncol(%d)." % \
+                               (len(fields), nrow, ncol))
+            return
+        figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs)
+    elif isinstance(plots, list) and isinstance(plots[0], PlotWindow):
+        if len(plots) < nrow*ncol:
+            raise RuntimeError("Number of plots is less "\
+                               "than nrow(%d) x ncol(%d)." % \
+                               (len(plots), nrow, ncol))
+            return
+        figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs)
+    else:
+        raise RuntimeError("Unknown plot type in multiplot_yt")
         return
-    figure = multiplot(ncol, nrow, yt_plots=plot_col.plots, **kwargs)
     return figure
 
 #=============================================================================
 
-def single_plot(plot, figsize=(12,12), cb_orient="right", bare_axes=False,
-                savefig=None, colorbar=True, file_format='eps', **kwargs):
+def single_plot(plot, field=None, figsize=(12,12), cb_orient="right", 
+                bare_axes=False, savefig=None, colorbar=True, 
+                file_format='eps', **kwargs):
     r"""Wrapper for DualEPS routines to create a figure directy from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
@@ -1080,7 +1168,7 @@
     >>> single_plot(p, savefig="figure1")
     """
     d = DualEPS(figsize=figsize)
-    d.insert_image_yt(plot)
+    d.insert_image_yt(plot, field=field)
     d.axis_box_yt(plot, bare_axes=bare_axes, **kwargs)
     if colorbar:
         d.colorbar_yt(plot, orientation=cb_orient)

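Taken together, these changes let multiplot_yt accept a PlotWindow (or
a list of them) and steer each field's colorbar via cb_location.  A
sketch of the intended call, with a placeholder dataset and fields:

    from yt.mods import load, SlicePlot
    from yt.visualization.eps_writer import multiplot_yt

    pf = load("DD0010/moving7_0010")  # placeholder dataset
    p = SlicePlot(pf, "x", ["Density", "Temperature",
                            "Pressure", "VelocityMagnitude"])
    mp = multiplot_yt(2, 2, p, savefig="multi", shrink_cb=0.9,
                      cb_location={"Density": "left",
                                   "Temperature": "right",
                                   "Pressure": "bottom",
                                   "VelocityMagnitude": "top"})
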
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/94bf550f21d8/
Changeset:   94bf550f21d8
Branch:      yt-3.0
User:        jzuhone
Date:        2014-01-04 16:57:55
Summary:     Putting this back in because we need it.
Affected #:  1 file

diff -r 57877fd6550ec7d8950815511a187b878f45ad29 -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -167,6 +167,7 @@
         for i, unit in enumerate(self.wcs.wcs.cunit):
             if unit in all_units:
                 self.file_unit = unit.name
+                idx = i
                 break
         self.new_unit = None
         self.pixel_scale = 1.0


https://bitbucket.org/yt_analysis/yt/commits/0e8f3a63d250/
Changeset:   0e8f3a63d250
Branch:      yt-3.0
User:        jzuhone
Date:        2014-01-07 20:48:07
Summary:     Merging
Affected #:  51 files

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,7 @@
 ]
 
 for subdir in REASON_DIRS:
-    dir_name = "yt/gui/reason/html/%s/" % (subdir)
+    dir_name = os.path.join("yt", "gui", "reason", "html", subdir)
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -2,7 +2,11 @@
 #include <stdlib.h>
 #include <math.h>
 #include <sys/time.h>
+#if defined(WIN32) || defined(WIN64) 
+#include <windows.h> 
+#else
 #include <sys/resource.h>
+#endif
 #include <assert.h>
 #include "kd.h"
 #include "tipsydefs.h"
@@ -10,19 +14,41 @@
 
 void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
 {
+
+#if defined(WIN32) || defined(WIN64)
+        int secs, usecs;
+        HANDLE hProcess = GetCurrentProcess();
+	FILETIME ftCreation, ftExit, ftKernel, ftUser;
+	SYSTEMTIME stUser;
+	GetProcessTimes(hProcess, &ftCreation, &ftExit, 
+			&ftKernel, &ftUser);
+	FileTimeToSystemTime(&ftUser, &stUser);
+	secs = (int)((double)stUser.wHour*3600.0 +
+			  (double)stUser.wMinute*60.0 +
+			  (double)stUser.wSecond);
+	usecs = (int)((double)stUser.wMilliseconds/1000.0);
+	*puMicro = usecs;
+	*puSecond = secs;
+	if (*puMicro < 0) {
+	  *puMicro += 1000000;
+	  *puSecond -= 1;
+	}
+	kd->uSecond = secs;
+	kd->uMicro = usecs;
+#else
 	struct rusage ru;
 
 	getrusage(0,&ru);
 	*puMicro = ru.ru_utime.tv_usec - kd->uMicro;
 	*puSecond = ru.ru_utime.tv_sec - kd->uSecond;
 	if (*puMicro < 0) {
-		*puMicro += 1000000;
-		*puSecond -= 1;
-		}
+	  *puMicro += 1000000;
+	  *puSecond -= 1;
+	}
 	kd->uSecond = ru.ru_utime.tv_sec;
 	kd->uMicro = ru.ru_utime.tv_usec;
-	}
-
+#endif
+}
 
 int kdInitFoF(KDFOF *pkd,int nBucket,float *fPeriod)
 {

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -13,7 +13,11 @@
 #include <stdlib.h>
 #include <math.h>
 #include <sys/time.h>
+#if defined(WIN32) || defined(WIN64) 
+#include <windows.h> 
+#else
 #include <sys/resource.h>
+#endif
 #include <assert.h>
 #include "kd.h"
 #include "hop_numpy.h"
@@ -26,6 +30,28 @@
  
 void kdTime(KD kd,int *puSecond,int *puMicro)
 {
+
+#if defined(WIN32) || defined(WIN64)
+        int secs, usecs;
+        HANDLE hProcess = GetCurrentProcess();
+	FILETIME ftCreation, ftExit, ftKernel, ftUser;
+	SYSTEMTIME stUser;
+	GetProcessTimes(hProcess, &ftCreation, &ftExit, 
+			&ftKernel, &ftUser);
+	FileTimeToSystemTime(&ftUser, &stUser);
+	secs = (int)((double)stUser.wHour*3600.0 +
+			  (double)stUser.wMinute*60.0 +
+			  (double)stUser.wSecond);
+	usecs = (int)((double)stUser.wMilliseconds/1000.0);
+	*puMicro = usecs;
+	*puSecond = secs;
+	if (*puMicro < 0) {
+	  *puMicro += 1000000;
+	  *puSecond -= 1;
+	}
+	kd->uSecond = secs;
+	kd->uMicro = usecs;
+#else
 	struct rusage ru;
  
 	getrusage(0,&ru);
@@ -37,9 +63,9 @@
 		}
 	kd->uSecond = ru.ru_utime.tv_sec;
 	kd->uMicro = ru.ru_utime.tv_usec;
-	}
- 
- 
+#endif
+}
+
 int kdInit(KD *pkd,int nBucket)
 {
 	KD kd;

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python
 import setuptools
 
-
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('analysis_modules', parent_package, top_path)

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -32,7 +32,10 @@
     BinnedProfile1D, \
     BinnedProfile2D, \
     BinnedProfile3D, \
-    create_profile
+    create_profile, \
+    Profile1D, \
+    Profile2D, \
+    Profile3D
 
 from time_series import \
     TimeSeriesData, \

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -234,6 +234,10 @@
         self.get_data(field)
 
     @property
+    def blocks(self):
+        return self.data_source.blocks
+
+    @property
     def _mrep(self):
         return MinimalProjectionData(self)
 
@@ -263,6 +267,7 @@
         return convs
 
     def get_data(self, fields = None):
+        fields = fields or []
         fields = self._determine_fields(ensure_list(fields))
         # We need a new tree for every single set of fields we add
         if len(fields) == 0: return

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -508,7 +508,16 @@
                 continue
             fd = self.pf.field_dependencies.get(field, None) or \
                  self.pf.field_dependencies.get(field[1], None)
-            if fd is None: continue
+            # This is long overdue.  Any time we *can't* find a field
+            # dependency -- for instance, if the derived field has been added
+            # after parameter file instantiation -- let's just try to
+            # recalculate it.
+            if fd is None:
+                try:
+                    fd = fi.get_dependencies(pf = self.pf)
+                    self.pf.field_dependencies[field] = fd
+                except:
+                    continue
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
             fields_to_get += deps

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -757,8 +757,10 @@
     def add_fields(self, fields):
         fields = ensure_list(fields)
         temp_storage = ProfileFieldAccumulator(len(fields), self.size)
-        for g in parallel_objects(self.data_source._grids):
-            self._bin_grid(g, fields, temp_storage)
+        cfields = fields + list(self.bin_fields)
+        citer = self.data_source.chunks(cfields, "io")
+        for chunk in parallel_objects(citer):
+            self._bin_chunk(chunk, fields, temp_storage)
         self._finalize_storage(fields, temp_storage)
 
     def _finalize_storage(self, fields, temp_storage):
@@ -772,42 +774,35 @@
             self.field_data[field] = temp_storage.values[...,i]
             self.field_data[field][blank] = 0.0
         
-    def _bin_grid(self, grid, fields, storage):
+    def _bin_chunk(self, chunk, fields, storage):
         raise NotImplementedError
 
-    def _filter(self, bin_fields, cut_points):
-        # cut_points is initially just the points inside our region
+    def _filter(self, bin_fields):
+        # cut_points is set to be everything initially, but
         # we also want to apply a filtering based on min/max
-        filter = np.zeros(bin_fields[0].shape, dtype='bool')
-        filter[cut_points] = True
+        filter = np.ones(bin_fields[0].shape, dtype='bool')
         for (mi, ma), data in zip(self.bounds, bin_fields):
             filter &= (data > mi)
             filter &= (data < ma)
         return filter, [data[filter] for data in bin_fields]
         
-    def _get_data(self, grid, fields):
-        # Save the values in the grid beforehand.
-        old_params = grid.field_parameters
-        old_keys = grid.field_data.keys()
-        grid.field_parameters = self.data_source.field_parameters
-        # Now we ask our source which values to include
-        pointI = self.data_source._get_point_indices(grid)
-        bin_fields = [grid[bf] for bf in self.bin_fields]
+    def _get_data(self, chunk, fields):
+        # We are using chunks now, which will manage the field parameters and
+        # the like.
+        bin_fields = [chunk[bf] for bf in self.bin_fields]
         # We want to make sure that our fields are within the bounds of the
         # binning
-        filter, bin_fields = self._filter(bin_fields, pointI)
+        filter, bin_fields = self._filter(bin_fields)
         if not np.any(filter): return None
         arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
         for i, field in enumerate(fields):
-            arr[:,i] = grid[field][filter]
+            arr[:,i] = chunk[field][filter]
         if self.weight_field is not None:
-            weight_data = grid[self.weight_field]
+            weight_data = chunk[self.weight_field]
         else:
-            weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+            weight_data = np.ones(chunk.ires.size, dtype="float64")
         weight_data = weight_data[filter]
         # So that we can pass these into 
-        grid.field_parameters = old_params
-        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
         return arr, weight_data, bin_fields
 
     def __getitem__(self, key):
@@ -835,10 +830,10 @@
         self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
         self.x = self.x_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        gd = self._get_data(grid, fields)
-        if gd is None: return
-        fdata, wdata, (bf_x,) = gd
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
+        if rv is None: return
+        fdata, wdata, (bf_x,) = rv
         bin_ind = np.digitize(bf_x, self.x_bins) - 1
         new_bin_profile1d(bin_ind, wdata, fdata,
                       storage.weight_values, storage.values,
@@ -867,8 +862,8 @@
         self.x = self.x_bins
         self.y = self.y_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -912,8 +907,8 @@
         self.y = self.y_bins
         self.z = self.z_bins
 
-    def _bin_grid(self, grid, fields, storage):
-        rv = self._get_data(grid, fields)
+    def _bin_chunk(self, chunk, fields, storage):
+        rv = self._get_data(chunk, fields)
         if rv is None: return
         fdata, wdata, (bf_x, bf_y, bf_z) = rv
         bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -983,11 +978,15 @@
         cls = Profile3D
     else:
         raise NotImplementedError
+    bin_fields = data_source._determine_fields(bin_fields)
+    fields = data_source._determine_fields(fields)
+    if weight_field is not None:
+        weight_field, = data_source._determine_fields([weight_field])
     if not iterable(n):
         n = [n] * len(bin_fields)
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
-    logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+    logs = [data_source.pf._get_field_info(f).take_log for f in bin_fields]
     ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
           for f, l in zip(bin_fields, logs)]
     args = [data_source]

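The profile objects now bin over IO chunks instead of grids.  A
minimal sketch of driving Profile1D directly, patterned on the tests
below (fake_random_pf is yt's testing helper):

    from yt.data_objects.profiles import Profile1D
    from yt.testing import fake_random_pf

    pf = fake_random_pf(32)
    dd = pf.h.all_data()
    # 64 linear Density bins spanning the field's extrema.
    mi, ma = dd.quantities["Extrema"]("Density")[0]
    prof = Profile1D(dd, "Density", 64, mi, ma, False, weight_field=None)
    prof.add_fields(["Ones"])
    print prof["Ones"].sum()
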
diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -22,7 +22,7 @@
 from yt.utilities.lib import \
     VoxelTraversal, planar_points_in_volume, find_grids_in_inclined_box, \
     grid_points_in_volume
-from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace
+from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
 from yt.utilities.orientation import Orientation
 from .data_containers import \
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -32,6 +32,7 @@
         fns += pw.save(name=tmpname)
         frb = cut.to_frb((1.0,'unitary'), 64)
         for cut_field in ['Ones', 'Density']:
+            fi = pf._get_field_info("unknown", cut_field)
             yield assert_equal, frb[cut_field].info['data_source'], \
                 cut.__str__()
             yield assert_equal, frb[cut_field].info['axis'], \
@@ -39,7 +40,7 @@
             yield assert_equal, frb[cut_field].info['field'], \
                 cut_field
             yield assert_equal, frb[cut_field].info['units'], \
-                pf.field_info[cut_field].get_units()
+                fi.get_units()
             yield assert_equal, frb[cut_field].info['xlim'], \
                 frb.bounds[:2]
             yield assert_equal, frb[cut_field].info['ylim'], \

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -152,3 +152,25 @@
         p3d.add_fields(["Ones"])
         yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
 
+def test_particle_profiles():
+    for nproc in [1, 2, 4, 8]:
+        pf = fake_random_pf(32, nprocs=nproc, particles = 32**3)
+        dd = pf.h.all_data()
+
+        p1d = Profile1D(dd, "particle_position_x", 128,
+                        0.0, 1.0, False, weight_field = None)
+        p1d.add_fields(["particle_ones"])
+        yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+        p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p2d.add_fields(["particle_ones"])
+        yield assert_equal, p2d["particle_ones"].sum(), 32**3
+
+        p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+                            "particle_position_y", 128, 0.0, 1.0, False,
+                            "particle_position_z", 128, 0.0, 1.0, False,
+                        weight_field = None)
+        p3d.add_fields(["particle_ones"])
+        yield assert_equal, p3d["particle_ones"].sum(), 32**3

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -46,6 +46,7 @@
                 fns += pw.save(name=tmpname)
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(proj_field)
                     yield assert_equal, frb[proj_field].info['data_source'], \
                             proj.__str__()
                     yield assert_equal, frb[proj_field].info['axis'], \
@@ -53,7 +54,7 @@
                     yield assert_equal, frb[proj_field].info['field'], \
                             proj_field
                     yield assert_equal, frb[proj_field].info['units'], \
-                            pf.field_info[proj_field].get_units()
+                            fi.get_units()
                     yield assert_equal, frb[proj_field].info['xlim'], \
                             frb.bounds[:2]
                     yield assert_equal, frb[proj_field].info['ylim'], \

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -67,6 +67,7 @@
                 fns += pw.save(name=tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
+                    fi = pf._get_field_info(slc_field)
                     yield assert_equal, frb[slc_field].info['data_source'], \
                         slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
@@ -74,7 +75,7 @@
                     yield assert_equal, frb[slc_field].info['field'], \
                         slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                        pf.field_info[slc_field].get_units()
+                        fi.get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
                         frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -21,17 +21,24 @@
     cp_z_vec = np.array((0.0, 0.0, 1.0)),
 )
 
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+_base_fields = (("gas", "Density"),
+                ("gas", "x-velocity"),
+                ("gas", "y-velocity"),
+                ("gas", "z-velocity"))
+_base_field_names = [f[1] for f in _base_fields]
 
 def realistic_pf(fields, nprocs):
     np.random.seed(int(0x4d3d3d3))
-    pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+    fields = list(set([_strip_ftype(f) for f in fields]))
+    pf = fake_random_pf(16, fields = fields, nprocs = nprocs,
+                        particles = 4**3)
     pf.parameters["HydroMethod"] = "streaming"
     pf.parameters["Gamma"] = 5.0/3.0
     pf.parameters["EOSType"] = 1.0
     pf.parameters["EOSSoundSpeed"] = 1.0
     pf.conversion_factors["Time"] = 1.0
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+    pf.gamma = 5.0/3.0
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
     pf.omega_matter = 0.27
@@ -41,6 +48,20 @@
         pf.units[unit+'hcm'] = pf.units[unit]
     return pf
 
+def _strip_ftype(field):
+    if not isinstance(field, tuple):
+        return field
+    elif field[0] == "all":
+        return field
+    return field[1]
+
+def _expand_field(field):
+    if isinstance(field, tuple):
+        return field
+    if "particle" in field:
+        return ("all", field)
+    return ("gas", field)
+
 class TestFieldAccess(object):
     description = None
 
@@ -52,8 +73,13 @@
 
     def __call__(self):
         field = FieldInfo[self.field_name]
+        # Don't test the base fields
+        if field in _base_fields or field in _base_field_names: return
         deps = field.get_dependencies()
-        fields = list(set(deps.requested + _base_fields))
+        fields = set([])
+        for f in deps.requested + list(_base_fields):
+            fields.add(_expand_field(f))
+        fields = list(fields)
         skip_grids = False
         needs_spatial = False
         for v in field.validators:
@@ -85,7 +111,7 @@
                 assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
 
 def test_all_fields():
-    for field in FieldInfo:
+    for field in sorted(FieldInfo):
         if isinstance(field, types.TupleType):
             fname = field[0]
         else:
@@ -93,13 +119,16 @@
         if fname.startswith("CuttingPlane"): continue
         if fname.startswith("particle"): continue
         if fname.startswith("CIC"): continue
-        if field.startswith("BetaPar"): continue
-        if field.startswith("TBetaPar"): continue
-        if field.startswith("BetaPerp"): continue
+        if fname.startswith("BetaPar"): continue
+        if fname.startswith("TBetaPar"): continue
+        if fname.startswith("BetaPerp"): continue
         if fname.startswith("WeakLensingConvergence"): continue
         if fname.startswith("DensityPerturbation"): continue
         if fname.startswith("Matter_Density"): continue
         if fname.startswith("Overdensity"): continue
+        # TotalMass is disabled because of issues with mixed particle/fluid
+        # field detection in current field system.
+        if fname.startswith("TotalMass"): continue
         if FieldInfo[field].particle_type: continue
         for nproc in [1, 4, 8]:
             test_all_fields.__name__ = "%s_%s" % (field, nproc)

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -18,8 +18,12 @@
 from libc.string cimport memcpy
 import data_structures  
 
-cdef extern from "alloca.h":
-    void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+    cdef extern from "malloc.h":
+        void *alloca(int)
+ELSE:
+    cdef extern from "alloca.h":
+        void *alloca(int)
 
 cdef extern from "artio.h":
     ctypedef struct artio_fileset_handle "artio_fileset" :

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ b/yt/frontends/ramses/api.py
@@ -22,3 +22,6 @@
 
 from .io import \
       IOHandlerRAMSES
+
+from .definitions import \
+      field_aliases

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -13,6 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import os
 import numpy as np
 import stat
 import weakref
@@ -28,7 +29,7 @@
 from yt.data_objects.octree_subset import \
     OctreeSubset
 
-from .definitions import ramses_header
+from .definitions import ramses_header, field_aliases
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from yt.utilities.lib import \
@@ -52,10 +53,10 @@
     _last_mask = None
     _last_selector_id = None
 
-    def __init__(self, pf, domain_id, nvar):
-        self.nvar = nvar
+    def __init__(self, pf, domain_id):
         self.pf = pf
         self.domain_id = domain_id
+        self.nvar = 0 # Set this later!
         num = os.path.basename(pf.parameter_filename).split("."
                 )[0].split("_")[1]
         basename = "%s/%%s_%s.out%05i" % (
@@ -65,6 +66,7 @@
         for t in ['grav', 'hydro', 'part', 'amr']:
             setattr(self, "%s_fn" % t, basename % t)
         self._read_amr_header()
+        self._read_hydro_header()
         self._read_particle_header()
         self._read_amr()
 
@@ -102,9 +104,9 @@
                     hvals = fpu.read_attrs(f, header, "=")
                 except AssertionError:
                     print "You are running with the wrong number of fields."
-                    print "Please specify these in the load command."
-                    print "We are looking for %s fields." % self.nvar
-                    print "The last set of field sizes was: %s" % skipped
+                    print "If you specified these in the load command, check the array length."
+                    print "In this file there are %s hydro fields." % skipped
+                    #print "The last set of field sizes was: %s" % skipped
                     raise
                 if hvals['file_ncache'] == 0: continue
                 assert(hvals['file_ilevel'] == level+1)
@@ -116,6 +118,13 @@
         self._level_count = level_count
         return self._hydro_offset
 
+    def _read_hydro_header(self):
+        if self.nvar > 0: return self.nvar
+        # Read the number of hydro variables
+        f = open(self.hydro_fn, "rb")
+        fpu.skip(f, 1)
+        self.nvar = fpu.read_vector(f, "i")[0]
+
     def _read_particle_header(self):
         if not os.path.exists(self.part_fn):
             self.local_particle_count = 0
@@ -320,6 +329,7 @@
 class RAMSESGeometryHandler(OctreeGeometryHandler):
 
     def __init__(self, pf, data_style='ramses'):
+        self._pf = pf # TODO: Figure out the class composition better!
         self.fluid_field_list = pf._fields_in_file
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
@@ -332,8 +342,7 @@
         super(RAMSESGeometryHandler, self).__init__(pf, data_style)
 
     def _initialize_oct_handler(self):
-        nv = len(self.fluid_field_list)
-        self.domains = [RAMSESDomainFile(self.parameter_file, i + 1, nv)
+        self.domains = [RAMSESDomainFile(self.parameter_file, i + 1)
                         for i in range(self.parameter_file['ncpu'])]
         total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
                          for dom in self.domains)
@@ -341,14 +350,69 @@
         self.num_grids = total_octs
 
     def _detect_fields(self):
-        # TODO: Add additional fields
+        # Do we want to attempt to figure out what the fields are in the file?
         pfl = set([])
+        if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+            self._setup_auto_fields()
         for domain in self.domains:
             pfl.update(set(domain.particle_field_offsets.keys()))
         self.particle_field_list = list(pfl)
         self.field_list = [("gas", f) for f in self.fluid_field_list] \
                         + self.particle_field_list
 
+    def _setup_auto_fields(self):
+        '''
+        If no fluid fields are set, try to build the fluid field list automatically from the hydro file header.
+        '''
+        # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+        # Find nvar
+        # TODO: copy/pasted from DomainFile; needs refactoring!
+        num = os.path.basename(self._pf.parameter_filename).split("."
+                )[0].split("_")[1]
+        testdomain = 1 # Just pick the first domain file to read
+        basename = "%s/%%s_%s.out%05i" % (
+            os.path.abspath(
+              os.path.dirname(self._pf.parameter_filename)),
+            num, testdomain)
+        hydro_fn = basename % "hydro"
+        # Do we have a hydro file?
+        if hydro_fn:
+            # Read the number of hydro variables
+            f = open(hydro_fn, "rb")
+            fpu.skip(f, 1)
+            nvar = fpu.read_vector(f, "i")[0]
+        # OK, we got NVAR, now set up the arrays depending on what NVAR is
+        # Allow some wiggle room for users to add too many variables
+        if nvar < 5:
+            mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s")
+            raise ValueError
+        # Basic hydro runs
+        if nvar == 5:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure"]
+        if nvar > 5 and nvar < 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "Pressure", "Metallicity"]
+        # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+        if nvar == 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure"]
+        if nvar > 11:
+            fields = ["Density", 
+                      "x-velocity", "y-velocity", "z-velocity", 
+                      "x-Bfield-left", "y-Bfield-left", "z-Bfield-left", 
+                      "x-Bfield-right", "y-Bfield-right", "z-Bfield-right", 
+                      "Pressure","Metallicity"]
+        while len(fields) < nvar:
+            fields.append("var"+str(len(fields)))
+        mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+        self.fluid_field_list = fields
+
     def _setup_derived_fields(self):
         self._parse_cooling()
         super(RAMSESGeometryHandler, self)._setup_derived_fields()
@@ -404,12 +468,14 @@
     _particle_coordinates_name = "Coordinates"
     
     def __init__(self, filename, data_style='ramses',
-                 fields = None,
-                 storage_filename = None):
+                 fields = None, storage_filename = None):
         # Here we want to initiate a traceback, if the reader is not built.
-        if fields is None:
-            fields = ["Density", "x-velocity", "y-velocity",
-	                  "z-velocity", "Pressure", "Metallicity"]
+        # fields: an ordered list of hydro variables as they appear in
+        # the hydro_XXXXX.outYYYYY file; if None, a default set will be
+        # tried.  A string is treated as a named alias defined in
+        # field_aliases.
+        if isinstance(fields, types.StringTypes):
+            fields = field_aliases[fields]
         self._fields_in_file = fields
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
@@ -522,4 +588,3 @@
         if not os.path.basename(args[0]).startswith("info_"): return False
         fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
         return os.path.exists(fn)
-

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -28,6 +28,17 @@
                ('nout', 3, 'I')
               )
     yield header
+    # TODO: REMOVE
+    '''
+    hydro_header = ( ('ncpu', 1, 'i'),
+                     ('nvar', 1, 'i'),
+                     ('ndim', 1, 'i'),
+                     ('nlevelmax', 1, 'i'),
+                     ('nboundary', 1, 'i'),
+                     ('gamma', 1, 'd')
+                    )
+    yield hydro_header
+    '''
     noutput, iout, ifout = hvals['nout']
     next_set = ( ('tout', noutput, 'd'),
                  ('aout', noutput, 'd'),
@@ -45,3 +56,18 @@
                     ('numbl', hvals['nlevelmax'] * hvals['ncpu'], 'i'),
                   )
     yield tree_header
+
+field_aliases = {
+    'standard_five':     ('Density',
+                          'x-velocity',
+                          'y-velocity',
+                          'z-velocity',
+                          'Pressure'),
+    'standard_six':      ('Density',
+                          'x-velocity',
+                          'y-velocity',
+                          'z-velocity',
+                          'Pressure',
+                          'Metallicity'),
+
+}

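The field_aliases table lets a RAMSES dataset be loaded with a named
field layout instead of spelling out the tuple; a sketch, with a
placeholder info file path:

    from yt.mods import load

    # Equivalent to passing the five-field tuple by hand.
    pf = load("output_00007/info_00007.txt", fields="standard_five")
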
diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,8 @@
     YTFieldData, \
     YTDataContainer, \
     YTSelectionContainer
+from yt.data_objects.particle_unions import \
+    ParticleUnion
 from yt.data_objects.grid_patch import \
     AMRGridPatch
 from yt.geometry.geometry_handler import \
@@ -66,7 +68,8 @@
 from .fields import \
     StreamFieldInfo, \
     add_stream_field, \
-    KnownStreamFields
+    KnownStreamFields, \
+    _setup_particle_fields
 
 class StreamGrid(AMRGridPatch):
     """
@@ -224,7 +227,12 @@
         GridGeometryHandler._setup_classes(self, dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Particle unions add fields to the dataset without putting
+        # their keys into the field list itself, so we need to double-check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -245,16 +253,14 @@
         already in the stream but not part of the data dict will be left
         alone. 
         """
-        
+        for d in data: update_field_names(d)
         particle_types = set_particle_types(data[0])
-        ftype = "all"
+        ftype = "io"
 
         for key in data[0].keys() :
             if key == "number_of_particles": continue
             self.stream_handler.particle_types[key] = particle_types[key]
-            if key not in self.field_list:
-                self.field_list.append(key)
-                
+
         for i, grid in enumerate(self.grids) :
             if data[i].has_key("number_of_particles") :
                 grid.NumberOfParticles = data[i].pop("number_of_particles")
@@ -262,10 +268,16 @@
                 if fname in grid.field_data:
                     grid.field_data.pop(fname, None)
                 elif (ftype, fname) in grid.field_data:
-                    grid.field_data.pop( ("all", fname) )
+                    grid.field_data.pop( ("io", fname) )
                 self.stream_handler.fields[grid.id][fname] = data[i][fname]
             
+
+        # We only want to create a superset of fields here.
         self._detect_fields()
+        mylog.debug("Creating Particle Union 'all'")
+        pu = ParticleUnion("all", list(self.pf.particle_types_raw))
+        self.pf.add_particle_union(pu)
+        self.pf.particle_types = tuple(set(self.pf.particle_types))
         self._setup_unknown_fields()
                 
 class StreamStaticOutput(StaticOutput):
@@ -323,10 +335,34 @@
     def _skip_cache(self):
         return True
 
+    def _setup_particle_type(self, ptype):
+        orig = set(self.field_info.items())
+        _setup_particle_fields(self.field_info, ptype)
+        return [n for n, v in set(self.field_info.items()).difference(orig)]
+
 class StreamDictFieldHandler(dict):
+    _additional_fields = ()
 
     @property
-    def all_fields(self): return self[0].keys()
+    def all_fields(self): 
+        fields = list(self._additional_fields) + self[0].keys()
+        fields = list(set(fields))
+        return fields
+
+def update_field_names(data):
+    orig_names = data.keys()
+    for k in orig_names:
+        if isinstance(k, tuple): continue
+        s = getattr(data[k], "shape", ())
+        if len(s) == 1:
+            field = ("io", k)
+        elif len(s) == 3:
+            field = ("gas", k)
+        elif len(s) == 0:
+            continue
+        else:
+            raise NotImplementedError
+        data[field] = data.pop(k)
 
 def set_particle_types(data) :
 
@@ -353,7 +389,7 @@
     if pf.h.num_grids > 1 :
 
         try:
-            x, y, z = (pdata["all","particle_position_%s" % ax] for ax in 'xyz')
+            x, y, z = (pdata["io","particle_position_%s" % ax] for ax in 'xyz')
         except KeyError:
             raise KeyError("Cannot decompose particle data without position fields!")
         
@@ -450,9 +486,16 @@
         pdata["number_of_particles"] = number_of_particles
         for key in data.keys() :
             if len(data[key].shape) == 1 :
-                pdata[key] = data.pop(key)
+                if not isinstance(key, tuple):
+                    field = ("io", key)
+                    mylog.debug("Reassigning '%s' to '%s'", key, field)
+                else:
+                    field = key
+                sfh._additional_fields += (field,)
+                pdata[field] = data.pop(key)
     else :
         particle_types = {}
+    update_field_names(data)
     
     if nprocs > 1:
         temp = {}
@@ -508,12 +551,11 @@
     # Now figure out where the particles go
 
     if number_of_particles > 0 :
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(spf, pdata)
@@ -593,6 +635,7 @@
         grid_levels[i,:] = g.pop("level")
         if g.has_key("number_of_particles") :
             number_of_particles[i,:] = g.pop("number_of_particles")  
+        update_field_names(g)
         sfh[i] = g
             
     handler = StreamHandler(
@@ -665,7 +708,10 @@
     if number_of_particles > 0 :
         pdata = {}
         for field in base_pf.h.field_list :
-            if base_pf.field_info[field].particle_type :
+            if not isinstance(field, tuple):
+                field = ("unknown", field)
+            fi = base_pf._get_field_info(*field)
+            if fi.particle_type :
                 pdata[field] = np.concatenate([grid[field]
                                                for grid in base_pf.h.grids])
         pdata["number_of_particles"] = number_of_particles
@@ -688,7 +734,10 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                if not pf.field_info[field].particle_type :
+                if not isinstance(field, tuple):
+                    field = ("unknown", field)
+                fi = pf._get_field_info(*field)
+                if not fi.particle_type :
                     gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
@@ -701,7 +750,10 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    if not pf.field_info[field].particle_type :
+                    if not isinstance(field, tuple):
+                        field = ("unknown", field)
+                    fi = pf._get_field_info(*field)
+                    if not fi.particle_type :
                         gd[field] = grid[field]
                 grid_data.append(gd)
         
@@ -712,12 +764,12 @@
     # Now reassign particle data to grids
 
     if number_of_particles > 0:
-        if ("all", "particle_position_x") not in pdata:
+        if ("io", "particle_position_x") not in pdata:
             pdata_ftype = {}
             for f in [k for k in sorted(pdata)]:
                 if not hasattr(pdata[f], "shape"): continue
-                mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
-                pdata_ftype["all",f] = pdata.pop(f)
+                mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
+                pdata_ftype["io",f] = pdata.pop(f)
             pdata_ftype.update(pdata)
             pdata = pdata_ftype
         assign_particle_data(pf, pdata)
@@ -751,17 +803,6 @@
     n_ref = 64
     over_refine_factor = 1
 
-    def _setup_particle_type(self, ptype):
-        orig = set(self.field_info.items())
-        particle_vector_functions(ptype,
-            ["particle_position_%s" % ax for ax in 'xyz'],
-            ["particle_velocity_%s" % ax for ax in 'xyz'],
-            self.field_info)
-        particle_deposition_functions(ptype,
-            "Coordinates", "particle_mass", self.field_info)
-        standard_particle_fields(self.field_info, ptype)
-        return [n for n, v in set(self.field_info.items()).difference(orig)]
-
 def load_particles(data, sim_unit_to_cm, bbox=None,
                       sim_time=0.0, periodicity=(True, True, True),
                       n_ref = 64, over_refine_factor = 1):
@@ -819,8 +860,19 @@
 
     sfh = StreamDictFieldHandler()
     
+    pdata = {}
+    for key in data.keys() :
+        if not isinstance(key, tuple):
+            field = ("io", key)
+            mylog.debug("Reassigning '%s' to '%s'", key, field)
+        else:
+            field = key
+        pdata[field] = data[key]
+        sfh._additional_fields += (field,)
+    data = pdata # Drop reference count
+    update_field_names(data)
     particle_types = set_particle_types(data)
-    
+
     sfh.update({'stream_file':data})
     grid_left_edges = domain_left_edge
     grid_right_edges = domain_right_edge
@@ -874,7 +926,7 @@
         self.oct_handler = oct_handler
         self._last_mask = None
         self._last_selector_id = None
-        self._current_particle_type = 'all'
+        self._current_particle_type = 'io'
         self._current_fluid_type = self.pf.default_fluid_type
         self.base_region = base_region
         self.base_selector = base_region.selector
@@ -953,7 +1005,13 @@
         super(StreamOctreeHandler, self)._setup_classes(dd)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Particle unions add fields to the dataset without putting
+        # their keys into the field list itself, so we need to double-check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamOctreeStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamOctreeHandler
@@ -1005,6 +1063,7 @@
     domain_left_edge = np.array(bbox[:, 0], 'float64')
     domain_right_edge = np.array(bbox[:, 1], 'float64')
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+    update_field_names(data)
 
     sfh = StreamDictFieldHandler()
     
@@ -1093,7 +1152,13 @@
             self.io = io_registry[self.data_style](self.pf)
 
     def _detect_fields(self):
-        self.field_list = list(set(self.stream_handler.get_fields()))
+        # NOTE: Particle unions add fields to the dataset without putting
+        # their keys into the field list itself, so we need to double-check
+        # here.
+        fl = set(self.stream_handler.get_fields())
+        fl.update(set(getattr(self, "field_list", [])))
+        self.field_list = list(fl)
+
 
 class StreamHexahedralStaticOutput(StreamStaticOutput):
     _hierarchy_class = StreamHexahedralHierarchy
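
Taken together, the stream changes move raw particle fields under the "io"
type and layer "all" on top as a particle union. A usage sketch under those
conventions (array sizes illustrative; load_uniform_grid is called as in the
tests later in this diff):

    import numpy as np
    from yt.frontends.stream.api import load_uniform_grid

    data = {"Density": np.random.random((16, 16, 16)),
            "particle_position_x": np.random.uniform(size=100),
            "particle_position_y": np.random.uniform(size=100),
            "particle_position_z": np.random.uniform(size=100),
            "number_of_particles": 100}
    pf = load_uniform_grid(data, (16, 16, 16), 1.0)
    dd = pf.h.all_data()
    x_io = dd["io", "particle_position_x"]    # raw fields land under "io"
    x_all = dd["all", "particle_position_x"]  # "all" is the union over types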

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,37 +34,20 @@
 StreamFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = StreamFieldInfo.add_field
 
-add_stream_field("density", function = NullFunc)
+add_stream_field("Density", function = NullFunc)
 add_stream_field("x-velocity", function = NullFunc)
 add_stream_field("y-velocity", function = NullFunc)
 add_stream_field("z-velocity", function = NullFunc)
 
-add_field("Density", function = TranslationFunc("density"))
-
-add_stream_field("particle_position_x", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_y", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_z", function = NullFunc, particle_type=True)
-add_stream_field("particle_index", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_density", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_temperature", function = NullFunc, particle_type=True)
-add_stream_field("particle_mass", function = NullFunc, particle_type=True)
-
-add_field("ParticleMass", function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-add_stream_field(("all", "particle_position_x"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_y"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_z"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_index"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_density"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_temperature"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_mass"), function = NullFunc, particle_type=True)
-
-add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
-          particle_type=True)
-
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
-                                 ["particle_velocity_%s" % ax for ax in 'xyz'],
-                          StreamFieldInfo)
-particle_deposition_functions("all", "Coordinates", "ParticleMass",
-                               StreamFieldInfo)
+def _setup_particle_fields(registry, ptype):
+    for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+              ["particle_type", "particle_index", "particle_mass"] + \
+              ["particle_position_%s" % ax for ax in 'xyz'] + \
+              ["particle_velocity_%s" % ax for ax in 'xyz']:
+        registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
+    particle_vector_functions(ptype,
+        ["particle_position_%s" % ax for ax in 'xyz'],
+        ["particle_velocity_%s" % ax for ax in 'xyz'],
+        registry)
+    particle_deposition_functions(ptype,
+        "Coordinates", "particle_mass", registry)

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -46,7 +46,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
-        if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
+        if any((ftype not in ("gas",) for ftype, fname in fields)):
             raise NotImplementedError
         rv = {}
         for field in fields:
@@ -57,63 +57,38 @@
                     size, [f2 for f1, f2 in fields], ng)
         for field in fields:
             ftype, fname = field
-            if ftype == 'deposit':
-                fname = field
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:
-                    ds = self.fields[g.id][fname]
+                    ds = self.fields[g.id][field]
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
 
-    def _read_particle_selection(self, chunks, selector, fields):
+    def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
-        if any((ftype != "all" for ftype, fname in fields)):
-            raise NotImplementedError
-        rv = {}
-        # Now we have to do something unpleasant
-        mylog.debug("First pass: counting particles.")
-        size = 0
-        pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                # Sometimes the stream operator won't have the 
-                # ("all", "Something") fields, but instead just "Something".
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                size += g.count_particles(selector, 
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-        for field in fields:
-            # TODO: figure out dataset types
-            rv[field] = np.empty(size, dtype='float64')
-        ng = sum(len(c.objs) for c in chunks)
-        mylog.debug("Reading %s points of %s fields in %s grids",
-                   size, [f2 for f1, f2 in fields], ng)
-        ind = 0
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
         for chunk in chunks:
             for g in chunk.objs:
                 if g.NumberOfParticles == 0: continue
                 gf = self.fields[g.id]
-                pns = []
-                for pn in pfields:
-                    if pn in gf: pns.append(pn)
-                    else: pns.append(pn[1])
-                mask = g.select_particles(selector,
-                    gf[pns[0]], gf[pns[1]], gf[pns[2]])
-                if mask is None: continue
-                for field in set(fields):
-                    if field in gf:
-                        fn = field
-                    else:
-                        fn = field[1]
-                    gdata = gf[fn][mask]
-                    rv[field][ind:ind+gdata.size] = gdata
-                ind += gdata.size
-        return rv
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z  = (gf[ptype, "particle_position_%s" % ax]
+                                for ax in 'xyz')
+                    mask = selector.select_points(x, y, z)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = np.asarray(gf[ptype, field])
+                        yield (ptype, field), data[mask]
 
     @property
     def _read_exception(self):
@@ -127,19 +102,8 @@
         self.fields = pf.stream_handler.fields
         super(StreamParticleIOHandler, self).__init__(pf)
 
-    def _read_particle_selection(self, chunks, selector, fields):
-        rv = {}
-        # We first need a set of masks for each particle type
-        ptf = defaultdict(list)
-        psize = defaultdict(lambda: 0)
+    def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
-        for ftype, fname in fields:
-            ptf[ftype].append(fname)
-        # For this type of file, we actually have something slightly different.
-        # We are given a list of ParticleDataChunks, which is composed of
-        # individual ParticleOctreeSubsets.  The data_files attribute on these
-        # may in fact overlap.  So we will iterate over a union of all the
-        # data_files.
         data_files = set([])
         for chunk in chunks:
             for obj in chunk.objs:
@@ -148,39 +112,32 @@
             f = self.fields[data_file.filename]
             # This double-reads
             for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                psize[ptype] += selector.count_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
-        # Now we have all the sizes, and we can allocate
-        ind = {}
-        for field in fields:
-            mylog.debug("Allocating %s values for %s", psize[field[0]], field)
-            rv[field] = np.empty(psize[field[0]], dtype="float64")
-            ind[field] = 0
+                yield ptype, (f[ptype, "particle_position_x"],
+                              f[ptype, "particle_position_y"],
+                              f[ptype, "particle_position_z"])
+            
+    def _read_particle_fields(self, chunks, ptf, selector):
+        data_files = set([])
+        for chunk in chunks:
+            for obj in chunk.objs:
+                data_files.update(obj.data_files)
         for data_file in data_files:
             f = self.fields[data_file.filename]
             for ptype, field_list in sorted(ptf.items()):
-                assert(ptype == "all")
-                mask = selector.select_points(
-                        f["particle_position_x"],
-                        f["particle_position_y"],
-                        f["particle_position_z"])
+                x, y, z = (f[ptype, "particle_position_%s" % ax]
+                           for ax in 'xyz')
+                mask = selector.select_points(x, y, z)
                 if mask is None: continue
                 for field in field_list:
-                    data = f[field][mask,...]
-                    my_ind = ind[ptype, field]
-                    mylog.debug("Filling from %s to %s with %s",
-                        my_ind, my_ind+data.shape[0], field)
-                    rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
-                    ind[ptype, field] += data.shape[0]
-        return rv
+                    data = f[ptype, field][mask]
+                    yield (ptype, field), data
+
 
     def _initialize_index(self, data_file, regions):
         # self.fields[g.id][fname] is the pattern here
         pos = np.column_stack(self.fields[data_file.filename][
-                              "particle_position_%s" % ax] for ax in 'xyz')
+                              ("io", "particle_position_%s" % ax)]
+                              for ax in 'xyz')
         if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
            np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
             raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
@@ -194,11 +151,11 @@
         return morton
 
     def _count_particles(self, data_file):
-        npart = self.fields[data_file.filename]["particle_position_x"].size
-        return {'all': npart}
+        npart = self.fields[data_file.filename]["io", "particle_position_x"].size
+        return {'io': npart}
 
     def _identify_fields(self, data_file):
-        return [ ("all", k) for k in self.fields[data_file.filename].keys()]
+        return self.fields[data_file.filename].keys()
 
 class IOHandlerStreamHexahedral(BaseIOHandler):
     _data_style = "stream_hexahedral"

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -28,7 +28,6 @@
     
     # Check that all of this runs ok without particles
     
-    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
     ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
     amr0 = refine_amr(ug0, rc, fo, 3)
 
@@ -74,16 +73,18 @@
 
     # Check to make sure the fields have been defined correctly
     
-    assert ug1._get_field_info("all", "particle_position_x").particle_type
-    assert ug1._get_field_info("all", "particle_position_y").particle_type
-    assert ug1._get_field_info("all", "particle_position_z").particle_type
-    assert ug1._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug1._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug1._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug1._get_field_info(ptype, "particle_mass").particle_type
     assert not ug1._get_field_info("gas", "Density").particle_type
 
-    assert ug2._get_field_info("all", "particle_position_x").particle_type
-    assert ug2._get_field_info("all", "particle_position_y").particle_type
-    assert ug2._get_field_info("all", "particle_position_z").particle_type
-    assert ug2._get_field_info("all", "particle_mass").particle_type
+    for ptype in ("all", "io"):
+        assert ug2._get_field_info(ptype, "particle_position_x").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_y").particle_type
+        assert ug2._get_field_info(ptype, "particle_position_z").particle_type
+        assert ug2._get_field_info(ptype, "particle_mass").particle_type
     assert not ug2._get_field_info("gas", "Density").particle_type
     
     # Now refine this

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -23,8 +23,12 @@
 from fp_utils cimport *
 from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 
-cdef extern from "alloca.h":
-    void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+    cdef extern from "malloc.h":
+        void *alloca(int)
+ELSE:
+    cdef extern from "alloca.h":
+        void *alloca(int)
 
 cdef inline int gind(int i, int j, int k, int dims[3]):
     # The ordering is such that we want i to vary the slowest in this instance,

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,8 +24,12 @@
 from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
 from .particle_deposit cimport sph_kernel, gind
 
-cdef extern from "alloca.h":
-    void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+    cdef extern from "malloc.h":
+        void *alloca(int)
+ELSE:
+    cdef extern from "alloca.h":
+        void *alloca(int)
 
 cdef struct NeighborList
 cdef struct NeighborList:

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -88,9 +88,11 @@
     for n_ref in [16, 32, 64, 512, 1024]:
         pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
         dd = pf.h.all_data()
-        bi = dd["all","mesh_id"]
+        bi = dd["io","mesh_id"]
         v = np.bincount(bi.astype("int64"))
         yield assert_equal, v.max() <= n_ref, True
+        bi2 = dd["all","mesh_id"]
+        yield assert_equal, bi, bi2
 
 def test_particle_overrefine():
     np.random.seed(int(0x4d3d3d3))

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -19,7 +19,8 @@
 
 from yt.visualization.image_writer import apply_colormap
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
-from yt.utilities.lib import write_png_to_string, get_color_bounds
+from yt.utilities.lib import get_color_bounds
+from yt.utilities.png_writer import write_png_to_string
 
 import yt.extern.bottle as bottle
 

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,8 @@
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -60,7 +60,7 @@
     mylog.debug("SIGUSR1 registered for traceback printing")
     signal.signal(signal.SIGUSR2, signal_ipython)
     mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError:  # Not in main thread
+except (ValueError, AttributeError) as e:  # Not in main thread
     pass
 
 class SetExceptionHandling(argparse.Action):

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -138,7 +138,7 @@
     return left, right, level
 
 def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
-                   negative = False, nprocs = 1):
+                   negative = False, nprocs = 1, particles = 0):
     from yt.data_objects.api import data_object_registry
     from yt.frontends.stream.api import load_uniform_grid
     if not iterable(ndims):
@@ -156,9 +156,30 @@
             offsets.append(0.0)
     data = dict((field, (np.random.random(ndims) - offset) * peak_value)
                  for field,offset in zip(fields,offsets))
+    if particles:
+        for f in ('particle_position_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.uniform(size = particles)
+        for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
+            data[f] = np.random.random(size = particles) - 0.5
+        data['particle_mass'] = np.random.random(particles)
+        data['number_of_particles'] = particles
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
 
+def fake_amr_pf(fields = ("Density",)):
+    from yt.frontends.stream.api import load_amr_grids
+    data = []
+    for gspec in _amr_grid_hierarchy:
+        level, left_edge, right_edge, dims = gspec
+        gdata = dict(level = level,
+                     left_edge = left_edge,
+                     right_edge = right_edge,
+                     dimensions = dims)
+        for f in fields:
+            gdata[f] = np.random.random(dims)
+        data.append(gdata)
+    return load_amr_grids(data, [32, 32, 32], 1.0)
+
 def expand_keywords(keywords, full=False):
     """
     expand_keywords is a means for testing all possible keyword
@@ -289,3 +310,207 @@
         else:
             return ffalse
                                         
+# This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or
+# lower.  It is just meant to provide a sample AMR hierarchy to work with.
+_amr_grid_hierarchy = [
+ [ 0,
+  [0.0,0.0,0.0],
+  [1.0,1.0,1.0],
+  [32,32,32],
+ ],
+ [ 1,
+  [0.25,0.21875,0.25],
+  [0.5,0.5,0.5],
+  [16,18,16],
+ ],
+ [ 1,
+  [0.5,0.21875,0.25],
+  [0.75,0.5,0.5],
+  [16,18,16],
+ ],
+ [ 1,
+  [0.21875,0.5,0.25],
+  [0.5,0.75,0.5],
+  [18,16,16],
+ ],
+ [ 1,
+  [0.5,0.5,0.25],
+  [0.75,0.75,0.5],
+  [16,16,16],
+ ],
+ [ 1,
+  [0.25,0.25,0.5],
+  [0.5,0.5,0.75],
+  [16,16,16],
+ ],
+ [ 1,
+  [0.5,0.25,0.5],
+  [0.75,0.5,0.75],
+  [16,16,16],
+ ],
+ [ 1,
+  [0.25,0.5,0.5],
+  [0.5,0.75,0.75],
+  [16,16,16],
+ ],
+ [ 1,
+  [0.5,0.5,0.5],
+  [0.75,0.75,0.75],
+  [16,16,16],
+ ],
+ [ 2,
+  [0.5,0.5,0.5],
+  [0.71875,0.71875,0.71875],
+  [28,28,28],
+ ],
+ [ 3,
+  [0.5,0.5,0.5],
+  [0.6640625,0.65625,0.6796875],
+  [42,40,46],
+ ],
+ [ 4,
+  [0.5,0.5,0.5],
+  [0.59765625,0.6015625,0.6015625],
+  [50,52,52],
+ ],
+ [ 2,
+  [0.28125,0.5,0.5],
+  [0.5,0.734375,0.71875],
+  [28,30,28],
+ ],
+ [ 3,
+  [0.3359375,0.5,0.5],
+  [0.5,0.671875,0.6640625],
+  [42,44,42],
+ ],
+ [ 4,
+  [0.40625,0.5,0.5],
+  [0.5,0.59765625,0.59765625],
+  [48,50,50],
+ ],
+ [ 2,
+  [0.5,0.28125,0.5],
+  [0.71875,0.5,0.71875],
+  [28,28,28],
+ ],
+ [ 3,
+  [0.5,0.3359375,0.5],
+  [0.671875,0.5,0.6640625],
+  [44,42,42],
+ ],
+ [ 4,
+  [0.5,0.40625,0.5],
+  [0.6015625,0.5,0.59765625],
+  [52,48,50],
+ ],
+ [ 2,
+  [0.28125,0.28125,0.5],
+  [0.5,0.5,0.71875],
+  [28,28,28],
+ ],
+ [ 3,
+  [0.3359375,0.3359375,0.5],
+  [0.5,0.5,0.671875],
+  [42,42,44],
+ ],
+ [ 4,
+  [0.46484375,0.37890625,0.50390625],
+  [0.4765625,0.390625,0.515625],
+  [6,6,6],
+ ],
+ [ 4,
+  [0.40625,0.40625,0.5],
+  [0.5,0.5,0.59765625],
+  [48,48,50],
+ ],
+ [ 2,
+  [0.5,0.5,0.28125],
+  [0.71875,0.71875,0.5],
+  [28,28,28],
+ ],
+ [ 3,
+  [0.5,0.5,0.3359375],
+  [0.6796875,0.6953125,0.5],
+  [46,50,42],
+ ],
+ [ 4,
+  [0.5,0.5,0.40234375],
+  [0.59375,0.6015625,0.5],
+  [48,52,50],
+ ],
+ [ 2,
+  [0.265625,0.5,0.28125],
+  [0.5,0.71875,0.5],
+  [30,28,28],
+ ],
+ [ 3,
+  [0.3359375,0.5,0.328125],
+  [0.5,0.65625,0.5],
+  [42,40,44],
+ ],
+ [ 4,
+  [0.40234375,0.5,0.40625],
+  [0.5,0.60546875,0.5],
+  [50,54,48],
+ ],
+ [ 2,
+  [0.5,0.265625,0.28125],
+  [0.71875,0.5,0.5],
+  [28,30,28],
+ ],
+ [ 3,
+  [0.5,0.3203125,0.328125],
+  [0.6640625,0.5,0.5],
+  [42,46,44],
+ ],
+ [ 4,
+  [0.5,0.3984375,0.40625],
+  [0.546875,0.5,0.5],
+  [24,52,48],
+ ],
+ [ 4,
+  [0.546875,0.41796875,0.4453125],
+  [0.5625,0.4375,0.5],
+  [8,10,28],
+ ],
+ [ 4,
+  [0.546875,0.453125,0.41796875],
+  [0.5546875,0.48046875,0.4375],
+  [4,14,10],
+ ],
+ [ 4,
+  [0.546875,0.4375,0.4375],
+  [0.609375,0.5,0.5],
+  [32,32,32],
+ ],
+ [ 4,
+  [0.546875,0.4921875,0.41796875],
+  [0.56640625,0.5,0.4375],
+  [10,4,10],
+ ],
+ [ 4,
+  [0.546875,0.48046875,0.41796875],
+  [0.5703125,0.4921875,0.4375],
+  [12,6,10],
+ ],
+ [ 4,
+  [0.55859375,0.46875,0.43359375],
+  [0.5703125,0.48046875,0.4375],
+  [6,6,2],
+ ],
+ [ 2,
+  [0.265625,0.28125,0.28125],
+  [0.5,0.5,0.5],
+  [30,28,28],
+ ],
+ [ 3,
+  [0.328125,0.3359375,0.328125],
+  [0.5,0.5,0.5],
+  [44,42,44],
+ ],
+ [ 4,
+  [0.4140625,0.40625,0.40625],
+  [0.5,0.5,0.5],
+  [44,48,48],
+ ],
+]
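
A quick usage sketch for the two helpers added above (keyword names per the
diff):

    from yt.testing import fake_random_pf, fake_amr_pf

    pf = fake_random_pf(16, particles=100)  # uniform grid + 100 random particles
    dd = pf.h.all_data()
    assert dd["io", "particle_mass"].size == 100

    amr = fake_amr_pf()  # 40-grid sample of the IsolatedGalaxy hierarchy above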

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -71,7 +71,7 @@
     def add_grids(self, grids):
         gles = np.array([g.LeftEdge for g in grids])
         gres = np.array([g.RightEdge for g in grids])
-        gids = np.array([g.id for g in grids])
+        gids = np.array([g.id for g in grids], dtype="int64")
         add_pygrids(self.trunk, gids.size, gles, gres, gids,
                     self.comm_rank, self.comm_size)
         del gles, gres, gids, grids

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -59,7 +59,9 @@
         A dictionary of field parameters to set.
     """
 
-    field_obj = pf.field_info[field_name]
+    if isinstance(field_name, tuple):
+        field_name = field_name[1]
+    field_obj = pf._get_field_info(field_name)
     if field_obj.particle_type:
         print( "Saving particle fields currently not supported." )
         return
@@ -84,6 +86,9 @@
     # add field info to field_types group
     g = fhandle["field_types"]
     # create the subgroup with the field's name
+    if isinstance(field_name, tuple):
+        field_name = field_name[1]
+    fi = pf._get_field_info(field_name)
     try:
         sg = g.create_group(field_name)
     except ValueError:
@@ -91,8 +96,8 @@
         sys.exit(1)
         
     # grab the display name and units from the field info container.
-    display_name = pf.field_info[field_name].display_name
-    units = pf.field_info[field_name].get_units()
+    display_name = fi.display_name
+    units = fi.get_units()
 
     # check that they actually contain something...
     if display_name:
@@ -122,9 +127,8 @@
         pt_group = particles_group[particle_type_name]
         # add the field data to the grid group
         # Check if this is a real field or particle data.
-        field_obj = pf.field_info[field_name]
         grid.get_data(field_name)
-        if field_obj.particle_type:  # particle data
+        if fi.particle_type:  # particle data
             pt_group[field_name] = grid[field_name]
         else:  # a field
             grid_group[field_name] = grid[field_name]

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -1,3 +1,4 @@
+import os
 """
 Compatibility module
 
@@ -20,7 +21,7 @@
 from .Interpolators import *
 from .misc_utilities import *
 from .Octree import *
-from .png_writer import *
+from .image_utilities import *
 from .PointsInVolume import *
 from .QuadTree import *
 from .RayIntegrators import *

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -65,7 +65,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def clyindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1, 
+def cylindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1, 
                           np.ndarray[np.float64_t, ndim=1] p2, 
                           np.ndarray[np.float64_t, ndim=2] left_edges, 
                           np.ndarray[np.float64_t, ndim=2] right_edges):
@@ -152,7 +152,7 @@
                                      np.argwhere(tmmright).flat, 
                                      np.argwhere(tpmright).flat,]))
     if 0 == inds.shape[0]:
-        inds = np.arange(I)
+        inds = np.arange(np.int64(I))
         thetaleft = np.empty(I)
         thetaleft.fill(p1[2])
         thetaright = np.empty(I)

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -29,8 +29,12 @@
     void FIX_LONG( unsigned )
     void FIX_FLOAT( float )
 
-cdef extern from "alloca.h":
-    void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+    cdef extern from "malloc.h":
+        void *alloca(int)
+ELSE:
+    cdef extern from "alloca.h":
+        void *alloca(int)
 
 cdef extern from "stdio.h":
     cdef int SEEK_SET

diff -r 94bf550f21d8b5e6e8a353ec1d478bbbb39505fd -r 0e8f3a63d2505959576d96070c1863b5cde8a96a yt/utilities/lib/image_utilities.pyx
--- /dev/null
+++ b/yt/utilities/lib/image_utilities.pyx
@@ -0,0 +1,39 @@
+"""
+Utilities for images
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from fp_utils cimport iclip
+
+def add_points_to_image(
+        np.ndarray[np.uint8_t, ndim=3] buffer,
+        np.ndarray[np.float64_t, ndim=1] px,
+        np.ndarray[np.float64_t, ndim=1] py,
+        np.float64_t pv):
+    cdef int i, j, k, pi
+    cdef int npart = px.shape[0]  # renamed from "np" to avoid shadowing numpy
+    cdef int xs = buffer.shape[0]
+    cdef int ys = buffer.shape[1]
+    cdef int v
+    v = iclip(<int>(pv * 255), 0, 255)
+    for pi in range(npart):
+        j = <int> (xs * px[pi])
+        i = <int> (ys * py[pi])
+        for k in range(3):
+            buffer[i, j, k] = 0
+    return
+    #for i in range(xs):
+    #    for j in range(ys):
+    #        for k in range(3):
+    #            v = buffer[i, j, k]
+    #            buffer[i, j, k] = iclip(v, 0, 255)
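
A usage sketch for the new helper (note it currently stamps black pixels; the
clamped v is only consumed by the commented-out second pass):

    import numpy as np
    from yt.utilities.lib.image_utilities import add_points_to_image

    buf = np.full((64, 64, 3), 255, dtype=np.uint8)  # white canvas
    px = np.random.uniform(size=256)                 # normalized [0, 1) x
    py = np.random.uniform(size=256)                 # normalized [0, 1) y
    add_points_to_image(buf, px, py, 1.0)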

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/9624a463b7ff/
Changeset:   9624a463b7ff
Branch:      yt-3.0
User:        jzuhone
Date:        2014-01-21 15:43:54
Summary:     Merged yt_analysis/yt/yt-3.0 into yt-3.0
Affected #:  10 files

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -264,7 +264,14 @@
 
     def _get_hosts(self):
         if self.comm.rank == 0 or self.comm.size == 1:
-            server_address = socket.gethostname()
+            
+            # Temporary Mac hostname fix
+            try:
+                server_address = socket.gethostname()
+                socket.gethostbyname(server_address)
+            except socket.gaierror:
+                server_address = "localhost"
+
             sock = socket.socket()
             sock.bind(('', 0))
             port = sock.getsockname()[-1]

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -595,20 +595,6 @@
     _cached_field_list = None
     _cached_derived_field_list = None
 
-    def _detect_fields(self):
-        if self.__class__._cached_field_list is None:
-            EnzoHierarchy._detect_fields(self)
-            self.__class__._cached_field_list = self.field_list
-        else:
-            self.field_list = self.__class__._cached_field_list
-
-    def _setup_derived_fields(self):
-        if self.__class__._cached_derived_field_list is None:
-            EnzoHierarchy._setup_derived_fields(self)
-            self.__class__._cached_derived_field_list = self.derived_field_list
-        else:
-            self.derived_field_list = self.__class__._cached_derived_field_list
-
     def _generate_random_grids(self):
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -39,12 +39,13 @@
         f = h5py.File(grid.filename, "r")
         group = f["/Grid%08i" % grid.id]
         fields = []
+        add_io = "io" in grid.pf.particle_types
         for name, v in group.iteritems():
             # NOTE: This won't work with 1D datasets.
             if not hasattr(v, "shape"):
                 continue
             elif len(v.dims) == 1:
-                fields.append( ("io", str(name)) )
+                if add_io: fields.append( ("io", str(name)) )
             else:
                 fields.append( ("gas", str(name)) )
         f.close()
@@ -240,6 +241,74 @@
         # In-place unit conversion requires we return a copy
         return tr.copy()
 
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        rv = {}
+        # Now we have to do something unpleasant
+        chunks = list(chunks)
+        if selector.__class__.__name__ == "GridSelector":
+            if not (len(chunks) == len(chunks[0].objs) == 1):
+                raise RuntimeError
+            g = chunks[0].objs[0]
+            for ftype, fname in fields:
+                rv[(ftype, fname)] = self.grids_in_memory[g.id][fname].swapaxes(0,2)
+            return rv
+        if size is None:
+            size = sum((g.count(selector) for chunk in chunks
+                        for g in chunk.objs))
+
+        for field in fields:
+            ftype, fname = field
+            fsize = size
+            rv[field] = np.empty(fsize, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                   size, [f2 for f1, f2 in fields], ng)
+
+        ind = 0
+        for chunk in chunks:
+            for g in chunk.objs:
+                if g.id not in self.grids_in_memory: continue
+
+                for field in fields:
+                    ftype, fname = field
+                    # View the (z, y, x) in-memory array as (x, y, z) for select().
+                    data_view = self.grids_in_memory[g.id][fname].swapaxes(0,2)
+                    nd = g.select(selector, data_view, rv[field], ind)
+                ind += nd
+        return rv
+
+    def _read_particle_coords(self, chunks, ptf):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            for g in chunk.objs:
+                if g.id not in self.grids_in_memory: continue
+                nap = sum(g.NumberOfActiveParticles.values())
+                if g.NumberOfParticles == 0 and nap == 0: continue
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z = self.grids_in_memory[g.id]['particle_position_x'], \
+                                        self.grids_in_memory[g.id]['particle_position_y'], \
+                                        self.grids_in_memory[g.id]['particle_position_z']
+                    yield ptype, (x, y, z)
+
+    def _read_particle_fields(self, chunks, ptf, selector):
+        chunks = list(chunks)
+        for chunk in chunks: # These should be organized by grid filename
+            for g in chunk.objs:
+                if g.id not in self.grids_in_memory: continue
+                nap = sum(g.NumberOfActiveParticles.values())
+                if g.NumberOfParticles == 0 and nap == 0: continue
+                for ptype, field_list in sorted(ptf.items()):
+                    x, y, z = self.grids_in_memory[g.id]['particle_position_x'], \
+                                        self.grids_in_memory[g.id]['particle_position_y'], \
+                                        self.grids_in_memory[g.id]['particle_position_z']
+                    mask = selector.select_points(x, y, z)
+                    if mask is None: continue
+                    for field in field_list:
+                        data = self.grids_in_memory[g.id][field]
+                        if field in _convert_mass:
+                            data *= g.dds.prod(dtype="f8")
+                        yield (ptype, field), data[mask]
+
     @property
     def _read_exception(self):
         return KeyError

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -100,26 +100,6 @@
                     data = p_fields[start:end, fi]
                     yield (ptype, field), data[mask]
 
-    def _read_data_set(self, grid, field):
-        f = self._handle
-        f_part = self._particle_handle
-        if field in self._particle_fields:
-            if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
-            start = self.pf.h._particle_indices[grid.id - grid._id_offset]
-            end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
-            fi = self._particle_fields[field]
-            tr = f_part["/tracer particles"][start:end, fi]
-        else:
-            tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
-        return tr.astype("float64")
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(None), slice(None), slice(None)]
-        sl[axis] = slice(coord, coord + 1)
-        f = self._handle
-        tr = f["/%s" % field][grid.id - grid._id_offset].transpose()[sl]
-        return tr.astype("float64")
-
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
         if any((ftype != "gas" for ftype, fname in fields)):
@@ -129,8 +109,8 @@
         for field in fields:
             ftype, fname = field
             dt = f["/%s" % fname].dtype
-            if dt == "float32": dt = "float64"
-            rv[field] = np.empty(size, dtype=dt)
+            # Always use *native* 64-bit float.
+            rv[field] = np.empty(size, dtype="=f8")
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)
@@ -161,6 +141,6 @@
                 end = gs[-1].id - gs[-1]._id_offset + 1
                 data = ds[start:end,:,:,:].transpose()
                 for i, g in enumerate(gs):
-                    rv[g.id][field] = data[...,i]
+                    rv[g.id][field] = np.asarray(data[...,i], "=f8")
         return rv
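
For context, the "=f8" spec above forces a native-endian 64-bit float no
matter what dtype the file stored:

    import numpy as np
    buf = np.empty(8, dtype="=f8")  # native-endian float64, even for float32 on disk
    assert buf.dtype == np.float64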
 

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -22,6 +22,7 @@
 import glob
 import time
 import os
+import types
 
 from yt.utilities.fortran_utils import read_record
 from yt.utilities.logger import ytLogger as mylog
@@ -50,6 +51,10 @@
 from yt.fields.particle_fields import \
     particle_deposition_functions, \
     standard_particle_fields
+from .definitions import \
+    gadget_header_specs, \
+    gadget_field_specs, \
+    gadget_ptype_specs
 
 try:
     import requests
@@ -147,29 +152,21 @@
     _particle_coordinates_name = "Coordinates"
     _particle_velocity_name = "Velocities"
     _suffix = ""
-    _header_spec = (('Npart', 6, 'i'),
-                    ('Massarr', 6, 'd'),
-                    ('Time', 1, 'd'),
-                    ('Redshift', 1, 'd'),
-                    ('FlagSfr', 1, 'i'),
-                    ('FlagFeedback', 1, 'i'),
-                    ('Nall', 6, 'i'),
-                    ('FlagCooling', 1, 'i'),
-                    ('NumFiles', 1, 'i'),
-                    ('BoxSize', 1, 'd'),
-                    ('Omega0', 1, 'd'),
-                    ('OmegaLambda', 1, 'd'),
-                    ('HubbleParam', 1, 'd'),
-                    ('FlagAge', 1, 'i'),
-                    ('FlagMEtals', 1, 'i'),
-                    ('NallHW', 6, 'i'),
-                    ('unused', 16, 'i'))
 
     def __init__(self, filename, data_style="gadget_binary",
                  additional_fields=(),
                  unit_base=None, n_ref=64,
                  over_refine_factor=1,
-                 bounding_box = None):
+                 bounding_box = None,
+                 header_spec = "default",
+                 field_spec = "default",
+                 ptype_spec = "default"):
+        self._header_spec = self._setup_binary_spec(
+            header_spec, gadget_header_specs)
+        self._field_spec = self._setup_binary_spec(
+            field_spec, gadget_field_specs)
+        self._ptype_spec = self._setup_binary_spec(
+            ptype_spec, gadget_ptype_specs)
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
         self.storage_filename = None
@@ -188,6 +185,14 @@
             self.domain_left_edge = self.domain_right_edge = None
         super(GadgetStaticOutput, self).__init__(filename, data_style)
 
+    def _setup_binary_spec(self, spec, spec_dict):
+        if isinstance(spec, types.StringTypes):
+            _hs = ()
+            for hs in spec.split("+"):
+                _hs += spec_dict[hs]
+            spec = _hs
+        return spec
+
     def __repr__(self):
         return os.path.basename(self.parameter_filename).split(".")[0]
 

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/sph/definitions.py
--- a/yt/frontends/sph/definitions.py
+++ b/yt/frontends/sph/definitions.py
@@ -3,3 +3,46 @@
 ghdf5_ptypes  = ("PartType0", "PartType1", "PartType2", "PartType3",
                  "PartType4", "PartType5")
 
+gadget_header_specs = dict(
+    default      = (('Npart', 6, 'i'),
+                    ('Massarr', 6, 'd'),
+                    ('Time', 1, 'd'),
+                    ('Redshift', 1, 'd'),
+                    ('FlagSfr', 1, 'i'),
+                    ('FlagFeedback', 1, 'i'),
+                    ('Nall', 6, 'i'),
+                    ('FlagCooling', 1, 'i'),
+                    ('NumFiles', 1, 'i'),
+                    ('BoxSize', 1, 'd'),
+                    ('Omega0', 1, 'd'),
+                    ('OmegaLambda', 1, 'd'),
+                    ('HubbleParam', 1, 'd'),
+                    ('FlagAge', 1, 'i'),
+                    ('FlagMEtals', 1, 'i'),
+                    ('NallHW', 6, 'i'),
+                    ('unused', 16, 'i')),
+    pad32       = (('empty',  32, 'c'),),
+    pad64       = (('empty',  64, 'c'),),
+    pad128      = (('empty', 128, 'c'),),
+    pad256      = (('empty', 256, 'c'),),
+)
+
+gadget_ptype_specs = dict(
+    default = ( "Gas",
+                "Halo",
+                "Disk",
+                "Bulge",
+                "Stars",
+                "Bndry" )
+)
+
+gadget_field_specs = dict(
+    default = ( "Coordinates",
+                "Velocities",
+                "ParticleIDs",
+                "Mass",
+                ("InternalEnergy", "Gas"),
+                ("Density", "Gas"),
+                ("SmoothingLength", "Gas"),
+    )
+)
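
A sketch of how the new spec plumbing is driven from the loader (the
"+"-joined string form follows _setup_binary_spec in the data_structures.py
diff above; the snapshot filename is illustrative):

    from yt.frontends.sph.data_structures import GadgetStaticOutput

    pf = GadgetStaticOutput("snapshot_010",
                            header_spec="default+pad256",  # concatenated specs
                            field_spec="default",
                            ptype_spec="default")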

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -174,12 +174,6 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
 
     # Particle types (Table 3 in GADGET-2 user guide)
-    _ptypes = ( "Gas",
-                "Halo",
-                "Disk",
-                "Bulge",
-                "Stars",
-                "Bndry" )
     #
     # Blocks in the file:
     #   HEAD
@@ -195,16 +189,12 @@
     #   ENDT    (only if enabled in makefile)
     #   TSTP    (only if enabled in makefile)
 
-    _fields = ( "Coordinates",
-                "Velocities",
-                "ParticleIDs",
-                "Mass",
-                ("InternalEnergy", "Gas"),
-                ("Density", "Gas"),
-                ("SmoothingLength", "Gas"),
-    )
+    _var_mass = None
 
-    _var_mass = None
+    def __init__(self, pf, *args, **kwargs):
+        self._fields = pf._field_spec
+        self._ptypes = pf._ptype_spec
+        super(IOHandlerGadgetBinary, self).__init__(pf, *args, **kwargs)
 
     @property
     def var_mass(self):

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -386,7 +386,7 @@
         name = os.path.expanduser(name)
         if name[-1] == os.sep and not os.path.isdir(name):
             os.mkdir(name)
-        if os.path.isdir(name):
+        if os.path.isdir(name) and name != str(self.pf):
             name = name + (os.sep if name[-1] != os.sep else '') + str(self.pf)
         suffix = get_image_suffix(name)
         if suffix != '':

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -259,11 +259,10 @@
         >>> write_bitmap(im, 'render_with_grids.png')
 
         """
-        if self.region is None:
-            self.region = self.pf.h.region((self.re + self.le) / 2.0,
-                                           self.le, self.re)
-        corners = self.region.grid_corners
-        levels = self.region.grid_levels[:,0]
+        region = self.pf.h.region((self.re + self.le) / 2.0,
+                                  self.le, self.re)
+        corners = region.grid_corners
+        levels = region.grid_levels[:,0]
 
         if max_level is not None:
             subset = levels <= max_level
@@ -603,7 +602,7 @@
         ax.get_yaxis().set_visible(False)
         ax.get_yaxis().set_ticks([])
         cb = self._pylab.colorbar(ax.images[0], pad=0.0, fraction=0.05, drawedges=True, shrink=0.9)
-        label = self.pf.field_info[self.fields[0]].get_label()
+        label = self.pf._get_field_info(self.fields[0]).get_label()
         if self.log_fields[0]:
             label = '$\\rm{log}\\/ $' + label
         self.transfer_function.vert_cbar(ax=cb.ax, label=label)

diff -r 0e8f3a63d2505959576d96070c1863b5cde8a96a -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -99,7 +99,7 @@
         """
         self.log = log
         self.pf.h
-        self.pf.field_info[self.field].take_log = log
+        self.pf._get_field_info(self.field).take_log = log
 
     def build_transfer_function(self):
         """
@@ -185,7 +185,7 @@
 
         ax.set_xscale({True: 'log', False: 'linear'}[self.log])
         ax.set_xlim(x.min(), x.max())
-        ax.set_xlabel(self.pf.field_info[self.field].get_label())
+        ax.set_xlabel(self.pf._get_field_info(self.field).get_label())
         ax.set_ylabel(r'$\mathrm{alpha}$')
         ax.set_ylim(y.max()*1.0e-3, y.max()*2)
 
@@ -204,7 +204,7 @@
         prof = BinnedProfile1D(self.pf.h.all_data(), 128, self.field,
                                self.bounds[0], self.bounds[1],
                                log_space=self.log,
-                               lazy_reader=False, end_collect=False)
+                               end_collect=False)
         prof.add_fields([profile_field], fractional=False,
                         weight=profile_weight)
         self.profiles[self.field] = prof

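Several hunks in this changeset swap direct field_info[...] indexing for pf._get_field_info(...), which resolves either a bare field name or a (ftype, fname) tuple to a single field-info object. A rough sketch of that normalization, using a plain dict in place of yt's real resolver (illustrative only):

    field_info = {("gas", "density"): "density-info"}

    def get_field_info(field, default_ftype="gas"):
        # accept either "density" or ("gas", "density")
        if not isinstance(field, tuple):
            field = (default_ftype, field)
        return field_info[field]

    assert get_field_info("density") == "density-info"
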

https://bitbucket.org/yt_analysis/yt/commits/573c8b1bff41/
Changeset:   573c8b1bff41
Branch:      yt-3.0
User:        jzuhone
Date:        2014-03-08 17:45:09
Summary:     Merging
Affected #:  23 files

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
 yt/utilities/lib/fortran_reader.c
 yt/utilities/lib/freetype_writer.c
 yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_utilities.c
 yt/utilities/lib/Interpolators.c
 yt/utilities/lib/kdtree.c
 yt/utilities/lib/mesh_utilities.c

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -503,10 +503,8 @@
     cd $LIB
     if [ ! -z `echo $LIB | grep h5py` ]
     then
-        shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     else
-        shift
         ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     fi
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -222,6 +222,10 @@
         if self.workgroup.name != "readers": return None
         tpf = ts[0]
         ptype = self.particle_type
+        if ptype not in tpf.particle_types and ptype != 'all':
+            has_particle_filter = tpf.add_particle_filter(ptype)
+            if not has_particle_filter:
+                raise RuntimeError("Particle type (filter) %s not found." % (ptype))
 
         dd = tpf.h.all_data()
         # Get DM particle mass.
@@ -250,6 +254,7 @@
         p['right_edge'] = tpf.domain_right_edge
         p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
         p['particle_mass'] = self.particle_mass = particle_mass
+        del tpf
         return p
 
     def __del__(self):
@@ -282,15 +287,37 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1, callbacks = None):
+    def run(self, block_ratio = 1, callbacks = None, restart = False):
         """
         
         """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        # Find restart output number
+        num_outputs = len(self.ts)
+        if restart:
+            restart_file = os.path.join(self.outbase, "restart.cfg")
+            if not os.path.exists(restart_file):
+                raise RuntimeError("Restart file %s not found" % (restart_file))
+            with open(restart_file) as restart_fh:
+                for l in restart_fh:
+                    if l.startswith("RESTART_SNAP"):
+                        restart_num = int(l.split("=")[1])
+                    if l.startswith("NUM_WRITERS"):
+                        num_writers = int(l.split("=")[1])
+            if num_writers != self.num_writers:
+                raise RuntimeError(
+                    "Number of writers in restart has changed from the original "
+                    "run (OLD = %d, NEW = %d).  To avoid problems in the "
+                    "restart, choose the same number of writers." % \
+                        (num_writers, self.num_writers))
+            # Remove the datasets that were already analyzed
+            self.ts._pre_outputs = self.ts._pre_outputs[restart_num:]
+        else:
+            restart_num = 0
         self.handler.setup_rockstar(self.server_address, self.port,
-                    len(self.ts), self.total_particles, 
+                    num_outputs, self.total_particles, 
                     self.particle_type,
                     particle_mass = self.particle_mass,
                     parallel = self.comm.size > 1,
@@ -300,11 +327,12 @@
                     block_ratio = block_ratio,
                     outbase = self.outbase,
                     force_res = self.force_res,
-                    callbacks = callbacks)
+                    callbacks = callbacks,
+                    restart_num = restart_num)
         # Make the directory to store the halo lists in.
         if not self.outbase:
             self.outbase = os.getcwd()
-        if self.comm.rank == 0:
+        if self.comm.rank == 0 and not restart:
             if not os.path.exists(self.outbase):
                 os.makedirs(self.outbase)
             # Make a record of which dataset corresponds to which set of

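The restart branch above pulls two integers out of Rockstar's restart.cfg, a plain "KEY = value" text file, and then trims the already-analyzed datasets off the time series. The parse itself, reduced to a self-contained snippet (file contents invented):

    sample = "RESTART_SNAP = 5\nNUM_WRITERS = 8\n"

    restart_num = num_writers = None
    for l in sample.splitlines():
        if l.startswith("RESTART_SNAP"):
            restart_num = int(l.split("=")[1])
        if l.startswith("NUM_WRITERS"):
            num_writers = int(l.split("=")[1])
    assert (restart_num, num_writers) == (5, 8)
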
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -100,6 +100,7 @@
     char *INBASE
     char *FILENAME
     np.int64_t STARTING_SNAP
+    np.int64_t RESTART_SNAP
     np.int64_t NUM_SNAPS
     np.int64_t NUM_BLOCKS
     np.int64_t NUM_READERS
@@ -188,6 +189,10 @@
     # if the number of readers > 1.
     dd = pf.h.all_data()
 
+    # Add particle type filter if not defined
+    if rh.particle_type not in pf.particle_types and rh.particle_type != 'all':
+        pf.add_particle_filter(rh.particle_type)
+
     if NUM_BLOCKS > 1:
         local_parts = 0
         for chunk in parallel_objects(
@@ -226,6 +231,8 @@
             fi += 1
         pi += npart
     num_p[0] = local_parts
+    del pf._instantiated_hierarchy
+    del pf
 
 cdef class RockstarInterface:
 
@@ -253,13 +260,13 @@
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None",
-                       callbacks = None):
+                       callbacks = None, int restart_num = 0):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES, RESTART_SNAP
         if force_res is not None:
             FORCE_RES=np.float64(force_res)
             #print "set force res to ",FORCE_RES
@@ -278,6 +285,7 @@
         FILE_FORMAT = "GENERIC"
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
+        RESTART_SNAP = restart_num
         NUM_READERS = num_readers
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -617,17 +617,6 @@
             mylog.info("Re-writing halo %d" % halo['id'])
             self._write_profile(profile, filename, format='%0.6e')
 
-        if newProfile:
-            # Temporary solution to memory leak.
-            for g in self.pf.h.grids:
-                g.clear_data()
-            sphere.clear_data()
-            del sphere
-            # Currently, this seems to be the only way to prevent large 
-            # halo profiling runs from running out of ram.
-            # It would be good to track down the real cause at some point.
-            gc.collect()
-
         return profile
 
     def _get_halo_sphere(self, halo):
@@ -637,7 +626,6 @@
         """
 
         sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
-        #if len(sphere._grids) == 0: return None
         new_sphere = False
 
         if self.recenter:
@@ -663,11 +651,6 @@
             new_sphere = True
 
         if new_sphere:
-            # Temporary solution to memory leak.
-            for g in self.pf.h.grids:
-                g.clear_data()
-            sphere.clear_data()
-            del sphere
             sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
 
         if self._need_bulk_velocity:

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -409,6 +409,7 @@
 
         rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
         rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.base_dds = self.pf.domain_width / self.pf.domain_dimensions
         self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
@@ -456,8 +457,9 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        self._data_source = self.pf.h.region(
-            self.center, self.left_edge, self.right_edge)
+        self._data_source = self.pf.h.region(self.center,
+            self.left_edge - self.base_dds,
+            self.right_edge + self.base_dds)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
 

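The covering-grid change above widens the backing region by one base-level cell (base_dds) on each side; a small numeric illustration of the padded bounds, with invented domain values:

    import numpy as np

    domain_width = np.array([1.0, 1.0, 1.0])
    domain_dimensions = np.array([32, 32, 32])
    base_dds = domain_width / domain_dimensions   # one coarse cell = 1/32

    left_edge = np.array([0.25, 0.25, 0.25])
    right_edge = np.array([0.75, 0.75, 0.75])

    # the data source extends one coarse cell past the grid on every side
    region_le = left_edge - base_dds              # [0.21875, 0.21875, 0.21875]
    region_re = right_edge + base_dds             # [0.78125, 0.78125, 0.78125]
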
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -317,7 +317,7 @@
         coords[:] = self.Level
         return coords
 
-    def tcoords(self, dobj):
+    def select_tcoords(self, dobj):
         dt, t = dobj.selector.get_dt(self)
         return dt, t
 

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -123,6 +123,17 @@
         for i, sl in slicer:
             yield sl, mask[:,:,:,i]
 
+    def select_tcoords(self, dobj):
+        # These will not be pre-allocated, which can be a problem for speed and
+        # memory usage.
+        dts, ts = [], []
+        for sl, mask in self.select_blocks(dobj.selector):
+            sl.child_mask = mask
+            dt, t = dobj.selector.get_dt(sl)
+            dts.append(dt)
+            ts.append(t)
+        return np.concatenate(dts), np.concatenate(ts)
+
     @property
     def domain_ind(self):
         if self._domain_ind is None:

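The new octree select_tcoords gathers one (dt, t) pair per block and concatenates at the end, since the total size is not known up front (hence the pre-allocation caveat in its comment). The accumulation pattern in isolation, with fabricated block data:

    import numpy as np

    blocks = [np.arange(3, dtype="float64"), np.arange(4, dtype="float64")]
    dts, ts = [], []
    for b in blocks:
        dts.append(b * 0.1)   # stand-in for the dt from selector.get_dt(sl)
        ts.append(b)          # stand-in for the t from selector.get_dt(sl)
    dt, t = np.concatenate(dts), np.concatenate(ts)
    assert dt.size == t.size == 7
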
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -122,7 +122,7 @@
         if mask is None: return np.empty(0, dtype='int32')
         return ind[mask]
 
-    def tcoords(self, dobj):
+    def select_tcoords(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty(0, dtype='float64')
         dt, t = dobj.selector.get_dt_mesh(self, mask.sum(), self._index_offset)

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/progressbar.py
--- /dev/null
+++ b/yt/extern/progressbar.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: iso-8859-1 -*-
+#
+# progressbar  - Text progressbar library for python.
+# Copyright (c) 2005 Nilton Volpato
+# 
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+# 
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+
+"""Text progressbar library for python.
+
+This library provides a text mode progressbar. This is typically used
+to display the progress of a long-running operation, providing a
+visual cue that processing is underway.
+
+The ProgressBar class manages the progress, and the format of the line
+is given by a number of widgets. A widget is an object that may
+display differently depending on the state of the progress. There are
+three types of widget:
+- a string, which always shows itself;
+- a ProgressBarWidget, which may return a different value every time
+its update method is called; and
+- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+expands to fill the remaining width of the line.
+
+The progressbar module is very easy to use, yet very powerful. It also
+automatically supports features like auto-resizing when available.
+"""
+
+__author__ = "Nilton Volpato"
+__author_email__ = "first-name dot last-name @ gmail.com"
+__date__ = "2006-05-07"
+__version__ = "2.2"
+
+# Changelog
+#
+# 2006-05-07: v2.2 fixed bug in windows
+# 2005-12-04: v2.1 autodetect terminal width, added start method
+# 2005-12-04: v2.0 everything is now a widget (wow!)
+# 2005-12-03: v1.0 rewrite using widgets
+# 2005-06-02: v0.5 rewrite
+# 2004-??-??: v0.1 first version
+
+
+import sys, time
+from array import array
+try:
+    from fcntl import ioctl
+    import termios
+except ImportError:
+    pass
+import signal
+
+class ProgressBarWidget(object):
+    """This is an element of ProgressBar formatting.
+
+    The ProgressBar object will call its update method when an update
+    is needed. Its size may change between calls, but the results will
+    not be good if the size changes drastically and repeatedly.
+    """
+    def update(self, pbar):
+        """Returns the string representing the widget.
+
+        The parameter pbar is a reference to the calling ProgressBar,
+        where one can access attributes of the class for knowing how
+        the update must be made.
+
+        This method, at a minimum, must be overridden."""
+        pass
+
+class ProgressBarWidgetHFill(object):
+    """This is a variable width element of ProgressBar formatting.
+
+    The ProgressBar object will call its update method, passing the
+    width this object must fill. This is like TeX \\hfill: it will
+    expand to fill the line. You can use more than one in the same
+    line, and they will all have the same width, and together will
+    fill the line.
+    """
+    def update(self, pbar, width):
+        """Returns the string representing the widget.
+
+        The parameter pbar is a reference to the calling ProgressBar,
+        where one can access attributes of the class for knowing how
+        the update must be made. The parameter width is the total
+        horizontal width the widget must have.
+
+        This method, at a minimum, must be overridden."""
+        pass
+
+
+class ETA(ProgressBarWidget):
+    "Widget for the Estimated Time of Arrival"
+    def format_time(self, seconds):
+        return time.strftime('%H:%M:%S', time.gmtime(seconds))
+    def update(self, pbar):
+        if pbar.currval == 0:
+            return 'ETA:  --:--:--'
+        elif pbar.finished:
+            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
+        else:
+            elapsed = pbar.seconds_elapsed
+            eta = elapsed * pbar.maxval / pbar.currval - elapsed
+            return 'ETA:  %s' % self.format_time(eta)
+
+class FileTransferSpeed(ProgressBarWidget):
+    "Widget for showing the transfer speed (useful for file transfers)."
+    def __init__(self):
+        self.fmt = '%6.2f %s'
+        self.units = ['B','K','M','G','T','P']
+    def update(self, pbar):
+        if pbar.seconds_elapsed < 2e-6:#== 0:
+            bps = 0.0
+        else:
+            bps = float(pbar.currval) / pbar.seconds_elapsed
+        spd = bps
+        for u in self.units:
+            if spd < 1000:
+                break
+            spd /= 1000
+        return self.fmt % (spd, u+'/s')
+
+class RotatingMarker(ProgressBarWidget):
+    "A rotating marker for filling the bar of progress."
+    def __init__(self, markers='|/-\\'):
+        self.markers = markers
+        self.curmark = -1
+    def update(self, pbar):
+        if pbar.finished:
+            return self.markers[0]
+        self.curmark = (self.curmark + 1)%len(self.markers)
+        return self.markers[self.curmark]
+
+class Percentage(ProgressBarWidget):
+    "Just the percentage done."
+    def update(self, pbar):
+        return '%3d%%' % pbar.percentage()
+
+class Bar(ProgressBarWidgetHFill):
+    "The bar of progress. It will strech to fill the line."
+    def __init__(self, marker='#', left='|', right='|'):
+        self.marker = marker
+        self.left = left
+        self.right = right
+    def _format_marker(self, pbar):
+        if isinstance(self.marker, (str, unicode)):
+            return self.marker
+        else:
+            return self.marker.update(pbar)
+    def update(self, pbar, width):
+        percent = pbar.percentage()
+        cwidth = width - len(self.left) - len(self.right)
+        marked_width = int(percent * cwidth / 100)
+        m = self._format_marker(pbar)
+        bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
+        return bar
+
+class ReverseBar(Bar):
+    "The reverse bar of progress, or bar of regress. :)"
+    def update(self, pbar, width):
+        percent = pbar.percentage()
+        cwidth = width - len(self.left) - len(self.right)
+        marked_width = int(percent * cwidth / 100)
+        m = self._format_marker(pbar)
+        bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
+        return bar
+
+default_widgets = [Percentage(), ' ', Bar()]
+class ProgressBar(object):
+    """This is the ProgressBar class, it updates and prints the bar.
+
+    The term_width parameter may be an integer, or None, in which case
+    the bar will try to guess it; if that fails it defaults to 80 columns.
+
+    The simplest use is like this:
+    >>> pbar = ProgressBar().start()
+    >>> for i in xrange(100):
+    ...    # do something
+    ...    pbar.update(i+1)
+    ...
+    >>> pbar.finish()
+
+    But anything you want to do is possible (well, almost anything).
+    You can supply different widgets of any type in any order, and you
+    can even write your own widgets! There are many widgets already
+    shipped, and you should experiment with them.
+
+    When implementing a widget's update method you may access any
+    attribute or method of the ProgressBar object that is calling the
+    widget's update method. The most important attributes you would
+    want to access are:
+    - currval: current value of the progress, 0 <= currval <= maxval
+    - maxval: maximum (and final) value of the progress
+    - finished: True if the bar has finished (reached 100%), False otherwise
+    - start_time: first time update() method of ProgressBar was called
+    - seconds_elapsed: seconds elapsed since start_time
+    - percentage(): percentage of the progress (this is a method)
+    """
+    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
+                 fd=sys.stderr):
+        assert maxval > 0
+        self.maxval = maxval
+        self.widgets = widgets
+        self.fd = fd
+        self.signal_set = False
+        if term_width is None:
+            try:
+                self.handle_resize(None,None)
+                signal.signal(signal.SIGWINCH, self.handle_resize)
+                self.signal_set = True
+            except:
+                self.term_width = 79
+        else:
+            self.term_width = term_width
+
+        self.currval = 0
+        self.finished = False
+        self.prev_percentage = -1
+        self.start_time = None
+        self.seconds_elapsed = 0
+
+    def handle_resize(self, signum, frame):
+        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
+        self.term_width = w
+
+    def percentage(self):
+        "Returns the percentage of the progress."
+        return self.currval*100.0 / self.maxval
+
+    def _format_widgets(self):
+        r = []
+        hfill_inds = []
+        num_hfill = 0
+        currwidth = 0
+        for i, w in enumerate(self.widgets):
+            if isinstance(w, ProgressBarWidgetHFill):
+                r.append(w)
+                hfill_inds.append(i)
+                num_hfill += 1
+            elif isinstance(w, (str, unicode)):
+                r.append(w)
+                currwidth += len(w)
+            else:
+                weval = w.update(self)
+                currwidth += len(weval)
+                r.append(weval)
+        for iw in hfill_inds:
+            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
+        return r
+
+    def _format_line(self):
+        return ''.join(self._format_widgets()).ljust(self.term_width)
+
+    def _need_update(self):
+        return int(self.percentage()) != int(self.prev_percentage)
+
+    def update(self, value):
+        "Updates the progress bar to a new value."
+        assert 0 <= value <= self.maxval
+        self.currval = value
+        if not self._need_update() or self.finished:
+            return
+        if not self.start_time:
+            self.start_time = time.time()
+        self.seconds_elapsed = time.time() - self.start_time
+        self.prev_percentage = self.percentage()
+        if value != self.maxval:
+            self.fd.write(self._format_line() + '\r')
+        else:
+            self.finished = True
+            self.fd.write(self._format_line() + '\n')
+
+    def start(self):
+        """Start measuring time, and prints the bar at 0%.
+
+        It returns self so you can use it like this:
+        >>> pbar = ProgressBar().start()
+        >>> for i in xrange(100):
+        ...    # do something
+        ...    pbar.update(i+1)
+        ...
+        >>> pbar.finish()
+        """
+        self.update(0)
+        return self
+
+    def finish(self):
+        """Used to tell the progress is finished."""
+        self.update(self.maxval)
+        if self.signal_set:
+            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+        
+
+
+
+
+
+if __name__=='__main__':
+    import os
+
+    def example1():
+        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
+                   ' ', ETA(), ' ', FileTransferSpeed()]
+        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
+        for i in range(1000000):
+            # do something
+            pbar.update(10*i+1)
+        pbar.finish()
+        print
+
+    def example2():
+        class CrazyFileTransferSpeed(FileTransferSpeed):
+            "It's bigger between 45 and 80 percent"
+            def update(self, pbar):
+                if 45 < pbar.percentage() < 80:
+                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
+                else:
+                    return FileTransferSpeed.update(self,pbar)
+
+        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
+        pbar = ProgressBar(widgets=widgets, maxval=10000000)
+        # maybe do something
+        pbar.start()
+        for i in range(2000000):
+            # do something
+            pbar.update(5*i+1)
+        pbar.finish()
+        print
+
+    def example3():
+        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
+        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
+        for i in range(1000000):
+            # do something
+            pbar.update(10*i+1)
+        pbar.finish()
+        print
+
+    def example4():
+        widgets = ['Test: ', Percentage(), ' ',
+                   Bar(marker='0',left='[',right=']'),
+                   ' ', ETA(), ' ', FileTransferSpeed()]
+        pbar = ProgressBar(widgets=widgets, maxval=500)
+        pbar.start()
+        for i in range(100,500+1,50):
+            time.sleep(0.2)
+            pbar.update(i)
+        pbar.finish()
+        print
+
+
+    example1()
+    example2()
+    example3()
+    example4()
+

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/progressbar/__init__.py
--- a/yt/extern/progressbar/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# progressbar  - Text progress bar library for Python.
-# Copyright (c) 2005 Nilton Volpato
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-"""Text progress bar library for Python.
-
-A text progress bar is typically used to display the progress of a long
-running operation, providing a visual cue that processing is underway.
-
-The ProgressBar class manages the current progress, and the format of the line
-is given by a number of widgets. A widget is an object that may display
-differently depending on the state of the progress bar. There are three types
-of widgets:
- - a string, which always shows itself
-
- - a ProgressBarWidget, which may return a different value every time its
-   update method is called
-
- - a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
-   expands to fill the remaining width of the line.
-
-The progressbar module is very easy to use, yet very powerful. It will also
-automatically enable features like auto-resizing when the system supports it.
-"""
-
-__author__ = 'Nilton Volpato'
-__author_email__ = 'first-name dot last-name @ gmail.com'
-__date__ = '2011-05-14'
-__version__ = '2.3'
-
-from compat import *
-from widgets import *
-from progressbar import *

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/progressbar/compat.py
--- a/yt/extern/progressbar/compat.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# progressbar  - Text progress bar library for Python.
-# Copyright (c) 2005 Nilton Volpato
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-"""Compatibility methods and classes for the progressbar module."""
-
-
-# Python 3.x (and backports) use a modified iterator syntax
-# This will allow 2.x to behave with 3.x iterators
-try:
-  next
-except NameError:
-    def next(iter):
-        try:
-            # Try new style iterators
-            return iter.__next__()
-        except AttributeError:
-            # Fallback in case of a "native" iterator
-            return iter.next()
-
-
-# Python < 2.5 does not have "any"
-try:
-  any
-except NameError:
-   def any(iterator):
-      for item in iterator:
-         if item: return True
-      return False

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/progressbar/progressbar.py
--- a/yt/extern/progressbar/progressbar.py
+++ /dev/null
@@ -1,426 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# progressbar  - Text progress bar library for Python.
-# Copyright (c) 2005 Nilton Volpato
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-"""Main ProgressBar class."""
-
-from __future__ import division
-
-import math
-import os
-import signal
-import sys
-import time
-import uuid
-
-try:
-    from fcntl import ioctl
-    from array import array
-    import termios
-except ImportError:
-    pass
-
-import widgets
-
-# Test to see if we are in an IPython session.
-ipython = None
-for key in ['KernelApp','IPKernelApp']:
-  try:
-    ipython = get_ipython().config[key]['parent_appname']
-  except (NameError, KeyError):
-    pass
-
-ipython_notebook_css = """
-td.pb_widget {
-    width: auto;
-}
-td.pb_widget_fill {
-    width: 100%;
-}
-table.pb {
-    font-family: monospace;
-    border: 0;
-    margin: 0;
-}
-table.pb tr { border: 0; }
-table.pb td {
-    white-space: nowrap;
-    border: 0;
-}
-div.pb {
-    border: 1px solid #ddd;
-    border-radius: 3px;
-}
-div.pb_bar {
-    height: 1.5em;
-}
-""".replace('\n', ' ')
-
-class UnknownLength: pass
-
-
-class ProgressBar(object):
-    """The ProgressBar class which updates and prints the bar.
-
-    A common way of using it is like:
-    >>> pbar = ProgressBar().start()
-    >>> for i in range(100):
-    ...    # do something
-    ...    pbar.update(i+1)
-    ...
-    >>> pbar.finish()
-
-    You can also use a ProgressBar as an iterator:
-    >>> progress = ProgressBar()
-    >>> for i in progress(some_iterable):
-    ...    # do something
-    ...
-
-    Since the progress bar is incredibly customizable you can specify
-    different widgets of any type in any order. You can even write your own
-    widgets! However, since there are already a good number of widgets you
-    should probably play around with them before moving on to create your own
-    widgets.
-
-    The term_width parameter represents the current terminal width. If the
-    parameter is set to an integer then the progress bar will use that,
-    otherwise it will attempt to determine the terminal width falling back to
-    80 columns if the width cannot be determined.
-
-    When implementing a widget's update method you are passed a reference to
-    the current progress bar. As a result, you have access to the
-    ProgressBar's methods and attributes. Although there is nothing preventing
-    you from changing the ProgressBar you should treat it as read only.
-
-    Useful methods and attributes include (Public API):
-     - currval: current progress (0 <= currval <= maxval)
-     - maxval: maximum (and final) value
-     - finished: True if the bar has finished (reached 100%)
-     - start_time: the time when start() method of ProgressBar was called
-     - seconds_elapsed: seconds elapsed since start_time and last call to
-                        update
-     - percentage(): progress in percent [0..100]
-    """
-
-    __slots__ = ('currval', 'fd', 'finished', 'last_update_time',
-                 'left_justify', 'maxval', 'next_update', 'num_intervals',
-                 'poll', 'seconds_elapsed', 'signal_set', 'start_time',
-                 'term_width', 'update_interval', 'widgets', '_time_sensitive',
-                 '__iterable', 'attr', 'html_written', 'uuid')
-
-    _DEFAULT_MAXVAL = 100
-    _DEFAULT_TERMSIZE = 80
-    _DEFAULT_WIDGETS = [widgets.Percentage, widgets.Bar]
-
-    def __init__(self, maxval=None, widgets=None, term_width=None, poll=1,
-                 left_justify=True, fd=sys.stdout, attr={}):
-        """Initializes a progress bar with sane defaults."""
-
-        # Don't share a reference with any other progress bars
-        if widgets is None:
-            widgets = [widget() for widget in self._DEFAULT_WIDGETS]
-
-        self.maxval = maxval
-        self.widgets = widgets
-        self.fd = fd
-        self.left_justify = left_justify
-
-        self.signal_set = False
-        if term_width is not None:
-            self.term_width = term_width
-        else:
-            try:
-                self._handle_resize()
-                signal.signal(signal.SIGWINCH, self._handle_resize)
-                self.signal_set = True
-            except (SystemExit, KeyboardInterrupt): raise
-            except:
-                self.term_width = self._env_size()
-
-        self.__iterable = None
-        self._update_widgets()
-        self.currval = 0
-        self.finished = False
-        self.last_update_time = None
-        self.poll = poll
-        self.seconds_elapsed = 0
-        self.start_time = None
-        self.update_interval = 1
-        self.attr = attr
-
-        # Set flag so we only write out the HTML once,
-        # then update with javascript
-        self.html_written = False
-
-        self.uuid = str(uuid.uuid4())
-
-        # Install our CSS if we are in an IPython notebook
-        if ipython == 'ipython-notebook':
-            from IPython.display import Javascript, display
-            display(Javascript('//%s\n$("head").append("<style>%s</style>")' %
-                               (self.uuid,ipython_notebook_css)))
-            
-            # Also add a function that removes progressbar output from the cells
-            js = '''
-                  // %s -- used to remove this code blob in the end
-                  IPython.OutputArea.prototype.cleanProgressBar = function(uuids) {
-                      // filter by uuid-strings 
-                      var myfilter = function(output) { 
-                          var nuids = uuids.length;
-                          for (var i=0; i<nuids; i++) {
-                              if (output.hasOwnProperty('html')) {
-                                  if (output.html.indexOf(uuids[i]) != -1) {
-                                      return false;
-                                  }
-                              }
-                              if (output.hasOwnProperty('javascript')) {
-                                  if (output.javascript.indexOf(uuids[i]) != -1) {
-                                      return false;
-                                  }
-                              }
-                          }
-                          // keep all others
-                          return true;
-                      };
-
-                      // Filter the ouputs
-                      this.outputs = this.outputs.filter(myfilter);
-                };
-                ''' % self.uuid
-            display(Javascript(js))
-
-    def __call__(self, iterable):
-        """Use a ProgressBar to iterate through an iterable."""
-
-        try:
-            self.maxval = len(iterable)
-        except:
-            if self.maxval is None:
-                self.maxval = UnknownLength
-
-        self.__iterable = iter(iterable)
-        return self
-
-
-    def __iter__(self):
-        return self
-
-
-    def __next__(self):
-        try:
-            value = next(self.__iterable)
-            if self.start_time is None: self.start()
-            else: self.update(self.currval + 1)
-            return value
-        except StopIteration:
-            self.finish()
-            raise
-
-
-    # Create an alias so that Python 2.x won't complain about not being
-    # an iterator.
-    next = __next__
-
-
-    def _env_size(self):
-        """Tries to find the term_width from the environment."""
-
-        return int(os.environ.get('COLUMNS', self._DEFAULT_TERMSIZE)) - 1
-
-
-    def _handle_resize(self, signum=None, frame=None):
-        """Tries to catch resize signals sent from the terminal."""
-
-        h, w = array('h', ioctl(self.fd, termios.TIOCGWINSZ, '\0' * 8))[:2]
-        self.term_width = w
-
-
-    def percentage(self):
-        """Returns the progress as a percentage."""
-        return self.currval * 100.0 / self.maxval
-
-    percent = property(percentage)
-
-
-    def _format_widgets(self):
-        result = []
-        expanding = []
-        width = self.term_width
-
-        for index, widget in enumerate(self.widgets):
-            if isinstance(widget, widgets.WidgetHFill):
-                result.append(widget)
-                expanding.insert(0, index)
-            else:
-                widget = widgets.format_updatable(widget, self)
-                result.append(widget)
-                width -= len(widget)
-
-        count = len(expanding)
-        while count:
-            portion = max(int(math.ceil(width * 1. / count)), 0)
-            index = expanding.pop()
-            count -= 1
-
-            widget = result[index].update(self, portion)
-            width -= len(widget)
-            result[index] = widget
-
-        return result
-
-
-    def _format_line(self):
-        """Joins the widgets and justifies the line."""
-
-        widgets = ''.join(self._format_widgets())
-
-        if self.left_justify: return widgets.ljust(self.term_width)
-        else: return widgets.rjust(self.term_width)
-
-
-    def _format_html(self):
-      html = '<div class="pb" id="%s"><table class="pb ui-widget"><tr>\n' % self.uuid
-      for widget in self.widgets:
-        if isinstance(widget, widgets.WidgetHFill):
-          td_class = 'pb_widget_fill'
-        else:
-          td_class = 'pb_widget'
-
-        html += ('<td class="%s">' % td_class) + \
-                widgets.format_updatable_html(widget, self) + \
-                '</td>\n'
-      html += '</tr></table><div>'
-      return html
-
-
-    def _need_update(self):
-        """Returns whether the ProgressBar should redraw the line."""
-        if self.currval >= self.next_update or self.finished: return True
-
-        delta = time.time() - self.last_update_time
-        return self._time_sensitive and delta > self.poll
-
-
-    def _update_widgets(self):
-        """Checks all widgets for the time sensitive bit."""
-
-        self._time_sensitive = any(getattr(w, 'TIME_SENSITIVE', False)
-                                    for w in self.widgets)
-
-
-    def update(self, value=None, attr={}):
-        """Updates the ProgressBar to a new value."""
-
-        if value is not None and value is not UnknownLength:
-            if (self.maxval is not UnknownLength
-                and not 0 <= value <= self.maxval):
-
-                raise ValueError('Value out of range')
-
-            self.currval = value
-
-        self.attr.update(attr)
-
-        if not self._need_update(): return
-        if self.start_time is None:
-            raise RuntimeError('You must call "start" before calling "update"')
-
-        now = time.time()
-        self.seconds_elapsed = now - self.start_time
-        self.next_update = self.currval + self.update_interval
-
-        if ipython == 'ipython-notebook':
-            if not self.html_written:
-                # We have yet to display the HTML, do that first
-                from IPython.display import HTML, display
-                display(HTML(self._format_html()))
-                self.html_written = True
-            else:
-                # The HTML has been written once, now update with JS
-                from IPython.display import Javascript, display
-                for widget in self.widgets:
-                    js = widgets.updatable_js(widget, self)
-                    if js:
-                        display(Javascript(js))
-        else:
-            self.fd.write('\r' + self._format_line())
-            self.fd.flush()
-
-        self.last_update_time = now
-
-
-    def start(self):
-        """Starts measuring time, and prints the bar at 0%.
-
-        It returns self so you can use it like this:
-        >>> pbar = ProgressBar().start()
-        >>> for i in range(100):
-        ...    # do something
-        ...    pbar.update(i+1)
-        ...
-        >>> pbar.finish()
-        """
-
-        if self.maxval is None:
-            self.maxval = self._DEFAULT_MAXVAL
-
-        self.num_intervals = max(100, self.term_width)
-        self.next_update = 0
-
-        if self.maxval is not UnknownLength:
-            if self.maxval < 0: raise ValueError('Value out of range')
-            self.update_interval = self.maxval / self.num_intervals
-
-
-        self.start_time = self.last_update_time = time.time()
-        self.html_written = False
-        self.finished = False
-        self.update(0)
-
-        return self
-
-
-    def finish(self):
-        """Puts the ProgressBar bar in the finished state."""
-
-        self.finished = True
-        self.update(self.maxval)
-        self.start_time = None
-
-        # Clean up notebook stuff, quite differently from regular
-        if not ipython == 'ipython-notebook':
-            self.fd.write('\n')
-        else:
-            from IPython.display import Javascript, display
-            # First delete the node that held the progress bar from the page
-            js = """var element = document.getElementById('%s');
-                  var parent = element.parentNode
-                  parent.removeChild(element);
-                  parent.parentElement.remove();""" % self.uuid
-            display(Javascript(js))
-
-            # Then also remove its trace from the cell output (so it doesn't get
-            # stored with the notebook). This needs to be done for all widgets as
-            # well as for progressBar
-            uuids = [str(self.uuid)]
-            uuids += [w.uuid for w in self.widgets if isinstance(w, widgets.Widget)]
-            display(Javascript('this.cleanProgressBar(%s)' % uuids))
-
-        if self.signal_set:
-            signal.signal(signal.SIGWINCH, signal.SIG_DFL)

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/progressbar/widgets.py
--- a/yt/extern/progressbar/widgets.py
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# progressbar  - Text progress bar library for Python.
-# Copyright (c) 2005 Nilton Volpato
-#
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-"""Default ProgressBar widgets."""
-
-from __future__ import division
-
-import datetime
-import math
-import uuid
-
-try:
-    from abc import ABCMeta, abstractmethod
-except ImportError:
-    AbstractWidget = object
-    abstractmethod = lambda fn: fn
-else:
-    AbstractWidget = ABCMeta('AbstractWidget', (object,), {})
-
-
-def format_updatable(updatable, pbar):
-    if hasattr(updatable, 'update'): return updatable.update(pbar)
-    else: return updatable
-
-def format_updatable_html(updatable, pbar):
-    if hasattr(updatable, 'update_html'): return updatable.update_html(pbar)
-    else: return updatable
-
-def updatable_js(updatable, pbar):
-    if hasattr(updatable, 'update_js'): return updatable.update_js(pbar)
-    else: return None
-
-
-class Widget(AbstractWidget):
-    """The base class for all widgets.
-
-    The ProgressBar will call the widget's update value when the widget should
-    be updated. The widget's size may change between calls, but the widget may
-    display incorrectly if the size changes drastically and repeatedly.
-
-    The boolean TIME_SENSITIVE informs the ProgressBar that it should be
-    updated more often because it is time sensitive.
-    """
-
-    TIME_SENSITIVE = False
-    __slots__ = ()
-    uuid = None
-
-    @abstractmethod
-    def update(self, pbar):
-        """Updates the widget.
-
-        pbar - a reference to the calling ProgressBar
-        """
-
-    def update_html(self, pbar):
-        if self.uuid is None:
-            self.uuid = str(uuid.uuid4())
-        return '<div id="%s">%s</div>' % (self.uuid, self.update(pbar))
-
-    def update_js(self, pbar):
-        if self.uuid is None:
-            self.uuid = str(uuid.uuid4())
-        return "$('div#%s').text('%s');" % (self.uuid, self.update(pbar))
-
-
-class WidgetHFill(Widget):
-    """The base class for all variable width widgets.
-
-    This widget is much like the \\hfill command in TeX, it will expand to
-    fill the line. You can use more than one in the same line, and they will
-    all have the same width, and together will fill the line.
-    """
-
-    DEFAULT_WIDTH = 50
-
-    @abstractmethod
-    def update(self, pbar, width=DEFAULT_WIDTH):
-        """Updates the widget providing the total width the widget must fill.
-
-        pbar - a reference to the calling ProgressBar
-        width - The total width the widget must fill
-        """
-
-
-class Timer(Widget):
-    """Widget which displays the elapsed seconds."""
-
-    __slots__ = ('format_string',)
-    TIME_SENSITIVE = True
-
-    def __init__(self, format='Elapsed Time: %s'):
-        self.format_string = format
-
-    @staticmethod
-    def format_time(seconds):
-        """Formats time as the string "HH:MM:SS"."""
-
-        return str(datetime.timedelta(seconds=int(seconds)))
-
-
-    def update(self, pbar):
-        """Updates the widget to show the elapsed time."""
-
-        return self.format_string % self.format_time(pbar.seconds_elapsed)
-
-
-class ETA(Timer):
-    """Widget which attempts to estimate the time of arrival."""
-
-    TIME_SENSITIVE = True
-
-    def update(self, pbar):
-        """Updates the widget to show the ETA or total time when finished."""
-
-        if pbar.currval == 0:
-            return 'ETA:  --:--:--'
-        elif pbar.finished:
-            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
-        else:
-            elapsed = pbar.seconds_elapsed
-            eta = elapsed * pbar.maxval / pbar.currval - elapsed
-            return 'ETA:  %s' % self.format_time(eta)
-
-
-class FileTransferSpeed(Widget):
-    """Widget for showing the transfer speed (useful for file transfers)."""
-
-    FORMAT = '%6.2f %s%s/s'
-    PREFIXES = ' kMGTPEZY'
-    __slots__ = ('unit',)
-
-    def __init__(self, unit='B'):
-        self.unit = unit
-
-    def update(self, pbar):
-        """Updates the widget with the current SI prefixed speed."""
-
-        if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: # =~ 0
-            scaled = power = 0
-        else:
-            speed = pbar.currval / pbar.seconds_elapsed
-            power = int(math.log(speed, 1000))
-            scaled = speed / 1000.**power
-
-        return self.FORMAT % (scaled, self.PREFIXES[power], self.unit)
-
-
-class AnimatedMarker(Widget):
-    """An animated marker for the progress bar which defaults to appear as if
-    it were rotating.
-    """
-
-    __slots__ = ('markers', 'curmark')
-
-    def __init__(self, markers='|/-\\'):
-        self.markers = markers
-        self.curmark = -1
-
-    def update(self, pbar):
-        """Updates the widget to show the next marker or the first marker when
-        finished"""
-
-        if pbar.finished: return self.markers[0]
-
-        self.curmark = (self.curmark + 1) % len(self.markers)
-        return self.markers[self.curmark]
-
-# Alias for backwards compatibility
-RotatingMarker = AnimatedMarker
-
-
-class Counter(Widget):
-    """Displays the current count."""
-
-    __slots__ = ('format_string',)
-
-    def __init__(self, format='%d'):
-        self.format_string = format
-
-    def update(self, pbar):
-        return self.format_string % pbar.currval
-
-
-class Attribute(Widget):
-    """Displays the values of ProgressBar attributes.
-
-    attr_name - ProgressBar attribute dictionary key or list of keys
-    format_string - Format for the output. Attributes are looked up according
-      to attr_name and then used as a tuple with this format string, i.e.
-      format_string % attr_tuple
-    fallback - If an attribute lookup fails, this string is displayed instead.
-
-    """
-
-    __slots__ = ('attr_name', 'format_string', 'fallback')
-
-    def __init__(self, attr_name, format='%s', fallback='?'):
-        self.attr_name = attr_name
-        self.format_string = format
-        self.fallback = fallback
-
-    def update(self, pbar):
-        try:
-          if isinstance(self.attr_name, basestring) or len(self.attr_name) == 1:
-            # If attr_name is just a string or a single item,
-            # use it as the key as is
-            format_vars = (pbar.attr[self.attr_name],)
-          else:
-            # else, expand it as a tuple of attributes
-            format_vars = tuple([pbar.attr[a] for a in self.attr_name])
-          return self.format_string % format_vars
-        except KeyError:
-          return self.fallback
-
-
-class Percentage(Widget):
-    """Displays the current percentage as a number with a percent sign."""
-
-    def update(self, pbar):
-        return '%3d%%' % pbar.percentage()
-
-
-class FormatLabel(Timer):
-    """Displays a formatted label."""
-
-    mapping = {
-        'elapsed': ('seconds_elapsed', Timer.format_time),
-        'finished': ('finished', None),
-        'last_update': ('last_update_time', None),
-        'max': ('maxval', None),
-        'seconds': ('seconds_elapsed', None),
-        'start': ('start_time', None),
-        'value': ('currval', None)
-    }
-
-    __slots__ = ('format_string',)
-    def __init__(self, format):
-        self.format_string = format
-
-    def update(self, pbar):
-        context = {}
-        for name, (key, transform) in self.mapping.items():
-            try:
-                value = getattr(pbar, key)
-
-                if transform is None:
-                   context[name] = value
-                else:
-                   context[name] = transform(value)
-            except: pass
-
-        return self.format_string % context
-
-
-class SimpleProgress(Widget):
-    """Returns progress as a count of the total (e.g.: "5 of 47")."""
-
-    __slots__ = ('sep',)
-
-    def __init__(self, sep=' of '):
-        self.sep = sep
-
-    def update(self, pbar):
-        return '%d%s%d' % (pbar.currval, self.sep, pbar.maxval)
-
-
-class Bar(WidgetHFill):
-    """A progress bar which stretches to fill the line."""
-
-    __slots__ = ('marker', 'left', 'right', 'fill', 'fill_left')
-
-    def __init__(self, marker='#', left='|', right='|', fill=' ',
-                 fill_left=True):
-        """Creates a customizable progress bar.
-
-        marker - string or updatable object to use as a marker
-        left - string or updatable object to use as a left border
-        right - string or updatable object to use as a right border
-        fill - character to use for the empty part of the progress bar
-        fill_left - whether to fill from the left or the right
-        """
-        self.marker = marker
-        self.left = left
-        self.right = right
-        self.fill = fill
-        self.fill_left = fill_left
-
-    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
-        """Updates the progress bar and its subcomponents."""
-
-        left, marked, right = (format_updatable(i, pbar) for i in
-                               (self.left, self.marker, self.right))
-
-        width -= len(left) + len(right)
-        # Marked must *always* have length of 1
-        if pbar.maxval:
-          marked *= int(pbar.currval / pbar.maxval * width)
-        else:
-          marked = ''
-
-        if self.fill_left:
-            return '%s%s%s' % (left, marked.ljust(width, self.fill), right)
-        else:
-            return '%s%s%s' % (left, marked.rjust(width, self.fill), right)
-
-
-    def update_html(self, pbar):
-        if self.uuid is None:
-            self.uuid = str(uuid.uuid4())
-        return """
-        <div class="pb_bar" id="%s"></div>
-        <script type="text/javascript">
-            $("div#%s").progressbar({value: 0, max: %d});
-        </script>
-        """ % (self.uuid, self.uuid,pbar.maxval)
-
-
-    def update_js(self, pbar):
-        if self.uuid is None:
-            self.uuid = str(uuid.uuid4())
-        return """
-        var $myPB = $("div#{divid}")
-        if ($myPB.hasClass('ui-progressbar')) {{
-            $myPB.progressbar('value', {pbar.currval:d});
-        }} else {{
-            $myPB.progressbar({{value: 0, max: {pbar.maxval:d}}});
-        }}
-        """.format(divid=self.uuid, pbar=pbar)
-
-
-class ReverseBar(Bar):
-    """A bar which has a marker which bounces from side to side."""
-
-    def __init__(self, marker='#', left='|', right='|', fill=' ',
-                 fill_left=False):
-        """Creates a customizable progress bar.
-
-        marker - string or updatable object to use as a marker
-        left - string or updatable object to use as a left border
-        right - string or updatable object to use as a right border
-        fill - character to use for the empty part of the progress bar
-        fill_left - whether to fill from the left or the right
-        """
-        self.marker = marker
-        self.left = left
-        self.right = right
-        self.fill = fill
-        self.fill_left = fill_left
-
-
-class BouncingBar(Bar):
-    def update(self, pbar, width=WidgetHFill.DEFAULT_WIDTH):
-        """Updates the progress bar and its subcomponents."""
-
-        left, marker, right = (format_updatable(i, pbar) for i in
-                               (self.left, self.marker, self.right))
-
-        width -= len(left) + len(right)
-
-        if pbar.finished: return '%s%s%s' % (left, width * marker, right)
-
-        position = int(pbar.currval % (width * 2 - 1))
-        if position > width: position = width * 2 - position
-        lpad = self.fill * (position - 1)
-        rpad = self.fill * (width - len(marker) - len(lpad))
-
-        # Swap if we want to bounce the other way
-        if not self.fill_left: rpad, lpad = lpad, rpad
-
-        return '%s%s%s%s%s' % (left, lpad, marker, rpad, right)

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/extern/setup.py
--- a/yt/extern/setup.py
+++ b/yt/extern/setup.py
@@ -11,6 +11,5 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('extern', parent_package, top_path)
-    config.add_subpackage("progressbar")
     config.make_config_py()
     return config

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -41,8 +41,8 @@
         fields = []
         add_io = "io" in grid.pf.particle_types
         for name, v in group.iteritems():
-            # NOTE: This won't work with 1D datasets.
-            if not hasattr(v, "shape"):
+            # NOTE: This won't work with 1D datasets or references.
+            if not hasattr(v, "shape") or v.dtype == "O":
                 continue
             elif len(v.dims) == 1:
                 if add_io: fields.append( ("io", str(name)) )
@@ -306,7 +306,7 @@
                     for field in field_list:
                         data = self.grids_in_memory[g.id][field]
                         if field in _convert_mass:
-                            data *= g.dds.prod(dtype="f8")
+                            data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
     @property

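A note on the second hunk above: inside the in-memory Enzo reader, `data`
aliases the array stored in `grids_in_memory`, so the old in-place multiply
rescaled the cached copy on every pass through the mass-conversion branch.
A minimal sketch of the aliasing hazard (names are toy stand-ins):

    import numpy as np

    cache = {"density": np.ones(8)}   # stand-in for grids_in_memory
    data = cache["density"]           # `data` aliases the cached array

    data *= 4.0                       # in-place: the cache is rescaled too
    assert cache["density"][0] == 4.0

    data = data * 4.0                 # out-of-place: rebinds `data` only
    assert cache["density"][0] == 4.0 # the cache is left alone
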
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -337,6 +337,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
@@ -344,13 +345,12 @@
         return ExtProgressBar(title, maxval)
     elif ytcfg.getboolean("yt", "__parallel"):
         return ParallelProgressBar(title, maxval)
-    else:
-        widgets = [ title,
-                    pb.Percentage(), ' ',
-                    pb.Bar(marker=pb.RotatingMarker()),
-                    ' ', pb.ETA(), ' ']
-        pbar = pb.ProgressBar(widgets=widgets,
-                              maxval=maxval).start()
+    widgets = [ title,
+            pb.Percentage(), ' ',
+            pb.Bar(marker=pb.RotatingMarker()),
+            ' ', pb.ETA(), ' ']
+    pbar = pb.ProgressBar(widgets=widgets,
+                          maxval=maxval).start()
     return pbar
 
 def only_on_root(func, *args, **kwargs):

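The new branch above relies on IPython injecting an `__IPYTHON__` flag into
the builtin namespace at startup, so interactive sessions fall back to a
DummyProgressBar instead of writing bar updates to the console. A minimal
sketch of the same detection (Python 2 idiom, matching the code above):

    import __builtin__

    def within_ipython():
        # IPython sets __builtin__.__IPYTHON__ when it starts up
        return "__IPYTHON__" in dir(__builtin__)

    print within_ipython()   # True under IPython, False under plain python
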
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -568,6 +568,7 @@
         else:
             tr = func(self)
         if self._cache:
+        
             setattr(self, n, tr)
         return tr
     return property(cached_func)
@@ -591,6 +592,10 @@
         for obj in self.objs:
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
+        if method == "dtcoords":
+            arrs = [arr[0] for arr in arrs]
+        elif method == "tcoords":
+            arrs = [arr[1] for arr in arrs]
         arrs = np.concatenate(arrs)
         self.data_size = arrs.shape[0]
         return arrs
@@ -656,7 +661,7 @@
         if self.data_size == 0: return cdt
         ind = 0
         for obj in self.objs:
-            gdt, gt = obj.tcoords(self.dobj)
+            gdt, gt = obj.select_tcoords(self.dobj)
             if gt.shape == 0: continue
             ct[ind:ind+gt.size] = gt
             cdt[ind:ind+gdt.size] = gdt

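For context on the dtcoords/tcoords hunk: `select_tcoords` returns a
(dt, t) pair per object, while the generic accumulator expects one array
per object, so the pair is split before concatenation. A hedged sketch of
the same pattern (function and argument names are hypothetical):

    import numpy as np

    def gather(objs, dobj, method):
        # each select_tcoords call yields a (dt, t) pair of arrays
        pairs = [obj.select_tcoords(dobj) for obj in objs]
        if method == "dtcoords":
            arrs = [p[0] for p in pairs]   # path lengths through each cell
        elif method == "tcoords":
            arrs = [p[1] for p in pairs]   # ray parameters at each cell
        return np.concatenate(arrs)
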
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -56,6 +56,14 @@
         mylog.debug("Re-examining hierarchy")
         self._initialize_level_stats()
 
+    def __del__(self):
+        del self.grid_dimensions
+        del self.grid_left_edge
+        del self.grid_right_edge
+        del self.grid_levels
+        del self.grid_particle_count
+        del self.grids
+
     @property
     def parameters(self):
         return self.parameter_file.parameters

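The new __del__ drops the handler's large per-grid arrays explicitly, so
their memory is reclaimed as soon as the hierarchy goes away instead of
lingering until garbage collection. The idiom in miniature (class name and
sizes are toy stand-ins):

    import numpy as np

    class GridIndex(object):
        def __init__(self, num_grids):
            self.grid_left_edge = np.zeros((num_grids, 3), dtype="float64")
            self.grid_right_edge = np.ones((num_grids, 3), dtype="float64")

        def __del__(self):
            # release the big arrays promptly when the handler is collected
            del self.grid_left_edge
            del self.grid_right_edge
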
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -126,14 +126,12 @@
         cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
         cdef OctVisitorData data
         obj.setup_data(&data, -1)
-        assert(ref_mask.shape[0] / 8.0 == <int>(ref_mask.shape[0]/8.0))
-        obj.allocate_domains([ref_mask.shape[0] / 8.0])
         cdef int i, j, k, n
         data.global_index = -1
         data.level = 0
-        # This is not something I terribly like, but it needs to be done.
-        data.oref = 1
-        data.nz = 8
+        assert(ref_mask.shape[0] / float(data.nz) ==
+            <int>(ref_mask.shape[0]/float(data.nz)))
+        obj.allocate_domains([ref_mask.shape[0] / data.nz])
         cdef np.float64_t pos[3], dds[3]
         # This dds is the oct-width
         for i in range(3):
@@ -173,8 +171,7 @@
                 pos[1] += dds[1]
             pos[0] += dds[0]
         obj.nocts = cur.n_assigned
-        if obj.nocts * 8 != ref_mask.size:
-            print "SOMETHING WRONG", ref_mask.size, obj.nocts, obj.oref
+        if obj.nocts * data.nz != ref_mask.size:
             raise KeyError(ref_mask.size, obj.nocts, obj.oref,
                 obj.partial_coverage)
         return obj

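The load_octree hunk replaces the hard-coded 8 (cells per oct when
oref == 1) with `data.nz`, so both the divisibility check and the domain
allocation follow whatever refinement the visitor data carries. The
arithmetic, as a plain Python sketch:

    def octs_in_mask(n_mask, nz):
        # nz plays the role of data.nz: cells per oct, 8 when oref == 1
        assert n_mask % nz == 0, "ref_mask does not tile into whole octs"
        return n_mask // nz

    print octs_in_mask(512, 8)    # 64 octs to allocate
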
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -202,15 +202,22 @@
             o.file_ind = nfinest[0]
             o.domain = 1
             nfinest[0] += 1
-    elif arr[data.index] == 1:
+    elif arr[data.index] > 0:
+        if arr[data.index] != 1 and arr[data.index] != 8:
+            print "ARRAY CLUE: ", arr[data.index], "UNKNOWN"
+            raise RuntimeError
         if o.children == NULL:
             o.children = <Oct **> malloc(sizeof(Oct *) * 8)
             for i in range(8):
                 o.children[i] = NULL
-        o.children[ii] = &octs[nocts[0]]
-        o.children[ii].domain_ind = nocts[0]
-        o.children[ii].file_ind = -1
-        o.children[ii].domain = -1
-        o.children[ii].children = NULL
-        nocts[0] += 1
+        for i in range(arr[data.index]):
+            o.children[ii + i] = &octs[nocts[0]]
+            o.children[ii + i].domain_ind = nocts[0]
+            o.children[ii + i].file_ind = -1
+            o.children[ii + i].domain = -1
+            o.children[ii + i].children = NULL
+            nocts[0] += 1
+    else:
+        print "SOMETHING IS AMISS", data.index
+        raise RuntimeError
     data.index += 1

diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -59,8 +59,8 @@
 def convert_mask_to_indices(np.ndarray[np.uint8_t, ndim=3, cast=True] mask,
             int count, int transpose = 0):
     cdef int i, j, k, cpos
-    cdef np.ndarray[np.int32_t, ndim=2] indices 
-    indices = np.zeros((count, 3), dtype='int32')
+    cdef np.ndarray[np.int64_t, ndim=2] indices 
+    indices = np.zeros((count, 3), dtype='int64')
     cpos = 0
     for i in range(mask.shape[0]):
         for j in range(mask.shape[1]):
@@ -1213,6 +1213,20 @@
             return 1
         return 0
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int select_cell(self, np.float64_t pos[3],
+                               np.float64_t dds[3]) nogil:
+        # This is terribly inefficient for Octrees.  For grids, it will never
+        # get called.
+        cdef int i
+        cdef np.float64_t left_edge[3], right_edge[3]
+        for i in range(3):
+            left_edge[i] = pos[i] - dds[i]/2.0
+            right_edge[i] = pos[i] + dds[i]/2.0
+        return self.select_bbox(left_edge, right_edge)
+
     def _hash_vals(self):
         return (self.p1[0], self.p1[1], self.p1[2],
                 self.p2[0], self.p2[1], self.p2[2],

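The fallback select_cell added above simply rewrites a cell's
center-and-width description into the bounding box that select_bbox
already understands. The same conversion, rendered in plain Python:

    def select_cell(selector, pos, dds):
        # pos is the cell center, dds the full cell width on each axis
        left_edge = [p - d / 2.0 for p, d in zip(pos, dds)]
        right_edge = [p + d / 2.0 for p, d in zip(pos, dds)]
        return selector.select_bbox(left_edge, right_edge)
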
diff -r 9624a463b7ffbf7a219ce31e28ea9461553c9f73 -r 573c8b1bff4118efc3ea73bb07d34186ba0e472a yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -12,6 +12,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import md5
+import cPickle
 import itertools as it
 import numpy as np
 import importlib
@@ -514,3 +516,100 @@
   [44,48,48],
  ],
 ]
+
+def check_results(func):
+    r"""This is a decorator for a function to verify that the (numpy ndarray)
+    result of a function is what it should be.
+
+    This function is designed to be used for very light answer testing.
+    Essentially, it wraps around a larger function that returns a numpy array,
+    and that has results that should not change.  It is not necessarily used
+    inside the testing scripts themselves, but inside testing scripts written
+    by developers during the testing of pull requests and new functionality.
+    If a hash is specified, it "wins" and the others are ignored.  Otherwise,
+    the tolerance is 1e-8 (just above single precision).
+
+    The correct results will be stored if the command line contains
+    --answer-reference, and otherwise it will compare against the results on
+    disk.  The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME
+    is the name of the function being tested.
+
+    If you would like more control over the name of the pickle file the results
+    are stored in, you can pass the result_basename keyword argument to the
+    function you are testing.  The check_results decorator will use the value
+    of the keyword to construct the filename of the results data file.  If
+    result_basename is not specified, the name of the testing function is used.
+
+    This will raise an exception if the results are not correct.
+
+    Examples
+    --------
+
+    @check_results
+    def my_func(pf):
+        return pf.domain_width
+
+    my_func(pf)
+
+    @check_results
+    def field_checker(dd, field_name):
+        return dd[field_name]
+
+    field_checker(pf.h.all_data(), 'density', result_basename='density')
+
+    """
+    def compute_results(func):
+        def _func(*args, **kwargs):
+            name = kwargs.pop("result_basename", func.func_name)
+            rv = func(*args, **kwargs)
+            if hasattr(rv, "convert_to_cgs"):
+                rv.convert_to_cgs()
+                _rv = rv.ndarray_view()
+            else:
+                _rv = rv
+            mi = _rv.min()
+            ma = _rv.max()
+            st = _rv.std(dtype="float64")
+            su = _rv.sum(dtype="float64")
+            si = _rv.size
+            ha = md5.md5(_rv.tostring()).hexdigest()
+            fn = "func_results_ref_%s.cpkl" % (name)
+            with open(fn, "wb") as f:
+                cPickle.dump( (mi, ma, st, su, si, ha), f)
+            return rv
+        return _func
+    from yt.mods import unparsed_args
+    if "--answer-reference" in unparsed_args:
+        return compute_results(func)
+    
+    def compare_results(func):
+        def _func(*args, **kwargs):
+            name = kwargs.pop("result_basename", func.func_name)
+            rv = func(*args, **kwargs)
+            if hasattr(rv, "convert_to_cgs"):
+                rv.convert_to_cgs()
+                _rv = rv.ndarray_view()
+            else:
+                _rv = rv
+            vals = (_rv.min(),
+                    _rv.max(),
+                    _rv.std(dtype="float64"),
+                    _rv.sum(dtype="float64"),
+                    _rv.size,
+                    md5.md5(_rv.tostring()).hexdigest() )
+            fn = "func_results_ref_%s.cpkl" % (name)
+            if not os.path.exists(fn):
+                print "Answers need to be created with --answer-reference ."
+                return False
+            with open(fn, "rb") as f:
+                ref = cPickle.load(f)
+            print "Sizes: %s (%s, %s)" % (vals[4] == ref[4], vals[4], ref[4])
+            assert_allclose(vals[0], ref[0], 1e-8, err_msg="min")
+            assert_allclose(vals[1], ref[1], 1e-8, err_msg="max")
+            assert_allclose(vals[2], ref[2], 1e-8, err_msg="std")
+            assert_allclose(vals[3], ref[3], 1e-8, err_msg="sum")
+            assert_equal(vals[4], ref[4])
+            print "Hashes equal: %s" % (vals[-1] == ref[-1])
+            return rv
+        return _func
+    return compare_results(func)

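In practice the decorator keys off the command line: run a script once
with --answer-reference to write the reference pickle, then rerun it
without the flag to compare. A hedged sketch of such a script (the
dataset path and field name are hypothetical):

    from yt.mods import load
    from yt.testing import check_results

    @check_results
    def density_values(pf):
        return pf.h.all_data()["density"]

    pf = load("galaxy0030/galaxy0030")   # hypothetical dataset
    # first run:  python this_script.py --answer-reference
    # later runs: python this_script.py   (compares against the pickle)
    density_values(pf)
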

https://bitbucket.org/yt_analysis/yt/commits/957a66bee23c/
Changeset:   957a66bee23c
Branch:      yt-3.0
User:        jzuhone
Date:        2014-03-13 17:41:09
Summary:     Merge
Affected #:  5 files

diff -r cc4e15d9f4d95b558690cd94e4f74b30bed6a02b -r 957a66bee23ce0bb0699b96daa18ffbd08654753 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -381,11 +381,13 @@
         else:
             self.index.save_object(self, name)
 
-    def to_glue(self, fields, label="yt"):
+    def to_glue(self, fields, label="yt", data_collection=None):
         """
         Takes specific *fields* in the container and exports them to
         Glue (http://www.glueviz.org) for interactive
-        analysis. Optionally add a *label*.  
+        analysis. Optionally add a *label*. If you are already within
+        the Glue environment, you can pass a *data_collection* object,
+        otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
@@ -394,11 +396,14 @@
         gdata = Data(label=label)
         for component_name in fields:
             gdata.add_component(self[component_name], component_name)
-        dc = DataCollection([gdata])
 
-        app = GlueApplication(dc)
-        app.start()
-
+        if data_collection is None:
+            dc = DataCollection([gdata])
+            app = GlueApplication(dc)
+            app.start()
+        else:
+            data_collection.append(gdata)
+        
     def __reduce__(self):
         args = tuple([self.pf._hash(), self._type_name] +
                      [getattr(self, n) for n in self._con_args] +

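With the new *data_collection* keyword, to_glue works from inside a
running Glue session: the exported Data object is appended to the
session's collection instead of spinning up a fresh GlueApplication. A
hedged usage sketch (`pf`, `dc`, and the field names are hypothetical):

    dd = pf.h.all_data()

    # outside Glue: starts a new GlueApplication around the data
    dd.to_glue(["density", "temperature"])

    # inside Glue: dc is the session's existing DataCollection
    dd.to_glue(["density", "temperature"], data_collection=dc)
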
diff -r cc4e15d9f4d95b558690cd94e4f74b30bed6a02b -r 957a66bee23ce0bb0699b96daa18ffbd08654753 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -349,12 +349,6 @@
             self.ma)
         return v
 
-class YTFITSHeaderNotUnderstood(YTException):
-    def __str__(self):
-        return "This FITS header is not recognizable in its current form.\n" + \
-                "If you would like to force loading, specify: \n" + \
-                "ignore_unit_names = True"
-
 class YTEmptyProfileData(Exception):
     pass
 

diff -r cc4e15d9f4d95b558690cd94e4f74b30bed6a02b -r 957a66bee23ce0bb0699b96daa18ffbd08654753 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -24,7 +24,7 @@
 class FITSImageBuffer(HDUList):
 
     def __init__(self, data, fields=None, units="cm",
-                 center=None, scale=None):
+                 center=None, scale=None, wcs=None):
         r""" Initialize a FITSImageBuffer object.
 
         FITSImageBuffer contains a list of FITS ImageHDU instances, and optionally includes
@@ -51,6 +51,8 @@
             Pixel scale in unit *units*. Will be ignored if *data* is
             a FixedResolutionBuffer or a YTCoveringGrid. Must be
             specified otherwise, or if *units* is "deg".
+        wcs : `astropy.wcs.WCS` instance, optional
+            Supply an AstroPy WCS instance to override automatic WCS creation.
 
         Examples
         --------
@@ -128,38 +130,43 @@
 
         proj_type = ["linear"]*self.dimensionality
 
-        if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
-            # FRBs are a special case where we have coordinate
-            # information, so we take advantage of this and
-            # construct the WCS object
-            dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units)/self.nx
-            dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units)/self.ny
-            xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units)
-            yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units)
-            center = [xctr, yctr]
-        elif isinstance(img_data, YTCoveringGridBase):
-            dx, dy, dz = img_data.dds.in_units(units)
-            center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units)
-        elif units == "deg" and self.dimensionality == 2:
-            dx = -scale[0]
-            dy = scale[1]
-            proj_type = ["RA---TAN","DEC--TAN"]
+        if wcs is None:
+            if isinstance(img_data, FixedResolutionBuffer) and units != "deg":
+                # FRBs are a special case where we have coordinate
+                # information, so we take advantage of this and
+                # construct the WCS object
+                dx = (img_data.bounds[1]-img_data.bounds[0]).in_units(units)/self.nx
+                dy = (img_data.bounds[3]-img_data.bounds[2]).in_units(units)/self.ny
+                xctr = 0.5*(img_data.bounds[1]+img_data.bounds[0]).in_units(units)
+                yctr = 0.5*(img_data.bounds[3]+img_data.bounds[2]).in_units(units)
+                center = [xctr, yctr]
+            elif isinstance(img_data, YTCoveringGridBase):
+                dx, dy, dz = img_data.dds.in_units(units)
+                center = 0.5*(img_data.left_edge+img_data.right_edge).in_units(units)
+            elif units == "deg" and self.dimensionality == 2:
+                dx = -scale[0]
+                dy = scale[1]
+                proj_type = ["RA---TAN","DEC--TAN"]
+            else:
+                dx = scale[0]
+                dy = scale[1]
+                if self.dimensionality == 3: dz = scale[2]
+            
+            w.wcs.crval = center
+            w.wcs.cunit = [units]*self.dimensionality
+            w.wcs.ctype = proj_type
+        
+            if self.dimensionality == 2:
+                w.wcs.cdelt = [dx,dy]
+            elif self.dimensionality == 3:
+                w.wcs.cdelt = [dx,dy,dz]
+
+            self._set_wcs(w)
+
         else:
-            dx = scale[0]
-            dy = scale[1]
-            if self.dimensionality == 3: dz = scale[2]
-            
-        w.wcs.crval = center
-        w.wcs.cunit = [units]*self.dimensionality
-        w.wcs.ctype = proj_type
-        
-        if self.dimensionality == 2:
-            w.wcs.cdelt = [dx,dy]
-        elif self.dimensionality == 3:
-            w.wcs.cdelt = [dx,dy,dz]
 
-        self._set_wcs(w)
-            
+            self._set_wcs(wcs)
+
     def _set_wcs(self, wcs):
         """
         Set the WCS coordinate information for all images
@@ -212,11 +219,13 @@
         elif self.dimensionality == 3:
             return self.nx, self.ny, self.nz
 
-    def to_glue(self, label="yt"):
+    def to_glue(self, label="yt", data_collection=None):
         """
         Takes the data in the FITSImageBuffer and exports it to
         Glue (http://www.glueviz.org) for interactive
-        analysis. Optionally add a *label*. 
+        analysis. Optionally add a *label*. If you are already within
+        the Glue environment, you can pass a *data_collection* object,
+        otherwise Glue will be started.
         """
         from glue.core import DataCollection, Data
         from glue.core.coordinates import coordinates_from_header
@@ -228,10 +237,22 @@
         image.coords = coordinates_from_header(self.wcs.to_header())
         for k,v in field_dict.items():
             image.add_component(v, k)
-        dc = DataCollection([image])
+        if data_collection is None:
+            dc = DataCollection([image])
+            app = GlueApplication(dc)
+            app.start()
+        else:
+            data_collection.append(image)
 
-        app = GlueApplication(dc)
-        app.start()
+    def to_aplpy(self, **kwargs):
+        """
+        Use APLpy (http://aplpy.github.io) for plotting. Returns an `aplpy.FITSFigure`
+        instance. All keyword arguments are passed to the
+        `aplpy.FITSFigure` constructor.
+        """
+        import aplpy
+        return aplpy.FITSFigure(self, **kwargs)
+
 
         
 

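With the *wcs* keyword, a caller can now bypass the automatic coordinate
setup entirely by handing in a ready-made AstroPy WCS, and to_aplpy then
gives a one-line route to a plot. A hedged sketch (the pixel scale,
reference values, and `frb` are placeholders):

    import astropy.wcs as pywcs
    from yt.utilities.fits_image import FITSImageBuffer

    w = pywcs.WCS(naxis=2)
    w.wcs.crpix = [512.5, 512.5]             # reference pixel
    w.wcs.cdelt = [-2.8e-4, 2.8e-4]          # degrees per pixel
    w.wcs.crval = [30.0, 45.0]               # RA, Dec at the reference pixel
    w.wcs.ctype = ["RA---TAN", "DEC--TAN"]

    # frb is an existing FixedResolutionBuffer; the supplied WCS overrides
    # the automatic one that would otherwise be built from its bounds
    fib = FITSImageBuffer(frb, fields=["density"], wcs=w)
    fig = fib.to_aplpy()                     # an aplpy.FITSFigure instance
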
diff -r cc4e15d9f4d95b558690cd94e4f74b30bed6a02b -r 957a66bee23ce0bb0699b96daa18ffbd08654753 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -98,6 +98,32 @@
 
 add_cmap('black_green', cdict)
 
+# This one is a variant of a colormap commonly
+# used for X-ray observations by Maxim Markevitch
+
+cdict = {'red': ((0.0, 0.0, 0.0),
+                 (0.3, 0.0, 0.0),
+                 (0.352, 0.245, 0.245),
+                 (0.42, 0.5, 0.5),
+                 (0.51, 0.706, 0.706),
+                 (0.613, 0.882, 0.882),
+                 (0.742, 1.0, 1.0),
+                 (1.0, 1.0, 1.0)),
+         'green': ((0.0, 0.0, 0.0),
+                   (0.585, 0.0, 0.0),
+                   (0.613, 0.196, 0.196),
+                   (0.693, 0.48, 0.48),
+                   (0.785, 0.696, 0.696),
+                   (0.885, 0.882, 0.882),
+                   (1.0, 1.0, 1.0)),
+         'blue': ((0.0, 0.0, 0.0),
+                  (0.136, 0.0, 0.0),
+                  (0.136, 0.373, 0.373),
+                  (0.391, 1.0, 1.0),
+                  (1.0, 1.0, 1.0))}
+
+add_cmap("purple_mm", cdict)
+
 # This one comes from
 # http://permalink.gmane.org/gmane.comp.python.matplotlib.devel/10518
 # and is an implementation of http://arxiv.org/abs/1108.5083

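Each (x, y0, y1) triple in the cdict gives a channel's value just below
and just above the anchor x, which is how the blue channel above jumps
discontinuously at 0.136. A small sketch of the same mechanism with
matplotlib directly (toy colormap, not part of the commit):

    from matplotlib.colors import LinearSegmentedColormap

    cdict = {'red':   ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
             # the repeated x = 0.5 anchor makes blue jump from 0.0 to 0.8
             'blue':  ((0.0, 0.0, 0.0), (0.5, 0.0, 0.8), (1.0, 1.0, 1.0))}

    cmap = LinearSegmentedColormap("toy_map", cdict)
    print cmap(0.51)   # RGBA sampled just above the blue discontinuity
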

https://bitbucket.org/yt_analysis/yt/commits/4fdf65441a28/
Changeset:   4fdf65441a28
Branch:      yt-3.0
User:        jzuhone
Date:        2014-03-13 20:08:17
Summary:     This is ugly, but it's the only way it works
Affected #:  1 file

diff -r 957a66bee23ce0bb0699b96daa18ffbd08654753 -r 4fdf65441a28c200fab0128f55bb43808247fc21 yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -140,11 +140,11 @@
     def _cylindrical_r(field, data):
         center = data.get_field_parameter("center")
         normal = data.get_field_parameter("normal")
-        coords = data.pf.arr(obtain_rvec(data), "code_length")
+        coords = obtain_rvec(data)
         coords[0,...] -= center[0]
         coords[1,...] -= center[1]
         coords[2,...] -= center[2]
-        return get_cyl_r(coords, normal).in_cgs()
+        return data.pf.arr(get_cyl_r(coords, normal), "code_length").in_cgs()
 
     registry.add_field(("index", "cylindrical_r"),
              function=_cylindrical_r,

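The fix above appears to defer unit attachment: the center subtraction
and the get_cyl_r call run on a plain code-unit ndarray, and only the
final radius is wrapped as a "code_length" array and converted to cgs,
avoiding unit propagation through the intermediate vector math. The
pattern, schematically (the coords argument stands in for obtain_rvec
output):

    from yt.utilities.math_utils import get_cyl_r

    def cylindrical_radius(pf, coords, center, normal):
        # coords: raw (3, N) ndarray in code units
        for i in range(3):
            coords[i, ...] -= center[i]     # arithmetic stays unit-free
        r = get_cyl_r(coords, normal)       # geometry on plain floats
        return pf.arr(r, "code_length").in_cgs()  # units only at the end
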
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


