[yt-svn] commit/yt: 2 new changesets

commits-noreply at bitbucket.org
Wed May 8 11:27:28 PDT 2013


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/996841e8daf4/
Changeset:   996841e8daf4
Branch:      yt
User:        brittonsmith
Date:        2013-05-08 20:10:06
Summary:     Adding an option to the field interpolators to give the x, y, z bins
explicitly as arrays instead of just bounds.  Also added tests for this.
Affected #:  2 files
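
For reference, a minimal sketch of the two calling conventions this changeset
supports, in the style of the new tests (the plain-dict data object and the
uneven bin spacing below are illustrative, not part of the commit):

    import numpy as np
    from yt.utilities.linear_interpolators import UnilinearFieldInterpolator

    table_data = np.random.random(64)
    fv = {"x": np.mgrid[0.0:1.0:64j]}

    # Old form: (lower, upper) bounds; evenly spaced bins are generated.
    interp = UnilinearFieldInterpolator(table_data, (0.0, 1.0), "x",
                                        truncate=True)
    field_data = interp(fv)

    # New form: the bin locations are passed explicitly as an array,
    # which must have the same length as the table.
    bins = np.linspace(0.0, 1.0, 64) ** 2  # unevenly spaced
    interp = UnilinearFieldInterpolator(table_data, bins, "x",
                                        truncate=True)
    field_data = interp(fv)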

diff -r 41aa542420be1ac1e3c7248753471c7f4d4b0ad8 -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -31,12 +31,46 @@
 
 class UnilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 1D interpolator for field data.
+
+        Parameters
+        ----------
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple or array
+            If a tuple, this should specify the lower and upper bounds
+            for the bins of the data table.  This assumes the bins are
+            evenly spaced.  If an array, this specifies the bins
+            explicitly.
+        field_names : str
+            Name of the field to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are
+            outside the bounds of the table.  If True, extrapolation is
+            performed.
+
+        Examples
+        --------
+
+        >>> ad = pf.h.all_data()
+        >>> table_data = np.random.random(64)
+        >>> interp = UnilinearFieldInterpolator(table_data, (0.0, 1.0), "x",
+        ...                                     truncate=True)
+        >>> field_data = interp(ad)
+
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-
+        if isinstance(boundaries, np.ndarray):
+            if boundaries.size != table.shape[0]:
+                mylog.error("Bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries
+        else:
+            x0, x1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
@@ -57,12 +91,53 @@
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 2D interpolator for field data.
+
+        Parameters
+        ----------
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple
+            Either a tuple of lower and upper bounds for the x and y bins
+            given as (x0, x1, y0, y1) or a tuple of two arrays containing the
+            x and y bins.
+        field_names : list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are
+            outside the bounds of the table.  If True, extrapolation is
+            performed.
+
+        Examples
+        --------
+
+        >>> ad = pf.h.all_data()
+        >>> table_data = np.random.random((64, 64))
+        >>> interp = BilinearFieldInterpolator(table_data, (0.0, 1.0, 0.0, 1.0),
+        ...                                    ["x", "y"],
+        ...                                    truncate=True)
+        >>> field_data = interp(ad)
+
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        if len(boundaries) == 4:
+            x0, x1, y0, y1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        elif len(boundaries) == 2:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1) or as (x_bins, y_bins)")
+            raise ValueError
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -90,14 +165,60 @@
 
 class TrilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate = False):
+        r"""Initialize a 3D interpolator for field data.
+
+        Parameters
+        ----------
+        table : array
+            The data table over which interpolation is performed.
+        boundaries : tuple
+            Either a tuple of lower and upper bounds for the x, y, and z bins
+            given as (x0, x1, y0, y1, z0, z1) or a tuple of three arrays
+            containing the x, y, and z bins.
+        field_names : list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are
+            outside the bounds of the table.  If True, extrapolation is
+            performed.
+
+        Examples
+        --------
+
+        >>> ad = pf.h.all_data()
+        >>> table_data = np.random.random((64, 64, 64))
+        >>> interp = TrilinearFieldInterpolator(table_data,
+        ...                                     (0.0, 1.0, 0.0, 1.0, 0.0, 1.0),
+        ...                                     ["x", "y", "z"],
+        ...                                     truncate=True)
+        >>> field_data = interp(ad)
+
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
-
+        if len(boundaries) == 6:
+            x0, x1, y0, y1, z0, z1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+            self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
+        elif len(boundaries) == 3:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[2].size != table.shape[2]:
+                mylog.error("Z bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+            self.z_bins = boundaries[2]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1, z0, z1) or as (x_bins, y_bins, z_bins)")
+            raise ValueError
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
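
The 2D and 3D interpolators gain the same capability; a hedged sketch of the
array-bins form for both, again using a plain dict as the data object in the
style of the new tests (names here are illustrative):

    import numpy as np
    from yt.utilities.linear_interpolators import \
        BilinearFieldInterpolator, TrilinearFieldInterpolator

    bins = np.linspace(0.0, 1.0, 64)
    fv = dict((ax, v) for ax, v in zip("xyz",
              np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))

    # 2D: a tuple of two bin arrays replaces (x0, x1, y0, y1).
    bfi = BilinearFieldInterpolator(np.random.random((64, 64)),
                                    (bins, bins), ["x", "y"], truncate=True)
    field_data_2d = bfi(fv)

    # 3D: a tuple of three bin arrays replaces (x0, x1, y0, y1, z0, z1).
    tfi = TrilinearFieldInterpolator(np.random.random((64, 64, 64)),
                                     (bins, bins, bins), ["x", "y", "z"],
                                     truncate=True)
    field_data_3d = tfi(fv)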

diff -r 41aa542420be1ac1e3c7248753471c7f4d4b0ad8 -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -7,21 +7,58 @@
 def test_linear_interpolator_1d():
     random_data = np.random.random(64)
     fv = {'x': np.mgrid[0.0:1.0:64j]}
+    # evenly spaced bins
     ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
-    assert_array_equal(ufi(fv), random_data)
+    yield assert_array_equal, ufi(fv), random_data
+    
+    # randomly spaced bins
+    size = 64
+    shift = (1. / size) * np.random.random(size) - (0.5 / size)
+    fv["x"] += shift
+    ufi = lin.UnilinearFieldInterpolator(random_data, 
+                                         np.linspace(0.0, 1.0, size) + shift, 
+                                         "x", True)
+    yield assert_array_almost_equal, ufi(fv), random_data, 15
 
 def test_linear_interpolator_2d():
     random_data = np.random.random((64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
     bfi = lin.BilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0), "xy", True)
-    assert_array_equal(bfi(fv), random_data)
+    yield assert_array_equal, bfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xy")
+    fv["x"] += shifts["x"][:, np.newaxis]
+    fv["y"] += shifts["y"]
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"]), "xy", True)
+    yield assert_array_almost_equal, bfi(fv), random_data, 15
 
 def test_linear_interpolator_3d():
     random_data = np.random.random((64, 64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    assert_array_equal(tfi(fv), random_data)
+    yield assert_array_equal, tfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xyz")
+    fv["x"] += shifts["x"][:, np.newaxis, np.newaxis]
+    fv["y"] += shifts["y"][:, np.newaxis]
+    fv["z"] += shifts["z"]
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"], 
+             bins + shifts["z"]), "xyz", True)
+    yield assert_array_almost_equal, tfi(fv), random_data, 15
+    
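
A consequence of the validation added above: bin arrays whose lengths do not
match the corresponding table dimensions are rejected with a ValueError (and
a logged error message). A quick illustrative sketch, not part of the commit:

    import numpy as np
    from yt.utilities.linear_interpolators import UnilinearFieldInterpolator

    table_data = np.random.random(64)
    try:
        # 32 bins for a 64-element table: rejected.
        UnilinearFieldInterpolator(table_data, np.linspace(0.0, 1.0, 32),
                                   "x", truncate=True)
    except ValueError:
        print "Bins array not the same length as the data."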


https://bitbucket.org/yt_analysis/yt/commits/cbdb1ea15403/
Changeset:   cbdb1ea15403
Branch:      yt
User:        brittonsmith
Date:        2013-05-08 20:12:52
Summary:     Merged.
Affected #:  90 files

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -170,6 +170,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -424,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -706,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -714,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
@@ -837,16 +848,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,62 @@
 import sys
 import time
 import subprocess
+import shutil
+import glob
 import distribute_setup
 distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
 from distutils import version
 
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
+
+class BuildForthon(Command):
+
+    """Command for building Forthon modules"""
+
+    description = "Build Forthon modules"
+    user_options = []
+
+    def initialize_options(self):
+
+        """init options"""
+
+        pass
+
+    def finalize_options(self):
+
+        """finalize options"""
+
+        pass
+
+    def run(self):
+
+        """runner"""
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            sys.stderr.write(
+                "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+            )
+            return
+
+        cwd = os.getcwd()
+        os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+        cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+               "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+               "fKD_source.f90"]
+        subprocess.check_call(cmd, shell=False)
+        shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+        os.chdir(cwd)
+
 REASON_FILES = []
 REASON_DIRS = [
     "",
@@ -36,7 +84,7 @@
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append( (dir_name, files) )
+    REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 try:
@@ -93,10 +141,10 @@
             language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
-                                                   options=options)
+                                                     options=options)
         if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython" \
-                  % (cython_result.num_errors, source))
+            raise DistutilsError("%d errors while compiling %r with Cython"
+                                 % (cython_result.num_errors, source))
     return target_file
 
 
@@ -107,9 +155,11 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+    os.remove('MANIFEST')
+
 
 def get_mercurial_changeset_id(target_dir):
     """adapted from a script by Jason F. Harris, published at
@@ -123,11 +173,11 @@
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      shell=True)
-        
+
     if (get_changeset.stderr.read() != ""):
         print "Error in obtaining current changeset of the Mercurial repository"
         changeset = None
-        
+
     changeset = get_changeset.stdout.read().strip()
     if (not re.search("^[0-9a-f]{12}", changeset)):
         print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +185,30 @@
 
     return changeset
 
+
+class my_build_src(build_src.build_src):
+    def run(self):
+        self.run_command("build_forthon")
+        build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+    def run(self):
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
+        np_install_data.install_data.run(self)
+
 class my_build_py(build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
-            target_dir = os.path.join(self.build_lib,'yt')
-            src_dir =  os.getcwd() 
+            target_dir = os.path.join(self.build_lib, 'yt')
+            src_dir = os.getcwd()
             changeset = get_mercurial_changeset_id(src_dir)
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +216,7 @@
 
             build_py.run(self)
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -158,7 +227,7 @@
                        quiet=True)
 
     config.make_config_py()
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
@@ -176,25 +245,25 @@
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
         classifiers=["Development Status :: 5 - Production/Stable",
-            "Environment :: Console",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: GNU General Public License (GPL)",
-            "Operating System :: MacOS :: MacOS X",
-            "Operating System :: POSIX :: AIX",
-            "Operating System :: POSIX :: Linux",
-            "Programming Language :: C",
-            "Programming Language :: Python",
-            "Topic :: Scientific/Engineering :: Astronomy",
-            "Topic :: Scientific/Engineering :: Physics",
-            "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' + \
-            'amr adaptivemeshrefinement',
+                     "Environment :: Console",
+                     "Intended Audience :: Science/Research",
+                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "Operating System :: MacOS :: MacOS X",
+                     "Operating System :: POSIX :: AIX",
+                     "Operating System :: POSIX :: Linux",
+                     "Programming Language :: C",
+                     "Programming Language :: Python",
+                     "Topic :: Scientific/Engineering :: Astronomy",
+                     "Topic :: Scientific/Engineering :: Physics",
+                     "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' +
+        'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
-                            'yt = yt.utilities.command_line:run_main',
-                      ],
-                      'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-                      ]
+        'yt = yt.utilities.command_line:run_main',
+        ],
+            'nose.plugins.0.10': [
+                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+            ]
         },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
@@ -203,8 +272,9 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
-        cmdclass = {'build_py': my_build_py},
-        )
+        cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+                  'build_src': my_build_src, 'install_data': my_install_data},
+    )
     return
 
 if __name__ == '__main__':
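
A side effect of the setup.py changes above: the Fortran kD-tree build moves
out of the install script and into a distutils command, so (assuming Forthon
and gfortran are on your PATH) it can also be invoked directly:

    python setup.py build_forthon

It is run automatically by build_src, and hence by "setup.py develop"; when
Forthon or gfortran is missing, the command warns and skips building fKDpy.so
rather than failing.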

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
             If True, use dynamic load balancing to create the projections.
             Default: False.
 
-        Getting the Nearest Galaxies
-        ----------------------------
+        Notes
+        -----
+
         The light ray tool will use the HaloProfiler to calculate the
         distance and mass of the nearest halo to that pixel.  In order
         to do this, a dictionary called halo_profiler_parameters is used

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
         halonum : int
             Halo number at the last output to trace.
 
-        Output
-        ------
+        Returns
+        -------
         output : dict
             Dictionary of redshifts, cycle numbers, and halo numbers
             of the most massive progenitor.  keys = {redshift, cycle,

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)
@@ -758,17 +758,19 @@
     
     def query(self, string):
         r"""Performs a query of the database and returns the results as a list
-        of tuple(s), even if the result is singular.
+        of tuples, even if the result is singular.
         
         Parameters
         ----------
-        string : String
+        
+        string : str
             The SQL query of the database.
         
         Examples
-        -------
+        --------
+
         >>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
-        ... SnapZ = 0;")
+        ...    SnapZ = 0;")
         """
         # Query the database and return a list of tuples.
         if string is None:

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
         After all the calls to `add_profile`, this will trigger the actual
         calculations and output the profiles to disk.
 
-        Paramters
-        ---------
+        Parameters
+        ----------
 
         filename : str
             If set, a file will be written with all of the filtered halos

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
     
     Initialize an EmissivityIntegrator object.
 
-    Keyword Parameters
-    ------------------
-    filename: string
+    Parameters
+    ----------
+    filename: string, default None
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
         emissivity tables for primordial elements and for metals at 
@@ -146,8 +146,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -220,8 +220,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -277,8 +277,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,8 @@
 import itertools
 import shelve
 import cStringIO
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -178,7 +180,7 @@
         self.child_mask = 1
         self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-        
+
     def __getitem__(self, field):
         if field not in self.field_data.keys():
             if field == "RadiusCode":
@@ -424,7 +426,7 @@
         return grids
 
     def select_grid_indices(self, level):
-        return np.where(self.grid_levels == level)
+        return np.where(self.grid_levels[:,0] == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
@@ -461,6 +463,7 @@
     def __get_grid_levels(self):
         if self.__grid_levels == None:
             self.__grid_levels = np.array([g.Level for g in self._grids])
+            self.__grid_levels.shape = (self.__grid_levels.size, 1)
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +477,6 @@
     grid_levels = property(__get_grid_levels, __set_grid_levels,
                              __del_grid_levels)
 
-
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
             self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
@@ -491,6 +493,19 @@
     grid_dimensions = property(__get_grid_dimensions, __set_grid_dimensions,
                              __del_grid_dimensions)
 
+    @property
+    def grid_corners(self):
+        return np.array([
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+        ], dtype='float64')
+
 
 class AMR1DData(AMRData, GridPropertiesMixin):
     _spatial = False
@@ -530,7 +545,7 @@
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
             self[field] = self[field][self._sortkey]
-       
+
 class AMROrthoRayBase(AMR1DData):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
@@ -673,9 +688,9 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+        p = p | ( np.all( LE <= self.start_point, axis=1 )
                 & np.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+        p = p | ( np.all( LE <= self.end_point,   axis=1 )
                 & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
@@ -695,7 +710,7 @@
         if not iterable(gf):
             gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -738,11 +753,11 @@
     --------
 
     >>> from yt.visualization.api import Streamlines
-    >>> streamlines = Streamlines(pf, [0.5]*3) 
+    >>> streamlines = Streamlines(pf, [0.5]*3)
     >>> streamlines.integrate_through_volume()
     >>> stream = streamlines.path(0)
     >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-    
+
     """
     _type_name = "streamline"
     _con_args = ('positions')
@@ -775,16 +790,16 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # No child masking here; it happens inside the mask cut
-        mask = self._get_cut_mask(grid) 
+        mask = self._get_cut_mask(grid)
         if field == 'dts': return self._dts[grid.id]
         if field == 't': return self._ts[grid.id]
         return grid[field].flat[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1) 
+                         np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
         mask = np.zeros(points_in_grid.sum(), dtype='int')
         dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -819,7 +834,7 @@
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
         self.set_field_parameter("axis",axis)
-        
+
     def _convert_field_name(self, field):
         return field
 
@@ -838,7 +853,6 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(fields)
-        temp_data = {}
         for field in fields_to_get:
             if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
@@ -848,18 +862,13 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = np.array([])
-            else: data = np.concatenate(data)
-            temp_data[field] = data
+            if len(data) == 0:
+                data = np.array([])
+            else:
+                data = np.concatenate(data)
             # Now the next field can use this field
-            self[field] = temp_data[field] 
-        # We finalize
-        if temp_data != {}:
-            temp_data = self.comm.par_combine_object(temp_data,
-                    datatype='dict', op='cat')
-        # And set, for the next group
-        for field in temp_data.keys():
-            self[field] = temp_data[field]
+            self[field] = self.comm.par_combine_object(data, op='cat',
+                                                       datatype='array')
 
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
@@ -874,7 +883,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw
@@ -980,7 +989,7 @@
         for field in fields:
             #mylog.debug("Trying to obtain %s from node %s",
                 #self._convert_field_name(field), node_name)
-            fdata=self.hierarchy.get_data(node_name, 
+            fdata=self.hierarchy.get_data(node_name,
                 self._convert_field_name(field))
             if fdata is not None:
                 #mylog.debug("Got %s from node %s", field, node_name)
@@ -1138,7 +1147,7 @@
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
-        del cmI   # no longer needed 
+        del cmI   # no longer needed
         t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
@@ -1197,7 +1206,7 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1477,7 +1486,7 @@
         self.dims = dims
         self.dds = self.width / self.dims
         self.bounds = np.array([0.0,1.0,0.0,1.0])
-        
+
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1563,7 +1572,7 @@
 
             # Mark these pixels to speed things up
             self._pixelmask[pointI] = 0
-            
+
             return
         else:
             raise SyntaxError("Making a fixed resolution slice with "
@@ -1651,7 +1660,7 @@
         L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
-        
+
 class AMRQuadTreeProjBase(AMR2DData):
     """
     This is a data object corresponding to a line integral through the
@@ -1809,7 +1818,7 @@
             convs[:] = 1.0
         return dls, convs
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1850,7 +1859,7 @@
                                  if g.Level == level],
                               self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -1942,7 +1951,7 @@
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
+        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
@@ -2283,7 +2292,7 @@
                 del self.__retval_coords[grid.id]
                 del self.__retval_fields[grid.id]
                 del self.__overlap_masks[grid.id]
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         coord_data = np.concatenate(coord_data, axis=1)
         field_data = np.concatenate(field_data, axis=1)
@@ -2314,7 +2323,7 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -2522,7 +2531,7 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
@@ -2683,9 +2692,9 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
+        fly with a set of field_cuts.  It is very useful for applying
         conditions to the fields in your data object.
-        
+
         Examples
         --------
         To find the total mass of gas above 10^6 K in your volume:
@@ -2726,7 +2735,7 @@
         useful for calculating, for instance, total isocontour area, or
         visualizing in an external program (such as `MeshLab
         <http://meshlab.sf.net>`_.)
-        
+
         Parameters
         ----------
         field : string
@@ -2840,7 +2849,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field : string
@@ -2897,7 +2906,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -2990,7 +2999,7 @@
     ----------------
     force_refresh : bool
        Force a refresh of the data. Defaults to True.
-    
+
     Examples
     --------
     """
@@ -3230,7 +3239,7 @@
         if self._grids is not None: return
         GLE = self.pf.h.grid_left_edge
         GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center, 
+        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
                                            GLE, GRE)
         cgrids = self.pf.h.grids[goodI.astype('bool')]
        # find_grids_in_inclined_box seems to be broken.
@@ -3238,13 +3247,13 @@
         grids = []
         for i,grid in enumerate(cgrids):
             v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge, 
+                                      self._rot_mat, grid.LeftEdge,
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
         self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
-            
+
 
     def _is_fully_enclosed(self, grid):
         # This should be written at some point.
@@ -3257,10 +3266,10 @@
             return True
         pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge, 
+                              self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
         return pm
-        
+
 
 class AMRRegionBase(AMR3DData):
     """A 3D region of data with an arbitrary center.
@@ -3396,9 +3405,9 @@
     _dx_pad = 0.0
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
                                        fields = None, pf = None, **kwargs)
-    
+
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3565,7 +3574,7 @@
         self._C = C
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -3575,15 +3584,15 @@
         t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
-        given the tilt about the x axis when e0 was aligned 
+        given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3599,95 +3608,72 @@
         self._refresh_data()
 
         """
-        Having another function find_ellipsoid_grids is too much work, 
+        Having another function find_ellipsoid_grids is too much work,
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        # Corner test removed; always fall through to the per-cell cut mask.
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
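
A note on the rewritten cut mask: it is the standard ellipsoid membership
test, summing the squared projections of the displacement onto each
principal axis, each scaled by that axis' length.  A minimal numpy sketch
with made-up axes and points (the periodic wrapping handled above is
omitted):

    import numpy as np

    # Hypothetical ellipsoid: orthonormal principal axes, semi-axis lengths.
    center = np.array([0.5, 0.5, 0.5])
    e0, e1, e2 = np.eye(3)
    A, B, C = 0.3, 0.2, 0.1

    points = np.random.random((10, 3))   # candidate cell centers
    vr = points - center                 # displacement from the center
    # Inside wherever the quadratic form is at most one.
    inside = ((vr.dot(e0) / A)**2 +
              (vr.dot(e1) / B)**2 +
              (vr.dot(e2) / C)**2) <= 1.0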
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
-    
+
     Parameters
     ----------
     level : int
@@ -3785,7 +3771,7 @@
             n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
-            
+
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator; this might even raise!
@@ -3813,13 +3799,13 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 0)
         return count
@@ -3835,7 +3821,7 @@
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 1)
 
@@ -3856,7 +3842,7 @@
     fill the region to level 1, replacing any cells actually
     covered by level 1 data, and then recursively repeating this
     process until it reaches the specified `level`.
-    
+
     Parameters
     ----------
     level : int
@@ -3868,10 +3854,11 @@
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
 
-    Example
-    -------
-    cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                              dims=[128, 128, 128])
+    Examples
+    --------
+
+    >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+    ...                          dims=[128, 128, 128])
     """
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -3976,7 +3963,7 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
+        input_left = (self._old_global_startindex + 0.5) * rf
         dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
         output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
@@ -3990,13 +3977,13 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
@@ -4008,14 +3995,14 @@
     """
     This will build a hybrid region based on the boolean logic
     of the regions.
-    
+
     Parameters
     ----------
     regions : list
         A list of region objects and strings describing the boolean logic
         to use when building the hybrid region. The boolean logic can be
         nested using parentheses.
-    
+
     Examples
     --------
     >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4028,7 +4015,7 @@
         sp1, ")"])
     """
     _type_name = "boolean"
-    _con_args = ("regions")
+    _con_args = ("regions",)
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         # Center is meaningless, but we'll define it all the same.
         AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4040,7 +4027,7 @@
         self._get_all_regions()
         self._make_overlaps()
         self._get_list_of_grids()
-    
+
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
@@ -4050,7 +4037,7 @@
             # So cut_masks don't get messed up.
             item._boolean_touched = True
         self._all_regions = np.unique(self._all_regions)
-    
+
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
@@ -4084,7 +4071,7 @@
                     continue
             pbar.update(i)
         pbar.finish()
-    
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4097,7 +4084,7 @@
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
-    
+
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 
@@ -4184,7 +4171,7 @@
     <http://meshlab.sf.net>`_.)  The object has the properties .vertices
     and will sample values if a field is requested.  The values are
     interpolated to the center of a given face.
-    
+
     Parameters
     ----------
     data_source : AMR3DDataObject
@@ -4259,7 +4246,7 @@
                 self[fields] = samples
             elif sample_type == "vertex":
                 self.vertex_samples[fields] = samples
-        
+
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4296,7 +4283,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field_x : string
@@ -4343,7 +4330,7 @@
         return flux
 
     @restore_grid_state
-    def _calculate_flux_in_grid(self, grid, 
+    def _calculate_flux_in_grid(self, grid,
                     field_x, field_y, field_z, fluxing_field = None):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4351,7 +4338,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -4366,6 +4353,230 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file prefix this will be exported to.  This cannot be a
+            file-like object.  Note: do not include a file extension; both
+            the .obj and .mtl files are created from this prefix.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other two fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If None, a single plot is assumed.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if emit_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            # A file object cannot take the paired .obj/.mtl naming scheme.
+            raise TypeError("'filename' must be a string prefix, not a file object")
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # total number of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # one color/emit entry per face
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
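
For orientation, the two files written above come out shaped roughly like
the following (a hand-written illustration, not actual output; the vertex
coordinates and colors are made up):

    # my_galaxy.mtl -- one material per colormap entry
    newmtl material_0_0
    Ka 0.000000 0.000000 0.000000
    Kd 0.015686 0.003922 0.098039
    Ks 0.000000 0.000000 0.000000
    d 1.000000
    em 0.731000
    illum 2
    Ns 0.000000

    # my_galaxy.obj -- all vertices, then a material and face per triangle
    mtllib my_galaxy.mtl
    v -0.104220 0.031250 0.003906
    v -0.104220 0.035156 0.003906
    v -0.101562 0.031250 0.003906
    usemtl material_42_0
    f 1 2 3
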
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -4469,7 +4680,7 @@
             w = bounds[i][1] - bounds[i][0]
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
-            v[ax][:] = tmp 
+            v[ax][:] = tmp
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3
@@ -4598,22 +4809,46 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))
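
The net effect of the pickling changes, as a sketch (the sphere and its
parameters are hypothetical; cPickle is the Python 2 module yt uses here):

    import cPickle

    # Pickle a data object and reconstruct it later; _reconstruct_object
    # now resolves the parameter file by hash, preferring one already in
    # memory, and returns a ReconstructedObject, which unpacks as (pf, obj).
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    s = cPickle.dumps(sp, protocol=-1)
    pf2, sp2 = cPickle.loads(s)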

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,8 +151,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

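With this change, TotalMass degrades gracefully on outputs that carry no
particle fields; a usage sketch (pf hypothetical):

    # The KeyError raised for a missing ParticleMassMsun field is caught,
    # so pure-hydro datasets simply report the baryon mass.
    ad = pf.h.all_data()
    total_mass = ad.quantities["TotalMass"]()
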
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -209,7 +209,7 @@
         pf = self.parameter_file
         if find_max: c = self.find_max("Density")[1]
         else: c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        return self.region(c, 
+        return self.region(c,
             pf.domain_left_edge, pf.domain_right_edge)
 
     def clear_all_data(self):
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -308,7 +310,7 @@
             self.save_data = self._save_data
         else:
             self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-    
+
     save_data = parallel_splitter(_save_data, _reload_data_file)
 
     def save_object(self, obj, name):
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """
@@ -367,7 +369,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return self.select_grids(self.grid_levels.max())[0].dds[0]
+        return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)

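Two of the hierarchy changes are easiest to see in use (sketch; the sphere
is hypothetical):

    # save_object now wraps the pickle string in a character array, which
    # round-trips cleanly through the HDF5 .yt file.
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    pf.h.save_object(sp, "my_sphere")
    sp2 = pf.h.load_object("my_sphere")

    # get_smallest_dx now minimizes over every component of dds, which
    # matters when cells are anisotropic.
    dx_min = pf.h.get_smallest_dx()
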
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -198,8 +198,10 @@
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
-                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1)
+                         & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):

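The epsilon comparison above guards against one-ulp roundoff at grid
edges; a small demonstration of the failure mode (values contrived):

    import numpy as np

    # A right edge that should coincide with the box's left edge can land
    # one ulp above it, so the old strict '>' reported a bogus overlap.
    grid_right = np.float64(0.1) + np.float64(0.2)  # 0.30000000000000004
    left_edge = np.float64(0.3)
    print(grid_right > left_edge)                   # True: roundoff only

    eps = np.finfo(np.float64).eps
    print((grid_right - left_edge) > eps)           # False: no real overlap
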
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

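The particle_io change separates fetching from returning: get_data now
forces the particle read into the source object and hands back the arrays
stored there.  Usage sketch (the handler instance is hypothetical):

    # A list of fields returns a list of arrays; one field, one array.
    px, py = handler.get_data(["particle_position_x", "particle_position_y"])
    mass = handler.get_data("particle_mass")
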
This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list