[yt-svn] commit/yt: 36 new changesets

commits-noreply at bitbucket.org
Tue May 6 06:34:23 PDT 2014


36 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7c7cd3b2178f/
Changeset:   7c7cd3b2178f
Branch:      yt
User:        MatthewTurk
Date:        2013-03-13 02:18:52
Summary:     Converting setup.py to be Python3 compatible.
Affected #:  4 files
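
As a rough, self-contained sketch of the idiom being applied in this changeset (the helper name and the "hg identify -i" invocation are illustrative, not the exact command setup.py runs): under Python 3 a subprocess pipe yields bytes, so the output has to be decoded before it can be matched against a regex, and the diagnostics use the print() function so the same file parses on both interpreters.

    import re
    import subprocess

    def get_hg_changeset(path="."):
        # Hypothetical helper: ask Mercurial for the short changeset hash.
        proc = subprocess.Popen(["hg", "identify", "-i", path],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if err.strip():
            print("Error in obtaining current changeset of the repository")
            return None
        # bytes on Python 3, str on Python 2; decode() is valid for both.
        changeset = out.decode("utf-8").strip()
        if not re.search(r"^[0-9a-f]{12}", changeset):
            print("Current changeset hash is malformed")
            return None
        return changeset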

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc setup.py
--- a/setup.py
+++ b/setup.py
@@ -96,11 +96,11 @@
     needs_cython = True
 
 if needs_cython:
-    print "Cython is a build-time requirement for the source tree of yt."
-    print "Please either install yt from a provided, release tarball,"
-    print "or install Cython (version 0.16 or higher)."
-    print "You may be able to accomplish this by typing:"
-    print "     pip install -U Cython"
+    print("Cython is a build-time requirement for the source tree of yt.")
+    print("Please either install yt from a provided, release tarball,")
+    print("or install Cython (version 0.16 or higher).")
+    print("You may be able to accomplish this by typing:")
+    print("     pip install -U Cython")
     sys.exit(1)
 
 ######
@@ -174,12 +174,12 @@
                                      shell=True)
 
     if (get_changeset.stderr.read() != ""):
-        print "Error in obtaining current changeset of the Mercurial repository"
+        print("Error in obtaining current changeset of the Mercurial repository")
         changeset = None
 
-    changeset = get_changeset.stdout.read().strip()
+    changeset = get_changeset.stdout.read().strip().decode("UTF-8")
     if (not re.search("^[0-9a-f]{12}", changeset)):
-        print "Current changeset of the Mercurial repository is malformed"
+        print("Current changeset of the Mercurial repository is malformed")
         changeset = None
 
     return changeset
@@ -265,6 +265,7 @@
         url="http://yt-project.org/",
         license="GPL-3",
         configuration=configuration,
+        use_2to3=True,
         zip_safe=False,
         data_files=REASON_FILES,
         cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -12,10 +12,10 @@
     try:
         rd = open("rockstar.cfg").read().strip()
     except IOError:
-        print "Reading Rockstar location from rockstar.cfg failed."
-        print "Please place the base directory of your"
-        print "Rockstar install in rockstar.cfg and restart."
-        print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+        print("Reading Rockstar location from rockstar.cfg failed.")
+        print("Please place the base directory of your")
+        print("Rockstar install in rockstar.cfg and restart.")
+        print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )")
         sys.exit(1)
     config.add_extension("rockstar_interface",
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -8,14 +8,14 @@
         png_dir = os.environ["PNG_DIR"]
         png_inc = os.path.join(png_dir, "include")
         png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
+        print("PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib))
         return (png_inc, png_lib)
     # Next up, we try png.cfg
     elif os.path.exists("png.cfg"):
         png_dir = open("png.cfg").read().strip()
         png_inc = os.path.join(png_dir, "include")
         png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
+        print("PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib))
         return (png_inc, png_lib)
     # Now we see if ctypes can help us:
     try:
@@ -30,7 +30,7 @@
                os.path.isfile(os.path.join(png_dir, "include", "png.h")):
                 png_inc = os.path.join(png_dir, "include")
                 png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                print("PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib))
                 return png_inc, png_lib
     except ImportError:
         pass
@@ -42,11 +42,11 @@
                os.path.isfile(os.path.join(png_dir, "include", "png.h")):
                 png_inc = os.path.join(png_dir, "include")
                 png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
+                print("PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib))
                 return png_inc, png_lib
-    print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
+    print("Reading png location from png.cfg failed.")
+    print("Please place the base directory of your png install in png.cfg and restart.")
+    print("(ex: \"echo '/usr/local/' > png.cfg\" )")
     sys.exit(1)
 
 def check_for_freetype():
@@ -55,14 +55,14 @@
         freetype_dir = os.environ["FTYPE_DIR"]
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
+        print("FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib))
         return (freetype_inc, freetype_lib)
     # Next up, we try freetype.cfg
     elif os.path.exists("freetype.cfg"):
         freetype_dir = open("freetype.cfg").read().strip()
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        print("FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib))
         return (freetype_inc, freetype_lib)
     # Now we see if ctypes can help us:
     try:
@@ -77,7 +77,7 @@
                os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print("FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib))
                 return freetype_inc, freetype_lib
     except ImportError:
         pass
@@ -89,12 +89,12 @@
                os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print("FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib))
                 return freetype_inc, freetype_lib
-    print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
-    print "You can locate this by looking for the file ft2build.h"
+    print("Reading freetype location from freetype.cfg failed.")
+    print("Please place the base directory of your freetype install in freetype.cfg and restart.")
+    print("(ex: \"echo '/usr/local/' > freetype.cfg\" )")
+    print("You can locate this by looking for the file ft2build.h")
     sys.exit(1)
 
 def configuration(parent_package='',top_path=None):
@@ -212,7 +212,7 @@
         gpd = os.environ["GPERFTOOLS"]
         idir = os.path.join(gpd, "include")
         ldir = os.path.join(gpd, "lib")
-        print "INCLUDE AND LIB DIRS", idir, ldir
+        print("INCLUDE AND LIB DIRS", idir, ldir)
         config.add_extension("perftools_wrap",
                 ["yt/utilities/lib/perftools_wrap.pyx"],
                 libraries=["profiler"],

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -8,14 +8,14 @@
         hdf5_dir = os.environ["HDF5_DIR"]
         hdf5_inc = os.path.join(hdf5_dir, "include")
         hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: HDF5_DIR: %s, %s" % (hdf5_inc, hdf5_lib)
+        print("HDF5_LOCATION: HDF5_DIR: %s, %s" % (hdf5_inc, hdf5_lib))
         return (hdf5_inc, hdf5_lib)
     # Next up, we try hdf5.cfg
     elif os.path.exists("hdf5.cfg"):
         hdf5_dir = open("hdf5.cfg").read().strip()
         hdf5_inc = os.path.join(hdf5_dir, "include")
         hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: hdf5.cfg: %s, %s" % (hdf5_inc, hdf5_lib)
+        print("HDF5_LOCATION: hdf5.cfg: %s, %s" % (hdf5_inc, hdf5_lib))
         return (hdf5_inc, hdf5_lib)
     # Now we see if ctypes can help us:
     try:
@@ -30,15 +30,15 @@
                os.path.isfile(os.path.join(hdf5_dir, "include", "hdf5.h")):
                 hdf5_inc = os.path.join(hdf5_dir, "include")
                 hdf5_lib = os.path.join(hdf5_dir, "lib")
-                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
-                    hdf5_lib)
+                print("HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
+                    hdf5_lib))
                 return hdf5_inc, hdf5_lib
     except ImportError:
         pass
-    print "Reading HDF5 location from hdf5.cfg failed."
-    print "Please place the base directory of your"
-    print "HDF5 install in hdf5.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > hdf5.cfg\" )"
+    print("Reading HDF5 location from hdf5.cfg failed.")
+    print("Please place the base directory of your")
+    print("HDF5 install in hdf5.cfg and restart.")
+    print("(ex: \"echo '/usr/local/' > hdf5.cfg\" )")
     sys.exit(1)
 
 


https://bitbucket.org/yt_analysis/yt/commits/aba8cd2a9b59/
Changeset:   aba8cd2a9b59
Branch:      yt
User:        MatthewTurk
Date:        2013-03-13 02:40:12
Summary:     Updating C extensions to support Python 3
Affected #:  4 files

diff -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc -r aba8cd2a9b59f2b985d6fec85740f845fb38b722 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -35,6 +35,10 @@
 
 #include "numpy/ndarrayobject.h"
 
+#ifndef Py_TYPE
+    #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+
 void initgrouplist(Grouplist *g);
 void hop_main(KD kd, HC *my_comm, float densthres);
 void regroup_main(float dens_outer, HC *my_comm);
@@ -306,7 +310,7 @@
    Py_XDECREF(self->zpos);
    Py_XDECREF(self->mass);
 
-   self->ob_type->tp_free((PyObject*)self);
+   Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
 static PyObject *
@@ -421,10 +425,33 @@
    0,                         /* tp_new */
 };
 
-void initEnzoHop(void)
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "EnzoHop",           /* m_name */
+        "EnzoHop Module",    /* m_doc */
+        -1,                  /* m_size */
+        _HOPMethods,          /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+#endif
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+PyInit_EnzoHop(void)
+#else
+initEnzoHop(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("EnzoHop", _HOPMethods);
+#endif
     d = PyModule_GetDict(m);
     _HOPerror = PyErr_NewException("EnzoHop.HOPerror", NULL, NULL);
     PyDict_SetItemString(d, "error", _HOPerror);

diff -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc -r aba8cd2a9b59f2b985d6fec85740f845fb38b722 yt/utilities/delaunay/_delaunay.cpp
--- a/yt/utilities/delaunay/_delaunay.cpp
+++ b/yt/utilities/delaunay/_delaunay.cpp
@@ -726,16 +726,44 @@
     {NULL, NULL, 0, NULL}
 };
 
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "_delaunay",           /* m_name */
+        "Tools for computing the Delaunay triangulation and some operations on it.\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        delaunay_methods,    /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+#endif
 
-PyMODINIT_FUNC init_delaunay(void)
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL NULL
+PyInit__delaunay(void)
+#else
+#define _RETVAL 
+init_delaunay(void)
+#endif
 {
     PyObject* m;
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule3("_delaunay", delaunay_methods, 
         "Tools for computing the Delaunay triangulation and some operations on it.\n"
         );
-    if (m == NULL)
-        return;
+#endif
+    if (m == NULL) {
+        return _RETVAL;
+    }
     import_array();
+    return _RETVAL;
 }
 
 } // extern "C"

diff -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc -r aba8cd2a9b59f2b985d6fec85740f845fb38b722 yt/utilities/hdf5_light_reader.c
--- a/yt/utilities/hdf5_light_reader.c
+++ b/yt/utilities/hdf5_light_reader.c
@@ -1848,12 +1848,39 @@
 __declspec(dllexport)
 #endif
 
-void inithdf5_light_reader(void)
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "hdf5_light_reader",           /* m_name */
+        "Light HDF5 reading.\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        _hdf5LightReaderMethods,    /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+#endif
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL NULL
+PyInit_hdf5_light_reader(void)
+#else
+#define _RETVAL 
+inithdf5_light_reader(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("hdf5_light_reader", _hdf5LightReaderMethods);
+#endif
     d = PyModule_GetDict(m);
     _hdf5ReadError = PyErr_NewException("hdf5_light_reader.ReadingError", NULL, NULL);
     PyDict_SetItemString(d, "ReadingError", _hdf5ReadError);
     import_array();
+    return _RETVAL;
 }

diff -r 7c7cd3b2178fea1fc2d070d3644f01443ce2fcbc -r aba8cd2a9b59f2b985d6fec85740f845fb38b722 yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -440,12 +440,40 @@
 __declspec(dllexport)
 #endif
 
-void init_MPL(void)
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "_MPL",           /* m_name */
+        "Pixelization routines\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        __MPLMethods,    /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+#endif
+
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL NULL
+PyInit__MPL(void)
+#else
+#define _RETVAL 
+init_MPL(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("_MPL", __MPLMethods);
+#endif
     d = PyModule_GetDict(m);
     _pixelizeError = PyErr_NewException("_MPL.error", NULL, NULL);
     PyDict_SetItemString(d, "error", _pixelizeError);
     import_array();
+    return _RETVAL;
 }


https://bitbucket.org/yt_analysis/yt/commits/9e5289f864fe/
Changeset:   9e5289f864fe
Branch:      yt
User:        MatthewTurk
Date:        2013-03-13 13:44:47
Summary:     Merging setup.py fix
Affected #:  1 file

diff -r aba8cd2a9b59f2b985d6fec85740f845fb38b722 -r 9e5289f864fed7ee59a9612e7e18627034b65e7e setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):


https://bitbucket.org/yt_analysis/yt/commits/8b81363e120a/
Changeset:   8b81363e120a
Branch:      yt
User:        MatthewTurk
Date:        2013-03-13 14:18:49
Summary:     Removing "exceptions" module and usage
Affected #:  5 files
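
The "exceptions" module was only ever a re-export of the built-in exception classes and is gone in Python 3, so the built-in names are the portable spelling. A tiny sketch (the field lookup is made up, not yt's actual code path):

    # Python 2 tolerated this; Python 3 has no "exceptions" module:
    #     import exceptions
    #     raise exceptions.KeyError(field)
    # The built-in name behaves identically on both interpreters:
    def lookup(data, field):
        if field not in data:
            raise KeyError(field)
        return data[field]

    try:
        lookup({"Density": 1.0}, "Temperature")
    except KeyError as missing:
        print("missing field: %s" % missing)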

diff -r 9e5289f864fed7ee59a9612e7e18627034b65e7e -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -32,7 +32,6 @@
 import numpy as np
 import math
 import weakref
-import exceptions
 import itertools
 import shelve
 import cStringIO

diff -r 9e5289f864fed7ee59a9612e7e18627034b65e7e -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -23,7 +23,6 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import exceptions
 import pdb
 import weakref
 
@@ -144,7 +143,7 @@
             else:
                 self[field] = self.pf.field_info[field](self)
         else: # Can't find the field, try as it might
-            raise exceptions.KeyError(field)
+            raise KeyError(field)
 
     def has_key(self, key):
         return (key in self.field_data)

diff -r 9e5289f864fed7ee59a9612e7e18627034b65e7e -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -30,7 +30,6 @@
 except ImportError:
     pass
 
-import exceptions
 import os
 
 from yt.utilities import hdf5_light_reader
@@ -145,7 +144,7 @@
 
     @property
     def _read_exception(self):
-        return (exceptions.KeyError, hdf5_light_reader.ReadingError)
+        return (KeyError, hdf5_light_reader.ReadingError)
 
 class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
     _data_style = "enzo_packed_3d_gz"

diff -r 9e5289f864fed7ee59a9612e7e18627034b65e7e -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -25,7 +25,6 @@
 
 from collections import defaultdict
 
-import exceptions
 import os
 
 from yt.utilities.io_handler import \

diff -r 9e5289f864fed7ee59a9612e7e18627034b65e7e -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -26,7 +26,6 @@
 from collections import defaultdict
 
 import yt.utilities.lib as au
-import exceptions
 import cPickle
 import os
 import h5py


https://bitbucket.org/yt_analysis/yt/commits/2e876baa5101/
Changeset:   2e876baa5101
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:22:04
Summary:     Changing a few tabs to spaces
Affected #:  1 file
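
This cleanup matters for the port, not just for style: Python 3's tokenizer rejects a block whose lines mix tabs and spaces ambiguously, while Python 2 silently expanded each tab to eight spaces. A small self-contained sketch of the failure mode (the variable names just echo the surrounding code):

    # One branch of the original function indented a line with a tab and
    # the following lines with spaces; Python 3 refuses to compile that.
    src = (
        "if True:\n"
        "\tmass_to_use = 'TotalMass'\n"
        "        kinetic = 0.5\n"     # eight spaces, same block
    )
    try:
        compile(src, "<derived_quantities sketch>", "exec")
    except TabError as err:
        print("Python 3 rejects this indentation: %s" % err)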

diff -r bda5c9c8fe1bfdda64bdd5f476690ad43fff9b4b -r 2e876baa5101bd5ec618df696a9087b39f38e56f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -351,8 +351,8 @@
         Used for testing the periodic adjustment machinery
         of this derived quantity.
     include_particles : Bool
-	Should we add the mass contribution of particles
-	to calculate binding energy?
+        Should we add the mass contribution of particles
+        to calculate binding energy?
 
     Examples
     --------
@@ -371,13 +371,13 @@
                       (data["z-velocity"] - bv_z)**2)).sum()
 
     if (include_particles):
-	mass_to_use = data["TotalMass"]
+        mass_to_use = data["TotalMass"]
         kinetic += 0.5 * (data["Dark_Matter_Mass"] *
                           ((data["cic_particle_velocity_x"] - bv_x)**2 +
                            (data["cic_particle_velocity_y"] - bv_y)**2 +
                            (data["cic_particle_velocity_z"] - bv_z)**2)).sum()
     else:
-	mass_to_use = data["CellMass"]
+        mass_to_use = data["CellMass"]
     # Add thermal energy to kinetic energy
     if (include_thermal_energy):
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
@@ -414,8 +414,8 @@
     for label in ["x", "y", "z"]: # Separating CellMass from the for loop
         local_data[label] = data[label]
     local_data["CellMass"] = mass_to_use # Adding CellMass separately
-					 # NOTE: if include_particles = True, local_data["CellMass"]
-					 #       is not the same as data["CellMass"]!!!
+        # NOTE: if include_particles = True, local_data["CellMass"]
+        #       is not the same as data["CellMass"]!!!
     if periodic.any():
         # Adjust local_data to re-center the clump to remove the periodicity
         # by the gap calculated above.
@@ -470,7 +470,7 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = np.array([local_data["CellMass"][sel]], order='F')
+        vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
                np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.


https://bitbucket.org/yt_analysis/yt/commits/290e1c6291b5/
Changeset:   290e1c6291b5
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:22:41
Summary:     Explicitly handle exceptions
Affected #:  2 files
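
The comma form of exception binding ("except E, e") is Python 2 only and a SyntaxError under Python 3; the "as" form works on both. A minimal sketch with a stand-in exception class (NeedsGridType here is just a mock of the real one):

    class NeedsGridType(Exception):
        def __init__(self, ghost_zones):
            super(NeedsGridType, self).__init__(ghost_zones)
            self.ghost_zones = ghost_zones

    def check_available():
        raise NeedsGridType(3)

    # Python 2 only:    except NeedsGridType, ngt_exception:
    # Python 2 and 3:   except NeedsGridType as ngt_exception:
    try:
        check_available()
    except NeedsGridType as ngt_exception:
        print("need %d ghost zones" % ngt_exception.ghost_zones)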

diff -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 -r 290e1c6291b593e5358cd9a9a07ad5883548faf4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -351,7 +351,7 @@
             # First we check the validator
             try:
                 self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
+            except NeedsGridType as ngt_exception:
                 # We leave this to be implementation-specific
                 self._generate_field_in_grids(field, ngt_exception.ghost_zones)
                 return False

diff -r 8b81363e120a642bbd60b3830d26eaff75a8fae9 -r 290e1c6291b593e5358cd9a9a07ad5883548faf4 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -129,7 +129,7 @@
             # First we check the validator
             try:
                 self.pf.field_info[field].check_available(self)
-            except NeedsGridType, ngt_exception:
+            except NeedsGridType as ngt_exception:
                 # This is only going to be raised if n_gz > 0
                 n_gz = ngt_exception.ghost_zones
                 f_gz = ngt_exception.fields


https://bitbucket.org/yt_analysis/yt/commits/d6e78f7ba956/
Changeset:   d6e78f7ba956
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:23:16
Summary:     Enabling running build_py_2to3 in setup.py.
Affected #:  1 file
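
The try/except import in this changeset selects distutils' 2to3-aware build command where it exists (Python 3) and falls back to the plain build_py on Python 2; whichever class ends up bound to the name is what the custom command subclasses and what setup() is told to use. A condensed sketch of that wiring (the subclass body is trimmed to the essentials):

    try:
        # Python 3: build_py_2to3 runs 2to3 over the copied sources.
        from distutils.command.build_py import build_py_2to3 as build_py
    except ImportError:
        # Python 2: no translation step is needed.
        from distutils.command.build_py import build_py

    class my_build_py(build_py):
        def run(self):
            # ... project-specific steps, e.g. writing __hg_version__.py ...
            build_py.run(self)

    # later, in setup():
    #     setup(..., cmdclass={'build_py': my_build_py, ...})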

diff -r 290e1c6291b593e5358cd9a9a07ad5883548faf4 -r d6e78f7ba956bca2a4d3773f9ffa1335e6964599 setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,11 @@
 import distribute_setup
 distribute_setup.use_setuptools()
 
-from distutils.command.build_py import build_py
+try:
+   from distutils.command.build_py import build_py_2to3 \
+        as build_py
+except ImportError:
+   from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
@@ -214,7 +218,7 @@
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
                 fobj.write("hg_version = '%s'\n" % changeset)
 
-            build_py.run(self)
+        build_py.run(self)
 
 
 def configuration(parent_package='', top_path=None):


https://bitbucket.org/yt_analysis/yt/commits/e6bdd197f04b/
Changeset:   e6bdd197f04b
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:26:15
Summary:     More tabs->spaces.
Affected #:  1 file

diff -r 2e876baa5101bd5ec618df696a9087b39f38e56f -r e6bdd197f04bcf97ea0b25cb324b9cbef9275aff yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -498,39 +498,39 @@
         # all the parameters except for the center of mass.
         com = self.center_of_mass()
         position = [self["particle_position_x"],
-		    self["particle_position_y"],
-		    self["particle_position_z"]]
+                    self["particle_position_y"],
+                    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
-	position = [position[0] - com[0],
-		    position[1] - com[1],
-		    position[2] - com[2]]
-	# different cases of particles being on other side of boundary
-	for axis in range(np.size(DW)):
-	    cases = np.array([position[axis],
-	  		      position[axis] + DW[axis],
-			      position[axis] - DW[axis]])        
+        DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+        position = [position[0] - com[0],
+                    position[1] - com[1],
+                    position[2] - com[2]]
+        # different cases of particles being on other side of boundary
+        for axis in range(np.size(DW)):
+            cases = np.array([position[axis],
+                                position[axis] + DW[axis],
+                              position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
-	# find the furthest particle's index
-	r = np.sqrt(position[0]**2 +
-		    position[1]**2 +
-		    position[2]**2)
+        # find the furthest particle's index
+        r = np.sqrt(position[0]**2 +
+                    position[1]**2 +
+                    position[2]**2)
         A_index = r.argmax()
         mag_A = r.max()
         # designate the A vector
-	A_vector = (position[0][A_index],
-		    position[1][A_index],
-		    position[2][A_index])
+        A_vector = (position[0][A_index],
+                    position[1][A_index],
+                    position[2][A_index])
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
+        e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
         rr = np.array([position[0],
-		       position[1],
-		       position[2]]).T # Similar to tB_vector in old code.
+                       position[1],
+                       position[2]]).T # Similar to tB_vector in old code.
         tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
@@ -944,7 +944,7 @@
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
-	"""
+        """
 
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]


https://bitbucket.org/yt_analysis/yt/commits/fd1083971a98/
Changeset:   fd1083971a98
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:26:48
Summary:     Fixing a Python3 issue in spatial/tests, and also changing how spatial calls a data directory.
Affected #:  2 files

diff -r d6e78f7ba956bca2a4d3773f9ffa1335e6964599 -r fd1083971a98a478650b2288023b2911ffbf955d tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -87,7 +87,7 @@
             keys = set(registry_entries())
             tests_to_run += [t for t in new_tests if t in keys]
         tests = list(set(tests_to_run))
-        print "\n    ".join(tests)
+        print ("\n    ".join(tests))
         sys.exit(0)
 
     # Load the test pf and make sure it's good.

diff -r d6e78f7ba956bca2a4d3773f9ffa1335e6964599 -r fd1083971a98a478650b2288023b2911ffbf955d yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -9,7 +9,7 @@
 
     config = Configuration('spatial', parent_package, top_path)
 
-    config.add_data_dir('tests')
+    config.add_data_dir('yt/utilities/spatial/tests')
 
 #    qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c',
 #                 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c',


https://bitbucket.org/yt_analysis/yt/commits/898b8eb5b696/
Changeset:   898b8eb5b696
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:27:42
Summary:     Quick indentation fix.
Affected #:  1 file

diff -r e6bdd197f04bcf97ea0b25cb324b9cbef9275aff -r 898b8eb5b69684a3eade92a568b73e712ce607a4 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -471,7 +471,7 @@
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
         vals = np.array([local_data["CellMass"][sel]], order='F')
-            octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
+        octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
                np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)


https://bitbucket.org/yt_analysis/yt/commits/85a96b23945f/
Changeset:   85a96b23945f
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:27:54
Summary:     Merging
Affected #:  4 files

diff -r fd1083971a98a478650b2288023b2911ffbf955d -r 85a96b23945f7d33b4c75e7a83e1b8a24005972b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -498,39 +498,39 @@
         # all the parameters except for the center of mass.
         com = self.center_of_mass()
         position = [self["particle_position_x"],
-		    self["particle_position_y"],
-		    self["particle_position_z"]]
+                    self["particle_position_y"],
+                    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
-	position = [position[0] - com[0],
-		    position[1] - com[1],
-		    position[2] - com[2]]
-	# different cases of particles being on other side of boundary
-	for axis in range(np.size(DW)):
-	    cases = np.array([position[axis],
-	  		      position[axis] + DW[axis],
-			      position[axis] - DW[axis]])        
+        DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+        position = [position[0] - com[0],
+                    position[1] - com[1],
+                    position[2] - com[2]]
+        # different cases of particles being on other side of boundary
+        for axis in range(np.size(DW)):
+            cases = np.array([position[axis],
+                                position[axis] + DW[axis],
+                              position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
-	# find the furthest particle's index
-	r = np.sqrt(position[0]**2 +
-		    position[1]**2 +
-		    position[2]**2)
+        # find the furthest particle's index
+        r = np.sqrt(position[0]**2 +
+                    position[1]**2 +
+                    position[2]**2)
         A_index = r.argmax()
         mag_A = r.max()
         # designate the A vector
-	A_vector = (position[0][A_index],
-		    position[1][A_index],
-		    position[2][A_index])
+        A_vector = (position[0][A_index],
+                    position[1][A_index],
+                    position[2][A_index])
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
+        e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
         rr = np.array([position[0],
-		       position[1],
-		       position[2]]).T # Similar to tB_vector in old code.
+                       position[1],
+                       position[2]]).T # Similar to tB_vector in old code.
         tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
@@ -944,7 +944,7 @@
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
-	"""
+        """
 
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]

diff -r fd1083971a98a478650b2288023b2911ffbf955d -r 85a96b23945f7d33b4c75e7a83e1b8a24005972b yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True

diff -r fd1083971a98a478650b2288023b2911ffbf955d -r 85a96b23945f7d33b4c75e7a83e1b8a24005972b yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -351,8 +351,8 @@
         Used for testing the periodic adjustment machinery
         of this derived quantity.
     include_particles : Bool
-	Should we add the mass contribution of particles
-	to calculate binding energy?
+        Should we add the mass contribution of particles
+        to calculate binding energy?
 
     Examples
     --------
@@ -371,13 +371,13 @@
                       (data["z-velocity"] - bv_z)**2)).sum()
 
     if (include_particles):
-	mass_to_use = data["TotalMass"]
+        mass_to_use = data["TotalMass"]
         kinetic += 0.5 * (data["Dark_Matter_Mass"] *
                           ((data["cic_particle_velocity_x"] - bv_x)**2 +
                            (data["cic_particle_velocity_y"] - bv_y)**2 +
                            (data["cic_particle_velocity_z"] - bv_z)**2)).sum()
     else:
-	mass_to_use = data["CellMass"]
+        mass_to_use = data["CellMass"]
     # Add thermal energy to kinetic energy
     if (include_thermal_energy):
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()
@@ -414,8 +414,8 @@
     for label in ["x", "y", "z"]: # Separating CellMass from the for loop
         local_data[label] = data[label]
     local_data["CellMass"] = mass_to_use # Adding CellMass separately
-					 # NOTE: if include_particles = True, local_data["CellMass"]
-					 #       is not the same as data["CellMass"]!!!
+        # NOTE: if include_particles = True, local_data["CellMass"]
+        #       is not the same as data["CellMass"]!!!
     if periodic.any():
         # Adjust local_data to re-center the clump to remove the periodicity
         # by the gap calculated above.
@@ -470,7 +470,7 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-	    vals = np.array([local_data["CellMass"][sel]], order='F')
+        vals = np.array([local_data["CellMass"][sel]], order='F')
             octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
                np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.

diff -r fd1083971a98a478650b2288023b2911ffbf955d -r 85a96b23945f7d33b4c75e7a83e1b8a24005972b yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -369,7 +369,7 @@
         if not filter.any(): return blank
         num = filter.sum()
     else:
-        filter = None
+        filter = Ellipsis
         num = data["particle_position_x"].size
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),


https://bitbucket.org/yt_analysis/yt/commits/ee1fadce20e0/
Changeset:   ee1fadce20e0
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 02:28:04
Summary:     Merging
Affected #:  1 file

diff -r 85a96b23945f7d33b4c75e7a83e1b8a24005972b -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -471,7 +471,7 @@
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
         vals = np.array([local_data["CellMass"][sel]], order='F')
-            octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
+        octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
                np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)


https://bitbucket.org/yt_analysis/yt/commits/53ce98584106/
Changeset:   53ce98584106
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 03:03:52
Summary:     Several more changes to compile under Python 3.3.
Affected #:  7 files
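
The streaminghttp change ("import httplib as httplib # hack for py3") apparently leans on 2to3's fix_imports step, which rewrites the renamed standard-library module while the "as httplib" alias keeps the rest of the file unchanged. A 2to3-free alternative, shown only as a sketch, is an explicit fallback import:

    try:
        # Python 3 location of the module
        import http.client as httplib
    except ImportError:
        # Python 2 name
        import httplib

    # Either way, the rest of the code can keep spelling it "httplib".
    connection_class = httplib.HTTPConnection
    print(connection_class)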

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -185,14 +185,37 @@
 __declspec(dllexport)
 #endif
 
-void initEnzoFOF(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_EnzoFOF(void)
+#else
+#define _RETVAL 
+initEnzoFOF(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "EnzoFOF",           /* m_name */
+        "EnzoFOF Module",    /* m_doc */
+        -1,                  /* m_size */
+        _FOFMethods,          /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("EnzoFOF", _FOFMethods);
+#endif
     d = PyModule_GetDict(m);
     _FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL);
     PyDict_SetItemString(d, "error", _FOFerror);
     import_array();
+    return _RETVAL;
 }
 
 /*

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -425,6 +425,16 @@
    0,                         /* tp_new */
 };
 
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_EnzoHop(void)
+#else
+#define _RETVAL 
+initEnzoHop(void)
+#endif
+{
+    PyObject *m, *d;
 #if PY_MAJOR_VERSION >= 3
     static struct PyModuleDef moduledef = {
         PyModuleDef_HEAD_INIT,
@@ -437,17 +447,6 @@
         NULL,                /* m_clear */
         NULL,                /* m_free */
     };
-#endif
-
-PyMODINIT_FUNC
-#if PY_MAJOR_VERSION >= 3
-PyInit_EnzoHop(void)
-#else
-initEnzoHop(void)
-#endif
-{
-    PyObject *m, *d;
-#if PY_MAJOR_VERSION >= 3
     m = PyModule_Create(&moduledef); 
 #else
     m = Py_InitModule("EnzoHop", _HOPMethods);
@@ -465,6 +464,7 @@
    PyModule_AddObject(m, "kDTree", (PyObject*)&kDTreeTypeDict);
 
    import_array();
+   return _RETVAL;
 }
 
 /*

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -37,6 +37,14 @@
 #define max(A,B) ((A) > (B) ? (A) : (B))
 #define min(A,B) ((A) < (B) ? (A) : (B))
 
+#if PY_MAJOR_VERSION >= 3
+#define PYINTCONV_AS   PyLong_AsLong
+#define PYINTCONV_FROM PyLong_FromLong
+#else
+#define PYINTCONV_AS   PyInt_AsLong
+#define PYINTCONV_FROM PyInt_FromLong
+#endif
+
 static PyObject *_combineGridsError;
 
 static PyObject *
@@ -249,7 +257,7 @@
     free(src_vals);
     free(dst_vals);
 
-    PyObject *onum_found = PyInt_FromLong((long)num_found);
+    PyObject *onum_found = PYINTCONV_FROM((long)num_found);
     return onum_found;
 
 _fail:
@@ -561,7 +569,7 @@
     free(g_data);
     free(c_data);
 
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
     
 _fail:
@@ -811,7 +819,7 @@
     }
     free(g_data);
     free(c_data);
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -1025,7 +1033,7 @@
     if(dls!=NULL)free(dls);
     if(g_data!=NULL)free(g_data);
     if(c_data!=NULL)free(c_data);
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -1112,7 +1120,7 @@
     Py_DECREF(yi);
     Py_DECREF(zi);
 
-    PyObject *retval = PyInt_FromLong(status);
+    PyObject *retval = PYINTCONV_FROM(status);
     return retval;
 
     _fail:
@@ -1449,10 +1457,33 @@
 __declspec(dllexport)
 #endif
 
-void initdata_point_utilities(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_data_point_utilities(void)
+#else
+#define _RETVAL 
+initdata_point_utilities(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "data_point_utilities",           /* m_name */
+        "Utilities for data combination.\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        _combineMethods,     /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("data_point_utilities", _combineMethods);
+#endif
     d = PyModule_GetDict(m);
     _combineGridsError = PyErr_NewException("data_point_utilities.CombineGridsError", NULL, NULL);
     PyDict_SetItemString(d, "error", _combineGridsError);
@@ -1465,6 +1496,7 @@
     _outputFloatsToFileError = PyErr_NewException("data_point_utilities.OutputFloatsToFileError", NULL, NULL);
     PyDict_SetItemString(d, "error", _outputFloatsToFileError);
     import_array();
+    return _RETVAL;
 }
 
 /*

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/utilities/hdf5_light_reader.c
--- a/yt/utilities/hdf5_light_reader.c
+++ b/yt/utilities/hdf5_light_reader.c
@@ -40,6 +40,14 @@
 
 #define MIN(a,b) ((a) <= (b) ? (a) : (b))
 
+#if PY_MAJOR_VERSION >= 3
+#define PYINTCONV_AS   PyLong_AsLong
+#define PYINTCONV_FROM PyLong_FromLong
+#else
+#define PYINTCONV_AS   PyInt_AsLong
+#define PYINTCONV_FROM PyInt_FromLong
+#endif
+
 static PyObject *_hdf5ReadError;
 herr_t iterate_dataset(hid_t loc_id, const char *name, void *nodelist);
 
@@ -619,7 +627,7 @@
 
     for(i = 0; i < num_grids; i++) {
         grid_key = PyList_GetItem(grid_ids, i);
-        id = PyInt_AsLong(grid_key);
+        id = PYINTCONV_AS(grid_key);
         sprintf(grid_node_name, "Grid%08li", id);
         grid_data = PyDict_New(); // New reference
         PyDict_SetItem(grids_dict, grid_key, grid_data);
@@ -858,7 +866,7 @@
       temp = PyList_GetItem(filename_list, ig);
       filename = PyString_AsString(temp);
       temp = PyList_GetItem(grid_ids, ig);
-      id = PyInt_AsLong(temp);
+      id = PYINTCONV_AS(temp);
       //fprintf(stderr, "Counting from grid %d\n", id);
       if(run_validators(&pv, filename, id, 0, packed, ig) < 0) {
         goto _fail;
@@ -892,7 +900,7 @@
       temp = PyList_GetItem(filename_list, ig);
       filename = PyString_AsString(temp);
       temp = PyList_GetItem(grid_ids, ig);
-      id = PyInt_AsLong(temp);
+      id = PYINTCONV_AS(temp);
       //fprintf(stderr, "Reading from grid %d\n", id);
       if(run_validators(&pv, filename, id, 1, packed, ig) < 0) {
         goto _fail;
@@ -967,7 +975,7 @@
         rv->right_edge[i] = *(npy_float64*) PyArray_GETPTR1(right_edge, i);
     }
 
-    rv->periodic = PyInt_AsLong(operiodic);
+    rv->periodic = PYINTCONV_AS(operiodic);
     if(rv->periodic == 1) {
       PyArrayObject *domain_left_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 3);
       PyArrayObject *domain_right_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 4);
@@ -1009,7 +1017,7 @@
 
     sv->radius = (npy_float64) PyFloat_AsDouble(radius);
 
-    sv->periodic = PyInt_AsLong(operiodic);
+    sv->periodic = PYINTCONV_AS(operiodic);
     if(sv->periodic == 1) {
       PyArrayObject *domain_left_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 3);
       PyArrayObject *domain_right_edge = (PyArrayObject *) PyTuple_GetItem(InputData, 4);
@@ -1848,6 +1856,17 @@
 __declspec(dllexport)
 #endif
 
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_hdf5_light_reader(void)
+#else
+#define _RETVAL 
+inithdf5_light_reader(void)
+#endif
+{
+    PyObject *m, *d;
 #if PY_MAJOR_VERSION >= 3
     static struct PyModuleDef moduledef = {
         PyModuleDef_HEAD_INIT,
@@ -1861,19 +1880,6 @@
         NULL,                /* m_clear */
         NULL,                /* m_free */
     };
-#endif
-
-PyMODINIT_FUNC
-#if PY_MAJOR_VERSION >= 3
-#define _RETVAL NULL
-PyInit_hdf5_light_reader(void)
-#else
-#define _RETVAL 
-inithdf5_light_reader(void)
-#endif
-{
-    PyObject *m, *d;
-#if PY_MAJOR_VERSION >= 3
     m = PyModule_Create(&moduledef); 
 #else
     m = Py_InitModule("hdf5_light_reader", _hdf5LightReaderMethods);

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/utilities/lib/png_writer.pyx
--- a/yt/utilities/lib/png_writer.pyx
+++ b/yt/utilities/lib/png_writer.pyx
@@ -41,7 +41,7 @@
 # First, declare the Python macro to access files:
 cdef extern from "Python.h":
     ctypedef struct FILE
-    FILE* PyFile_AsFile(object)
+    FILE* PyObject_AsFileDescriptor(object)
     void  fprintf(FILE* f, char* s, char* s)
 
 cdef extern from "png.h":
@@ -131,7 +131,7 @@
     cdef png_byte *pix_buffer = <png_byte *> buffer.data
     cdef int width = buffer.shape[1]
     cdef int height = buffer.shape[0]
-    cdef FILE *fileobj = PyFile_AsFile(py_fileobj)
+    cdef FILE *fileobj = PyObject_AsFileDescriptor(py_fileobj)
 
     cdef png_bytep *row_pointers
     cdef png_structp png_ptr
@@ -299,7 +299,7 @@
 
     png_destroy_write_struct(&png_ptr, &info_ptr)
 
-    pp = PyString_FromStringAndSize(state.buffer, state.size)
+    pp = str(state.buffer)
     if state.buffer != NULL: free(state.buffer)
     return pp
 

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/utilities/poster/streaminghttp.py
--- a/yt/utilities/poster/streaminghttp.py
+++ b/yt/utilities/poster/streaminghttp.py
@@ -26,6 +26,7 @@
 ...                       {'Content-Length': str(len(s))})
 """
 
+import httplib as httplib # hack for py3
 import httplib, urllib2, socket
 from httplib import NotConnected
 

diff -r ee1fadce20e0726550d7c2a2e9856c8523de15c5 -r 53ce98584106518df207cd875519de21d5429d4d yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -440,6 +440,17 @@
 __declspec(dllexport)
 #endif
 
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit__MPL(void)
+#else
+#define _RETVAL 
+init_MPL(void)
+#endif
+{
+    PyObject *m, *d;
 #if PY_MAJOR_VERSION >= 3
     static struct PyModuleDef moduledef = {
         PyModuleDef_HEAD_INIT,
@@ -453,20 +464,6 @@
         NULL,                /* m_clear */
         NULL,                /* m_free */
     };
-#endif
-
-
-PyMODINIT_FUNC
-#if PY_MAJOR_VERSION >= 3
-#define _RETVAL NULL
-PyInit__MPL(void)
-#else
-#define _RETVAL 
-init_MPL(void)
-#endif
-{
-    PyObject *m, *d;
-#if PY_MAJOR_VERSION >= 3
     m = PyModule_Create(&moduledef); 
 #else
     m = Py_InitModule("_MPL", __MPLMethods);


https://bitbucket.org/yt_analysis/yt/commits/802fdb182bf2/
Changeset:   802fdb182bf2
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 16:46:05
Summary:     Fixing a leftover use_2to3 that we do not need.
Affected #:  1 file

diff -r 53ce98584106518df207cd875519de21d5429d4d -r 802fdb182bf2ba798d346d24e2721c4ea5c5e7f9 setup.py
--- a/setup.py
+++ b/setup.py
@@ -274,7 +274,6 @@
         url="http://yt-project.org/",
         license="GPL-3",
         configuration=configuration,
-        use_2to3=True,
         zip_safe=False,
         data_files=REASON_FILES,
         cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,


https://bitbucket.org/yt_analysis/yt/commits/79bca3a81b7e/
Changeset:   79bca3a81b7e
Branch:      yt
User:        MatthewTurk
Date:        2013-03-14 17:09:20
Summary:     Kacper noted that this needs to be inside the loop; I agree.
Affected #:  1 file

diff -r 802fdb182bf2ba798d346d24e2721c4ea5c5e7f9 -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -470,9 +470,9 @@
             thisx = (local_data["x"][sel] / dx).astype('int64') - cover_imin[0] * 2**L
             thisy = (local_data["y"][sel] / dy).astype('int64') - cover_imin[1] * 2**L
             thisz = (local_data["z"][sel] / dz).astype('int64') - cover_imin[2] * 2**L
-        vals = np.array([local_data["CellMass"][sel]], order='F')
-        octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
-               np.ones_like(thisx).astype('float64'), treecode = 1)
+            vals = np.array([local_data["CellMass"][sel]], order='F')
+            octree.add_array_to_tree(L, thisx, thisy, thisz, vals,
+                   np.ones_like(thisx).astype('float64'), treecode = 1)
         # Now we calculate the binding energy using a treecode.
         octree.finalize(treecode = 1)
         mylog.info("Using a treecode to find gravitational energy for %d cells." % local_data['x'].size)


https://bitbucket.org/yt_analysis/yt/commits/68f53a855cc8/
Changeset:   68f53a855cc8
Branch:      yt
User:        MatthewTurk
Date:        2013-03-17 00:08:33
Summary:     This set of changes makes yt import properly in Python 3.3.

This changes a few things.

 * Fixing HOP's object initialization in C
 * Removing usage of map and the string module strip and rstrip from Boxlib
 * Move to relative imports for C modules imported in Python code.
 * Add PyUnicode / PyString / char* conversion.
 * Import pyparsing_py3 if needed.
Affected #:  16 files
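
Two of the bullets above correspond to small, general idioms. The string module's strip/rstrip functions no longer exist in Python 3, so the equivalent str methods take their place, and new.classobj is replaced by the three-argument type() builtin, which constructs a class dynamically on both interpreters. A compact sketch with made-up inputs:

    # str methods replace string.strip / string.rstrip:
    line = "  max_grid_size = 32  \n"
    param, vals = [piece.strip() for piece in line.split("=")]
    print("%s -> %s" % (param, vals))          # max_grid_size -> 32

    # type() replaces new.classobj for building classes at runtime:
    base = object
    dd = {"object_type": "region"}
    YTRegion = type("YTRegion", (base,), dd)
    print(YTRegion().object_type)              # region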

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -351,7 +351,7 @@
 
     int median = kdMedianJst(self->kd, d, l, u);
 
-    PyObject *omedian = PyInt_FromLong((long)median);
+    PyObject *omedian = PyLong_FromLong((long)median);
     return omedian;
 }
 
@@ -384,8 +384,8 @@
 
 static PyTypeObject
 kDTreeTypeDict = {
-   PyObject_HEAD_INIT(NULL)
-   0,                         /* ob_size */
+   PyVarObject_HEAD_INIT(NULL, 0)
+                            /* ob_size */
    "kDTree",               /* tp_name */
    sizeof(kDTreeType),         /* tp_basicsize */
    0,                         /* tp_itemsize */

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -29,7 +29,6 @@
 import weakref
 
 from itertools import chain, izip
-from new import classobj
 
 from yt.funcs import *
 
@@ -371,7 +370,7 @@
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)
-        obj = classobj(class_name, (base,), dd)
+        obj = type(class_name, (base,), dd)
         setattr(self, name, obj)
 
     def _initialize_level_stats(self):

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -28,7 +28,6 @@
 import weakref
 import itertools
 from collections import defaultdict
-from string import strip, rstrip
 from stat import ST_CTIME
 
 import numpy as np
@@ -598,7 +597,9 @@
                 continue
 
             try:
-                param, vals = map(strip, map(rstrip, line.split("=")))
+                param, vals = [strip(i) for i in
+                                (j.rstrip() for j in line.split("="))]
+                #param, vals = map(strip, map(rstrip, line.split("=")))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
 
@@ -654,7 +655,9 @@
         lines = open(self.fparameter_filename).readlines()
         for line in lines:
             if line.count("=") == 1:
-                param, vals = map(strip, map(rstrip, line.split("=")))
+                param, vals = [strip(i) for i in
+                                (j.rstrip() for j in line.split("="))]
+                #param, vals = map(strip, map(rstrip, line.split("=")))
                 if vals.count("'") == 0:
                     t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
                 else:

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -32,9 +32,6 @@
 
 from collections import \
      defaultdict
-from string import \
-     strip, \
-     rstrip
 from stat import \
      ST_CTIME
 
@@ -295,7 +292,8 @@
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
             try:
-                param, sep, vals = map(rstrip,line.partition(' '))
+                param, sep, vals = [v.rstrip() for v in line.partition(' ')]
+                #param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if chombo2enzoDict.has_key(param):

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -32,7 +32,6 @@
 import os
 import re
 from stat import ST_CTIME
-from string import strip, rstrip
 import weakref
 
 import numpy as np
@@ -580,7 +579,9 @@
                 continue
 
             try:
-                param, val_string = map(strip, line.split("="))
+                param, val_string = [i.strip() for i in
+                                     (j.rstrip() for j in line.split("="))]
+                #param, val_string = map(strip, line.split("="))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
 
@@ -659,7 +660,9 @@
         lines = open(self.fparameter_file_path).readlines()
         for line in lines:
             if line.count("=") == 1:
-                nyx_param, val_string = map(strip, line.split("="))
+                nyx_param, val_string = [i.strip() for i in
+                                         (j.rstrip() for j in line.split("="))]
+                #nyx_param, val_string = map(strip, line.split("="))
 
                 # Check if we care about this param. If so, translate it.
                 if nyx_to_enzo_dict.has_key(nyx_param):

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -28,7 +28,6 @@
 import weakref
 
 from collections import defaultdict
-from string import strip, rstrip
 from stat import ST_CTIME
 
 import numpy as np
@@ -528,7 +527,9 @@
             if len(line) < 2 or line.find("#") == 0: # ...but skip comments
                 continue
             try:
-                param, vals = map(strip,map(rstrip,line.split("=")))
+                param, vals = [i.strip() for i in
+                                (j.rstrip() for j in line.split("="))]
+                #param, vals = map(strip,map(rstrip,line.split("=")))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
                 continue
@@ -580,7 +581,9 @@
         lines = open(self.fparameter_filename).readlines()
         for line in lines:
             if line.count("=") == 1:
-                param, vals = map(strip,map(rstrip,line.split("=")))
+                param, vals = [i.strip() for i in
+                                (j.rstrip() for j in line.split("="))]
+                #param, vals = map(strip,map(rstrip,line.split("=")))
                 if vals.count("'") == 0:
                     t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
                 else:

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -32,9 +32,6 @@
 
 from collections import \
      defaultdict
-from string import \
-     strip, \
-     rstrip
 from stat import \
      ST_CTIME
 
@@ -255,7 +252,8 @@
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
             try:
-                param, sep, vals = map(rstrip,line.partition(' '))
+                param, sep, vals = [v.rstrip() for v in line.partition(' ')]
+                #param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if pluto2enzoDict.has_key(param):

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/utilities/delaunay/__init__.py
--- a/yt/utilities/delaunay/__init__.py
+++ b/yt/utilities/delaunay/__init__.py
@@ -5,6 +5,6 @@
 :License: BSD-style license. See LICENSE.txt
 """
 
-from _delaunay import delaunay
+from ._delaunay import delaunay
 from triangulate import *
 from interpolate import *
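
Implicit relative imports such as the removed "from _delaunay import delaunay"
are resolved as absolute imports on Python 3, so a sibling extension module is
no longer found; the explicit "from ._delaunay import ..." form works on both
Python 2 (2.5 and later) and Python 3.  A layout sketch with hypothetical
module names, not part of yt:

    # mypkg/__init__.py   (empty)
    # mypkg/_helpers.py   defines do_work()
    # mypkg/api.py:

    from ._helpers import do_work    # explicit relative import: Python 2 and 3
    # from _helpers import do_work   # implicit relative import: Python 2 only

    def run():
        return do_work()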

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/utilities/delaunay/_delaunay.cpp
--- a/yt/utilities/delaunay/_delaunay.cpp
+++ b/yt/utilities/delaunay/_delaunay.cpp
@@ -744,7 +744,7 @@
 
 PyMODINIT_FUNC
 #if PY_MAJOR_VERSION >= 3
-#define _RETVAL NULL
+#define _RETVAL m
 PyInit__delaunay(void)
 #else
 #define _RETVAL 

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/utilities/delaunay/interpolate.py
--- a/yt/utilities/delaunay/interpolate.py
+++ b/yt/utilities/delaunay/interpolate.py
@@ -1,7 +1,7 @@
 import numpy as np
 
-from _delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
-from _delaunay import nn_interpolate_unstructured
+from ._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
+from ._delaunay import nn_interpolate_unstructured
 
 __all__ = ['LinearInterpolator', 'NNInterpolator']
 

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/utilities/delaunay/triangulate.py
--- a/yt/utilities/delaunay/triangulate.py
+++ b/yt/utilities/delaunay/triangulate.py
@@ -8,7 +8,7 @@
 
 import numpy as np
 
-from _delaunay import delaunay
+from ._delaunay import delaunay
 from interpolate import LinearInterpolator, NNInterpolator
 
 __all__ = ['Triangulation', 'DuplicatePointWarning']

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/utilities/hdf5_light_reader.c
--- a/yt/utilities/hdf5_light_reader.c
+++ b/yt/utilities/hdf5_light_reader.c
@@ -30,6 +30,7 @@
 #include <math.h>
 #include <signal.h>
 #include <ctype.h>
+#include <string.h>
 #include "hdf5.h"
 
 #include "numpy/ndarrayobject.h"
@@ -53,6 +54,34 @@
 
 /* Structures for particle reading */
 
+PyObject *char_to_obj(char *cs) {
+  /* This returns a *new* reference */
+  PyObject *tr;
+#if PY_MAJOR_VERSION >= 3
+  tr = PyBytes_FromString(cs);
+#else
+  tr = PyString_FromString(cs);
+#endif
+  return tr;
+}
+
+char *obj_to_char(PyObject *po) {
+  /* This returns a newly allocated string; the caller must free() it */
+  char *tr, *nc;
+  PyObject *as = NULL;
+#if PY_MAJOR_VERSION >= 3
+  /* Reference count increased for 'as' */
+  as = PyUnicode_AsASCIIString(po);
+  nc = PyBytes_AsString(as);
+#else
+  nc = PyString_AsString(po);
+#endif
+  tr = strdup(nc);
+  if (as != NULL) Py_XDECREF(as);
+  /* In both cases, tr needs to be de-allocated */
+  return tr;
+}
+
 typedef struct particle_validation_ {
     int total_valid_particles;
     int particles_to_check;
@@ -568,7 +597,7 @@
 
     H5Gget_objinfo(loc_id, name, 0, &statbuf);
     if (statbuf.type == H5G_DATASET) {
-        node_name = PyString_FromString(name);
+        node_name = char_to_obj(name);
         if (node_name == NULL) {return -1;}
         if (PyList_Append((PyObject *)nodelist, node_name)) {return -1;}
     }
@@ -641,11 +670,12 @@
         for(n = 0; n < num_sets; n++) {
             // This points to the in-place internal char*
             oset_name = PyList_GetItem(set_names, n);
-            set_name = PyString_AsString(oset_name);
+            set_name = obj_to_char(oset_name);
             cur_data = get_array_from_nodename(set_name, grid_node);
             if (cur_data != NULL) {
                 PyDict_SetItem(grid_data, oset_name, (PyObject *) cur_data);
             }
+            free(set_name);
             Py_XDECREF(cur_data); // still one left
         }
         // We just want the one reference from the grids_dict value set
@@ -864,13 +894,14 @@
 
     for (ig = 0; ig < ngrids ; ig++) {
       temp = PyList_GetItem(filename_list, ig);
-      filename = PyString_AsString(temp);
+      filename = obj_to_char(temp);
       temp = PyList_GetItem(grid_ids, ig);
       id = PYINTCONV_AS(temp);
       //fprintf(stderr, "Counting from grid %d\n", id);
       if(run_validators(&pv, filename, id, 0, packed, ig) < 0) {
         goto _fail;
       }
+      free(filename);
     }
     if(pv.file_id >= 0) {
       H5Fclose(pv.file_id);
@@ -888,7 +919,7 @@
     for (ifield = 0; ifield < nfields; ifield++) {
         pv.return_values[ifield] = NULL;
         pv.npy_types[ifield] = -999;
-        pv.field_names[ifield] = PyString_AsString(PyList_GetItem(field_list, ifield));
+        pv.field_names[ifield] = obj_to_char(PyList_GetItem(field_list, ifield));
     }
 
     /* Now we know how many particles we want. */
@@ -898,13 +929,14 @@
          in a stride, without checking particle positions,
          if it's fully-enclosed. */
       temp = PyList_GetItem(filename_list, ig);
-      filename = PyString_AsString(temp);
+      filename = obj_to_char(temp);
       temp = PyList_GetItem(grid_ids, ig);
       id = PYINTCONV_AS(temp);
       //fprintf(stderr, "Reading from grid %d\n", id);
       if(run_validators(&pv, filename, id, 1, packed, ig) < 0) {
         goto _fail;
       }
+      free(filename);
     }
     if(pv.file_id >= 0) {H5Fclose(pv.file_id); pv.file_id = -1;}
 
@@ -918,6 +950,9 @@
     /* Now we do some finalization */
     free(pv.mask);
+    for (ifield = 0; ifield < nfields; ifield++) {
+        free(pv.field_names[ifield]);
+    }
     free(pv.field_names);
     free(pv.return_values); /* Has to happen after packing our return value */
     free(pv.npy_types);
     for (i = 0; i<3; i++) {
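
The char_to_obj/obj_to_char helpers above exist because Python 3 splits the
single Python 2 str type into distinct text (str) and binary (bytes) types, so
a C char* no longer corresponds to one object type.  The same conversion at the
Python level, as a rough sketch (the field name is made up):

    import sys

    name = "Density"                # hypothetical dataset/field name
    if sys.version_info[0] >= 3:
        # Text must be encoded to bytes before it can back a char* buffer,
        # which is what PyUnicode_AsASCIIString + PyBytes_AsString do in C.
        raw = name.encode("ascii")
    else:
        # On Python 2, str already is a byte string; no conversion is needed.
        raw = name
    print(len(raw))                 # -> 7 on both interpreters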

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -30,7 +30,7 @@
     axis_names
 from .volume_rendering.api import off_axis_projection
 from yt.data_objects.image_array import ImageArray
-import _MPL
+from . import _MPL
 import numpy as np
 import weakref
 

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -45,7 +45,7 @@
     sec_per_kyr, sec_per_year, \
     sec_per_day, sec_per_hr
 
-import _MPL
+from . import _MPL
 
 callback_registry = {}
 

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -29,7 +29,7 @@
 
 from yt.funcs import *
 from _mpl_imports import *
-import _MPL
+from . import _MPL
 from .plot_modifications import callback_registry
 from yt.utilities.definitions import \
     x_dict, \

diff -r 79bca3a81b7e51cafd74973985ef98dacb8e85dc -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -29,6 +29,7 @@
 import matplotlib
 import cStringIO
 import types
+import sys
 import __builtin__
 
 from matplotlib.mathtext import MathTextParser
@@ -75,7 +76,10 @@
     if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
         from matplotlib.pyparsing import ParseFatalException
     else:
-        from matplotlib.pyparsing_py2 import ParseFatalException
+        if sys.version_info[0] == 3:
+            from matplotlib.pyparsing_py3 import ParseFatalException
+        else:
+            from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
 


https://bitbucket.org/yt_analysis/yt/commits/05efc6b7e935/
Changeset:   05efc6b7e935
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 19:34:10
Summary:     Merging, including relicensing.
Affected #:  473 files

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,17 @@
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
 sam.skillman at gmail.com = samskillman at gmail.com
 casey at thestarkeffect.com = caseywstark at gmail.com
 chiffre = chiffre at posteo.de
 Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,12 +4,15 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -30,6 +33,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5156,3 +5156,4 @@
 0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
+079e456c38a87676472a458210077e2be325dc85 last_gplv3

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 CITATION
--- /dev/null
+++ b/CITATION
@@ -0,0 +1,31 @@
+To cite yt in publications, please use:
+
+Turk, M. J., Smith, B. D., Oishi, J. S., et al. 2011, ApJS, 192, 9
+
+In the body of the text, please add a footnote to the yt webpage:
+
+http://yt-project.org/
+
+For LaTeX and BibTeX users:
+
+\bibitem[Turk et al.(2011)]{2011ApJS..192....9T} Turk, M.~J., Smith, B.~D.,
+Oishi, J.~S., et al.\ 2011, \apjs, 192, 9
+
+@ARTICLE{2011ApJS..192....9T,
+   author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
+{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
+    title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
+  journal = {\apjs},
+archivePrefix = "arXiv",
+   eprint = {1011.3514},
+ primaryClass = "astro-ph.IM",
+ keywords = {cosmology: theory, methods: data analysis, methods: numerical},
+     year = 2011,
+    month = jan,
+   volume = 192,
+      eid = {9},
+    pages = {9},
+      doi = {10.1088/0067-0049/192/1/9},
+   adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 COPYING.txt
--- /dev/null
+++ b/COPYING.txt
@@ -0,0 +1,81 @@
+===============================
+ The yt project licensing terms
+===============================
+
+yt is licensed under the terms of the Modified BSD License (also known as New
+or Revised BSD), as follows:
+
+Copyright (c) 2013-, yt Development Team
+Copyright (c) 2006-2013, Matthew Turk <matthewturk at gmail.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the yt Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the yt Development Team
+-----------------------------
+
+Matthew Turk began yt in 2006 and remains the project lead.  Over time yt has
+grown to include contributions from a large number of individuals from many
+diverse institutions, scientific, and technical backgrounds.
+
+Until the fall of 2013, yt was licensed under the GPLv3.  However, with consent
+from all developers and on a public mailing list, yt has been relicensed under
+the BSD 3-clause under a shared copyright model.  For more information, see:
+http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2013-July/003239.html
+All versions of yt prior to this licensing change are available under the
+GPLv3; all subsequent versions are available under the BSD 3-clause license.
+
+The yt Development Team is the set of all contributors to the yt project.  This
+includes all of the yt subprojects.
+
+The core team that coordinates development on BitBucket can be found here:
+http://bitbucket.org/yt_analysis/ 
+
+
+Our Copyright Policy
+--------------------
+
+yt uses a shared copyright model. Each contributor maintains copyright
+over their contributions to yt. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the yt
+source code, in its entirety is not the copyright of any single person or
+institution.  Instead, it is the collective copyright of the entire yt
+Development Team.  If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the yt repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,51 +1,55 @@
-YT is a group effort.
+yt is a group effort.
 
-Contributors:                   Tom Abel (tabel at stanford.edu)
-				David Collins (dcollins at physics.ucsd.edu)
-				Brian Crosby (crosby.bd at gmail.com)
-				Andrew Cunningham (ajcunn at gmail.com)
-				Nathan Goldbaum (goldbaum at ucolick.org)
-				Markus Haider (markus.haider at uibk.ac.at)
-				Cameron Hummels (chummels at gmail.com)
-				Christian Karch (chiffre at posteo.de)
-				Ji-hoon Kim (me at jihoonkim.org)
-				Steffen Klemer (sklemer at phys.uni-goettingen.de)
-				Kacper Kowalik (xarthisius.kk at gmail.com)
-				Michael Kuhlen (mqk at astro.berkeley.edu)
-				Eve Lee (elee at cita.utoronto.ca)
-				Yuan Li (yuan at astro.columbia.edu)
-				Chris Malone (chris.m.malone at gmail.com)
-				Josh Maloney (joshua.moloney at colorado.edu)
-				Chris Moody (cemoody at ucsc.edu)
-				Andrew Myers (atmyers at astro.berkeley.edu)
-				Jeff Oishi (jsoishi at gmail.com)
-				Jean-Claude Passy (jcpassy at uvic.ca)
-				Mark Richardson (Mark.L.Richardson at asu.edu)
-				Thomas Robitaille (thomas.robitaille at gmail.com)
-				Anna Rosen (rosen at ucolick.org)
-				Anthony Scopatz (scopatz at gmail.com)
-				Devin Silvia (devin.silvia at colorado.edu)
-				Sam Skillman (samskillman at gmail.com)
-				Stephen Skory (s at skory.us)
-				Britton Smith (brittonsmith at gmail.com)
-				Geoffrey So (gsiisg at gmail.com)
-				Casey Stark (caseywstark at gmail.com)
-				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-				Stephanie Tonnesen (stonnes at gmail.com)
-				Matthew Turk (matthewturk at gmail.com)
-				Rich Wagner (rwagner at physics.ucsd.edu)
-				John Wise (jwise at physics.gatech.edu)
-				John ZuHone (jzuhone at gmail.com)
+Contributors:   
+                Tom Abel (tabel at stanford.edu)
+                David Collins (dcollins at physics.ucsd.edu)
+                Brian Crosby (crosby.bd at gmail.com)
+                Andrew Cunningham (ajcunn at gmail.com)
+                Hilary Egan (hilaryye at gmail.com)
+                John Forbes (jforbes at ucolick.org)
+                Nathan Goldbaum (goldbaum at ucolick.org)
+                Markus Haider (markus.haider at uibk.ac.at)
+                Cameron Hummels (chummels at gmail.com)
+                Christian Karch (chiffre at posteo.de)
+                Ji-hoon Kim (me at jihoonkim.org)
+                Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Kacper Kowalik (xarthisius.kk at gmail.com)
+                Michael Kuhlen (mqk at astro.berkeley.edu)
+                Eve Lee (elee at cita.utoronto.ca)
+                Sam Leitner (sam.leitner at gmail.com)
+                Yuan Li (yuan at astro.columbia.edu)
+                Chris Malone (chris.m.malone at gmail.com)
+                Josh Maloney (joshua.moloney at colorado.edu)
+                Chris Moody (cemoody at ucsc.edu)
+                Andrew Myers (atmyers at astro.berkeley.edu)
+                Jill Naiman (jnaiman at ucolick.org)
+                Kaylea Nelson (kaylea.nelson at yale.edu)
+                Jeff Oishi (jsoishi at gmail.com)
+                Jean-Claude Passy (jcpassy at uvic.ca)
+                Mark Richardson (Mark.L.Richardson at asu.edu)
+                Thomas Robitaille (thomas.robitaille at gmail.com)
+                Anna Rosen (rosen at ucolick.org)
+                Douglas Rudd (drudd at uchicago.edu)
+                Anthony Scopatz (scopatz at gmail.com)
+                Noel Scudder (noel.scudder at stonybrook.edu)
+                Devin Silvia (devin.silvia at colorado.edu)
+                Sam Skillman (samskillman at gmail.com)
+                Stephen Skory (s at skory.us)
+                Britton Smith (brittonsmith at gmail.com)
+                Geoffrey So (gsiisg at gmail.com)
+                Casey Stark (caseywstark at gmail.com)
+                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Stephanie Tonnesen (stonnes at gmail.com)
+                Matthew Turk (matthewturk at gmail.com)
+                Rich Wagner (rwagner at physics.ucsd.edu)
+                Andrew Wetzel (andrew.wetzel at yale.edu)
+                John Wise (jwise at physics.gatech.edu)
+                John ZuHone (jzuhone at gmail.com)
 
-We also include the Delaunay Triangulation module written by Robert Kern of
-Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
+Several items included in the yt/extern directory were written by other
+individuals and may bear their own license, including the progressbar module by
 Nilton Volpato.  The PasteBin interface code (as well as the PasteBin itself)
-was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
-developed by Oliver Hahn.  yt also includes a slightly-modified version of
-libconfig (http://www.hyperrealm.com/libconfig/) and an unmodified version of
-several routines from HEALpix (http://healpix.jpl.nasa.gov/).
-
-Large parts of development of yt were guided by discussions with Tom Abel, Ralf
-Kaehler, Mike Norman and Greg Bryan.
+was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
+developed by Oliver Hahn.
 
 Thanks to everyone for all your contributions!

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 FUNDING
--- a/FUNDING
+++ /dev/null
@@ -1,35 +0,0 @@
-The development of yt has benefited from funding from many different sources
-and institutions.  Here is an incomplete list of these sources:
-
-  * NSF grant OCI-1048505
-  * NSF grant AST-0239709 
-  * NSF grant AST-0707474
-  * NSF grant AST-0708960
-  * NSF grant AST-0808184
-  * NSF grant AST-0807215 
-  * NSF grant AST-0807312
-  * NSF grant AST-0807075
-  * NSF grant AST-0908199
-  * NSF grant AST-0908553 
-  * NASA grant ATFP NNX08-AH26G
-  * NASA grant ATFP NNX09-AD80G
-  * NASA grant ATFP NNZ07-AG77G
-  * DOE Computational Science Graduate Fellowship under grant number DE-FG02-97ER25308
-
-Additionally, development of yt has benefited from the hospitality and hosting
-of the following institutions:
-
-  * Columbia University
-  * Harvard-Smithsonian Center for Astrophysics
-  * Institute for Advanced Study
-  * Kavli Institute for Particle Astrophysics and Cosmology
-  * Kavli Institute for Theoretical Physics
-  * Los Alamos National Lab
-  * Michigan State University
-  * Princeton University
-  * Stanford University
-  * University of California High-Performance Astro-Computing Center
-  * University of California at Berkeley
-  * University of California at San Diego
-  * University of California at Santa Cruz
-  * University of Colorado at Boulder

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 LICENSE.txt
--- a/LICENSE.txt
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 doc/get_yt.sh
--- /dev/null
+++ b/doc/get_yt.sh
@@ -0,0 +1,358 @@
+#
+# Hi there!  Welcome to the yt installation script.
+#
+# This script is designed to create a fully isolated Python installation
+# with the dependencies you need to run yt.
+#
+# This script is based on Conda, a distribution mechanism from Continuum
+# Analytics.  The process is as follows:
+#
+#  1. Download the appropriate Conda installation package
+#  2. Install Conda into the specified directory
+#  3. Install yt-specific dependencies
+#  4. Install yt
+#
+# There are a few options listed below, but by default, this will install
+# everything.  At the end, it will tell you what to do to use yt.
+#
+# By default this will install yt from source.
+#
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
+#
+DEST_SUFFIX="yt-conda"
+DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
+BRANCH="yt" # This is the branch to which we will forcibly update.
+INST_YT_SOURCE=1 # Do we do a source install of yt?
+
+##################################################################
+#                                                                #
+# You will likely not have to modify anything below this region. #
+#                                                                #
+##################################################################
+
+LOG_FILE="`pwd`/yt_install.log"
+
+# Here is the idiom for redirecting to the log file:
+# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
+MINICONDA_VERSION="1.9.1"
+YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
+
+function do_exit
+{
+    echo "********************************************"
+    echo "        FAILURE REPORT:"
+    echo "********************************************"
+    echo
+    tail -n 10 ${LOG_FILE}
+    echo
+    echo "********************************************"
+    echo "********************************************"
+    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
+    exit 1
+}
+
+function log_cmd
+{
+    echo "EXECUTING:" >> ${LOG_FILE}
+    echo "  $*" >> ${LOG_FILE}
+    ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytproject
+{
+    [ -e $1 ] && return
+    echo "Downloading $1 from yt-project.org"
+    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytrecipe {
+    RDIR=${DEST_DIR}/src/yt-recipes/$1
+    mkdir -p ${RDIR}
+    pushd ${RDIR}
+    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
+    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
+    NEW_PKG=`conda build --output ${RDIR}`
+    log_cmd conda build --no-binstar-upload ${RDIR}
+    log_cmd conda install ${NEW_PKG}
+    popd
+}
+
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "Hi there!  This is the yt installation script.  We're going to download"
+echo "some stuff and install it to create a self-contained, isolated"
+echo "environment for yt to run within."
+echo
+echo "This will install Miniconda from Continuum Analytics, the necessary"
+echo "packages to run yt, and create a self-contained environment for you to"
+echo "use yt.  Additionally, Conda itself provides the ability to install"
+echo "many other packages that can be used for other purposes."
+echo
+MYOS=`uname -s`       # A guess at the OS
+if [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+  echo "Looks like you're running on Mac OSX."
+  echo
+  echo "NOTE: you must have the Xcode command line tools installed."
+  echo
+  echo "The instructions for obtaining these tools varies according"
+  echo "to your exact OS version.  On older versions of OS X, you"
+  echo "must register for an account on the apple developer tools"
+  echo "website: https://developer.apple.com/downloads to obtain the"
+  echo "download link."
+  echo
+  echo "We have gathered some additional instructions for each"
+  echo "version of OS X below. If you have trouble installing yt"
+  echo "after following these instructions, don't hesitate to contact"
+  echo "the yt user's e-mail list."
+  echo
+  echo "You can see which version of OSX you are running by clicking"
+  echo "'About This Mac' in the apple menu on the left hand side of"
+  echo "menu bar.  We're assuming that you've installed all operating"
+  echo "system updates; if you have an older version, we suggest"
+  echo "running software update and installing all available updates."
+  echo
+  echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+  echo "Apple developer tools website."
+  echo
+  echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+  echo "developer tools website.  You can either download the"
+  echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+  echo "Software Update to update to XCode 3.2.6 or"
+  echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+  echo "bundle (4.1 GB)."
+  echo
+  echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+  echo "(search for Xcode)."
+  echo "Alternatively, download the Xcode command line tools from"
+  echo "the Apple developer tools website."
+  echo
+  echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+  echo "(search for Xcode)."
+  echo "Additionally, you will have to manually install the Xcode"
+  echo "command line tools, see:"
+  echo "http://stackoverflow.com/questions/9353444"
+  echo "Alternatively, download the Xcode command line tools from"
+  echo "the Apple developer tools website."
+  echo
+  echo "NOTE: It's possible that the installation will fail, if so,"
+  echo "please set the following environment variables, remove any"
+  echo "broken installation tree, and re-run this script verbatim."
+  echo
+  echo "$ export CC=gcc"
+  echo "$ export CXX=g++"
+  echo
+  MINICONDA_OS="MacOSX-x86_64"
+fi
+if [ "${MYOS##Linux}" != "${MYOS}" ]
+then
+  echo "Looks like you're on Linux."
+  echo
+  echo "Please make sure you have the developer tools for your OS installed."
+  echo
+  if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+  then
+    echo "Looks like you're on an OpenSUSE-compatible machine."
+    echo
+    echo "You need to have these packages installed:"
+    echo
+    echo "  * devel_C_C++"
+    echo "  * libopenssl-devel"
+    echo "  * libuuid-devel"
+    echo "  * zip"
+    echo "  * gcc-c++"
+    echo "  * chrpath"
+    echo
+    echo "You can accomplish this by executing:"
+    echo
+    echo "$ sudo zypper install -t pattern devel_C_C++"
+    echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+    echo "$ sudo zypper install chrpath"
+  fi
+  if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+  then
+    echo "Looks like you're on an Ubuntu-compatible machine."
+    echo
+    echo "You need to have these packages installed:"
+    echo
+    echo "  * libssl-dev"
+    echo "  * build-essential"
+    echo "  * libncurses5"
+    echo "  * libncurses5-dev"
+    echo "  * zip"
+    echo "  * uuid-dev"
+    echo "  * chrpath"
+    echo
+    echo "You can accomplish this by executing:"
+    echo
+    echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+    echo
+  fi
+  echo
+  echo "If you are running on a supercomputer or other module-enabled"
+  echo "system, please make sure that the GNU module has been loaded."
+  echo
+  if [ "${MYOS##x86_64}" != "${MYOS}" ]
+  then
+    MINICONDA_OS="Linux-x86_64"
+  elif [ "${MYOS##i386}" != "${MYOS}" ]
+  then
+    MINICONDA_OS="Linux-x86"
+  else
+    echo "Not sure which type of Linux you're on.  Going with x86_64."
+    MINICONDA_OS="Linux-x86_64"
+  fi
+fi
+echo
+echo "If you'd rather not continue, hit Ctrl-C."
+echo
+echo "========================================================================"
+echo
+read -p "[hit enter] "
+echo
+echo "Awesome!  Here we go."
+echo
+
+MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
+
+if type -P wget &>/dev/null
+then
+    echo "Using wget"
+    export GETFILE="wget -nv"
+else
+    echo "Using curl"
+    export GETFILE="curl -sSO"
+fi
+
+echo
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
+echo
+
+${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
+
+echo "Installing the Miniconda python environment."
+
+log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
+
+# I don't think we need OR want this anymore:
+#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+
+# This we *do* need.
+export PATH=${DEST_DIR}/bin:$PATH
+
+echo "Installing the necessary packages for yt."
+echo "This may take a while, but don't worry.  yt loves you."
+
+declare -a YT_DEPS
+YT_DEPS+=('python')
+YT_DEPS+=('distribute')
+YT_DEPS+=('libpng')
+YT_DEPS+=('freetype')
+YT_DEPS+=('hdf5')
+YT_DEPS+=('numpy')
+YT_DEPS+=('pygments')
+YT_DEPS+=('jinja2')
+YT_DEPS+=('tornado')
+YT_DEPS+=('pyzmq')
+YT_DEPS+=('ipython')
+YT_DEPS+=('sphinx')
+YT_DEPS+=('h5py')
+YT_DEPS+=('matplotlib')
+YT_DEPS+=('cython')
+
+# Here is our dependency list for yt
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
+log_cmd conda update --yes conda
+
+echo "Current dependencies: ${YT_DEPS[@]}"
+log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
+log_cmd conda install --yes ${YT_DEPS[@]}
+
+echo "Installing mercurial."
+get_ytrecipe mercurial
+
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+  echo "Installing yt as a package."
+  get_ytrecipe yt
+else
+  # We do a source install.
+  YT_DIR="${DEST_DIR}/src/yt-hg"
+  export PNG_DIR=${DEST_DIR}
+  export FTYPE_DIR=${DEST_DIR}
+  export HDF5_DIR=${DEST_DIR}
+  log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+  pushd ${YT_DIR}
+  echo $DEST_DIR > hdf5.cfg
+  log_cmd python setup.py develop
+  popd
+  log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 
+  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
+  log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
+  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+fi
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "yt and the Conda system are now installed in $DEST_DIR ."
+echo
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+  echo "You must now modify your PATH variable by prepending:"
+  echo 
+  echo "   $DEST_DIR/bin"
+  echo
+  echo "For example, if you use bash, place something like this at the end"
+  echo "of your ~/.bashrc :"
+  echo
+  echo "   export PATH=$DEST_DIR/bin:$PATH"
+else
+  echo "To run from this new installation, use the activate script for this "
+  echo "environment."
+  echo
+  echo "    $ source $DEST_DIR/bin/activate"
+  echo
+  echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
+  echo "LD_LIBRARY_PATH to match your new yt install.  If you use csh, just"
+  echo "append .csh to the above."
+fi
+echo
+echo "To get started with yt, check out the orientation:"
+echo
+echo "    http://yt-project.org/doc/orientation/"
+echo
+echo "or just activate your environment and run 'yt serve' to bring up the"
+echo "yt GUI."
+echo
+echo "For support, see the website and join the mailing list:"
+echo
+echo "    http://yt-project.org/"
+echo "    http://yt-project.org/data/      (Sample data)"
+echo "    http://yt-project.org/doc/       (Docs)"
+echo
+echo "    http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
+echo
+echo "========================================================================"
+echo
+echo "Oh, look at me, still talking when there's science to do!"
+echo "Good luck, and email the user list if you run into any problems."

diff -r 68f53a855cc8c0df29360dfb59b9b696cc48bbc9 -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ b/doc/how_to_develop_yt.txt
@@ -25,7 +25,7 @@
 Licenses
 --------
 
-All code in yt should be under the GPL-3 (preferred) or a compatible license.
+All code in yt should be under the BSD 3-clause license.
 
 How To Get The Source Code
 --------------------------

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/3939268e7ee2/
Changeset:   3939268e7ee2
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 19:56:45
Summary:     Fixing a bunch of minor parsing errors for 2to3.
Affected #:  12 files

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -635,7 +635,7 @@
                 if vals.count("'") == 0 and vals.count("\"") == 0:
                     try:
                         t = [float(a.replace('D','e').replace('d','e'))
-                                   for a in in vals.split()])
+                                   for a in vals.split()]
                     except ValueError:
                         print "Failed on line", line
                 else:

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -389,9 +389,9 @@
         # axes, and resize the figure to span the entire figure
         if plot.colorbar != None and \
                isinstance(plot, VMPlot):
-            print "WARNING: Image (slices, projections, etc.) plots must not"\
-                  "have a colorbar."
-            print "Removing it."
+            print ("WARNING: Image (slices, projections, etc.) plots must not")
+            print ("have a colorbar.")
+            print ("Removing it.")
             plot.colorbar = None
         if self.canvas is None:
             self.canvas = pyx.canvas.canvas()

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/image_panner/pan_and_scan_widget.py
--- a/yt/visualization/image_panner/pan_and_scan_widget.py
+++ b/yt/visualization/image_panner/pan_and_scan_widget.py
@@ -33,13 +33,13 @@
      ImageInspectorOverlay
 
 if not hasattr(DataRange2D, "_subranges_updated"):
-    print "You'll need to add _subranges updated to enthought/chaco/data_range_2d.py"
-    print 'Add this at the correct indentation level:'
-    print
-    print '    @on_trait_change("_xrange.updated,_yrange.updated")'
-    print '    def _subranges_updated(self):'
-    print '        self.updated = True'
-    print
+    print ("You'll need to add _subranges updated to enthought/chaco/data_range_2d.py")
+    print ('Add this at the correct indentation level:')
+    print ()
+    print ('    @on_trait_change("_xrange.updated,_yrange.updated")')
+    print ('    def _subranges_updated(self):')
+    print ('        self.updated = True')
+    print ()
     raise RuntimeError
 
 # We like the algae colormap; for now we re-implement it here.

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -288,7 +288,7 @@
 
 def map_to_colors(buff, cmap_name):
     if cmap_name not in cmd.color_map_luts:
-        print "Your color map was not found in the extracted colormap file."
+        print ("Your color map was not found in the extracted colormap file.")
         raise KeyError(cmap_name)
     lut = cmd.color_map_luts[cmap_name]
     x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -1686,7 +1686,7 @@
         Returns:
         width: (float) The final width of the plot collection
         """
-        print 'Enter Zoom Factor, 0 to exit, -1 to reset to width=1.0'
+        print ('Enter Zoom Factor, 0 to exit, -1 to reset to width=1.0')
         zfactor = 1.0
         while(True):
             new_zoom = raw_input('zoom:')
@@ -1704,7 +1704,7 @@
                 self.set_width(1.0,'1')
             else:
                 self.set_width(self.plots[0].__dict__['width']/zfactor,'1')
-        print 'Returning final width of %e' % self.plots[0].width
+        print ('Returning final width of %e' % self.plots[0].width)
         return self.plots[0].width
 
 class PlotCollectionIPython(PlotCollection):

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -978,7 +978,7 @@
         unit_conversion = plot.pf[plot.im["Unit"]]
         aspect = (plot.xlim[1]-plot.xlim[0])/(plot.ylim[1]-plot.ylim[0])
 
-        print "aspect ratio = ", aspect
+        print ("aspect ratio = %s" % aspect)
 
         # if coords is False, label axes relative to the center of the
         # display. if coords is True, label axes with the absolute

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -162,7 +162,7 @@
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
         assert valid, msg
-    except AssertionError, e:
+    except AssertionError as e:
         raise YTInvalidWidthError(e)
 
 def validate_iterable_width(width, unit=None):
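
Most of the edits in this changeset follow two patterns: print statements
become print() calls, and the Python-2-only "except SomeError, e:" spelling
becomes "except SomeError as e:".  Both forms parse on Python 2.6+ as well as
Python 3.  A minimal sketch (not repository code), loosely modeled on the
width validation above:

    from __future__ import print_function  # makes print() a function on Python 2

    def validate_width(width):
        try:
            assert width > 0, "width (%s) is invalid" % width
        except AssertionError as e:   # "except AssertionError, e:" fails on Python 3
            print("Bad width:", e)
            raise
        return float(width)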

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/volume_rendering/CUDARayCast.py
--- a/yt/visualization/volume_rendering/CUDARayCast.py
+++ b/yt/visualization/volume_rendering/CUDARayCast.py
@@ -34,14 +34,14 @@
     cuda.init()
     assert (cuda.Device.count() >= 1)
 
-    print "Extracting hierarchy."
+    print ("Extracting hierarchy.")
     opf = load("/u/ki/mturk/ki05/MSM96-SIM3-restart-J64/DataDump0081.dir/DataDump0081")
     pf = hs.ExtractedParameterFile(opf, 20)
 
     cpu = {}
     gpu = {}
 
-    print "Reading data."
+    print ("Reading data.")
     #fn = "DataDump0081_partitioned.h5"
     fn = "RedshiftOutput0005_partitioned.h5"
     f = h5py.File("/u/ki/mturk/ki05/%s" % fn)
@@ -50,7 +50,7 @@
     cpu['left_edge'] = f["/PGrids/LeftEdges"][:].astype("float32")
     cpu['right_edge'] = f["/PGrids/RightEdges"][:].astype("float32")
 
-    print "Constructing transfer function."
+    print ("Constructing transfer function.")
     if "Data" in fn:
         mh = np.log10(1.67e-24)
         tf = ColorTransferFunction((7.5+mh, 14.0+mh))
@@ -79,7 +79,7 @@
 
     c = np.array([0.47284317, 0.48062515, 0.58282089], dtype='float32')
 
-    print "Getting cutting plane."
+    print ("Getting cutting plane.")
     cp = pf.h.cutting(cpu['v_dir'], c)
 
     W = 2000.0/pf['au']
@@ -99,21 +99,21 @@
     cpu['image_b'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
     cpu['image_a'] = np.zeros((Nvec, Nvec), dtype='float32').ravel()
 
-    print "Generating module"
+    print ("Generating module")
     source = open("yt/extensions/volume_rendering/_cuda_caster.cu").read()
     mod = compiler.SourceModule(source)
     func = mod.get_function("ray_cast")
 
     for n, a in cpu.items():
         ss = a.size * a.dtype.itemsize
-        print "Allocating %0.3e megabytes for %s" % (ss/(1024*1024.), n)
+        print ("Allocating %0.3e megabytes for %s" % (ss/(1024*1024.), n))
         gpu[n] = cuda.to_device(a.ravel('F'))
         #pycuda.autoinit.context.synchronize()
 
     BLOCK_SIZE = 8
     grid_size = Nvec / BLOCK_SIZE
 
-    print "Running ray_cast function."
+    print ("Running ray_cast function.")
     t1 = time.time()
     ret = func(gpu['ngrids'],
                gpu['grid_data'],
@@ -134,7 +134,7 @@
          block=(BLOCK_SIZE,BLOCK_SIZE,1),
          grid=(grid_size, grid_size), time_kernel=True)
     t2 = time.time()
-    print "BACK: %0.3e" % (t2-t1)
+    print ("BACK: %0.3e" % (t2-t1))
 
     mi, ma = 1e300, -1e300
     image = []
@@ -145,8 +145,8 @@
         cpu[ii] = cuda.from_device(gpu[ii], sh, dtype).reshape((Nvec,Nvec))
         mi, ma = min(cpu[ii].min(),mi), max(cpu[ii].max(), ma)
         image.append(cpu[ii])
-        print "Min/max of %s %0.3e %0.3e" % (
-                im, image[-1].min(), image[-1].max())
+        print ("Min/max of %s %0.3e %0.3e" % (
+                im, image[-1].min(), image[-1].max()))
         pylab.clf()
         pylab.imshow(image[-1], interpolation='nearest')
         pylab.savefig("/u/ki/mturk/public_html/vr6/%s.png" % (ii))

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1161,9 +1161,9 @@
                  sub_samples = 5, log_fields = None, volume = None,
                  pf = None, use_kd=True, no_ghost=False, use_light=False,
                  inner_radius = 10):
-        print "Because of recent relicensing, we currently cannot provide"
-        print "HEALpix functionality.  Please visit yt-users for more"
-        print "information."
+        print ("Because of recent relicensing, we currently cannot provide")
+        print ("HEALpix functionality.  Please visit yt-users for more")
+        print ("information.")
         raise NotImplementedError
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/volume_rendering/camera_path.py
--- a/yt/visualization/volume_rendering/camera_path.py
+++ b/yt/visualization/volume_rendering/camera_path.py
@@ -80,7 +80,7 @@
             Nz = 1
             ndims = 2
         if Nx*Ny*Nz != Nx**ndims:
-            print "Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz)
+            print("Need Nx (%d) == Ny (%d) == Nz (%d)" % (Nx, Ny, Nz))
             sys.exit()
         self.nframes = Nx
         self.pos = np.zeros((Nx,3))

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/volume_rendering/create_spline.py
--- a/yt/visualization/volume_rendering/create_spline.py
+++ b/yt/visualization/volume_rendering/create_spline.py
@@ -53,7 +53,7 @@
         elif ind[i] == im1[i]:
             u = 0
         else:
-            print "Bad index during interpolation?"
+            print ("Bad index during interpolation?")
             sys.exit()
         b0 = -tension * u + 2*tension * u**2 - tension * u**3
         b1 = 1.0 + (tension-3) * u**2 + (2-tension) * u**3

diff -r 05efc6b7e9359d7c5c923b0d34a806be02f54172 -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 yt/visualization/volume_rendering/multi_texture.py
--- a/yt/visualization/volume_rendering/multi_texture.py
+++ b/yt/visualization/volume_rendering/multi_texture.py
@@ -148,7 +148,7 @@
         # clean up
         gl.glFlush()
         t2 = time.time()
-        print "Rendering: %0.3e" % (t2-t1)
+        print ("Rendering: %0.3e" % (t2-t1))
         self._colormap.Disable()
         self._program1.Disable()
         #
@@ -292,7 +292,7 @@
         origin = g.LeftEdge.astype("float32").tolist()
         dd = (g.my_data[0].astype("float32") - mi)/(ma - mi)
         dd = np.clip(dd, 0.0, 1.0)
-        print ss
+        print (ss)
         texes.append(vv.Aarray(dd, origin = origin, sampling = ss))
 
     mtex = MultipleTexture(ax, texes, global_size=vp.pf.domain_dimensions)


https://bitbucket.org/yt_analysis/yt/commits/3f58626a7a8b/
Changeset:   3f58626a7a8b
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 20:05:06
Summary:     Updating a few more import problems.
Affected #:  3 files

diff -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -6,9 +6,6 @@
 from glob import glob
 from collections import \
     defaultdict
-from string import \
-    strip, \
-    rstrip
 from stat import \
     ST_CTIME
 

diff -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 yt/utilities/spatial/__init__.py
--- a/yt/utilities/spatial/__init__.py
+++ b/yt/utilities/spatial/__init__.py
@@ -23,12 +23,12 @@
 """
 
 from kdtree import *
-from ckdtree import *
+from .ckdtree import *
 #from qhull import *
 
 __all__ = filter(lambda s: not s.startswith('_'), dir())
 __all__ += ['distance']
 
-import distance
+from . import distance
 from numpy.testing import Tester
 test = Tester().test

diff -r 3939268e7ee2a1b5ff452c7c3a68bb78dc21a0e4 -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 yt/utilities/spatial/distance.py
--- a/yt/utilities/spatial/distance.py
+++ b/yt/utilities/spatial/distance.py
@@ -114,7 +114,7 @@
 import numpy as np
 from numpy.linalg import norm
 
-import _distance_wrap
+from . import _distance_wrap
 
 
 def _copy_array_if_base_present(a):
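
These changes (and the much larger batch in the next changeset) replace
Python 2's implicit relative imports with the explicit dotted form, which is
the only relative-import syntax Python 3 accepts inside a package.  A
hypothetical two-module package, mirroring the api.py re-export modules above
(names are illustrative only):

    # Layout:
    #   mypkg/__init__.py
    #   mypkg/distance.py     (defines pdist)
    #   mypkg/api.py          (re-exports names for users of the package)
    #
    # Contents of mypkg/api.py:
    from .distance import pdist    # explicit relative import, valid on 2.5+ and 3.x
    from . import distance         # bind the sibling module itself
    # "from distance import pdist" (no leading dot) relies on Python 2's
    # implicit relative imports and raises ImportError under Python 3.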


https://bitbucket.org/yt_analysis/yt/commits/ba491935468f/
Changeset:   ba491935468f
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 20:21:29
Summary:     Many more relative import fixes.
Affected #:  12 files

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from halo_objects import \
+from .halo_objects import \
     Halo, \
     HOPHalo, \
     parallelHOPHalo, \

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/analysis_modules/sunyaev_zeldovich/api.py
--- a/yt/analysis_modules/sunyaev_zeldovich/api.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -9,4 +9,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from projection import SZProjection
+from .projection import SZProjection

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -13,47 +13,47 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from grid_patch import \
+from .grid_patch import \
     AMRGridPatch
 
-from hierarchy import \
+from .hierarchy import \
     AMRHierarchy
 
-from static_output import \
+from .static_output import \
     StaticOutput
 
-from object_finding_mixin import \
+from .object_finding_mixin import \
     ObjectFindingMixin
 
-from particle_io import \
+from .particle_io import \
     ParticleIOHandler, \
     particle_handler_registry
 
-from profiles import \
+from .profiles import \
     EmptyProfileData, \
     BinnedProfile, \
     BinnedProfile1D, \
     BinnedProfile2D, \
     BinnedProfile3D
 
-from time_series import \
+from .time_series import \
     TimeSeriesData, \
     TimeSeriesDataObject
 
-from analyzer_objects import \
+from .analyzer_objects import \
     AnalysisTask, analysis_task
 
-from data_containers import \
+from .data_containers import \
     data_object_registry
 
-from derived_quantities import \
+from .derived_quantities import \
     quantity_info, \
     add_quantity
 
-from image_array import \
+from .image_array import \
     ImageArray
 
-from field_info_container import \
+from .field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
     NeedsGridType, \
@@ -72,5 +72,5 @@
     add_grad, \
     derived_field
 
-from particle_trajectories import \
+from .particle_trajectories import \
     ParticleTrajectoryCollection

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/utilities/amr_kdtree/api.py
--- a/yt/utilities/amr_kdtree/api.py
+++ b/yt/utilities/amr_kdtree/api.py
@@ -12,4 +12,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from amr_kdtree import AMRKDTree
+from .amr_kdtree import AMRKDTree

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/utilities/kdtree/api.py
--- a/yt/utilities/kdtree/api.py
+++ b/yt/utilities/kdtree/api.py
@@ -1,4 +1,4 @@
-from fKDpy import \
+from .fKDpy import \
     chainHOP_tags_dens, \
     create_tree, \
     fKD, \

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7803,6 +7803,6 @@
 # and append a "_r" (for reversal. consistent with MPL convention).
 # So for example, the reversal of "Waves" is "Waves_r"
 temp = {}
-for k,v in color_map_luts.iteritems():
+for k,v in color_map_luts.items():
     temp[k+"_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
 color_map_luts.update(temp)
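
dict.iteritems() is gone in Python 3, so the loop above switches to .items(),
which exists on both interpreters (returning a list on Python 2 and a view on
Python 3).  A self-contained sketch of the same reversal trick on a made-up
colormap table:

    color_map_luts = {"Waves": ([0.0, 0.5, 1.0], [0.0, 0.5, 1.0],
                                [0.0, 0.5, 1.0], [1.0, 1.0, 1.0])}
    reversed_maps = {}
    for name, channels in color_map_luts.items():   # not .iteritems()
        # Reverse each of the r, g, b, a channel arrays and register "<name>_r".
        reversed_maps[name + "_r"] = tuple(c[::-1] for c in channels)
    color_map_luts.update(reversed_maps)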

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -13,20 +13,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from color_maps import \
+from .color_maps import \
     add_cmap, \
     show_colormaps
 
-from plot_collection import \
+from .plot_collection import \
     PlotCollection, \
     PlotCollectionInteractive, \
     concatenate_pdfs
 
-from fixed_resolution import \
+from .fixed_resolution import \
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer
 
-from image_writer import \
+from .image_writer import \
     multi_image_composite, \
     write_bitmap, \
     write_image, \
@@ -38,22 +38,22 @@
     write_projection, \
     write_fits
 
-from plot_modifications import \
+from .plot_modifications import \
     PlotCallback, \
     callback_registry
 
-from easy_plots import \
+from .easy_plots import \
     plot_type_registry
 
-from streamlines import \
+from .streamlines import \
     Streamlines
 
-from plot_window import \
+from .plot_window import \
     SlicePlot, \
     OffAxisSlicePlot, \
     ProjectionPlot, \
     OffAxisProjectionPlot
 
-from base_plot_types import \
+from .base_plot_types import \
     get_multi_plot
 

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -15,7 +15,7 @@
 import matplotlib
 import matplotlib.colors as cc
 import matplotlib.cm as mcm
-import _colormap_data as _cm
+from . import _colormap_data as _cm
 
 def is_colormap(cmap):
     return isinstance(cmap,cc.Colormap)

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -19,7 +19,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
-from image_writer import write_fits
+from .image_writer import write_fits
 from yt.data_objects.image_array import ImageArray
 from . import _MPL
 import numpy as np

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/image_panner/api.py
--- a/yt/visualization/image_panner/api.py
+++ b/yt/visualization/image_panner/api.py
@@ -13,6 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from vm_panner import VariableMeshPanner, WindowedVariableMeshPanner, \
+from .vm_panner import VariableMeshPanner, WindowedVariableMeshPanner, \
                 MultipleWindowVariableMeshPanner, ImageSaver, \
                 PanningCeleritasStreamer, NonLocalDataImagePanner

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -18,7 +18,7 @@
 import numpy as np
 
 from yt.funcs import *
-from _mpl_imports import *
+from ._mpl_imports import *
 from . import _MPL
 from .plot_modifications import callback_registry
 from yt.utilities.definitions import \

diff -r 3f58626a7a8bacaf3337f9db72b0ed2042b48d89 -r ba491935468fb220de0a021adcf10a47391f98c6 yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -13,16 +13,16 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from transfer_functions import TransferFunction, ColorTransferFunction, \
+from .transfer_functions import TransferFunction, ColorTransferFunction, \
                              PlanckTransferFunction, \
                              MultiVariateTransferFunction, \
                              ProjectionTransferFunction
-from grid_partitioner import HomogenizedVolume, \
+from .grid_partitioner import HomogenizedVolume, \
                              export_partitioned_grids, \
                              import_partitioned_grids
-from image_handling import export_rgba, import_rgba, \
+from .image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
 
-from camera import Camera, PerspectiveCamera, StereoPairCamera, \
+from .camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera, \
     HEALpixCamera, InteractiveCamera, ProjectionCamera


https://bitbucket.org/yt_analysis/yt/commits/66b3d83ce6da/
Changeset:   66b3d83ce6da
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:08:51
Summary:     Updating six to 1.4.1.
Affected #:  1 file

diff -r ba491935468fb220de0a021adcf10a47391f98c6 -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 yt/extern/six.py
--- a/yt/extern/six.py
+++ b/yt/extern/six.py
@@ -2,32 +2,34 @@
 
 # Copyright (c) 2010-2013 Benjamin Peterson
 #
-# Permission is hereby granted, free of charge, to any person obtaining a copy of
-# this software and associated documentation files (the "Software"), to deal in
-# the Software without restriction, including without limitation the rights to
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-# the Software, and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
 #
 # The above copyright notice and this permission notice shall be included in all
 # copies or substantial portions of the Software.
 #
 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
 
 import operator
 import sys
 import types
 
 __author__ = "Benjamin Peterson <benjamin at python.org>"
-__version__ = "1.3.0"
+__version__ = "1.4.1"
 
 
-# True if we are running on Python 3.
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
 PY3 = sys.version_info[0] == 3
 
 if PY3:
@@ -61,7 +63,7 @@
         else:
             # 64-bit
             MAXSIZE = int((1 << 63) - 1)
-            del X
+        del X
 
 
 def _add_doc(func, doc):
@@ -136,13 +138,17 @@
 _moved_attributes = [
     MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
     MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
     MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
     MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
     MovedAttribute("reduce", "__builtin__", "functools"),
     MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserString", "UserString", "collections"),
     MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
 
     MovedModule("builtins", "__builtin__"),
     MovedModule("configparser", "ConfigParser"),
@@ -179,6 +185,9 @@
     MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
     MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                 "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
     MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
     MovedModule("winreg", "_winreg"),
 ]
@@ -186,7 +195,144 @@
     setattr(_MovedItems, attr.name, attr)
 del attr
 
-moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+
+
+
+class Module_six_moves_urllib_parse(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
+sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
+sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
+sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
+sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    parse = sys.modules[__name__ + ".moves.urllib_parse"]
+    error = sys.modules[__name__ + ".moves.urllib_error"]
+    request = sys.modules[__name__ + ".moves.urllib_request"]
+    response = sys.modules[__name__ + ".moves.urllib_response"]
+    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
+
+
+sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
 
 
 def add_move(move):
@@ -252,11 +398,16 @@
     def get_unbound_function(unbound):
         return unbound
 
+    create_bound_method = types.MethodType
+
     Iterator = object
 else:
     def get_unbound_function(unbound):
         return unbound.im_func
 
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
     class Iterator(object):
 
         def next(self):
@@ -297,12 +448,16 @@
         return s.encode("latin-1")
     def u(s):
         return s
+    unichr = chr
     if sys.version_info[1] <= 1:
         def int2byte(i):
             return bytes((i,))
     else:
         # This is about 2x faster than the implementation above on 3.2+
         int2byte = operator.methodcaller("to_bytes", 1, "big")
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
     import io
     StringIO = io.StringIO
     BytesIO = io.BytesIO
@@ -311,7 +466,14 @@
         return s
     def u(s):
         return unicode(s, "unicode_escape")
+    unichr = unichr
     int2byte = chr
+    def byte2int(bs):
+        return ord(bs[0])
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    def iterbytes(buf):
+        return (ord(byte) for byte in buf)
     import StringIO
     StringIO = BytesIO = StringIO.StringIO
 _add_doc(b, """Byte literal""")
@@ -399,6 +561,17 @@
 _add_doc(reraise, """Reraise an exception.""")
 
 
-def with_metaclass(meta, base=object):
+def with_metaclass(meta, *bases):
     """Create a base class with a metaclass."""
-    return meta("NewBase", (base,), {})
+    return meta("NewBase", bases, {})
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        for slots_var in orig_vars.get('__slots__', ()):
+            orig_vars.pop(slots_var)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
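
The vendored copy of six (shipped as yt.extern.six) moves from 1.3.0 to 1.4.1,
which mainly adds the six.moves.urllib namespace along with helpers such as
add_metaclass and the byte-handling shims.  The point of six.moves is that a
single import line resolves to the right standard-library location on either
interpreter.  A small, hypothetical usage sketch (not code from the
repository):

    from yt.extern import six
    from yt.extern.six.moves import StringIO  # io.StringIO on 3, StringIO.StringIO on 2

    buf = StringIO()
    buf.write(u"running on Python %d\n" % (3 if six.PY3 else 2))
    for key, value in six.iteritems({"spam": 1, "eggs": 2}):
        buf.write(u"%s %s\n" % (key, value))
    print(buf.getvalue())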


https://bitbucket.org/yt_analysis/yt/commits/040d2177829d/
Changeset:   040d2177829d
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:13:10
Summary:     Replacing a bunch of Metaclasses with add_metaclass.
Affected #:  23 files

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
    from distutils.command.build_py import build_py_2to3 \
         as build_py
 except ImportError:
-   from distutils.command.build_py import build_py
+    from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -24,6 +24,7 @@
 import os
 import os.path as path
 from collections import defaultdict
+from yt.extern.six import add_metaclass
 
 from yt.funcs import *
 
@@ -48,12 +49,12 @@
 
 TINY = 1.e-40
 
+ at add_metaclass(ParallelDummy)
 class Halo(object):
     """
     A data source that returns particle information about the members of a
     HOP-identified halo.
     """
-    __metaclass__ = ParallelDummy  # This will proxy up our methods
     _distributed = False
     _processing = False
     _owner = 0

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -16,16 +16,19 @@
 import inspect
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
 
+class RegisteredTask(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "skip") and cls.skip == False:
+            return
+        analysis_task_registry[cls.__name__] = cls
+
+ at add_metaclass(RegisteredTask)
 class AnalysisTask(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "skip") and cls.skip == False:
-                return
-            analysis_task_registry[cls.__name__] = cls
 
     def __init__(self, *args, **kwargs):
         # This should only get called if the subclassed object
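
Class-level __metaclass__ declarations are silently ignored by Python 3, so
this changeset pulls each registry metaclass out into a named class and
attaches it with six's add_metaclass decorator, which behaves the same way on
both interpreters.  A stripped-down sketch of the same self-registration
pattern (the names here are illustrative, not the actual yt classes):

    from yt.extern.six import add_metaclass

    task_registry = {}

    class RegisteredThing(type):
        # Every class built with this metaclass registers itself by name.
        def __init__(cls, name, bases, attrs):
            type.__init__(cls, name, bases, attrs)
            task_registry[name] = cls

    @add_metaclass(RegisteredThing)
    class BaseThing(object):
        pass

    class DensityThing(BaseThing):
        pass

    # task_registry now maps "BaseThing" and "DensityThing" to their classes.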

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -20,12 +20,13 @@
 import weakref
 import itertools
 import shelve
-import cStringIO
+from yt.extern.six.moves import StringIO
 import fileinput
 from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
+from yt.extern.six import add_metaclass
 
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
@@ -182,6 +183,13 @@
         else: tr = self.field_data[field]
         return tr
 
+class RegisteredDataObject(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            data_object_registry[cls._type_name] = cls
+
+@add_metaclass(RegisteredDataObject)
 class AMRData(object):
     """
     Generic AMRData container.  By itself, will attempt to
@@ -193,12 +201,6 @@
     _con_args = ()
     _skip_add = False
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                data_object_registry[cls._type_name] = cls
-
     def __init__(self, pf, fields, **kwargs):
         """
         Typically this is never called directly, but only due to inheritance.

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -16,6 +16,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 particle_handler_registry = defaultdict()
 
@@ -30,13 +31,14 @@
         return tr
     return save_state
 
+class RegisteredParticleIOType(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_source_type"):
+            particle_handler_registry[cls._source_type] = cls
+
+@add_metaclass(RegisteredParticleIOType)
 class ParticleIOHandler(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_source_type"):
-                particle_handler_registry[cls._source_type] = cls
-
     _source_type = None
 
     def __init__(self, pf, source):

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -17,6 +17,7 @@
 import string, re, gc, time, os, os.path, weakref
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -37,13 +38,14 @@
 _cached_pfs = weakref.WeakValueDictionary()
 _pf_store = ParameterFileStore()
 
+class RegisteredStaticOutput(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        output_type_registry[name] = cls
+        mylog.debug("Registering: %s as %s", name, cls)
+
+@add_metaclass(RegisteredStaticOutput)
 class StaticOutput(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            output_type_registry[name] = cls
-            mylog.debug("Registering: %s as %s", name, cls)
-
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -16,6 +16,7 @@
 import inspect, functools, weakref, glob, types
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.convenience import load
 from .data_containers import data_object_registry
 from .analyzer_objects import create_quantity_proxy, \
@@ -305,15 +306,16 @@
         return cls(*self._args, **self._kwargs)
 
 
+class RegisteredSimulationTimeSeries(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        code_name = name[:name.find('Simulation')]
+        if code_name:
+            simulation_time_series_registry[code_name] = cls
+            mylog.debug("Registering simulation: %s as %s", code_name, cls)
+
+@add_metaclass(RegisteredSimulationTimeSeries)
 class SimulationTimeSeries(TimeSeriesData):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            code_name = name[:name.find('Simulation')]
-            if code_name:
-                simulation_time_series_registry[code_name] = cls
-                mylog.debug("Registering simulation: %s as %s", code_name, cls)
-
     def __init__(self, parameter_filename, find_outputs=False):
         """
         Base class for generating simulation time series types.

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -17,7 +17,6 @@
 import glob
 import stat
 import weakref
-import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -402,11 +402,12 @@
     Should only be used in sys.excepthook.
     """
     sys.__excepthook__(exc_type, exc, tb)
-    import xmlrpclib, cStringIO
+    from yt.extern.six.moves import StringIO
+    import xmlrpclib
     p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
-    s = cStringIO.StringIO()
+    s = StringIO()
     traceback.print_exception(exc_type, exc, tb, file=s)
     s = s.getvalue()
     ret = p.pastes.newPaste('pytb', s, None, '', '', True)
@@ -419,8 +420,9 @@
     This is a traceback handler that knows how to paste to the pastebin.
     Should only be used in sys.excepthook.
     """
-    import xmlrpclib, cStringIO, cgitb
-    s = cStringIO.StringIO()
+    import xmlrpclib, cgitb
+    from yt.extern.six.moves import StringIO
+    s = StringIO()
     handler = cgitb.Hook(format="text", file = s)
     handler(exc_type, exc, tb)
     s = s.getvalue()
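
cStringIO does not exist on Python 3, so the in-memory text buffers above switch to the shim in yt.extern.six.moves, which selects a suitable StringIO class for the running interpreter. A minimal sketch of the replacement idiom (the buffer contents are illustrative only):

    from yt.extern.six.moves import StringIO

    buf = StringIO()
    buf.write("captured traceback text\n")
    contents = buf.getvalue()

Note that this buffer holds text; code that writes raw bytes (e.g. PNG output) would generally want io.BytesIO on Python 3 instead.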

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/gui/reason/basic_repl.py
--- a/yt/gui/reason/basic_repl.py
+++ b/yt/gui/reason/basic_repl.py
@@ -21,7 +21,7 @@
 import json
 import sys
 import traceback
-from cStringIO import StringIO
+from yt.extern.six.moves import StringIO
 
 class ProgrammaticREPL(object):
     stopped = False

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -17,7 +17,7 @@
 import json
 import os
 import stat
-import cStringIO
+from cStringIO import StringIO
 import logging
 import uuid
 import numpy as np
@@ -370,11 +370,11 @@
 
     @lockit
     def paste_session(self):
-        import xmlrpclib, cStringIO
+        import xmlrpclib
         p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
-        cs = cStringIO.StringIO()
+        cs = StringIO()
         cs.write("\n######\n".join(self.executed_cell_texts))
         cs = cs.getvalue()
         ret = p.pastes.newPaste('python', cs, None, '', '', True)
@@ -383,7 +383,7 @@
 
     @lockit
     def paste_text(self, to_paste):
-        import xmlrpclib, cStringIO
+        import xmlrpclib
         p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
@@ -412,7 +412,7 @@
 
     @lockit
     def _session_py(self):
-        cs = cStringIO.StringIO()
+        cs = StringIO()
         cs.write("\n######\n".join(self.executed_cell_texts))
         cs.seek(0)
         response.headers["content-disposition"] = "attachment;"

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -18,6 +18,7 @@
 from yt.startup_tasks import parser, subparsers
 from yt.mods import *
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.utilities.minimal_representation import MinimalProjectDescription
 import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
 import urllib, urllib2, base64, os
@@ -44,6 +45,20 @@
     if "long" in argc: argnames.append(argc.pop('long'))
     sc.add_argument(*argnames, **argc)
 
+class YTCommandSubtype(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if cls.name is not None:
+            names = ensure_list(cls.name)
+            for name in names:
+                sc = subparsers.add_parser(name,
+                    description = cls.description,
+                    help = cls.description)
+                sc.set_defaults(func=cls.run)
+                for arg in cls.args:
+                    _add_arg(sc, arg)
+
+@add_metaclass(YTCommandSubtype)
 class YTCommand(object):
     args = ()
     name = None
@@ -51,19 +66,6 @@
     aliases = ()
     npfs = 1
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if cls.name is not None:
-                names = ensure_list(cls.name)
-                for name in names:
-                    sc = subparsers.add_parser(name,
-                        description = cls.description,
-                        help = cls.description)
-                    sc.set_defaults(func=cls.run)
-                    for arg in cls.args:
-                        _add_arg(sc, arg)
-
     @classmethod
     def run(cls, args):
         self = cls()

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -15,16 +15,19 @@
 
 import numpy as np # For modern purposes
 from yt.utilities.lib import grow_flagging_field
+from yt.extern.six import add_metaclass
 
 flagging_method_registry = {}
 
+class RegisteredFlaggingMethod(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            flagging_method_registry[cls._type_name] = cls
+
+@add_metaclass(RegisteredFlaggingMethod)
 class FlaggingMethod(object):
     _skip_add = False
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                flagging_method_registry[cls._type_name] = cls
 
 class OverDensity(FlaggingMethod):
     _type_name = "overdensity"

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -19,22 +19,24 @@
 import cPickle
 import os
 import h5py
+from yt.extern.six import add_metaclass
 
 _axis_ids = {0:2,1:1,2:0}
 
 io_registry = {}
 
+class RegisteredIOHandler(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_data_style"):
+            io_registry[cls._data_style] = cls
+
+@add_metaclass(RegisteredIOHandler)
 class BaseIOHandler(object):
 
     _data_style = None
     _particle_reader = False
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_data_style"):
-                io_registry[cls._data_style] = cls
-
     def __init__(self):
         self.queue = defaultdict(dict)
 

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -20,6 +20,7 @@
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
@@ -41,8 +42,8 @@
 class ContainerClass(object):
     pass
 
+@add_metaclass(abc.ABCMeta)
 class MinimalRepresentation(object):
-    __metaclass__ = abc.ABCMeta
 
     def _update_attrs(self, obj, attr_list):
         for attr in attr_list:

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 
 import cPickle
-import cStringIO
 import itertools
 import logging
 import numpy as np

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -13,7 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cmd, pdb, cStringIO, xmlrpclib, socket, sys
+import cmd, pdb, xmlrpclib, socket, sys
+from yt.extern.six.moves import StringIO
 import traceback
 from SimpleXMLRPCServer import SimpleXMLRPCServer
 from yt.config import ytcfg
@@ -64,9 +65,9 @@
 
 class pdb_handler(object):
     def __init__(self, tb):
-        self.cin = cStringIO.StringIO()
+        self.cin = StringIO()
         sys.stdin = self.cin
-        self.cout = cStringIO.StringIO()
+        self.cout = StringIO()
         sys.stdout = self.cout
         sys.stderr = self.cout
         self.debugger = pdb.Pdb(stdin=self.cin, stdout=self.cout)

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/base_plot_types.py
--- a/yt/visualization/base_plot_types.py
+++ b/yt/visualization/base_plot_types.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 import matplotlib
-import cStringIO
+from yt.extern.six.moves import StringIO
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 from yt.funcs import \
@@ -90,7 +90,7 @@
 
     def _repr_png_(self):
         canvas = FigureCanvasAgg(self.figure)
-        f = cStringIO.StringIO()
+        f = StringIO()
         canvas.print_figure(f)
         f.seek(0)
         return f.read()

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/easy_plots.py
--- a/yt/visualization/easy_plots.py
+++ b/yt/visualization/easy_plots.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from _mpl_imports import *
+from ._mpl_imports import *
 from yt.data_objects.profiles import BinnedProfile1D
 
 plot_type_registry = {}

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -20,6 +20,7 @@
 import os
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
 from yt.data_objects.profiles import \
@@ -1634,8 +1635,8 @@
                     elif attrname.startswith("set_"):
                         setattr(cls, attrname, wrap_pylab_show(attr))
 
+@add_metaclass(_Interactify)
 class PlotCollectionInteractive(PlotCollection):
-    __metaclass__ = _Interactify
 
     autoscale = wrap_pylab_show(PlotCollection.autoscale)
     switch_field = wrap_pylab_show(PlotCollection.switch_field)

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -16,6 +16,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from _mpl_imports import *
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -32,12 +33,13 @@
 
 callback_registry = {}
 
+class RegisteredCallback(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        callback_registry[name] = cls
+
+@add_metaclass(RegisteredCallback)
 class PlotCallback(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            callback_registry[name] = cls
-
     def __init__(self, *args, **kwargs):
         pass
 

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -15,8 +15,8 @@
 import base64
 import numpy as np
 import matplotlib
-import cStringIO
 import types
+from yt.extern.six.moves import StringIO
 import sys
 import os
 import __builtin__
@@ -1653,7 +1653,7 @@
         self._apply_vectors(ax, vi, vj)
 
         canvas = FigureCanvasAgg(fig)
-        f = cStringIO.StringIO()
+        f = StringIO()
         canvas.print_figure(f)
         f.seek(0)
         img = f.read()

diff -r 66b3d83ce6da33f82dc44b400286dbfb0aab1852 -r 040d2177829d01ed46f527cbf9f203035776ade4 yt/visualization/volume_rendering/transfer_function_helper.py
--- a/yt/visualization/volume_rendering/transfer_function_helper.py
+++ b/yt/visualization/volume_rendering/transfer_function_helper.py
@@ -20,7 +20,7 @@
 from yt.visualization._mpl_imports import FigureCanvasAgg
 from matplotlib.figure import Figure
 from IPython.core.display import Image
-import cStringIO
+from yt.extern.six.moves import StringIO
 import numpy as np
 
 
@@ -190,7 +190,7 @@
         ax.set_ylim(y.max()*1.0e-3, y.max()*2)
 
         if fn is None:
-            f = cStringIO.StringIO()
+            f = StringIO()
             canvas.print_figure(f)
             f.seek(0)
             img = f.read()


https://bitbucket.org/yt_analysis/yt/commits/7ca4e552a962/
Changeset:   7ca4e552a962
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:23:48
Summary:     Transforming __builtin__ to builtins.
Affected #:  8 files
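
Python 3 renames the __builtin__ module to builtins, so the IPython detection below goes through the six.moves shim instead of importing __builtin__ directly. A small sketch of the check (inside_ipython is a hypothetical helper; the dir(builtins) test mirrors the diffs):

    from yt.extern.six.moves import builtins

    def inside_ipython():
        # IPython injects __IPYTHON__ into the builtins namespace.
        return "__IPYTHON__" in dir(builtins)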

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -124,7 +124,7 @@
             self.current_time, self.unique_identifier)
         try:
             import hashlib
-            return hashlib.md5(s).hexdigest()
+            return hashlib.md5(s.encode('utf-8')).hexdigest()
         except ImportError:
             return s.replace(";", "*")
 

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -1063,11 +1063,11 @@
     # the rest aligned on a blocksize boundary.  This may be more 
     # efficient than having the last (first in file) block be short
     f.seek(-lastblock,2)
-    yield f.read(lastblock)
+    yield f.read(lastblock).decode('ascii')
 
     for i in range(fullblocks-1,-1, -1):
         f.seek(i * blocksize)
-        yield f.read(blocksize)
+        yield f.read(blocksize).decode('ascii')
 
 def rlines(f, keepends=False):
     """Iterate through the lines of a file in reverse order.

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -21,6 +20,7 @@
 from distutils.version import LooseVersion
 from math import floor, ceil
 
+from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.definitions import inv_axis_names, axis_names, x_dict, y_dict
@@ -337,7 +337,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       "__IPYTHON__" in dir(__builtin__) or \
+       "__IPYTHON__" in dir(builtins) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/imods.py
--- a/yt/imods.py
+++ b/yt/imods.py
@@ -1,8 +1,8 @@
 # This custom importer for yt will set up some IPython notebook-specific
 # helpers.  For instance, it will eventually add items to the menubar.
 
-import __builtin__
-if "__IPYTHON__" not in dir(__builtin__):
+from yt.extern.six.moves import builtins
+if "__IPYTHON__" not in dir(builtins):
     raise ImportError
 
 from IPython.core.interactiveshell import InteractiveShell

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -119,8 +119,8 @@
 importer, which can be used to disable rank-asymmetric behavior in a
 module import:
 
-import __builtin__
-hasattr(__builtin__.__import__,"mpi_import")
+from yt.extern.six.moves import builtins
+hasattr(builtins.__import__,"mpi_import")
 
 This evaluates to True only when we're in an mpi_import() context
 manager.
@@ -141,8 +141,8 @@
 
 # Either importer is None (standard import) or it's a reference to
 # the mpi_import object that owns the current importer.
-import __builtin__
-importer = getattr(__builtin__.__import__,"mpi_import",None)
+from yt.extern.six.moves import builtins
+importer = getattr(builtins.__import__,"mpi_import",None)
 if importer:
     importer.callAfterImport(f)
 else:
@@ -192,7 +192,8 @@
  more information about the level parameter, run 'help(__import__)'.
 """
 
-import sys, imp, __builtin__,types
+import sys, imp, types
+from yt.extern.six.moves import builtins
 from mpi4py import MPI
 class mpi(object):
     rank = MPI.COMM_WORLD.Get_rank()
@@ -205,11 +206,11 @@
         imp.acquire_lock()
         __import_hook__.mpi_import = self
         self.__funcs = []
-        self.original_import = __builtin__.__import__
-        __builtin__.__import__ = __import_hook__
+        self.original_import = builtins.__import__
+        builtins.__import__ = __import_hook__
 
     def __exit__(self,type,value,traceback):
-        __builtin__.__import__ = self.original_import
+        builtins.__import__ = self.original_import
         __import_hook__.mpi_import = None
         imp.release_lock()
         for f in self.__funcs:

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -19,7 +19,7 @@
 from yt.funcs import *
 import _colormap_data as cmd
 import yt.utilities.lib as au
-import __builtin__
+from yt.extern.six.moves import builtins
 
 def scale_image(image, mi=None, ma=None):
     r"""Scale an image ([NxNxM] where M = 1-4) to be uint8 and values scaled 
@@ -540,7 +540,7 @@
         three channels.
     """
  
-    if "__IPYTHON__" in dir(__builtin__):
+    if "__IPYTHON__" in dir(builtins):
         from IPython.core.displaypub import publish_display_data
         data = write_bitmap(image, None, max_val=max_val)
         publish_display_data(

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -19,7 +19,7 @@
 from yt.extern.six.moves import StringIO
 import sys
 import os
-import __builtin__
+from yt.extern.six.moves import builtins
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
 from matplotlib.mathtext import MathTextParser
@@ -1154,7 +1154,7 @@
         >>> slc.show()
 
         """
-        if "__IPYTHON__" in dir(__builtin__):
+        if "__IPYTHON__" in dir(builtins):
             api_version = get_ipython_api_version()
             if api_version in ('0.10', '0.11'):
                 self._send_zmq()

diff -r 040d2177829d01ed46f527cbf9f203035776ade4 -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import __builtin__
+from yt.extern.six.moves import builtins
 import numpy as np
 
 from yt.funcs import *
@@ -761,7 +761,7 @@
         >>> cam.show()
 
         """
-        if "__IPYTHON__" in dir(__builtin__):
+        if "__IPYTHON__" in dir(builtins):
             from IPython.core.displaypub import publish_display_data
             image = self.snapshot()[:,:,:3]
             if clip_ratio is not None: clip_ratio *= image.std()


https://bitbucket.org/yt_analysis/yt/commits/15c2a12d0ae9/
Changeset:   15c2a12d0ae9
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:28:23
Summary:     A few more fixes for encoding, floats, ints.
Affected #:  2 files
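
Two Python 3 issues are handled here: data read from binary-mode files comes back as bytes and must be decoded before string or regex work, and arithmetic involving division can yield floats where string methods need ints. A hedged sketch of both idioms (the values are illustrative only):

    # Bytes from a binary read must be decoded before regex/string work.
    raw = b"Pointer: Grid[1]->NextGridThisLevel = 2"
    text = raw.decode("ascii")

    # Division can produce floats on Python 3; coerce before ljust()/slicing.
    percent, cwidth = 42.0, 60
    marked_width = int(percent * cwidth / 100)
    bar = ("#" * marked_width).ljust(cwidth)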

diff -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 -r 15c2a12d0ae9568b791657413d1e7b1b4dacec17 yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ b/yt/extern/progressbar.py
@@ -161,7 +161,7 @@
             return self.marker.update(pbar)
     def update(self, pbar, width):
         percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
+        cwidth = int(width - len(self.left) - len(self.right))
         marked_width = int(percent * cwidth / 100)
         m = self._format_marker(pbar)
         bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)

diff -r 7ca4e552a962a22610ef6baaf3b657543b6a6ca2 -r 15c2a12d0ae9568b791657413d1e7b1b4dacec17 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -288,7 +288,7 @@
         t1 = time.time()
         pattern = r"Pointer: Grid\[(\d*)\]->NextGrid(Next|This)Level = (\d*)\s+$"
         patt = re.compile(pattern)
-        f = open(self.hierarchy_filename, "rb")
+        f = open(self.hierarchy_filename, "rt")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
         si, ei, LE, RE, fn, npart = [], [], [], [], [], []
@@ -477,7 +477,7 @@
             field_list = None
         field_list = self.comm.mpi_bcast(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
-        self.field_list = list(field_list)
+        self.field_list = [f.decode("ascii") for f in field_list]
 
     def _generate_random_grids(self):
         if self.num_grids > 40:


https://bitbucket.org/yt_analysis/yt/commits/63aaba3da744/
Changeset:   63aaba3da744
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:43:51
Summary:     A few fixes for get_data.
Affected #:  1 file
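
dict.has_key() is gone in Python 3, so the membership tests move to the in operator against the field_data mapping. A minimal sketch of the idiom (field_data here is a plain dict standing in for the real container):

    field_data = {"Density": None}

    # Python 2 only:    field_data.has_key("pdx")
    # Python 2 and 3:
    if "pdx" not in field_data:
        pass  # generate the coordinate fields, etc.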

diff -r 15c2a12d0ae9568b791657413d1e7b1b4dacec17 -r 63aaba3da7444ced50cbdd4f44a1040e5fd954e4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -834,7 +834,7 @@
         # We take a 3-tuple of the coordinate we want to slice through, as well
         # as the axis we're slicing along
         self._get_list_of_grids()
-        if not self.has_key('pdx'):
+        if 'pdx' not in self.field_data:
             self._generate_coords()
         if fields == None:
             fields_to_get = self.fields[:]
@@ -1606,7 +1606,7 @@
         Iterates over the list of fields and generates/reads them all.
         """
         self._get_list_of_grids()
-        if not self.has_key('pdx'):
+        if 'pdx' not in self.field_data:
             self._generate_coords()
         if fields == None:
             fields_to_get = self.fields[:]
@@ -2485,7 +2485,7 @@
         Iterates over the list of fields and generates/reads them all.
         """
         self._get_list_of_grids()
-        if not self.has_key('pdx'):
+        if 'pdx' not in self.field_data():
             self._generate_coords()
         if fields == None:
             fields_to_get = [f for f in self.fields if f not in self._key_fields]


https://bitbucket.org/yt_analysis/yt/commits/f0c7a0e030e7/
Changeset:   f0c7a0e030e7
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 21:58:32
Summary:     A few fixes for tests and boolean regions.
Affected #:  2 files
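
Besides more has_key and types.StringType cleanups, the option-spec dicts rename their "long" key to "longname", presumably so that 2to3's long-to-int fixer cannot rewrite the bare name long inside dict(long=...). A hedged sketch of how such a spec dict is consumed, in the spirit of _add_arg (the parser and spec values are illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    spec = dict(short="-a", longname="--axis", action="store", type=int,
                dest="axis", default=4, help="Axis (4 for all three)")

    names = [spec.pop("short"), spec.pop("longname")]
    parser.add_argument(*names, **spec)
    args = parser.parse_args(["--axis", "2"])   # args.axis == 2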

diff -r 63aaba3da7444ced50cbdd4f44a1040e5fd954e4 -r f0c7a0e030e73a51f8133efece04d16173346ac2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1162,7 +1162,7 @@
             return grid[field]
         elif field in self.pf.field_info and self.pf.field_info[field].not_in_all:
             dv = grid[field][sl]
-        elif not grid.has_key(field):
+        elif field not in grid.field_data:
             conv_factor = 1.0
             if self.pf.field_info.has_key(field):
                 conv_factor = self.pf.field_info[field]._convert_function(self)
@@ -2652,7 +2652,7 @@
         for grid in self._grids:
             pointI = self._get_point_indices(grid)
             np = pointI[0].ravel().size
-            if grid.has_key(field):
+            if field in grid.field_data:
                 new_field = grid[field]
             else:
                 new_field = np.ones(grid.ActiveDimensions, dtype=dtype) * default_val
@@ -3809,8 +3809,10 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         g_fields = []
         for field in fields:
-            if not grid.has_key(field): grid[field] = \
-               np.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
+            if field not in grid.field_data:
+                grid[field] = \
+                    np.zeros(grid.ActiveDimensions,
+                        dtype=self[field].dtype)
             g_fields.append(grid[field])
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
@@ -4026,11 +4028,11 @@
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
         for item in self.regions:
-            if isinstance(item, types.StringType): continue
-            self._all_regions.append(item)
+            if isinstance(item, (str, types.StringType)): continue
+            if item not in self._all_regions:
+                self._all_regions.append(item)
             # So cut_masks don't get messed up.
             item._boolean_touched = True
-        self._all_regions = np.unique(self._all_regions)
 
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids

diff -r 63aaba3da7444ced50cbdd4f44a1040e5fd954e4 -r f0c7a0e030e73a51f8133efece04d16173346ac2 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -42,7 +42,7 @@
     argc = dict(arg.items())
     argnames = []
     if "short" in argc: argnames.append(argc.pop('short'))
-    if "long" in argc: argnames.append(argc.pop('long'))
+    if "longname" in argc: argnames.append(argc.pop('longname'))
     sc.add_argument(*argnames, **argc)
 
 class YTCommandSubtype(type):
@@ -100,192 +100,192 @@
         namespace.pf = [_fix_pf(pf) for pf in pfs]
 
 _common_options = dict(
-    all     = dict(long="--all", dest="reinstall",
+    all     = dict(longname="--all", dest="reinstall",
                    default=False, action="store_true",
                    help="Reinstall the full yt stack in the current location."),
     pf      = dict(short="pf", action=GetParameterFiles,
                    nargs="+", help="Parameter files to run on"),
     opf     = dict(action=GetParameterFiles, dest="pf",
                    nargs="*", help="(Optional) Parameter files to run on"),
-    axis    = dict(short="-a", long="--axis",
+    axis    = dict(short="-a", longname="--axis",
                    action="store", type=int,
                    dest="axis", default=4,
                    help="Axis (4 for all three)"),
-    log     = dict(short="-l", long="--log",
+    log     = dict(short="-l", longname="--log",
                    action="store_true",
                    dest="takelog", default=True,
                    help="Use logarithmic scale for image"),
-    linear  = dict(long="--linear",
+    linear  = dict(longname="--linear",
                    action="store_false",
                    dest="takelog",
                    help="Use linear scale for image"),
-    text    = dict(short="-t", long="--text",
+    text    = dict(short="-t", longname="--text",
                    action="store", type=str,
                    dest="text", default=None,
                    help="Textual annotation"),
-    field   = dict(short="-f", long="--field",
+    field   = dict(short="-f", longname="--field",
                    action="store", type=str,
                    dest="field", default="Density",
                    help="Field to color by"),
-    weight  = dict(short="-g", long="--weight",
+    weight  = dict(short="-g", longname="--weight",
                    action="store", type=str,
                    dest="weight", default=None,
                    help="Field to weight projections with"),
-    cmap    = dict(long="--colormap",
+    cmap    = dict(longname="--colormap",
                    action="store", type=str,
                    dest="cmap", default="algae",
                    help="Colormap name"),
-    zlim    = dict(short="-z", long="--zlim",
+    zlim    = dict(short="-z", longname="--zlim",
                    action="store", type=float,
                    dest="zlim", default=None,
                    nargs=2,
                    help="Color limits (min, max)"),
-    dex     = dict(long="--dex",
+    dex     = dict(longname="--dex",
                    action="store", type=float,
                    dest="dex", default=None,
                    nargs=1,
                    help="Number of dex above min to display"),
-    width   = dict(short="-w", long="--width",
+    width   = dict(short="-w", longname="--width",
                    action="store", type=float,
                    dest="width", default=None,
                    help="Width in specified units"),
-    unit    = dict(short="-u", long="--unit",
+    unit    = dict(short="-u", longname="--unit",
                    action="store", type=str,
                    dest="unit", default='1',
                    help="Desired units"),
-    center  = dict(short="-c", long="--center",
+    center  = dict(short="-c", longname="--center",
                    action="store", type=float,
                    dest="center", default=None,
                    nargs=3,
                    help="Center, space separated (-1 -1 -1 for max)"),
-    max     = dict(short="-m", long="--max",
+    max     = dict(short="-m", longname="--max",
                    action="store_true",
                    dest="max",default=False,
                    help="Center the plot on the density maximum"),
-    bn      = dict(short="-b", long="--basename",
+    bn      = dict(short="-b", longname="--basename",
                    action="store", type=str,
                    dest="basename", default=None,
                    help="Basename of parameter files"),
-    output  = dict(short="-o", long="--output",
+    output  = dict(short="-o", longname="--output",
                    action="store", type=str,
                    dest="output", default="frames/",
                    help="Folder in which to place output images"),
-    outputfn= dict(short="-o", long="--output",
+    outputfn= dict(short="-o", longname="--output",
                    action="store", type=str,
                    dest="output", default=None,
                    help="File in which to place output"),
-    skip    = dict(short="-s", long="--skip",
+    skip    = dict(short="-s", longname="--skip",
                    action="store", type=int,
                    dest="skip", default=1,
                    help="Skip factor for outputs"),
-    proj    = dict(short="-p", long="--projection",
+    proj    = dict(short="-p", longname="--projection",
                    action="store_true",
                    dest="projection", default=False,
                    help="Use a projection rather than a slice"),
-    maxw    = dict(long="--max-width",
+    maxw    = dict(longname="--max-width",
                    action="store", type=float,
                    dest="max_width", default=1.0,
                    help="Maximum width in code units"),
-    minw    = dict(long="--min-width",
+    minw    = dict(longname="--min-width",
                    action="store", type=float,
                    dest="min_width", default=50,
                    help="Minimum width in units of smallest dx (default: 50)"),
-    nframes = dict(short="-n", long="--nframes",
+    nframes = dict(short="-n", longname="--nframes",
                    action="store", type=int,
                    dest="nframes", default=100,
                    help="Number of frames to generate"),
-    slabw   = dict(long="--slab-width",
+    slabw   = dict(longname="--slab-width",
                    action="store", type=float,
                    dest="slab_width", default=1.0,
                    help="Slab width in specified units"),
-    slabu   = dict(short="-g", long="--slab-unit",
+    slabu   = dict(short="-g", longname="--slab-unit",
                    action="store", type=str,
                    dest="slab_unit", default='1',
                    help="Desired units for the slab"),
-    ptype   = dict(long="--particle-type",
+    ptype   = dict(longname="--particle-type",
                    action="store", type=int,
                    dest="ptype", default=2,
                    help="Particle type to select"),
-    agecut  = dict(long="--age-cut",
+    agecut  = dict(longname="--age-cut",
                    action="store", type=float,
                    dest="age_filter", default=None,
                    nargs=2,
                    help="Bounds for the field to select"),
-    uboxes  = dict(long="--unit-boxes",
+    uboxes  = dict(longname="--unit-boxes",
                    action="store_true",
                    dest="unit_boxes",
                    help="Display helpful unit boxes"),
-    thresh  = dict(long="--threshold",
+    thresh  = dict(longname="--threshold",
                    action="store", type=float,
                    dest="threshold", default=None,
                    help="Density threshold"),
-    dm_only = dict(long="--all-particles",
+    dm_only = dict(longname="--all-particles",
                    action="store_false",
                    dest="dm_only", default=True,
                    help="Use all particles"),
-    grids   = dict(long="--show-grids",
+    grids   = dict(longname="--show-grids",
                    action="store_true",
                    dest="grids", default=False,
                    help="Show the grid boundaries"),
-    time    = dict(long="--time",
+    time    = dict(longname="--time",
                    action="store_true",
                    dest="time", default=False,
                    help="Print time in years on image"),
-    contours    = dict(long="--contours",
+    contours    = dict(longname="--contours",
                    action="store",type=int,
                    dest="contours", default=None,
                    help="Number of Contours for Rendering"),
-    contour_width  = dict(long="--contour_width",
+    contour_width  = dict(longname="--contour_width",
                    action="store",type=float,
                    dest="contour_width", default=None,
                    help="Width of gaussians used for rendering."),
-    enhance   = dict(long="--enhance",
+    enhance   = dict(longname="--enhance",
                    action="store_true",
                    dest="enhance", default=False,
                    help="Enhance!"),
-    valrange  = dict(short="-r", long="--range",
+    valrange  = dict(short="-r", longname="--range",
                    action="store", type=float,
                    dest="valrange", default=None,
                    nargs=2,
                    help="Range, space separated"),
-    up  = dict(long="--up",
+    up  = dict(longname="--up",
                    action="store", type=float,
                    dest="up", default=None,
                    nargs=3,
                    help="Up, space separated"),
-    viewpoint  = dict(long="--viewpoint",
+    viewpoint  = dict(longname="--viewpoint",
                    action="store", type=float,
                    dest="viewpoint", default=[1., 1., 1.],
                    nargs=3,
                    help="Viewpoint, space separated"),
-    pixels    = dict(long="--pixels",
+    pixels    = dict(longname="--pixels",
                    action="store",type=int,
                    dest="pixels", default=None,
                    help="Number of Pixels for Rendering"),
-    halos   = dict(long="--halos",
+    halos   = dict(longname="--halos",
                    action="store", type=str,
                    dest="halos",default="multiple",
                    help="Run halo profiler on a 'single' halo or 'multiple' halos."),
-    halo_radius = dict(long="--halo_radius",
+    halo_radius = dict(longname="--halo_radius",
                        action="store", type=float,
                        dest="halo_radius",default=0.1,
                        help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
-    halo_radius_units = dict(long="--halo_radius_units",
+    halo_radius_units = dict(longname="--halo_radius_units",
                              action="store", type=str,
                              dest="halo_radius_units",default="1",
                              help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
-    halo_hop_style = dict(long="--halo_hop_style",
+    halo_hop_style = dict(longname="--halo_hop_style",
                           action="store", type=str,
                           dest="halo_hop_style",default="new",
                           help="Style of hop output file.  'new' for yt_hop files and 'old' for enzo_hop files."),
-    halo_parameter_file = dict(long="--halo_parameter_file",
+    halo_parameter_file = dict(longname="--halo_parameter_file",
                                action="store", type=str,
                                dest="halo_parameter_file",default=None,
                                help="HaloProfiler parameter file."),
-    make_profiles = dict(long="--make_profiles",
+    make_profiles = dict(longname="--make_profiles",
                          action="store_true", default=False,
                          help="Make profiles with halo profiler."),
-    make_projections = dict(long="--make_projections",
+    make_projections = dict(longname="--make_projections",
                             action="store_true", default=False,
                             help="Make projections with halo profiler.")
 
@@ -869,7 +869,7 @@
 class YTHubSubmitCmd(YTCommand):
     name = "hub_submit"
     args = (
-            dict(long="--repo", action="store", type=str,
+            dict(longname="--repo", action="store", type=str,
                  dest="repo", default=".", help="Repository to upload"),
            )
     description = \
@@ -1041,10 +1041,10 @@
 class YTInstInfoCmd(YTCommand):
     name = "instinfo"
     args = (
-            dict(short="-u", long="--update-source", action="store_true",
+            dict(short="-u", longname="--update-source", action="store_true",
                  default = False,
                  help="Update the yt installation, if able"),
-            dict(short="-o", long="--output-version", action="store",
+            dict(short="-o", longname="--output-version", action="store",
                   default = None, dest="outputfile",
                   help="File into which the current revision number will be" +
                        "stored")
@@ -1138,9 +1138,9 @@
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
-            dict(short="-a", long="--axis", action="store", type=int,
+            dict(short="-a", longname="--axis", action="store", type=int,
                  dest="axis", default=0, help="Axis (4 for all three)"),
-            dict(short ="-o", long="--host", action="store", type=str,
+            dict(short ="-o", longname="--host", action="store", type=str,
                    dest="host", default=None, help="IP Address to bind on"),
             "pf",
             )
@@ -1181,23 +1181,23 @@
 class YTPastebinCmd(YTCommand):
     name = "pastebin"
     args = (
-             dict(short="-l", long="--language", action="store",
+             dict(short="-l", longname="--language", action="store",
                   default = None, dest="language",
                   help="Use syntax highlighter for the file in language"),
-             dict(short="-L", long="--languages", action="store_true",
+             dict(short="-L", longname="--languages", action="store_true",
                   default = False, dest="languages",
                   help="Retrive a list of supported languages"),
-             dict(short="-e", long="--encoding", action="store",
+             dict(short="-e", longname="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "
                         "utf-8 or guessing if available)"),
-             dict(short="-b", long="--open-browser", action="store_true",
+             dict(short="-b", longname="--open-browser", action="store_true",
                   default = False, dest="open_browser",
                   help="Open the paste in a web browser"),
-             dict(short="-p", long="--private", action="store_true",
+             dict(short="-p", longname="--private", action="store_true",
                   default = False, dest="private",
                   help="Paste as private"),
-             dict(short="-c", long="--clipboard", action="store_true",
+             dict(short="-c", longname="--clipboard", action="store_true",
                   default = False, dest="clipboard",
                   help="File to output to; else, print."),
              dict(short="file", type=str),
@@ -1420,7 +1420,7 @@
 
         """
     args = (
-            dict(short="-t", long="--task", action="store",
+            dict(short="-t", longname="--task", action="store",
                  default = 0, dest='task',
                  help="Open a web browser."),
            )
@@ -1432,13 +1432,13 @@
 class YTNotebookCmd(YTCommand):
     name = ["notebook"]
     args = (
-            dict(short="-o", long="--open-browser", action="store_true",
+            dict(short="-o", longname="--open-browser", action="store_true",
                  default = False, dest='open_browser',
                  help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
+            dict(short="-p", longname="--port", action="store",
                  default = 0, dest='port',
                  help="Port to listen on; defaults to auto-detection."),
-            dict(short="-n", long="--no-password", action="store_true",
+            dict(short="-n", longname="--no-password", action="store_true",
                  default = False, dest='no_password',
                  help="If set, do not prompt or use a password."),
             )
@@ -1501,19 +1501,19 @@
 class YTGUICmd(YTCommand):
     name = ["serve", "reason"]
     args = (
-            dict(short="-o", long="--open-browser", action="store_true",
+            dict(short="-o", longname="--open-browser", action="store_true",
                  default = False, dest='open_browser',
                  help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
+            dict(short="-p", longname="--port", action="store",
                  default = 0, dest='port',
                  help="Port to listen on"),
-            dict(short="-f", long="--find", action="store_true",
+            dict(short="-f", longname="--find", action="store_true",
                  default = False, dest="find",
                  help="At startup, find all *.hierarchy files in the CWD"),
-            dict(short="-d", long="--debug", action="store_true",
+            dict(short="-d", longname="--debug", action="store_true",
                  default = False, dest="debug",
                  help="Add a debugging mode for cell execution"),
-            dict(short = "-r", long = "--remote", action = "store_true",
+            dict(short = "-r", longname = "--remote", action = "store_true",
                  default = False, dest="use_pyro",
                  help = "Use with a remote Pyro4 server."),
             "opf"
@@ -1564,9 +1564,9 @@
 
 class YTStatsCmd(YTCommand):
     args = ('outputfn','bn','skip','pf','field',
-            dict(long="--max", action='store_true', default=False,
+            dict(longname="--max", action='store_true', default=False,
                  dest='max', help="Display maximum of field requested through -f option."),
-            dict(long="--min", action='store_true', default=False,
+            dict(longname="--min", action='store_true', default=False,
                  dest='min', help="Display minimum of field requested through -f option."))
     name = "stats"
     description = \


https://bitbucket.org/yt_analysis/yt/commits/f363b1279e87/
Changeset:   f363b1279e87
Branch:      yt
User:        MatthewTurk
Date:        2013-10-22 22:10:46
Summary:     Encode our filename to ASCII.
Affected #:  2 files
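
On Python 3 the filename arriving here is a str, while the low-level PNG writer expects bytes, hence the explicit encode. A minimal, hedged sketch of the conversion (to_bytes_filename is a hypothetical helper, not part of the changeset):

    def to_bytes_filename(filename):
        # C-level writers typically want bytes; pass bytes through, encode str.
        if isinstance(filename, bytes):
            return filename
        return filename.encode("ascii")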

diff -r f0c7a0e030e73a51f8133efece04d16173346ac2 -r f363b1279e87748ca9b559d88ed98099cd74857e yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -571,7 +571,7 @@
         lines = open(self.fparameter_filename).readlines()
         for line in lines:
             if line.count("=") == 1:
-                param, vals = [strip(i) for i in
+                param, vals = [i.strip() for i in
                                 (j.rstrip() for j in line.split("="))]
                 #param, vals = map(strip,map(rstrip,line.split("=")))
                 if vals.count("'") == 0:

diff -r f0c7a0e030e73a51f8133efece04d16173346ac2 -r f363b1279e87748ca9b559d88ed98099cd74857e yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -151,7 +151,7 @@
     if transpose:
         bitmap_array = bitmap_array.swapaxes(0,1)
     if filename is not None:
-        au.write_png(bitmap_array.copy(), filename)
+        au.write_png(bitmap_array.copy(), filename.encode('ascii'))
     else:
         return au.write_png_to_string(bitmap_array.copy())
     return bitmap_array


https://bitbucket.org/yt_analysis/yt/commits/1a537f8125c9/
Changeset:   1a537f8125c9
Branch:      yt
User:        MatthewTurk
Date:        2014-04-16 22:13:33
Summary:     Merging from months of development.
Affected #:  116 files

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,9 @@
-include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
+include distribute_setup.py README* CREDITS COPYING.txt CITATION nose.cfg
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
+recursive-include yt *.pyx *.pxd *.h README* *.glsl *.cu
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there!  You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses.  It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there!  You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms.  It's written in 
+python and heavily leverages both NumPy and Matplotlib for fast arrays and 
+visualization, respectively.
 
 Full documentation and a user community can be found at:
 
 http://yt-project.org/
+
 http://yt-project.org/doc/
 
 If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
 doc/install_script.sh .  You will have to set the destination directory, and
 there are options available, but it should be straightforward.
 
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or 
+ways to help development, please visit our website.
 
 Enjoy!

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -264,28 +264,45 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+	echo "OS X 10.8.4 and 10.9: download Xcode 5.02 from the mac app store."
 	echo "(search for Xcode)."
+    echo
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:"
-	echo "http://stackoverflow.com/questions/9353444"
-	echo "Alternatively, download the Xcode command line tools from"
-	echo "the Apple developer tools website."
+	echo "command line tools."
+    echo
+    echo "For OS X 10.8, see:"
+   	echo "http://stackoverflow.com/questions/9353444"
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so,"
-	echo "please set the following environment variables, remove any"
-	echo "broken installation tree, and re-run this script verbatim."
-        echo
-        echo "$ export CC=gcc"
-        echo "$ export CXX=g++"
-	echo
-        OSX_VERSION=`sw_vers -productVersion`
-        if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+    echo "For OS X 10.9, the command line tools can be installed"
+    echo "with the following command:"
+    echo "    xcode-select --install"
+    echo
+    OSX_VERSION=`sw_vers -productVersion`
+    if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
             MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/redhat-release ]
+    then
+        echo "Looks like you're on a Redhat-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * openssl-devel"
+        echo "  * uuid-devel"
+        echo "  * readline-devel"
+        echo "  * ncurses-devel"
+        echo "  * zip"
+        echo "  * gcc-{,c++,gfortran}"
+        echo "  * make"
+        echo "  * patch"
+        echo 
+        echo "You can accomplish this by executing:"
+        echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
+        echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
+    fi
     if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
     then
         echo "Looks like you're on an OpenSUSE-compatible machine."
@@ -486,10 +503,8 @@
     cd $LIB
     if [ ! -z `echo $LIB | grep h5py` ]
     then
-        shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     else
-        shift
         ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     fi
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -555,22 +570,27 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
 CYTHON='Cython-0.19.1'
 FORTHON='Forthon-0.8.11'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.5'
+PYTHON='Python-2.7.6'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.1.3'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.0.0'
+IPYTHON='ipython-1.1.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.7'
+MERCURIAL='mercurial-2.8'
 NOSE='nose-1.3.0'
 NUMPY='numpy-1.7.1'
 PYTHON_HGLIB='python-hglib-1.0'
@@ -580,14 +600,14 @@
 SQLITE='sqlite-autoconf-3071700'
 SYMPY='sympy-0.7.3'
 TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.3'
+ZEROMQ='zeromq-3.2.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
 echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
@@ -595,11 +615,11 @@
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca  ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8  mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
 echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
@@ -609,7 +629,7 @@
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
 echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -1003,10 +1023,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/orientation/"
-    echo
-    echo "or just activate your environment and run 'yt serve' to bring up the"
-    echo "yt GUI."
+    echo "    http://yt-project.org/doc/bootcamp/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

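For reference, each .sha512 file written above pairs a tarball with its
expected checksum, and the install script checks downloads against them.
A minimal, hypothetical Python sketch of the same check (the script itself
performs this verification with shell tools):

    import hashlib

    def verify_sha512(tarball, sha512_file):
        # The .sha512 file holds "<hash>  <filename>", as written by the echo lines above.
        expected = open(sha512_file).read().split()[0]
        digest = hashlib.sha512()
        with open(tarball, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), ""):
                digest.update(chunk)
        return digest.hexdigest() == expected

    # e.g. verify_sha512("Python-2.7.6.tgz", "Python-2.7.6.tgz.sha512")
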
diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a setup.py
--- a/setup.py
+++ b/setup.py
@@ -160,7 +160,7 @@
 # End snippet
 ######
 
-VERSION = "2.6dev"
+VERSION = "2.7dev"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -72,7 +72,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-__version__ = "2.5-dev"
+__version__ = "2.7-dev"
 
 def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
     import nose, os, sys

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for EXPLOOOOSIIONNNSSS
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+    #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and, if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First to check if the line is at risk for instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of fit plus a little wiggle room
+            # max and min to prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway.
+            #For high-resolution spectra the tiny bits on the edges
+            #can give a non-negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just set it to no fit because we've tried
+                # everything else at this point. this region just sucks :(
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
 

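The region-splitting logic factored out into _split_region above amounts to:
find the flux maximum (i.e. the absorption minimum) in the middle two
quartiles of an over-long complex and, if that point is shallow enough,
divide the complex there. A standalone sketch of the same idea (hypothetical
names, mirroring the code in the diff):

    import numpy as np

    def split_long_complex(flux, bounds, split_lim):
        # bounds = [peak, left, right], as in the cBounds entries above
        q = (bounds[2] - bounds[1]) / 4
        cut = flux[bounds[1] + q:bounds[2] - q].argmax() + bounds[1] + q
        if flux[cut] <= split_lim:
            # the would-be cut point is still strongly absorbing; keep the complex whole
            return []
        left_peak = flux[bounds[1]:cut].argmin() + bounds[1]
        right_peak = flux[cut:bounds[2]].argmin() + cut
        return [[left_peak, bounds[1], cut], [right_peak, cut, bounds[2]]]
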
diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
 from .radmc3d_export.api import \
     RadMC3DWriter
 
+from .particle_trajectories.api import \
+    ParticleTrajectories
+
+from .photon_simulator.api import \
+     PhotonList, \
+     EventList, \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel, \
+     PhotonModel, \
+     ThermalPhotonModel

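With these additions the new modules are importable straight from the
top-level analysis_modules API, e.g.:

    from yt.analysis_modules.api import ParticleTrajectories, PhotonList, \
         EventList, ThermalPhotonModel
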
diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either 
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

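A rough usage sketch for the new single-dataset mode (the LightRay
constructor arguments shown here are assumptions for illustration; what the
diff establishes is that near_redshift and far_redshift must be equal, and
that start_position is combined with either end_position or trajectory, not
both). Internally the segment's redshift interval is set from the Hubble
velocity across the ray via next_redshift = sqrt((1 + v/c)/(1 - v/c)) - 1.

    import numpy as np
    from yt.analysis_modules.cosmological_observation.light_ray.light_ray \
         import LightRay

    # hypothetical parameter file; near_redshift == far_redshift selects the
    # single-dataset code path added above
    lr = LightRay("my_simulation.par", near_redshift=0.0, far_redshift=0.0)
    lr.make_light_ray(start_position=[0.0, 0.0, 0.0],
                      trajectory=[0.5, np.pi / 4, np.pi / 2],  # (r, theta, phi)
                      fields=["Temperature", "Density"],
                      solution_filename="ray_solution.txt",
                      data_filename="single_dataset_ray.h5")
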
diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -25,7 +25,7 @@
 from yt.convenience import \
     load
 from yt.data_objects.profiles import \
-    BinnedProfile1D, EmptyProfileData
+    BinnedProfile1D, YTEmptyProfileData
 from yt.analysis_modules.halo_finding.api import *
 from .halo_filters import \
     VirialFilter
@@ -586,8 +586,8 @@
                                                 r_min, halo['r_max'],
                                                 log_space=True, lazy_reader=True,
                                                 end_collect=True)
-            except EmptyProfileData:
-                mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
+            except YTEmptyProfileData:
+                mylog.error("Caught YTEmptyProfileData exception, returning None for this halo.")
                 return None
             # Figure out which fields to add simultaneously
             field_groupings = defaultdict(lambda: defaultdict(list))

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectories object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    ________
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must end
+    at or before the particle's last timestep. This is a limitation we hope to
+    lift at some point in the future.     
+    """
+    def __init__(self, filenames, indices, fields=None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None: fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first interpolate the field to the particle positions
+        # and then return the field. 
+
+        pf = self.pfs[0]
+        self.derived_field_list = pf.h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if pf.field_info[field].particle_type]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each dataset, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs:
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+                print "Not all requested particle ids contained in this dataset!"
+                raise IndexError
+            mask = np.in1d(newtags, indices, assume_unique=True)
+            sorts = np.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = np.array(self.times)
+
+        # Now instantiate the requested fields 
+        for field in fields:
+            self._get_data(field)
+            
+    def has_key(self, key):
+        return (key in self.field_data)
+    
+    def keys(self):
+        return self.field_data.keys()
+
+    def __getitem__(self, key):
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+        if key == "particle_time":
+            return self.times
+        if not self.field_data.has_key(key):
+            self._get_data(key)
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key):
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self):
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices):
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys():
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self):
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields):
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        for field in fields:
+            if not self.field_data.has_key(field):
+                self._get_data(field)
+                
+    def _get_data(self, field):
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        if not self.field_data.has_key(field):
+            particles = np.empty((0))
+            step = int(0)
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+                if field in self.particle_fields:
+                    # This is easy... just get the particle fields
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = np.append(particles, pfield[sort])
+                else:
+                    # This is hard... must loop over grids
+                    pfield = np.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    for grid in particle_grids:
+                        cube = grid.retrieve_ghost_zones(1, [field])
+                        CICSample_3(x,y,z,pfield,
+                                    self.num_indices,
+                                    cube[field],
+                                    np.array(grid.LeftEdge).astype(np.float64),
+                                    np.array(grid.ActiveDimensions).astype(np.int32),
+                                    np.float64(grid['dx']))
+                    particles = np.append(particles, pfield)
+                step += 1
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index):
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectories object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        mask = np.in1d(self.indices, (index,), assume_unique=True)
+        if not np.any(mask):
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+        fields = [field for field in sorted(self.field_data.keys())]
+        traj = {}
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        for field in fields:
+            traj[field] = self[field][mask,:][0]
+        return traj
+
+    def write_out(self, filename_base):
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the outputted ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        fields = [field for field in sorted(self.field_data.keys())]
+        num_fields = len(fields)
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        template_str = "%g\t"*num_fields+"%g\n"
+        for ix in xrange(self.num_indices):
+            outlines = [first_str]
+            for it in xrange(self.num_steps):
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename):
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectories(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        fid = h5py.File(filename, "w")
+        fields = [field for field in sorted(self.field_data.keys())]
+        fid.create_dataset("particle_indices", dtype=np.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        for field in fields:
+            fid.create_dataset("%s" % field, data=self[field])
+        fid.close()

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('particle_trajectories', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+     PhotonModel, \
+     ThermalPhotonModel
+
+from .photon_simulator import \
+     PhotonList, \
+     EventList
+
+from .spectral_models import \
+     SpectralModel, \
+     XSpecThermalModel, \
+     XSpecAbsorbModel, \
+     TableApecModel, \
+     TableAbsorbModel

diff -r f363b1279e87748ca9b559d88ed98099cd74857e -r 1a537f8125c9131bdf906e65c60f9e1c7d59327a yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based off of the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+     mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+    def __init__(self):
+        pass
+
+    def __call__(self, data_source, parameters):
+        photons = {}
+        return photons
+
+class ThermalPhotonModel(PhotonModel):
+    r"""
+    Initialize a ThermalPhotonModel from a thermal spectrum. 
+    
+    Parameters
+    ----------
+
+    spectral_model : `SpectralModel`
+        A thermal spectral model instance, either of `XSpecThermalModel`
+        or `TableApecModel`. 
+    X_H : float, optional
+        The hydrogen mass fraction.
+    Zmet : float or string, optional
+        The metallicity. If a float, a constant metallicity is assumed
+        throughout. If a string, it is taken to be the name of the
+        metallicity field.
+    """
+    def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+        self.X_H = X_H
+        self.Zmet = Zmet
+        self.spectral_model = spectral_model
+
+    def __call__(self, data_source, parameters):
+        
+        pf = data_source.pf
+
+        exp_time = parameters["FiducialExposureTime"]
+        area = parameters["FiducialArea"]
+        redshift = parameters["FiducialRedshift"]
+        D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+        dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+                
+        vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+        
+        num_cells = data_source["Temperature"].shape[0]
+        start_c = comm.rank*num_cells/comm.size
+        end_c = (comm.rank+1)*num_cells/comm.size
+        
+        kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+        vol = data_source["CellVolume"][start_c:end_c].copy()
+        dx = data_source["dx"][start_c:end_c].copy()
+        EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+        EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+    
+        data_source.clear_data()
+    
+        x = data_source["x"][start_c:end_c].copy()
+        y = data_source["y"][start_c:end_c].copy()
+        z = data_source["z"][start_c:end_c].copy()
+    
+        data_source.clear_data()
+        
+        vx = data_source["x-velocity"][start_c:end_c].copy()
+        vy = data_source["y-velocity"][start_c:end_c].copy()
+        vz = data_source["z-velocity"][start_c:end_c].copy()
+    
+        if isinstance(self.Zmet, basestring):
+            metalZ = data_source[self.Zmet][start_c:end_c].copy()
+        else:
+            metalZ = self.Zmet*np.ones(EM.shape)
+        
+        data_source.clear_data()
+
+        idxs = np.argsort(kT)
+        dshape = idxs.shape
+
+        kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+        dkT = kT_bins[1]-kT_bins[0]
+        kT_idxs = np.digitize(kT[idxs], kT_bins)
+        kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+        bcounts = np.bincount(kT_idxs).astype("int")
+        bcounts = bcounts[bcounts > 0]
+        n = int(0)
+        bcell = []
+        ecell = []
+        for bcount in bcounts:
+            bcell.append(n)
+            ecell.append(n+bcount)
+            n += bcount
+        kT_idxs = np.unique(kT_idxs)
+        
+        self.spectral_model.prepare()
+        energy = self.spectral_model.ebins
+    
+        cell_em = EM[idxs]*vol_scale
+        cell_vol = vol[idxs]*vol_scale
+    
+        number_of_photons = np.zeros(dshape, dtype='uint64')
+        energies = []
+    
+        u = np.random.random(cell_em.shape)
+        
+        pbar = get_pbar("Generating Photons", dshape[0])
+
+        for i, ikT in enumerate(kT_idxs):
+
+            ncells = int(bcounts[i])
+            ibegin = bcell[i]
+            iend = ecell[i]
+            kT = kT_bins[ikT] + 0.5*dkT
+        
+            em_sum_c = cell_em[ibegin:iend].sum()
+            em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+            
+            cspec, mspec = self.spectral_model.get_spectrum(kT)
+            cspec *= dist_fac*em_sum_c/vol_scale
+            mspec *= dist_fac*em_sum_m/vol_scale
+        
+            cumspec_c = np.cumsum(cspec)
+            counts_c = cumspec_c[:]/cumspec_c[-1]
+            counts_c = np.insert(counts_c, 0, 0.0)
+            tot_ph_c = cumspec_c[-1]*area*exp_time
+
+            cumspec_m = np.cumsum(mspec)
+            counts_m = cumspec_m[:]/cumspec_m[-1]
+            counts_m = np.insert(counts_m, 0, 0.0)
+            tot_ph_m = cumspec_m[-1]*area*exp_time
+        
+            for icell in xrange(ibegin, iend):
+            
+                cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+                cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+            
+                cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+                cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+            
+                cell_n = cell_n_c + cell_n_m
+
+                if cell_n > 0:
+                    number_of_photons[icell] = cell_n
+                    randvec_c = np.random.uniform(size=cell_n_c)
+                    randvec_c.sort()
+                    randvec_m = np.random.uniform(size=cell_n_m)
+                    randvec_m.sort()
+                    cell_e_c = np.interp(randvec_c, counts_c, energy)
+                    cell_e_m = np.interp(randvec_m, counts_m, energy)
+                    energies.append(np.concatenate([cell_e_c,cell_e_m]))
+                
+                pbar.update(icell)
+
+        pbar.finish()
+            
+        active_cells = number_of_photons > 0
+        idxs = idxs[active_cells]
+        
+        photons = {}
+
+        src_ctr = parameters["center"]
+        
+        photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+        photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+        photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+        photons["vx"] = vx[idxs]/cm_per_km
+        photons["vy"] = vy[idxs]/cm_per_km
+        photons["vz"] = vz[idxs]/cm_per_km
+        photons["dx"] = dx[idxs]*pf.units["kpc"]
+        photons["NumberOfPhotons"] = number_of_photons[active_cells]
+        photons["Energy"] = np.concatenate(energies)
+    
+        return photons

This diff is so big that we needed to truncate the remainder.
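
A note on the ThermalPhotonModel.__call__ loop above: photon energies are drawn by inverting the cumulative spectrum, i.e. each bin spectrum is summed into a CDF over the energy bin edges, normalized to [0, 1], and sampled with sorted uniform deviates via np.interp. A minimal, self-contained sketch of that sampling step follows; the power-law spectrum here is only a stand-in for the APEC model output.

    import numpy as np

    # Hypothetical spectrum on a grid of energy bin edges (keV).
    ebins = np.linspace(0.1, 10.0, 201)            # 201 edges -> 200 bins
    emid = 0.5 * (ebins[1:] + ebins[:-1])
    spec = emid ** -1.5                            # stand-in for an APEC spectrum

    # Normalized cumulative distribution, with a leading zero so that it has
    # one value per bin edge (mirroring the np.insert(..., 0, 0.0) above).
    cumspec = np.cumsum(spec)
    counts = np.insert(cumspec / cumspec[-1], 0, 0.0)

    # Inverse-CDF sampling of photon energies, as in the PHOX-style cell loop.
    n_photons = 1000
    randvec = np.sort(np.random.uniform(size=n_photons))
    energies = np.interp(randvec, counts, ebins)
    print(energies.min(), energies.max())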

https://bitbucket.org/yt_analysis/yt/commits/428c48489d93/
Changeset:   428c48489d93
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 23:10:27
Summary:     Merging python3 work into yt-3.0.
Affected #:  66 files

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,11 @@
     import distribute_setup
     distribute_setup.use_setuptools()
 
-from distutils.command.build_py import build_py
+try:
+   from distutils.command.build_py import build_py_2to3 \
+        as build_py
+except ImportError:
+    from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
@@ -100,11 +104,11 @@
     needs_cython = True
 
 if needs_cython:
-    print "Cython is a build-time requirement for the source tree of yt."
-    print "Please either install yt from a provided, release tarball,"
-    print "or install Cython (version 0.16 or higher)."
-    print "You may be able to accomplish this by typing:"
-    print "     pip install -U Cython"
+    print("Cython is a build-time requirement for the source tree of yt.")
+    print("Please either install yt from a provided, release tarball,")
+    print("or install Cython (version 0.16 or higher).")
+    print("You may be able to accomplish this by typing:")
+    print("     pip install -U Cython")
     sys.exit(1)
 
 ######
@@ -176,12 +180,12 @@
                                      shell=True)
 
     if (get_changeset.stderr.read() != ""):
-        print "Error in obtaining current changeset of the Mercurial repository"
+        print("Error in obtaining current changeset of the Mercurial repository")
         changeset = None
 
-    changeset = get_changeset.stdout.read().strip()
+    changeset = get_changeset.stdout.read().strip().decode("UTF-8")
     if (not re.search("^[0-9a-f]{12}", changeset)):
-        print "Current changeset of the Mercurial repository is malformed"
+        print("Current changeset of the Mercurial repository is malformed")
         changeset = None
 
     return changeset
@@ -215,7 +219,7 @@
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
                 fobj.write("hg_version = '%s'\n" % changeset)
 
-            build_py.run(self)
+        build_py.run(self)
 
 
 def configuration(parent_package='', top_path=None):

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -87,7 +87,7 @@
             keys = set(registry_entries())
             tests_to_run += [t for t in new_tests if t in keys]
         tests = list(set(tests_to_run))
-        print "\n    ".join(tests)
+        print ("\n    ".join(tests))
         sys.exit(0)
 
     # Load the test pf and make sure it's good.

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from halo_objects import \
+from .halo_objects import \
     Halo, \
     HOPHalo, \
     parallelHOPHalo, \

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -173,14 +173,37 @@
 __declspec(dllexport)
 #endif
 
-void initEnzoFOF(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_EnzoFOF(void)
+#else
+#define _RETVAL 
+initEnzoFOF(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "EnzoFOF",           /* m_name */
+        "EnzoFOF Module",    /* m_doc */
+        -1,                  /* m_size */
+        _FOFMethods,          /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("EnzoFOF", _FOFMethods);
+#endif
     d = PyModule_GetDict(m);
     _FOFerror = PyErr_NewException("EnzoFOF.FOFerror", NULL, NULL);
     PyDict_SetItemString(d, "error", _FOFerror);
     import_array();
+    return _RETVAL;
 }
 
 /*

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -24,6 +24,7 @@
 import os
 import os.path as path
 from collections import defaultdict
+from yt.extern.six import add_metaclass
 
 from yt.funcs import *
 
@@ -50,12 +51,13 @@
     ParallelAnalysisInterface, \
     parallel_blocking_call
 
+
+ at add_metaclass(ParallelDummy)
 class Halo(object):
     """
     A data source that returns particle information about the members of a
     HOP-identified halo.
     """
-    __metaclass__ = ParallelDummy  # This will proxy up our methods
     _distributed = False
     _processing = False
     _owner = 0
@@ -487,39 +489,39 @@
         # all the parameters except for the center of mass.
         com = self.center_of_mass()
         position = [self["particle_position_x"],
-		    self["particle_position_y"],
-		    self["particle_position_z"]]
+                    self["particle_position_y"],
+                    self["particle_position_z"]]
         # Locate the furthest particle from com, its vector length and index
-	DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
-	position = [position[0] - com[0],
-		    position[1] - com[1],
-		    position[2] - com[2]]
-	# different cases of particles being on other side of boundary
-	for axis in range(np.size(DW)):
-	    cases = np.array([position[axis],
-	  		      position[axis] + DW[axis],
-			      position[axis] - DW[axis]])        
+        DW = np.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+        position = [position[0] - com[0],
+                    position[1] - com[1],
+                    position[2] - com[2]]
+        # different cases of particles being on other side of boundary
+        for axis in range(np.size(DW)):
+            cases = np.array([position[axis],
+                                position[axis] + DW[axis],
+                              position[axis] - DW[axis]])        
             # pick out the smallest absolute distance from com
             position[axis] = np.choose(np.abs(cases).argmin(axis=0), cases)
-	# find the furthest particle's index
-	r = np.sqrt(position[0]**2 +
-		    position[1]**2 +
-		    position[2]**2)
+        # find the furthest particle's index
+        r = np.sqrt(position[0]**2 +
+                    position[1]**2 +
+                    position[2]**2)
         A_index = r.argmax()
         mag_A = r.max()
         # designate the A vector
-	A_vector = (position[0][A_index],
-		    position[1][A_index],
-		    position[2][A_index])
+        A_vector = (position[0][A_index],
+                    position[1][A_index],
+                    position[2][A_index])
         # designate the e0 unit vector
         e0_vector = A_vector / mag_A
         # locate the tB particle position by finding the max B
-	e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
+        e0_vector_copy = np.empty((np.size(position[0]), 3), dtype='float64')
         for i in range(3):
             e0_vector_copy[:, i] = e0_vector[i]
         rr = np.array([position[0],
-		       position[1],
-		       position[2]]).T # Similar to tB_vector in old code.
+                       position[1],
+                       position[2]]).T # Similar to tB_vector in old code.
         tC_vector = np.cross(e0_vector_copy, rr)
         te2 = tC_vector.copy()
         for dim in range(3):
@@ -935,7 +937,7 @@
         Examples
         --------
         >>> params = halos[0].get_ellipsoid_parameters()
-	"""
+        """
 
         basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
         toreturn = [self.center_of_mass()]

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/halo_finding/hop/EnzoHop.c
--- a/yt/analysis_modules/halo_finding/hop/EnzoHop.c
+++ b/yt/analysis_modules/halo_finding/hop/EnzoHop.c
@@ -23,6 +23,10 @@
 
 #include "numpy/ndarrayobject.h"
 
+#ifndef Py_TYPE
+    #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
+#endif
+
 void initgrouplist(Grouplist *g);
 void hop_main(KD kd, HC *my_comm, float densthres);
 void regroup_main(float dens_outer, HC *my_comm);
@@ -294,7 +298,7 @@
    Py_XDECREF(self->zpos);
    Py_XDECREF(self->mass);
 
-   self->ob_type->tp_free((PyObject*)self);
+   Py_TYPE(self)->tp_free((PyObject*)self);
 }
 
 static PyObject *
@@ -335,7 +339,7 @@
 
     int median = kdMedianJst(self->kd, d, l, u);
 
-    PyObject *omedian = PyInt_FromLong((long)median);
+    PyObject *omedian = PyLong_FromLong((long)median);
     return omedian;
 }
 
@@ -368,8 +372,8 @@
 
 static PyTypeObject
 kDTreeTypeDict = {
-   PyObject_HEAD_INIT(NULL)
-   0,                         /* ob_size */
+   PyVarObject_HEAD_INIT(NULL, 0)
+                            /* ob_size */
    "kDTree",               /* tp_name */
    sizeof(kDTreeType),         /* tp_basicsize */
    0,                         /* tp_itemsize */
@@ -409,10 +413,32 @@
    0,                         /* tp_new */
 };
 
-void initEnzoHop(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_EnzoHop(void)
+#else
+#define _RETVAL 
+initEnzoHop(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "EnzoHop",           /* m_name */
+        "EnzoHop Module",    /* m_doc */
+        -1,                  /* m_size */
+        _HOPMethods,          /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("EnzoHop", _HOPMethods);
+#endif
     d = PyModule_GetDict(m);
     _HOPerror = PyErr_NewException("EnzoHop.HOPerror", NULL, NULL);
     PyDict_SetItemString(d, "error", _HOPerror);
@@ -426,6 +452,7 @@
    PyModule_AddObject(m, "kDTree", (PyObject*)&kDTreeTypeDict);
 
    import_array();
+   return _RETVAL;
 }
 
 /*

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -12,10 +12,10 @@
     try:
         rd = open("rockstar.cfg").read().strip()
     except IOError:
-        print "Reading Rockstar location from rockstar.cfg failed."
-        print "Please place the base directory of your"
-        print "Rockstar install in rockstar.cfg and restart."
-        print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+        print("Reading Rockstar location from rockstar.cfg failed.")
+        print("Please place the base directory of your")
+        print("Rockstar install in rockstar.cfg and restart.")
+        print("(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )")
         sys.exit(1)
     config.add_extension("rockstar_interface",
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/analysis_modules/sunyaev_zeldovich/api.py
--- a/yt/analysis_modules/sunyaev_zeldovich/api.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -9,4 +9,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from projection import SZProjection
+from .projection import SZProjection

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -16,16 +16,19 @@
 import inspect
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 analysis_task_registry = {}
 
+class RegisteredTask(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "skip") and cls.skip == False:
+            return
+        analysis_task_registry[cls.__name__] = cls
+
+ at add_metaclass(RegisteredTask)
 class AnalysisTask(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "skip") and cls.skip == False:
-                return
-            analysis_task_registry[cls.__name__] = cls
 
     def __init__(self, *args, **kwargs):
         # This should only get called if the subclassed object

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -13,20 +13,20 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from grid_patch import \
+from .grid_patch import \
     AMRGridPatch
 
-from octree_subset import \
+from .octree_subset import \
     OctreeSubset
 
-from static_output import \
+from .static_output import \
     Dataset
 
-from particle_io import \
+from .particle_io import \
     ParticleIOHandler, \
     particle_handler_registry
 
-from profiles import \
+from .profiles import \
     YTEmptyProfileData, \
     BinnedProfile, \
     BinnedProfile1D, \
@@ -37,21 +37,21 @@
     Profile2D, \
     Profile3D
 
-from time_series import \
+from .time_series import \
     DatasetSeries, \
     DatasetSeriesObject
 
-from analyzer_objects import \
+from .analyzer_objects import \
     AnalysisTask, analysis_task
 
-from data_containers import \
+from .data_containers import \
     data_object_registry
 
-import construction_data_containers as __cdc
-import selection_data_containers as __sdc
+from . import construction_data_containers as __cdc
+from . import selection_data_containers as __sdc
 
-from image_array import \
+from .image_array import \
     ImageArray
 
-from particle_filters import \
+from .particle_filters import \
     particle_filter

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -41,6 +41,7 @@
 from yt.fields.derived_field import \
     ValidateSpatial
 import yt.geometry.selection_routines
+from yt.extern.six import add_metaclass
 
 def force_array(item, shape):
     try:
@@ -72,7 +73,13 @@
     """
     pass
 
+class RegisteredDataContainer(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            data_object_registry[cls._type_name] = cls
 
+ at add_metaclass(RegisteredDataContainer)
 class YTDataContainer(object):
     """
     Generic YTDataContainer container.  By itself, will attempt to
@@ -87,12 +94,6 @@
     _field_cache = None
     _index = None
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                data_object_registry[cls._type_name] = cls
-
     def __init__(self, pf, field_parameters):
         """
         Typically this is never called directly, but only due to inheritance.
@@ -736,7 +737,7 @@
 
     def _get_pw(self, fields, center, width, origin, plot_type):
         axis = self.axis
-        self.fields = [k for k in self.field_data.keys()
+        self.fields = [k for k in self.field_data
                        if k not in self._key_fields]
         from yt.visualization.plot_window import \
             get_window_parameters, PWViewerMPL

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import exceptions
 import pdb
 import weakref
 import itertools

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -16,6 +16,7 @@
 import numpy as np
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 particle_handler_registry = defaultdict()
 
@@ -30,13 +31,14 @@
         return tr
     return save_state
 
+class RegisteredParticleIOType(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_source_type"):
+            particle_handler_registry[cls._source_type] = cls
+
+ at add_metaclass(RegisteredParticleIOType)
 class ParticleIOHandler(object):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_source_type"):
-                particle_handler_registry[cls._source_type] = cls
-
     _source_type = None
 
     def __init__(self, pf, source):

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -18,6 +18,7 @@
 import functools
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 
 from yt.config import ytcfg
 from yt.utilities.cosmology import \
@@ -67,6 +68,12 @@
         raise YTObjectNotImplemented(pf, obj_name)
     return _raise_unsupp
 
+class RegisteredDataset(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        output_type_registry[name] = cls
+        mylog.debug("Registering: %s as %s", name, cls)
+
 class IndexProxy(object):
     # This is a simple proxy for Index objects.  It enables backwards
     # compatibility so that operations like .h.sphere, .h.print_stats and
@@ -99,6 +106,7 @@
 
     return ireq
 
+ at add_metaclass(RegisteredDataset)
 class Dataset(object):
 
     default_fluid_type = "gas"
@@ -116,12 +124,6 @@
     derived_field_list = requires_index("derived_field_list")
     _instantiated = False
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            output_type_registry[name] = cls
-            mylog.debug("Registering: %s as %s", name, cls)
-
     def __new__(cls, filename=None, *args, **kwargs):
         from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, types.StringTypes):
@@ -220,7 +222,7 @@
             self.current_time, self.unique_identifier)
         try:
             import hashlib
-            return hashlib.md5(s).hexdigest()
+            return hashlib.md5(s.encode('utf-8')).hexdigest()
         except ImportError:
             return s.replace(";", "*")
 

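The hashlib change above is needed because Python 3 hash functions accept only bytes, not str, so the parameter string has to be encoded first. A quick illustrative check (the string content is made up):

    import hashlib

    s = "%s;%s" % (0.0, "an-example-identifier")   # made-up parameter string
    # hashlib.md5(s) raises TypeError on Python 3; encode to bytes first.
    print(hashlib.md5(s.encode("utf-8")).hexdigest())
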
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -16,6 +16,7 @@
 import inspect, functools, weakref, glob, types, os
 
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.convenience import load
 from yt.config import ytcfg
 from .data_containers import data_object_registry
@@ -375,16 +376,16 @@
         cls = getattr(pf.h, self.data_object_name)
         return cls(*self._args, **self._kwargs)
 
+class RegisteredSimulationTimeSeries(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        code_name = name[:name.find('Simulation')]
+        if code_name:
+            simulation_time_series_registry[code_name] = cls
+            mylog.debug("Registering simulation: %s as %s", code_name, cls)
 
+ at add_metaclass(RegisteredSimulationTimeSeries)
 class SimulationTimeSeries(DatasetSeries):
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            code_name = name[:name.find('Simulation')]
-            if code_name:
-                simulation_time_series_registry[code_name] = cls
-                mylog.debug("Registering simulation: %s as %s", code_name, cls)
-
     def __init__(self, parameter_filename, find_outputs=False):
         """
         Base class for generating simulation time series types.

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/extern/progressbar.py
--- a/yt/extern/progressbar.py
+++ b/yt/extern/progressbar.py
@@ -161,7 +161,7 @@
             return self.marker.update(pbar)
     def update(self, pbar, width):
         percent = pbar.percentage()
-        cwidth = width - len(self.left) - len(self.right)
+        cwidth = int(width - len(self.left) - len(self.right))
         marked_width = int(percent * cwidth / 100)
         m = self._format_marker(pbar)
         bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)

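The int() cast added in the progressbar hunk above appears to guard against the width arithmetic producing a float under Python 3, where / is always true division; string repetition and str.ljust() require integer arguments. A small illustration of the difference:

    # Python 2: 80 / 2 == 40 (int).  Python 3: 80 / 2 == 40.0 (float).
    width = 80 / 2

    # "=" * 40.0 and ljust(40.0) raise TypeError, hence the explicit cast.
    bar = ("=" * int(width)).ljust(int(width))
    print(len(bar))
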
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/extern/six.py
--- a/yt/extern/six.py
+++ b/yt/extern/six.py
@@ -2,32 +2,34 @@
 
 # Copyright (c) 2010-2013 Benjamin Peterson
 #
-# Permission is hereby granted, free of charge, to any person obtaining a copy of
-# this software and associated documentation files (the "Software"), to deal in
-# the Software without restriction, including without limitation the rights to
-# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-# the Software, and to permit persons to whom the Software is furnished to do so,
-# subject to the following conditions:
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
 #
 # The above copyright notice and this permission notice shall be included in all
 # copies or substantial portions of the Software.
 #
 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
 
 import operator
 import sys
 import types
 
 __author__ = "Benjamin Peterson <benjamin at python.org>"
-__version__ = "1.3.0"
+__version__ = "1.4.1"
 
 
-# True if we are running on Python 3.
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
 PY3 = sys.version_info[0] == 3
 
 if PY3:
@@ -61,7 +63,7 @@
         else:
             # 64-bit
             MAXSIZE = int((1 << 63) - 1)
-            del X
+        del X
 
 
 def _add_doc(func, doc):
@@ -136,13 +138,17 @@
 _moved_attributes = [
     MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
     MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
     MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
     MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
     MovedAttribute("reduce", "__builtin__", "functools"),
     MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserString", "UserString", "collections"),
     MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
     MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
 
     MovedModule("builtins", "__builtin__"),
     MovedModule("configparser", "ConfigParser"),
@@ -179,6 +185,9 @@
     MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
     MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                 "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
     MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
     MovedModule("winreg", "_winreg"),
 ]
@@ -186,7 +195,144 @@
     setattr(_MovedItems, attr.name, attr)
 del attr
 
-moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
+moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
+
+
+
+class Module_six_moves_urllib_parse(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
+sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib_error")
+sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib_request")
+sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib_response")
+sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(types.ModuleType):
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+sys.modules[__name__ + ".moves.urllib_robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib_robotparser")
+sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    parse = sys.modules[__name__ + ".moves.urllib_parse"]
+    error = sys.modules[__name__ + ".moves.urllib_error"]
+    request = sys.modules[__name__ + ".moves.urllib_request"]
+    response = sys.modules[__name__ + ".moves.urllib_response"]
+    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]
+
+
+sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
 
 
 def add_move(move):
@@ -252,11 +398,16 @@
     def get_unbound_function(unbound):
         return unbound
 
+    create_bound_method = types.MethodType
+
     Iterator = object
 else:
     def get_unbound_function(unbound):
         return unbound.im_func
 
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
     class Iterator(object):
 
         def next(self):
@@ -297,12 +448,16 @@
         return s.encode("latin-1")
     def u(s):
         return s
+    unichr = chr
     if sys.version_info[1] <= 1:
         def int2byte(i):
             return bytes((i,))
     else:
         # This is about 2x faster than the implementation above on 3.2+
         int2byte = operator.methodcaller("to_bytes", 1, "big")
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
     import io
     StringIO = io.StringIO
     BytesIO = io.BytesIO
@@ -311,7 +466,14 @@
         return s
     def u(s):
         return unicode(s, "unicode_escape")
+    unichr = unichr
     int2byte = chr
+    def byte2int(bs):
+        return ord(bs[0])
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    def iterbytes(buf):
+        return (ord(byte) for byte in buf)
     import StringIO
     StringIO = BytesIO = StringIO.StringIO
 _add_doc(b, """Byte literal""")
@@ -399,6 +561,17 @@
 _add_doc(reraise, """Reraise an exception.""")
 
 
-def with_metaclass(meta, base=object):
+def with_metaclass(meta, *bases):
     """Create a base class with a metaclass."""
-    return meta("NewBase", (base,), {})
+    return meta("NewBase", bases, {})
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        for slots_var in orig_vars.get('__slots__', ()):
+            orig_vars.pop(slots_var)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper

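The six changes above (and the repeated metaclass rewrites throughout this changeset) replace the Python-2-only __metaclass__ class attribute with the add_metaclass decorator, which re-creates the class under the given metaclass on both interpreters. A small registry sketch in the same spirit as the yt registries touched here:

    try:
        from yt.extern.six import add_metaclass   # vendored copy updated above
    except ImportError:
        from six import add_metaclass             # the standalone package behaves the same

    registry = {}

    class Registered(type):
        # Record every class created with this metaclass, as the yt
        # data-object / dataset registries above do.
        def __init__(cls, name, bases, d):
            type.__init__(cls, name, bases, d)
            registry[name] = cls

    @add_metaclass(Registered)
    class Base(object):
        pass

    class Child(Base):               # subclasses inherit the metaclass
        pass

    print(sorted(registry))          # ['Base', 'Child']
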
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -21,9 +21,6 @@
 
 from collections import \
      defaultdict
-from string import \
-     strip, \
-     rstrip
 from stat import \
      ST_CTIME
 
@@ -258,7 +255,8 @@
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
             try:
-                param, sep, vals = map(rstrip,line.partition(' '))
+                param, sep, vals = [v.rstrip() for v in line.partition(' ')]
+                #param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if chombo2enzoDict.has_key(param):

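The parameter-parsing change above replaces map(rstrip, ...) because the function form string.rstrip() no longer exists in Python 3; the str.rstrip() method inside a list comprehension works on both. For instance, with a made-up parameter line:

    line = "RefineBy = 2   "
    param, sep, vals = [v.rstrip() for v in line.partition(' ')]
    print(repr(param), repr(vals))   # 'RefineBy' '= 2'
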
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -272,7 +272,7 @@
         t1 = time.time()
         pattern = r"Pointer: Grid\[(\d*)\]->NextGrid(Next|This)Level = (\d*)\s+$"
         patt = re.compile(pattern)
-        f = open(self.index_filename, "rb")
+        f = open(self.index_filename, "rt")
         self.grids = [self.grid(1, self)]
         self.grids[0].Level = 0
         si, ei, LE, RE, fn, npart = [], [], [], [], [], []
@@ -989,11 +989,11 @@
     # the rest aligned on a blocksize boundary.  This may be more
     # efficient than having the last (first in file) block be short
     f.seek(-lastblock,2)
-    yield f.read(lastblock)
+    yield f.read(lastblock).decode('ascii')
 
     for i in range(fullblocks-1,-1, -1):
         f.seek(i * blocksize)
-        yield f.read(blocksize)
+        yield f.read(blocksize).decode('ascii')
 
 def rlines(f, keepends=False):
     """Iterate through the lines of a file in reverse order.

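The Enzo changes above open the index file in text mode ("rt") and decode the blocks read by rblocks() because, under Python 3, binary-mode reads return bytes and the existing text-mode regular expressions cannot be applied to them. A tiny illustration with a made-up file:

    import re

    with open("example_hierarchy.txt", "w") as f:            # hypothetical file
        f.write("Pointer: Grid[1]->NextGridThisLevel = 2\n")

    with open("example_hierarchy.txt", "rb") as f:
        block = f.read().decode("ascii")                      # bytes -> str, as in rblocks()

    print(re.search(r"Grid\[(\d*)\]", block).group(1))        # prints: 1
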
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -21,9 +21,6 @@
 
 from collections import \
      defaultdict
-from string import \
-     strip, \
-     rstrip
 from stat import \
      ST_CTIME
 
@@ -236,7 +233,8 @@
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
             try:
-                param, sep, vals = map(rstrip,line.partition(' '))
+                param, sep, vals = [v.rstrip() for v in line.partition(' ')]
+                #param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
             if pluto2enzoDict.has_key(param):

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -15,7 +15,6 @@
 
 from collections import defaultdict
 
-import exceptions
 import os
 import numpy as np
 

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import __builtin__
+import time, types, signal, inspect, traceback, sys, pdb, os, re
 import time, types, signal, inspect, traceback, sys, pdb, os, re
 import contextlib
 import warnings, struct, subprocess
@@ -22,6 +22,7 @@
 from math import floor, ceil
 from numbers import Number as numeric_type
 
+from yt.extern.six.moves import builtins
 from yt.utilities.exceptions import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.definitions import inv_axis_names, axis_names, x_dict, y_dict
@@ -341,7 +342,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       "__IPYTHON__" in dir(__builtin__) or \
+       "__IPYTHON__" in dir(builtins) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
@@ -406,11 +407,12 @@
     Should only be used in sys.excepthook.
     """
     sys.__excepthook__(exc_type, exc, tb)
-    import xmlrpclib, cStringIO
+    from yt.extern.six.moves import StringIO
+    import xmlrpclib
     p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
-    s = cStringIO.StringIO()
+    s = StringIO()
     traceback.print_exception(exc_type, exc, tb, file=s)
     s = s.getvalue()
     ret = p.pastes.newPaste('pytb', s, None, '', '', True)
@@ -423,8 +425,9 @@
     This is a traceback handler that knows how to paste to the pastebin.
     Should only be used in sys.excepthook.
     """
-    import xmlrpclib, cStringIO, cgitb
-    s = cStringIO.StringIO()
+    import xmlrpclib, cgitb
+    from yt.extern.six.moves import StringIO
+    s = StringIO()
     handler = cgitb.Hook(format="text", file = s)
     handler(exc_type, exc, tb)
     s = s.getvalue()

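The funcs.py hunks above swap the Python-2-only __builtin__ and cStringIO modules for their six.moves equivalents, which resolve to the right module on either interpreter. A short usage sketch; the try/except is only there so it runs without yt on the path:

    try:
        from yt.extern.six.moves import builtins, StringIO
    except ImportError:
        from six.moves import builtins, StringIO   # same mapping, standalone six

    # StringIO resolves to StringIO.StringIO on Python 2 and io.StringIO on 3.
    buf = StringIO()
    buf.write("version-agnostic in-memory text buffer\n")
    print(buf.getvalue())

    # builtins replaces __builtin__, e.g. for the IPython check in get_pbar().
    print("__IPYTHON__" in dir(builtins))
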
diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/gui/reason/basic_repl.py
--- a/yt/gui/reason/basic_repl.py
+++ b/yt/gui/reason/basic_repl.py
@@ -21,7 +21,7 @@
 import json
 import sys
 import traceback
-from cStringIO import StringIO
+from yt.extern.six.moves import StringIO
 
 class ProgrammaticREPL(object):
     stopped = False

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -17,7 +17,7 @@
 import json
 import os
 import stat
-import cStringIO
+from cStringIO import StringIO
 import logging
 import uuid
 import numpy as np
@@ -370,11 +370,11 @@
 
     @lockit
     def paste_session(self):
-        import xmlrpclib, cStringIO
+        import xmlrpclib
         p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
-        cs = cStringIO.StringIO()
+        cs = StringIO()
         cs.write("\n######\n".join(self.executed_cell_texts))
         cs = cs.getvalue()
         ret = p.pastes.newPaste('python', cs, None, '', '', True)
@@ -383,7 +383,7 @@
 
     @lockit
     def paste_text(self, to_paste):
-        import xmlrpclib, cStringIO
+        import xmlrpclib
         p = xmlrpclib.ServerProxy(
             "http://paste.yt-project.org/xmlrpc/",
             allow_none=True)
@@ -412,7 +412,7 @@
 
     @lockit
     def _session_py(self):
-        cs = cStringIO.StringIO()
+        cs = StringIO()
         cs.write("\n######\n".join(self.executed_cell_texts))
         cs.seek(0)
         response.headers["content-disposition"] = "attachment;"

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -119,8 +119,8 @@
 importer, which can be used to disable rank-asymmetric behavior in a
 module import:
 
-import __builtin__
-hasattr(__builtin__.__import__,"mpi_import")
+from yt.extern.six.moves import builtins
+hasattr(builtins.__import__,"mpi_import")
 
 This evaluates to True only when we're in an mpi_import() context
 manager.
@@ -141,8 +141,8 @@
 
 # Either importer is None (standard import) or it's a reference to
 # the mpi_import object that owns the current importer.
-import __builtin__
-importer = getattr(__builtin__.__import__,"mpi_import",None)
+from yt.extern.six.moves import builtins
+importer = getattr(builtins.__import__,"mpi_import",None)
 if importer:
     importer.callAfterImport(f)
 else:
@@ -192,7 +192,8 @@
  more information about the level parameter, run 'help(__import__)'.
 """
 
-import sys, imp, __builtin__,types
+import sys, imp, types
+from yt.extern.six.moves import builtins
 from mpi4py import MPI
 class mpi(object):
     rank = MPI.COMM_WORLD.Get_rank()
@@ -205,11 +206,11 @@
         imp.acquire_lock()
         __import_hook__.mpi_import = self
         self.__funcs = []
-        self.original_import = __builtin__.__import__
-        __builtin__.__import__ = __import_hook__
+        self.original_import = builtins.__import__
+        builtins.__import__ = __import_hook__
 
     def __exit__(self,type,value,traceback):
-        __builtin__.__import__ = self.original_import
+        builtins.__import__ = self.original_import
         __import_hook__.mpi_import = None
         imp.release_lock()
         for f in self.__funcs:

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/amr_kdtree/api.py
--- a/yt/utilities/amr_kdtree/api.py
+++ b/yt/utilities/amr_kdtree/api.py
@@ -12,4 +12,4 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from amr_kdtree import AMRKDTree
+from .amr_kdtree import AMRKDTree

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -18,6 +18,7 @@
 from yt.startup_tasks import parser, subparsers
 from yt.mods import *
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.utilities.minimal_representation import MinimalProjectDescription
 import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
 import urllib, urllib2, base64, os
@@ -41,9 +42,23 @@
     argc = dict(arg.items())
     argnames = []
     if "short" in argc: argnames.append(argc.pop('short'))
-    if "long" in argc: argnames.append(argc.pop('long'))
+    if "longname" in argc: argnames.append(argc.pop('longname'))
     sc.add_argument(*argnames, **argc)
 
+class YTCommandSubtype(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if cls.name is not None:
+            names = ensure_list(cls.name)
+            for name in names:
+                sc = subparsers.add_parser(name,
+                    description = cls.description,
+                    help = cls.description)
+                sc.set_defaults(func=cls.run)
+                for arg in cls.args:
+                    _add_arg(sc, arg)
+
+ at add_metaclass(YTCommandSubtype)
 class YTCommand(object):
     args = ()
     name = None
@@ -51,19 +66,6 @@
     aliases = ()
     npfs = 1
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if cls.name is not None:
-                names = ensure_list(cls.name)
-                for name in names:
-                    sc = subparsers.add_parser(name,
-                        description = cls.description,
-                        help = cls.description)
-                    sc.set_defaults(func=cls.run)
-                    for arg in cls.args:
-                        _add_arg(sc, arg)
-
     @classmethod
     def run(cls, args):
         self = cls()
@@ -98,192 +100,192 @@
         namespace.pf = [_fix_pf(pf) for pf in pfs]
 
 _common_options = dict(
-    all     = dict(long="--all", dest="reinstall",
+    all     = dict(longname="--all", dest="reinstall",
                    default=False, action="store_true",
                    help="Reinstall the full yt stack in the current location."),
     pf      = dict(short="pf", action=GetParameterFiles,
                    nargs="+", help="Parameter files to run on"),
     opf     = dict(action=GetParameterFiles, dest="pf",
                    nargs="*", help="(Optional) Parameter files to run on"),
-    axis    = dict(short="-a", long="--axis",
+    axis    = dict(short="-a", longname="--axis",
                    action="store", type=int,
                    dest="axis", default=4,
                    help="Axis (4 for all three)"),
-    log     = dict(short="-l", long="--log",
+    log     = dict(short="-l", longname="--log",
                    action="store_true",
                    dest="takelog", default=True,
                    help="Use logarithmic scale for image"),
-    linear  = dict(long="--linear",
+    linear  = dict(longname="--linear",
                    action="store_false",
                    dest="takelog",
                    help="Use linear scale for image"),
-    text    = dict(short="-t", long="--text",
+    text    = dict(short="-t", longname="--text",
                    action="store", type=str,
                    dest="text", default=None,
                    help="Textual annotation"),
-    field   = dict(short="-f", long="--field",
+    field   = dict(short="-f", longname="--field",
                    action="store", type=str,
                    dest="field", default="density",
                    help="Field to color by"),
-    weight  = dict(short="-g", long="--weight",
+    weight  = dict(short="-g", longname="--weight",
                    action="store", type=str,
                    dest="weight", default=None,
                    help="Field to weight projections with"),
-    cmap    = dict(long="--colormap",
+    cmap    = dict(longname="--colormap",
                    action="store", type=str,
                    dest="cmap", default="algae",
                    help="Colormap name"),
-    zlim    = dict(short="-z", long="--zlim",
+    zlim    = dict(short="-z", longname="--zlim",
                    action="store", type=float,
                    dest="zlim", default=None,
                    nargs=2,
                    help="Color limits (min, max)"),
-    dex     = dict(long="--dex",
+    dex     = dict(longname="--dex",
                    action="store", type=float,
                    dest="dex", default=None,
                    nargs=1,
                    help="Number of dex above min to display"),
-    width   = dict(short="-w", long="--width",
+    width   = dict(short="-w", longname="--width",
                    action="store", type=float,
                    dest="width", default=None,
                    help="Width in specified units"),
-    unit    = dict(short="-u", long="--unit",
+    unit    = dict(short="-u", longname="--unit",
                    action="store", type=str,
                    dest="unit", default='1',
                    help="Desired units"),
-    center  = dict(short="-c", long="--center",
+    center  = dict(short="-c", longname="--center",
                    action="store", type=float,
                    dest="center", default=None,
                    nargs=3,
                    help="Center, space separated (-1 -1 -1 for max)"),
-    max     = dict(short="-m", long="--max",
+    max     = dict(short="-m", longname="--max",
                    action="store_true",
                    dest="max",default=False,
                    help="Center the plot on the density maximum"),
-    bn      = dict(short="-b", long="--basename",
+    bn      = dict(short="-b", longname="--basename",
                    action="store", type=str,
                    dest="basename", default=None,
                    help="Basename of parameter files"),
-    output  = dict(short="-o", long="--output",
+    output  = dict(short="-o", longname="--output",
                    action="store", type=str,
                    dest="output", default="frames/",
                    help="Folder in which to place output images"),
-    outputfn= dict(short="-o", long="--output",
+    outputfn= dict(short="-o", longname="--output",
                    action="store", type=str,
                    dest="output", default=None,
                    help="File in which to place output"),
-    skip    = dict(short="-s", long="--skip",
+    skip    = dict(short="-s", longname="--skip",
                    action="store", type=int,
                    dest="skip", default=1,
                    help="Skip factor for outputs"),
-    proj    = dict(short="-p", long="--projection",
+    proj    = dict(short="-p", longname="--projection",
                    action="store_true",
                    dest="projection", default=False,
                    help="Use a projection rather than a slice"),
-    maxw    = dict(long="--max-width",
+    maxw    = dict(longname="--max-width",
                    action="store", type=float,
                    dest="max_width", default=1.0,
                    help="Maximum width in code units"),
-    minw    = dict(long="--min-width",
+    minw    = dict(longname="--min-width",
                    action="store", type=float,
                    dest="min_width", default=50,
                    help="Minimum width in units of smallest dx (default: 50)"),
-    nframes = dict(short="-n", long="--nframes",
+    nframes = dict(short="-n", longname="--nframes",
                    action="store", type=int,
                    dest="nframes", default=100,
                    help="Number of frames to generate"),
-    slabw   = dict(long="--slab-width",
+    slabw   = dict(longname="--slab-width",
                    action="store", type=float,
                    dest="slab_width", default=1.0,
                    help="Slab width in specified units"),
-    slabu   = dict(short="-g", long="--slab-unit",
+    slabu   = dict(short="-g", longname="--slab-unit",
                    action="store", type=str,
                    dest="slab_unit", default='1',
                    help="Desired units for the slab"),
-    ptype   = dict(long="--particle-type",
+    ptype   = dict(longname="--particle-type",
                    action="store", type=int,
                    dest="ptype", default=2,
                    help="Particle type to select"),
-    agecut  = dict(long="--age-cut",
+    agecut  = dict(longname="--age-cut",
                    action="store", type=float,
                    dest="age_filter", default=None,
                    nargs=2,
                    help="Bounds for the field to select"),
-    uboxes  = dict(long="--unit-boxes",
+    uboxes  = dict(longname="--unit-boxes",
                    action="store_true",
                    dest="unit_boxes",
                    help="Display helpful unit boxes"),
-    thresh  = dict(long="--threshold",
+    thresh  = dict(longname="--threshold",
                    action="store", type=float,
                    dest="threshold", default=None,
                    help="Density threshold"),
-    dm_only = dict(long="--all-particles",
+    dm_only = dict(longname="--all-particles",
                    action="store_false",
                    dest="dm_only", default=True,
                    help="Use all particles"),
-    grids   = dict(long="--show-grids",
+    grids   = dict(longname="--show-grids",
                    action="store_true",
                    dest="grids", default=False,
                    help="Show the grid boundaries"),
-    time    = dict(long="--time",
+    time    = dict(longname="--time",
                    action="store_true",
                    dest="time", default=False,
                    help="Print time in years on image"),
-    contours    = dict(long="--contours",
+    contours    = dict(longname="--contours",
                    action="store",type=int,
                    dest="contours", default=None,
                    help="Number of Contours for Rendering"),
-    contour_width  = dict(long="--contour_width",
+    contour_width  = dict(longname="--contour_width",
                    action="store",type=float,
                    dest="contour_width", default=None,
                    help="Width of gaussians used for rendering."),
-    enhance   = dict(long="--enhance",
+    enhance   = dict(longname="--enhance",
                    action="store_true",
                    dest="enhance", default=False,
                    help="Enhance!"),
-    valrange  = dict(short="-r", long="--range",
+    valrange  = dict(short="-r", longname="--range",
                    action="store", type=float,
                    dest="valrange", default=None,
                    nargs=2,
                    help="Range, space separated"),
-    up  = dict(long="--up",
+    up  = dict(longname="--up",
                    action="store", type=float,
                    dest="up", default=None,
                    nargs=3,
                    help="Up, space separated"),
-    viewpoint  = dict(long="--viewpoint",
+    viewpoint  = dict(longname="--viewpoint",
                    action="store", type=float,
                    dest="viewpoint", default=[1., 1., 1.],
                    nargs=3,
                    help="Viewpoint, space separated"),
-    pixels    = dict(long="--pixels",
+    pixels    = dict(longname="--pixels",
                    action="store",type=int,
                    dest="pixels", default=None,
                    help="Number of Pixels for Rendering"),
-    halos   = dict(long="--halos",
+    halos   = dict(longname="--halos",
                    action="store", type=str,
                    dest="halos",default="multiple",
                    help="Run halo profiler on a 'single' halo or 'multiple' halos."),
-    halo_radius = dict(long="--halo_radius",
+    halo_radius = dict(longname="--halo_radius",
                        action="store", type=float,
                        dest="halo_radius",default=0.1,
                        help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1."),
-    halo_radius_units = dict(long="--halo_radius_units",
+    halo_radius_units = dict(longname="--halo_radius_units",
                              action="store", type=str,
                              dest="halo_radius_units",default="1",
                              help="Units for radius used with --halo_radius flag. Default: '1' (code units)."),
-    halo_hop_style = dict(long="--halo_hop_style",
+    halo_hop_style = dict(longname="--halo_hop_style",
                           action="store", type=str,
                           dest="halo_hop_style",default="new",
                           help="Style of hop output file.  'new' for yt_hop files and 'old' for enzo_hop files."),
-    halo_parameter_file = dict(long="--halo_parameter_file",
+    halo_parameter_file = dict(longname="--halo_parameter_file",
                                action="store", type=str,
                                dest="halo_parameter_file",default=None,
                                help="HaloProfiler parameter file."),
-    make_profiles = dict(long="--make_profiles",
+    make_profiles = dict(longname="--make_profiles",
                          action="store_true", default=False,
                          help="Make profiles with halo profiler."),
-    make_projections = dict(long="--make_projections",
+    make_projections = dict(longname="--make_projections",
                             action="store_true", default=False,
                             help="Make projections with halo profiler.")
 
@@ -867,7 +869,7 @@
 class YTHubSubmitCmd(YTCommand):
     name = "hub_submit"
     args = (
-            dict(long="--repo", action="store", type=str,
+            dict(longname="--repo", action="store", type=str,
                  dest="repo", default=".", help="Repository to upload"),
            )
     description = \
@@ -1039,10 +1041,10 @@
 class YTInstInfoCmd(YTCommand):
     name = ["instinfo", "version"]
     args = (
-            dict(short="-u", long="--update-source", action="store_true",
+            dict(short="-u", longname="--update-source", action="store_true",
                  default = False,
                  help="Update the yt installation, if able"),
-            dict(short="-o", long="--output-version", action="store",
+            dict(short="-o", longname="--output-version", action="store",
                   default = None, dest="outputfile",
                   help="File into which the current revision number will be" +
                        "stored")
@@ -1138,9 +1140,9 @@
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
-            dict(short="-a", long="--axis", action="store", type=int,
+            dict(short="-a", longname="--axis", action="store", type=int,
                  dest="axis", default=0, help="Axis (4 for all three)"),
-            dict(short ="-o", long="--host", action="store", type=str,
+            dict(short ="-o", longname="--host", action="store", type=str,
                    dest="host", default=None, help="IP Address to bind on"),
             "pf",
             )
@@ -1179,23 +1181,23 @@
 class YTPastebinCmd(YTCommand):
     name = "pastebin"
     args = (
-             dict(short="-l", long="--language", action="store",
+             dict(short="-l", longname="--language", action="store",
                   default = None, dest="language",
                   help="Use syntax highlighter for the file in language"),
-             dict(short="-L", long="--languages", action="store_true",
+             dict(short="-L", longname="--languages", action="store_true",
                   default = False, dest="languages",
                   help="Retrive a list of supported languages"),
-             dict(short="-e", long="--encoding", action="store",
+             dict(short="-e", longname="--encoding", action="store",
                   default = 'utf-8', dest="encoding",
                   help="Specify the encoding of a file (default is "
                         "utf-8 or guessing if available)"),
-             dict(short="-b", long="--open-browser", action="store_true",
+             dict(short="-b", longname="--open-browser", action="store_true",
                   default = False, dest="open_browser",
                   help="Open the paste in a web browser"),
-             dict(short="-p", long="--private", action="store_true",
+             dict(short="-p", longname="--private", action="store_true",
                   default = False, dest="private",
                   help="Paste as private"),
-             dict(short="-c", long="--clipboard", action="store_true",
+             dict(short="-c", longname="--clipboard", action="store_true",
                   default = False, dest="clipboard",
                   help="File to output to; else, print."),
              dict(short="file", type=str),
@@ -1420,7 +1422,7 @@
 
         """
     args = (
-            dict(short="-t", long="--task", action="store",
+            dict(short="-t", longname="--task", action="store",
                  default = 0, dest='task',
                  help="Open a web browser."),
            )
@@ -1432,13 +1434,13 @@
 class YTNotebookCmd(YTCommand):
     name = ["notebook"]
     args = (
-            dict(short="-o", long="--open-browser", action="store_true",
+            dict(short="-o", longname="--open-browser", action="store_true",
                  default = False, dest='open_browser',
                  help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
+            dict(short="-p", longname="--port", action="store",
                  default = 0, dest='port',
                  help="Port to listen on; defaults to auto-detection."),
-            dict(short="-n", long="--no-password", action="store_true",
+            dict(short="-n", longname="--no-password", action="store_true",
                  default = False, dest='no_password',
                  help="If set, do not prompt or use a password."),
             )
@@ -1495,19 +1497,19 @@
 class YTGUICmd(YTCommand):
     name = ["serve", "reason"]
     args = (
-            dict(short="-o", long="--open-browser", action="store_true",
+            dict(short="-o", longname="--open-browser", action="store_true",
                  default = False, dest='open_browser',
                  help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
+            dict(short="-p", longname="--port", action="store",
                  default = 0, dest='port',
                  help="Port to listen on"),
-            dict(short="-f", long="--find", action="store_true",
+            dict(short="-f", longname="--find", action="store_true",
                  default = False, dest="find",
-                 help="At startup, find all *.index files in the CWD"),
-            dict(short="-d", long="--debug", action="store_true",
+                 help="At startup, find all *.hierarchy files in the CWD"),
+            dict(short="-d", longname="--debug", action="store_true",
                  default = False, dest="debug",
                  help="Add a debugging mode for cell execution"),
-            dict(short = "-r", long = "--remote", action = "store_true",
+            dict(short = "-r", longname = "--remote", action = "store_true",
                  default = False, dest="use_pyro",
                  help = "Use with a remote Pyro4 server."),
             "opf"
@@ -1558,9 +1560,9 @@
 
 class YTStatsCmd(YTCommand):
     args = ('outputfn','bn','skip','pf','field',
-            dict(long="--max", action='store_true', default=False,
+            dict(longname="--max", action='store_true', default=False,
                  dest='max', help="Display maximum of field requested through -f option."),
-            dict(long="--min", action='store_true', default=False,
+            dict(longname="--min", action='store_true', default=False,
                  dest='min', help="Display minimum of field requested through -f option."))
     name = "stats"
     description = \
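
The long/longname rename runs through every option dictionary above because these dictionaries are ultimately expanded into argparse arguments. A minimal sketch of how dictionaries of this shape can be consumed; the _add_arg helper below is illustrative, not yt's actual implementation:

    import argparse

    _common_options = {
        "field": dict(short="-f", longname="--field", action="store", type=str,
                      dest="field", default="density", help="Field to color by"),
    }

    def _add_arg(parser, arg):
        # Accept either a key into _common_options or a literal option dict.
        if isinstance(arg, str):
            arg = _common_options[arg]
        arg = dict(arg)  # copy so the shared dict is not mutated
        names = [n for n in (arg.pop("short", None), arg.pop("longname", None)) if n]
        parser.add_argument(*names, **arg)

    parser = argparse.ArgumentParser(prog="yt")
    _add_arg(parser, "field")
    print(parser.parse_args(["-f", "temperature"]).field)   # -> temperature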

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -24,6 +24,14 @@
 #define max(A,B) ((A) > (B) ? (A) : (B))
 #define min(A,B) ((A) < (B) ? (A) : (B))
 
+#if PY_MAJOR_VERSION >= 3
+#define PYINTCONV_AS   PyLong_AsLong
+#define PYINTCONV_FROM PyLong_FromLong
+#else
+#define PYINTCONV_AS   PyInt_AsLong
+#define PYINTCONV_FROM PyInt_FromLong
+#endif
+
 static PyObject *_combineGridsError;
 
 static PyObject *
@@ -236,7 +244,7 @@
     free(src_vals);
     free(dst_vals);
 
-    PyObject *onum_found = PyInt_FromLong((long)num_found);
+    PyObject *onum_found = PYINTCONV_FROM((long)num_found);
     return onum_found;
 
 _fail:
@@ -548,7 +556,7 @@
     free(g_data);
     free(c_data);
 
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
     
 _fail:
@@ -798,7 +806,7 @@
     }
     free(g_data);
     free(c_data);
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -1012,7 +1020,7 @@
     if(dls!=NULL)free(dls);
     if(g_data!=NULL)free(g_data);
     if(c_data!=NULL)free(c_data);
-    PyObject *status = PyInt_FromLong(total);
+    PyObject *status = PYINTCONV_FROM(total);
     return status;
 
 _fail:
@@ -1099,7 +1107,7 @@
     Py_DECREF(yi);
     Py_DECREF(zi);
 
-    PyObject *retval = PyInt_FromLong(status);
+    PyObject *retval = PYINTCONV_FROM(status);
     return retval;
 
     _fail:
@@ -1436,10 +1444,33 @@
 __declspec(dllexport)
 #endif
 
-void initdata_point_utilities(void)
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit_data_point_utilities(void)
+#else
+#define _RETVAL 
+initdata_point_utilities(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "data_point_utilities",           /* m_name */
+        "Utilities for data combination.\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        _combineMethods,     /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("data_point_utilities", _combineMethods);
+#endif
     d = PyModule_GetDict(m);
     _combineGridsError = PyErr_NewException("data_point_utilities.CombineGridsError", NULL, NULL);
     PyDict_SetItemString(d, "error", _combineGridsError);
@@ -1452,6 +1483,7 @@
     _outputFloatsToFileError = PyErr_NewException("data_point_utilities.OutputFloatsToFileError", NULL, NULL);
     PyDict_SetItemString(d, "error", _outputFloatsToFileError);
     import_array();
+    return _RETVAL;
 }
 
 /*

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -15,16 +15,19 @@
 
 import numpy as np # For modern purposes
 from yt.utilities.lib.misc_utilities import grow_flagging_field
+from yt.extern.six import add_metaclass
 
 flagging_method_registry = {}
 
+class RegisteredFlaggingMethod(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_type_name") and not cls._skip_add:
+            flagging_method_registry[cls._type_name] = cls
+
+ at add_metaclass(RegisteredFlaggingMethod)
 class FlaggingMethod(object):
     _skip_add = False
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                flagging_method_registry[cls._type_name] = cls
 
 class OverDensity(FlaggingMethod):
     _type_name = "overdensity"
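
This hunk replaces the Python 2-only __metaclass__ class attribute with six.add_metaclass, which works under both Python 2 and 3. A self-contained sketch of the same registration pattern, simplified (the _skip_add guard is dropped here for brevity):

    from six import add_metaclass   # yt carries this as yt.extern.six

    flagging_method_registry = {}

    class RegisteredFlaggingMethod(type):
        def __init__(cls, name, bases, attrs):
            type.__init__(cls, name, bases, attrs)
            if hasattr(cls, "_type_name"):
                flagging_method_registry[cls._type_name] = cls

    @add_metaclass(RegisteredFlaggingMethod)
    class FlaggingMethod(object):
        pass

    class OverDensity(FlaggingMethod):
        _type_name = "overdensity"

    print(sorted(flagging_method_registry))   # -> ['overdensity']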

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/grid_data_format/conversion/conversion_athena.py
--- a/yt/utilities/grid_data_format/conversion/conversion_athena.py
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -6,9 +6,6 @@
 from glob import glob
 from collections import \
     defaultdict
-from string import \
-    strip, \
-    rstrip
 from stat import \
     ST_CTIME
 

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -21,22 +21,24 @@
 import os
 import h5py
 import numpy as np
+from yt.extern.six import add_metaclass
 
 _axis_ids = {0:2,1:1,2:0}
 
 io_registry = {}
 
+class RegisteredIOHandler(type):
+    def __init__(cls, name, b, d):
+        type.__init__(cls, name, b, d)
+        if hasattr(cls, "_dataset_type"):
+            io_registry[cls._dataset_type] = cls
+
+ at add_metaclass(RegisteredIOHandler)
 class BaseIOHandler(object):
     _vector_fields = ()
     _dataset_type = None
     _particle_reader = False
 
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_dataset_type"):
-                io_registry[cls._dataset_type] = cls
-
     def __init__(self, pf):
         self.queue = defaultdict(dict)
         self.pf = pf

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/kdtree/api.py
--- a/yt/utilities/kdtree/api.py
+++ b/yt/utilities/kdtree/api.py
@@ -1,4 +1,4 @@
-from fKDpy import \
+from .fKDpy import \
     chainHOP_tags_dens, \
     create_tree, \
     fKD, \
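
Bare intra-package imports like "from fKDpy import ..." stop working under Python 3, where imports are absolute by default; the dotted form is explicit and portable. The throwaway package below (names are invented) demonstrates the portable spelling:

    import os, sys, tempfile

    pkg_root = tempfile.mkdtemp()
    os.makedirs(os.path.join(pkg_root, "demo"))
    open(os.path.join(pkg_root, "demo", "__init__.py"), "w").close()
    with open(os.path.join(pkg_root, "demo", "fKDpy.py"), "w") as f:
        f.write("def create_tree():\n    return 'tree'\n")
    with open(os.path.join(pkg_root, "demo", "api.py"), "w") as f:
        f.write("from .fKDpy import create_tree\n")  # bare "from fKDpy import ..." fails on Py3

    sys.path.insert(0, pkg_root)
    from demo.api import create_tree
    print(create_tree())   # -> tree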

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -20,7 +20,7 @@
         # Attempt to compile a test script.
         # See http://openmp.org/wp/openmp-compilers/
         filename = r'test.c'
-        file = open(filename,'w', 0)
+        file = open(filename,'wt', 1)
         file.write(
             "#include <omp.h>\n"
             "#include <stdio.h>\n"
@@ -151,7 +151,7 @@
         gpd = os.environ["GPERFTOOLS"]
         idir = os.path.join(gpd, "include")
         ldir = os.path.join(gpd, "lib")
-        print "INCLUDE AND LIB DIRS", idir, ldir
+        print("INCLUDE AND LIB DIRS", idir, ldir)
         config.add_extension("perftools_wrap",
                 ["yt/utilities/lib/perftools_wrap.pyx"],
                 libraries=["profiler"],
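
The open() change above (buffering 0 to 1) is needed because Python 3 only allows fully unbuffered I/O in binary mode; for a text-mode file the closest option is line buffering. A quick illustration:

    import os, tempfile

    path = os.path.join(tempfile.mkdtemp(), "test.c")

    # On Python 3, open(path, "w", 0) raises ValueError: can't have unbuffered text I/O.
    try:
        open(path, "w", 0)
    except ValueError as err:
        print(err)

    # Line-buffered text mode works on both Python 2 and 3.
    with open(path, "wt", 1) as handle:
        handle.write("#include <omp.h>\n")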

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -20,6 +20,7 @@
 from tempfile import TemporaryFile
 from yt.config import ytcfg
 from yt.funcs import *
+from yt.extern.six import add_metaclass
 from yt.utilities.exceptions import *
 
 from .poster.streaminghttp import register_openers
@@ -41,8 +42,8 @@
 class ContainerClass(object):
     pass
 
+ at add_metaclass(abc.ABCMeta)
 class MinimalRepresentation(object):
-    __metaclass__ = abc.ABCMeta
 
     def _update_attrs(self, obj, attr_list):
         for attr in attr_list:

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/poster/streaminghttp.py
--- a/yt/utilities/poster/streaminghttp.py
+++ b/yt/utilities/poster/streaminghttp.py
@@ -26,6 +26,7 @@
 ...                       {'Content-Length': str(len(s))})
 """
 
+import httplib as httplib # hack for py3
 import httplib, urllib2, socket
 from httplib import NotConnected
 

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -13,7 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cmd, pdb, cStringIO, xmlrpclib, socket, sys
+import cmd, pdb, xmlrpclib, socket, sys
+from yt.extern.six.moves import StringIO
 import traceback
 import signal
 from SimpleXMLRPCServer import SimpleXMLRPCServer
@@ -65,9 +66,9 @@
 
 class pdb_handler(object):
     def __init__(self, tb):
-        self.cin = cStringIO.StringIO()
+        self.cin = StringIO()
         sys.stdin = self.cin
-        self.cout = cStringIO.StringIO()
+        self.cout = StringIO()
         sys.stdout = self.cout
         sys.stderr = self.cout
         self.debugger = pdb.Pdb(stdin=self.cin, stdout=self.cout)
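
cStringIO does not exist on Python 3; the rpdb change routes the import through yt's vendored six so the right implementation is picked on either interpreter. The same idea, spelled out without six:

    try:
        from cStringIO import StringIO   # Python 2
    except ImportError:
        from io import StringIO          # Python 3

    buf = StringIO()
    buf.write("captured output\n")
    print(buf.getvalue())   # -> captured output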

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/spatial/__init__.py
--- a/yt/utilities/spatial/__init__.py
+++ b/yt/utilities/spatial/__init__.py
@@ -23,12 +23,12 @@
 """
 
 from kdtree import *
-from ckdtree import *
+from .ckdtree import *
 #from qhull import *
 
 __all__ = filter(lambda s: not s.startswith('_'), dir())
 __all__ += ['distance']
 
-import distance
+from . import distance
 from numpy.testing import Tester
 test = Tester().test

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/spatial/distance.py
--- a/yt/utilities/spatial/distance.py
+++ b/yt/utilities/spatial/distance.py
@@ -114,7 +114,7 @@
 import numpy as np
 from numpy.linalg import norm
 
-import _distance_wrap
+from . import _distance_wrap
 
 
 def _copy_array_if_base_present(a):

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -9,7 +9,7 @@
 
     config = Configuration('spatial', parent_package, top_path)
 
-    config.add_data_dir('tests')
+    config.add_data_dir('yt/utilities/spatial/tests')
 
 #    qhull_src = ['geom2.c', 'geom.c', 'global.c', 'io.c', 'libqhull.c',
 #                 'mem.c', 'merge.c', 'poly2.c', 'poly.c', 'qset.c',

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/visualization/_MPL.c
--- a/yt/visualization/_MPL.c
+++ b/yt/visualization/_MPL.c
@@ -427,12 +427,37 @@
 __declspec(dllexport)
 #endif
 
-void init_MPL(void)
+
+PyMODINIT_FUNC
+#if PY_MAJOR_VERSION >= 3
+#define _RETVAL m
+PyInit__MPL(void)
+#else
+#define _RETVAL 
+init_MPL(void)
+#endif
 {
     PyObject *m, *d;
+#if PY_MAJOR_VERSION >= 3
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "_MPL",           /* m_name */
+        "Pixelization routines\n",
+                             /* m_doc */
+        -1,                  /* m_size */
+        __MPLMethods,    /* m_methods */
+        NULL,                /* m_reload */
+        NULL,                /* m_traverse */
+        NULL,                /* m_clear */
+        NULL,                /* m_free */
+    };
+    m = PyModule_Create(&moduledef); 
+#else
     m = Py_InitModule("_MPL", __MPLMethods);
+#endif
     d = PyModule_GetDict(m);
     _pixelizeError = PyErr_NewException("_MPL.error", NULL, NULL);
     PyDict_SetItemString(d, "error", _pixelizeError);
     import_array();
+    return _RETVAL;
 }

diff -r 85aed0cda09466c9635c6539e36f8bd972ac0eaa -r 428c48489d93823bf3244dc70ca59a9318e36046 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7803,6 +7803,6 @@
 # and append a "_r" (for reversal. consistent with MPL convention).
 # So for example, the reversal of "Waves" is "Waves_r"
 temp = {}
-for k,v in color_map_luts.iteritems():
+for k,v in color_map_luts.items():
     temp[k+"_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
 color_map_luts.update(temp)

This diff is so big that we needed to truncate the remainder.
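
The colormap hunk above swaps iteritems() for items(), since dict.iteritems() was removed in Python 3; items() returns a list on Python 2 and a view on Python 3, and either works for this loop. A small self-contained version of the same reversal loop:

    color_map_luts = {"Waves": ([0.0, 1.0], [0.1, 0.9], [0.2, 0.8], [1.0, 1.0])}

    temp = {}
    for k, v in color_map_luts.items():        # .iteritems() would fail on Python 3
        temp[k + "_r"] = (v[0][::-1], v[1][::-1], v[2][::-1], v[3][::-1])
    color_map_luts.update(temp)

    print(sorted(color_map_luts))   # -> ['Waves', 'Waves_r']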

https://bitbucket.org/yt_analysis/yt/commits/bfa1eb894643/
Changeset:   bfa1eb894643
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-16 23:41:40
Summary:     yt now builds and imports in python3.
Affected #:  13 files

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -14,7 +14,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from exceptions import IOError
 import h5py
 import numpy as np
 import os

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -17,10 +17,8 @@
 import numpy as np
 import math
 import weakref
-import exceptions
 import itertools
 import shelve
-from exceptions import ValueError, KeyError
 from functools import wraps
 import fileinput
 from re import finditer

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -16,7 +16,6 @@
 
 import types
 import numpy as np
-from exceptions import ValueError, SyntaxError
 
 from yt.funcs import *
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -19,10 +19,10 @@
 import cStringIO
 
 from .definitions import ARTIOconstants
-from _artio_caller import \
+from ._artio_caller import \
     artio_is_valid, artio_fileset, ARTIOOctreeContainer, \
     ARTIORootMeshContainer, ARTIOSFCRangeHandler
-import _artio_caller
+from . import _artio_caller
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 from .fields import \

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -19,7 +19,6 @@
 import itertools
 
 from collections import defaultdict
-from string import strip, rstrip
 from stat import ST_CTIME
 
 import numpy as np

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -56,7 +56,6 @@
     Class representing a single Enzo Grid instance.
     """
 
-    __slots__ = ["NumberOfActiveParticles"]
     def __init__(self, id, index):
         """
         Returns an instance of EnzoGrid with *id*, associated with

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -13,7 +13,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import exceptions
 import os
 
 from yt.utilities.io_handler import \

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -15,7 +15,6 @@
 
 import numpy as np
 import h5py
-import exceptions
 from yt.funcs import \
     mylog
 from yt.utilities.io_handler import \

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -18,8 +18,6 @@
 import cPickle
 import weakref
 import h5py
-from exceptions import IOError, TypeError
-from types import ClassType
 import numpy as np
 import abc
 import copy

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -12,7 +12,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import md5
+import hashlib
 import cPickle
 import itertools as it
 import numpy as np
@@ -582,7 +582,7 @@
             st = _rv.std(dtype="float64")
             su = _rv.sum(dtype="float64")
             si = _rv.size
-            ha = md5.md5(_rv.tostring()).hexdigest()
+            ha = hashlib.md5(_rv.tostring()).hexdigest()
             fn = "func_results_ref_%s.cpkl" % (name)
             with open(fn, "wb") as f:
                 cPickle.dump( (mi, ma, st, su, si, ha), f)
@@ -606,7 +606,7 @@
                     _rv.std(dtype="float64"),
                     _rv.sum(dtype="float64"),
                     _rv.size,
-                    md5.md5(_rv.tostring()).hexdigest() )
+                    hashlib.md5(_rv.tostring()).hexdigest() )
             fn = "func_results_ref_%s.cpkl" % (name)
             if not os.path.exists(fn):
                 print "Answers need to be created with --answer-reference ."
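
The standalone md5 module was removed in favor of hashlib, which is what the testing.py hunks above switch to. A short example of the replacement (tobytes() is the modern spelling of the tostring() call used in the changeset):

    import hashlib
    import numpy as np

    arr = np.arange(8, dtype="float64")
    # md5.md5(arr.tostring()).hexdigest() on old Python; hashlib works everywhere.
    print(hashlib.md5(arr.tobytes()).hexdigest())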

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -497,7 +497,7 @@
 
             if symbol_str not in latex_symbol_lut:
                 latex_symbol_lut[symbol_str] = \
-                    string.replace(latex_symbol_lut[symbol_wo_prefix],
+                    latex_symbol_lut[symbol_wo_prefix].replace(
                                    '{'+symbol_wo_prefix+'}', '{'+symbol_str+'}')
 
             # don't forget to account for the prefix value!
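
string.replace() and the other function forms in the string module are gone in Python 3; the equivalent str methods work everywhere, which is the point of the unit_object.py change above. For instance:

    s = "code_length"
    # string.replace(s, "length", "time") is unavailable on Python 3;
    # the bound str method works on both interpreters.
    print(s.replace("length", "time"))   # -> code_time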

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -15,7 +15,6 @@
 
 
 # We don't need to import 'exceptions'
-#import exceptions
 import os.path
 
 class YTException(Exception):

diff -r 428c48489d93823bf3244dc70ca59a9318e36046 -r bfa1eb894643cb9951548691a4813b21f912b6e4 yt/utilities/io_handler.py
--- a/yt/utilities/io_handler.py
+++ b/yt/utilities/io_handler.py
@@ -16,7 +16,6 @@
 from collections import defaultdict
 
 from yt.funcs import mylog
-import exceptions
 import cPickle
 import os
 import h5py


https://bitbucket.org/yt_analysis/yt/commits/06d771dcb5cd/
Changeset:   06d771dcb5cd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-18 21:50:04
Summary:     Fixing a few more Python 3 errors.
Affected #:  3 files

diff -r bfa1eb894643cb9951548691a4813b21f912b6e4 -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -245,21 +245,8 @@
         """ Returns units, parameters, or conversion_factors in that order. """
         return self.parameters[key]
 
-    def keys(self):
-        """
-        Returns a list of possible keys, from units, parameters and
-        conversion_factors.
-
-        """
-        return self.units.keys() \
-             + self.time_units.keys() \
-             + self.parameters.keys() \
-             + self.conversion_factors.keys()
-
     def __iter__(self):
-        for ll in [self.units, self.time_units,
-                   self.parameters, self.conversion_factors]:
-            for i in ll.keys(): yield i
+      for i in self.parameters: yield i
 
     def get_smallest_appropriate_unit(self, v):
         max_nu = 1e30

diff -r bfa1eb894643cb9951548691a4813b21f912b6e4 -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -130,7 +130,7 @@
     def __new__(cls, outputs, *args, **kwargs):
         if isinstance(outputs, basestring):
             outputs = get_filenames_from_glob_pattern(outputs)
-        ret = super(DatasetSeries, cls).__new__(cls, outputs, *args, **kwargs)
+        ret = super(DatasetSeries, cls).__new__(cls, *args, **kwargs)
         try:
             ret._pre_outputs = outputs[:]
         except TypeError:

diff -r bfa1eb894643cb9951548691a4813b21f912b6e4 -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -52,7 +52,7 @@
 
     @property
     def _read_exception(self):
-        return (exceptions.KeyError,)
+        return (KeyError,)
 
     def _read_particle_coords(self, chunks, ptf):
         chunks = list(chunks)
@@ -62,7 +62,7 @@
                 if g.filename is None: continue
                 if f is None:
                     #print "Opening (count) %s" % g.filename
-                    f = h5py.File(g.filename, "r")
+                    f = h5py.File(g.filename.encode('ascii'), "r")
                 nap = sum(g.NumberOfActiveParticles.values())
                 if g.NumberOfParticles == 0 and nap == 0:
                     continue
@@ -88,7 +88,7 @@
                 if g.filename is None: continue
                 if f is None:
                     #print "Opening (read) %s" % g.filename
-                    f = h5py.File(g.filename, "r")
+                    f = h5py.File(g.filename.encode('ascii'), "r")
                 nap = sum(g.NumberOfActiveParticles.values())
                 if g.NumberOfParticles == 0 and nap == 0:
                     continue
@@ -120,7 +120,7 @@
             if not (len(chunks) == len(chunks[0].objs) == 1):
                 raise RuntimeError
             g = chunks[0].objs[0]
-            f = h5py.File(g.filename, 'r')
+            f = h5py.File(g.filename.encode('ascii'), 'r')
             gds = f.get("/Grid%08i" % g.id)
             for ftype, fname in fields:
                 if fname in gds:
@@ -145,14 +145,15 @@
             for g in chunk.objs:
                 if g.filename is None: continue
                 if fid is None:
-                    fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+                    fid = h5py.h5f.open(g.filename.encode('ascii'), h5py.h5f.ACC_RDONLY)
                 data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
                 data_view = data.swapaxes(0,2)
                 nd = 0
                 for field in fields:
                     ftype, fname = field
                     try:
-                        dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
+                        node = "/Grid%08i/%s" % (g.id, fname)
+                        dg = h5py.h5d.open(fid, node.encode('ascii'))
                     except KeyError:
                         if fname == "Dark_Matter_Density": continue
                         raise
@@ -185,14 +186,15 @@
                 if fid is not None: fid.close()
                 fid = None
             if fid is None:
-                fid = h5py.h5f.open(g.filename, h5py.h5f.ACC_RDONLY)
+                fid = h5py.h5f.open(g.filename.encode('ascii'), h5py.h5f.ACC_RDONLY)
                 fn = g.filename
             data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
             data_view = data.swapaxes(0,2)
             for field in fluid_fields:
                 ftype, fname = field
                 try:
-                    dg = h5py.h5d.open(fid, "/Grid%08i/%s" % (g.id, fname))
+                    node = "/Grid%08i/%s" % (g.id, fname)
+                    dg = h5py.h5d.open(fid, node.encode('ascii'))
                 except KeyError:
                     if fname == "Dark_Matter_Density": continue
                     raise
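
The .encode('ascii') calls above hand byte strings to h5py's low-level h5f/h5d interface, which at the time did not accept Python 3 str names. A tiny helper in the same spirit (illustrative only, not part of yt):

    def _to_bytes(name, encoding="ascii"):
        # Pass bytes through untouched; encode Python 3 (unicode) strings.
        if isinstance(name, bytes):
            return name
        return name.encode(encoding)

    print(_to_bytes("/Grid%08i/%s" % (1, "Density")))   # -> b'/Grid00000001/Density'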


https://bitbucket.org/yt_analysis/yt/commits/e843abd3b641/
Changeset:   e843abd3b641
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-18 21:50:33
Summary:     Merging
Affected #:  6 files

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
    :maxdepth: 1
+
    halo_catalogs
    halo_finding
    halo_mass_function

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,6 +154,9 @@
 from yt.convenience import \
     load, simulation
 
+from yt.testing import \
+    run_nose
+
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -476,9 +476,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        self._data_source = self.pf.region(self.center,
-            self.left_edge - self.base_dds,
-            self.right_edge + self.base_dds)
+        LE = self.left_edge - self.base_dds
+        RE = self.right_edge + self.base_dds
+        if not all(self.pf.periodicity):
+            for i in range(3):
+                if self.pf.periodicity[i]: continue
+                LE[i] = max(LE[i], self.pf.domain_left_edge[i])
+                RE[i] = min(RE[i], self.pf.domain_right_edge[i])
+        self._data_source = self.pf.region(self.center, LE, RE)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
         self._pdata_source = self.pf.region(self.center,
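
The _setup_data_source change above clamps the padded region to the domain edges along any non-periodic axis, instead of always extending one cell beyond the boundary. A small sketch of that clamping logic with illustrative inputs:

    import numpy as np

    def clamp_edges(LE, RE, domain_LE, domain_RE, periodicity):
        # Only clamp along non-periodic axes, as in the hunk above.
        LE, RE = np.array(LE, dtype=float), np.array(RE, dtype=float)
        for i in range(3):
            if periodicity[i]:
                continue
            LE[i] = max(LE[i], domain_LE[i])
            RE[i] = min(RE[i], domain_RE[i])
        return LE, RE

    print(clamp_edges([-0.1, 0.2, 0.3], [1.1, 0.8, 0.9],
                      [0.0, 0.0, 0.0], [1.0, 1.0, 1.0],
                      (False, True, True)))
    # -> clamped to [0, 1] along the non-periodic x axis only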

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -613,7 +613,7 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        vel_unit = getattr(self, "code_velocity",
+        vel_unit = getattr(self, "velocity_unit",
                     self.length_unit / self.time_unit)
         self.unit_registry.modify("code_velocity", vel_unit)
         # domain_width does not yet exist

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -243,12 +243,17 @@
                 self.add_field(name = ("gas", "%s_density" % nice_name),
                                function = func,
                                units = "g/cm**3")
-                # We know this will either have one letter, or two.
-                if field[3] in string.letters:
-                    element, weight = field[2:4], field[4:-1]
-                else:
-                    element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+                # Most of the time our species will be of the form
+                # element name + atomic weight (e.g. C12), but
+                # sometimes we make up descriptive names (e.g. ash)
+                if any(char.isdigit() for char in field):
+                    # We know this will either have one letter, or two.
+                    if field[3] in string.letters:
+                        element, weight = field[2:4], field[4:-1]
+                    else:
+                        element, weight = field[2:3], field[3:-1]
+                    weight = int(weight)
+
                 # Here we can, later, add number density.
             if field.startswith("omegadot("):
                 nice_name = field[9:-1]
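
The reworked branch above only attempts to split a species field into element and atomic weight when the name actually contains a digit, so descriptive species such as "ash" no longer break the parse. A self-contained sketch of that parsing (field names are invented; string.ascii_letters is the portable spelling of the Python 2-only string.letters):

    import string

    def parse_species(field):
        # e.g. "X(C12)" -> ("C", 12); descriptive names like "X(ash)" have no weight.
        if any(ch.isdigit() for ch in field):
            if field[3] in string.ascii_letters:       # two-letter element symbol
                element, weight = field[2:4], field[4:-1]
            else:
                element, weight = field[2:3], field[3:-1]
            return element, int(weight)
        return field[2:-1], None

    print(parse_species("X(C12)"))   # -> ('C', 12)
    print(parse_species("X(Mg24)"))  # -> ('Mg', 24)
    print(parse_species("X(ash)"))   # -> ('ash', None)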

diff -r 06d771dcb5cddbc463231e6f8dbcbb434a0a0763 -r e843abd3b641840d97a3834640cbc7f51584db87 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -251,10 +251,15 @@
         # Left blank to be overriden in subclasses
         pass
 
-    def _switch_pf(self, new_pf):
+    def _switch_pf(self, new_pf, data_source=None):
         ds = self.data_source
         name = ds._type_name
         kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        if data_source is not None:
+            if name != "proj":
+                raise RuntimeError("The data_source keyword argument "
+                                   "is only defined for projections.")
+            kwargs['data_source'] = data_source
         new_ds = getattr(new_pf, name)(**kwargs)
         self.pf = new_pf
         self.data_source = new_ds


https://bitbucket.org/yt_analysis/yt/commits/c95e902758bb/
Changeset:   c95e902758bb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-18 22:30:39
Summary:     Aliasing __truediv__ to __div__ for Unit and changing an 'is' comparison to '=='.
Affected #:  2 files

diff -r e843abd3b641840d97a3834640cbc7f51584db87 -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -261,6 +261,8 @@
                     dimensions=(self.dimensions / u.dimensions),
                     registry=self.registry)
 
+    __truediv__ = __div__
+
     def __pow__(self, p):
         """ Take Unit to power p (float). """
         try:

diff -r e843abd3b641840d97a3834640cbc7f51584db87 -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -73,7 +73,7 @@
     from pyparsing import ParseFatalException
 
 def fix_unitary(u):
-    if u is '1':
+    if u == '1':
         return 'code_length'
     else:
         return u
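
Identity tests like "u is '1'" only succeed when both strings happen to be the same interned object, which CPython does not guarantee for strings built at runtime; comparing by value is the correct test. For example:

    def fix_unitary(u):
        # Compare by value; "u is '1'" relies on string interning, which is an
        # implementation detail for strings constructed at runtime.
        if u == '1':
            return 'code_length'
        return u

    print(fix_unitary('1'))     # -> code_length
    print(fix_unitary('Mpc'))   # -> Mpc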


https://bitbucket.org/yt_analysis/yt/commits/83131baac0e2/
Changeset:   83131baac0e2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-05 15:19:52
Summary:     Merging from yt3.0 work
Affected #:  103 files

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -590,7 +590,7 @@
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.8'
+MERCURIAL='mercurial-3.0'
 NOSE='nose-1.3.0'
 NUMPY='numpy-1.7.1'
 PYTHON_HGLIB='python-hglib-1.0'
@@ -619,7 +619,7 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8  mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
+echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
 echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f doc/source/cookbook/zoomin_frames.py
--- a/doc/source/cookbook/zoomin_frames.py
+++ b/doc/source/cookbook/zoomin_frames.py
@@ -22,6 +22,6 @@
 for i,v in enumerate(np.logspace(
             0, np.log10(pf.index.get_smallest_dx()*min_dx), n_frames)):
     # We set our width as necessary for this frame ...
-    p.set_width(v, '1')
+    p.set_width(v, 'unitary')
     # ... and we save!
     p.save(frame_template % (i))

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -660,7 +660,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_passthrough
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_root_only
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_simple_proxy
-   ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_splitter
 
 Math Utilities
 --------------

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -93,3 +93,5 @@
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.
+* ``stdoutStreamLogging`` (default: ``'False'``): If three, logging is directed
+  to stdout rather than stderr

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -104,42 +104,31 @@
 
 -------------
 
-.. function:: annotate_hop_circles(self, hop_output, max_number=None, annotate=False, min_size=20, max_size=10000000, font_size=8, print_halo_size=False, print_halo_mass=False, width=None):
+.. function:: annotate_halos(self, halo_catalog, col='white', alpha =1, width= None):
 
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopCircleCallback`.)
+   (This is a proxy for :class:`~yt.visualization.plot_modifications.HaloCatalogCallback`.)
 
-   Accepts a :class:`yt.HopList` *hop_output* and plots up
-   to *max_number* (None for unlimited) halos as circles.
+   Accepts a :class:`yt.HaloCatalog` *HaloCatalog* and plots 
+   a circle at the location of each halo with the radius of
+   the circle corresponding to the virial radius of the halo.
+   If *width* is set to None (default) all halos are plotted.
+   Otherwise, only halos that fall within a slab with width
+   *width* centered on the center of the plot data. The 
+   color and transparency of the circles can be controlled with
+   *col* and *alpha* respectively.
 
 .. python-script::
+   
+   from yt.mods import *
+   data_pf = load('Enzo_64/RD0006/RD0006')
+   halos_pf = load('rockstar_halos/halos_0.0.bin')
 
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "z", "density")
-   p.annotate_hop_circles(halos)
-   p.save()
+   hc = HaloCatalog(halos_pf=halos_pf)
+   hc.create()
 
--------------
-
-.. function:: annotate_hop_particles(self, hop_output, max_number, p_size=1.0, min_size=20, alpha=0.2):
-
-   (This is a proxy for :class:`~yt.visualization.plot_modifications.HopParticleCallback`.)
-
-   Adds particle positions for the members of each halo as
-   identified by HOP. Along *axis* up to *max_number* groups
-   in *hop_output* that are larger than *min_size* are
-   plotted with *p_size* pixels per particle;  *alpha*
-   determines the opacity of each particle.
-
-.. python-script::
-
-   from yt.mods import *
-   pf = load("Enzo_64/DD0043/data0043")
-   halos = HaloFinder(pf)
-   p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
-   p.annotate_hop_particles(halos, max_number=100, p_size=5.0)
-   p.save()
+   prj = ProjectionPlot(data_pf, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
 
 -------------
 

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/api.py
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -20,6 +20,9 @@
 from .halo_callbacks import \
      add_callback
 
+from .halo_finding_methods import \
+     add_finding_method
+
 from .halo_filters import \
      add_filter
      

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/finding_methods.py
--- a/yt/analysis_modules/halo_analysis/finding_methods.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .operator_registry import \
-    hf_registry
-
-class HaloFindingMethod(object):
-    pass

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -30,20 +30,9 @@
 from .operator_registry import \
      callback_registry, \
      filter_registry, \
-     hf_registry, \
+     finding_method_registry, \
      quantity_registry
 
-from yt.analysis_modules.halo_finding.halo_objects import \
-    FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
-    HaloCatalogDataset
-from yt.frontends.stream.data_structures import \
-    load_particles
-from yt.frontends.halo_catalogs.rockstar.data_structures import \
-    RockstarDataset
-from yt.analysis_modules.halo_finding.rockstar.api import \
-    RockstarHaloFinder
-
 class HaloCatalog(ParallelAnalysisInterface):
     r"""Create a HaloCatalog: an object that allows for the creation and association
     of data with a set of halo objects.
@@ -103,7 +92,7 @@
 
     See Also
     --------
-    add_callback, add_filter, add_quantity
+    add_callback, add_filter, add_finding_method, add_quantity
     
     """
     
@@ -113,7 +102,6 @@
         ParallelAnalysisInterface.__init__(self)
         self.halos_pf = halos_pf
         self.data_pf = data_pf
-        self.finder_method = finder_method
         self.output_dir = ensure_dir(output_dir)
         if os.path.basename(self.output_dir) != ".":
             self.output_prefix = os.path.basename(self.output_dir)
@@ -133,6 +121,10 @@
                 data_source = data_pf.h.all_data()
         self.data_source = data_source
 
+        if finder_method is not None:
+            finder_method = finding_method_registry.find(finder_method)
+        self.finder_method = finder_method            
+        
         # all of the analysis actions to be performed: callbacks, filters, and quantities
         self.actions = []
         # fields to be written to the halo catalog
@@ -358,16 +350,22 @@
 
         if self.halos_pf is None:
             # Find the halos and make a dataset of them
-            particles_pf = self.find_halos()
+            self.halos_pf = self.finder_method(self.data_pf)
+            if self.halos_pf is None:
+                mylog.warning('No halos were found for {0}'.format(\
+                        self.data_pf.basename))
+                if save_catalog:
+                    self.halos_pf = self.data_pf
+                    self.save_catalog()
+                    self.halos_pf = None
+                return
 
             # Assign pf and data sources appropriately
-            self.halos_pf = particles_pf
-            self.data_source = particles_pf.all_data()
+            self.data_source = self.halos_pf.all_data()
 
             # Add all of the default quantities that all halos must have
             self.add_default_quantities('all')
 
-
         my_index = np.argsort(self.data_source["particle_identifier"])
         for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
             new_halo = Halo(self)
@@ -400,80 +398,6 @@
         if save_catalog:
             self.save_catalog()
 
-    def find_halos(self):
-
-        finder_method = (self.finder_method).lower()
-
-        if finder_method == "hop":
-            halo_list = HOPHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-
-        elif finder_method == "fof":
-            halo_list = FOFHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-            
-        elif finder_method == 'rockstar':
-            rh = RockstarHaloFinder(self.data_pf, 
-                outbase='{0}/rockstar_halos'.format(self.output_prefix))
-            rh.run()
-            halos_pf = RockstarDataset('{0}/rockstar_halos/halos_0.0.bin'.format(self.output_prefix))
-            halos_pf.create_field_info()
-        else:
-            raise RuntimeError("finder_method must be 'fof', 'hop', or 'rockstar'")
-
-        for attr in ["current_redshift", "current_time",
-                     "domain_dimensions",
-                     "cosmological_simulation", "omega_lambda",
-                     "omega_matter", "hubble_constant"]:
-            attr_val = getattr(self.data_pf, attr)
-            setattr(halos_pf, attr, attr_val)
-        halos_pf.current_time = halos_pf.current_time.in_cgs()
-
-        return halos_pf
-
-    def _parse_old_halo_list(self, halo_list):
-
-
-        data_pf = self.data_pf
-        num_halos = len(halo_list)
-
-        # Set up fields that we want to pull from identified halos and their units
-        new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
-            'particle_position_y','particle_position_z',
-            'virial_radius']
-        new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
-        # Set up a dictionary based on those fields 
-        # with empty arrays where we will fill in their values
-        halo_properties = { f : (np.zeros(num_halos),unit) \
-            for f, unit in zip(new_fields,new_units)}
-
-        # Iterate through the halos pulling out their positions and virial quantities
-        # and filling in the properties dictionary
-        for i,halo in enumerate(halo_list):
-            halo_properties['particle_identifier'][0][i] = i
-            halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
-            halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
-            com = halo.center_of_mass().in_cgs()
-            halo_properties['particle_position_x'][0][i] = com[0]
-            halo_properties['particle_position_y'][0][i] = com[1]
-            halo_properties['particle_position_z'][0][i] = com[2]
-
-        # Define a bounding box based on original data pf
-        bbox = np.array([data_pf.domain_left_edge.in_cgs(),
-                data_pf.domain_right_edge.in_cgs()]).T
-
-        # Create a pf with the halos as particles
-        particle_pf = load_particles(halo_properties, 
-                bbox=bbox, length_unit = 1, mass_unit=1)
-
-        # Create the field info dictionary so we can reference those fields
-        particle_pf.create_field_info()
-
-        return particle_pf
-
-
     def save_catalog(self):
         "Write out hdf5 file with all halo quantities."
 
@@ -513,4 +437,3 @@
         self.add_quantity("particle_position_z", field_type=field_type)
         self.add_quantity("virial_radius", field_type=field_type)
 
-
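
For orientation, here is a minimal usage sketch of the refactored interface: finder_method is now a registry key resolved through finding_method_registry (see the hunk above). The dataset path is illustrative and the import path follows yt's usual analysis-module convention.

    from yt.mods import load
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_pf = load("DD0046/DD0046")   # illustrative dataset
    hc = HaloCatalog(data_pf=data_pf, finder_method="hop",
                     output_dir="halo_catalogs/catalog_0046")
    hc.create()   # run the registered finder, then any callbacks/filters/quantities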

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -13,6 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
+from yt.utilities.spatial import KDTree
+
 from .halo_callbacks import HaloCallback
 from .operator_registry import filter_registry
 
@@ -58,3 +62,44 @@
     return eval("%s %s %s" % (h_value, operator, value))
 
 add_filter("quantity_value", quantity_value)
+
+def _not_subhalo(halo, field_type="halos"):
+    """
+    Return True only if this halo is not a subhalo.
+    
+    This is used for halo finders such as Rockstar that output parent
+    and subhalos together.
+    """
+
+    if not hasattr(halo.halo_catalog, "parent_dict"):
+        halo.halo_catalog.parent_dict = \
+          create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
+    return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
+add_filter("not_subhalo", _not_subhalo)
+
+def create_parent_dict(data_source, ptype="halos"):
+    """
+    Create a dictionary of halo parents to allow for filtering of subhalos.
+
+    For a pair of halos whose distance is smaller than the radius of at least 
+    one of the halos, the parent is defined as the halo with the larger radius.
+    Parent halos (halos with no parents of their own) have parent index values of -1.
+    """
+    pos = np.rollaxis(
+        np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_y"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
+    rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
+    ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
+    parents = -1 * np.ones_like(ids, dtype="int")
+    my_tree = KDTree(pos)
+
+    for i in xrange(ids.size):
+        neighbors = np.array(
+            my_tree.query_ball_point(pos[i], rad[i], p=2))
+        if neighbors.size > 1:
+            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
+
+    parents[ids == parents] = -1
+    parent_dict = dict(zip(ids, parents))
+    return parent_dict
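
A usage sketch of the new filter: the registry name "not_subhalo" comes from the add_filter call above, and only halos whose parent index is -1 (top-level halos, per create_parent_dict) survive it. Attaching it to an existing HaloCatalog hc is assumed.

    hc.add_filter("not_subhalo")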

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- /dev/null
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -0,0 +1,141 @@
+"""
+Halo Finding methods
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.halo_finding.halo_objects import \
+    FOFHaloFinder, HOPHaloFinder
+from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+    HaloCatalogDataset
+from yt.frontends.stream.data_structures import \
+    load_particles
+
+from .operator_registry import \
+    finding_method_registry
+
+
+def add_finding_method(name, function):
+    finding_method_registry[name] = HaloFindingMethod(function)
+    
+class HaloFindingMethod(object):
+    r"""
+    A halo finding method is a callback that performs halo finding on a 
+    dataset and returns a new dataset that is the loaded halo finder output.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, ds):
+        return self.function(ds, *self.args, **self.kwargs)
+
+def _hop_method(pf):
+    r"""
+    Run the Hop halo finding method.
+    """
+    
+    halo_list = HOPHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("hop", _hop_method)
+
+def _fof_method(pf):
+    r"""
+    Run the FoF halo finding method.
+    """
+
+    halo_list = FOFHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("fof", _fof_method)
+
+def _rockstar_method(pf):
+    r"""
+    Run the Rockstar halo finding method.
+    """
+
+    from yt.frontends.halo_catalogs.rockstar.data_structures import \
+     RockstarDataset
+    from yt.analysis_modules.halo_finding.rockstar.api import \
+     RockstarHaloFinder
+    
+    rh = RockstarHaloFinder(pf)
+    rh.run()
+
+
+    halos_pf = RockstarDataset("rockstar_halos/halos_0.0.bin")
+    try:
+        halos_pf.create_field_info()
+    except ValueError:
+        return None
+
+    return halos_pf
+add_finding_method("rockstar", _rockstar_method)
+
+def _parse_old_halo_list(data_pf, halo_list):
+    r"""
+    Convert the halo list into a loaded dataset.
+    """
+
+    num_halos = len(halo_list)
+
+    if num_halos == 0: return None
+
+    # Set up fields that we want to pull from identified halos and their units
+    new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
+        'particle_position_y','particle_position_z',
+        'virial_radius']
+    new_units = [ '', 'g', 'cm', 'cm','cm','cm']
+
+    # Set up a dictionary based on those fields 
+    # with empty arrays where we will fill in their values
+    halo_properties = { f : (np.zeros(num_halos),unit) \
+        for f, unit in zip(new_fields,new_units)}
+
+    # Iterate through the halos pulling out their positions and virial quantities
+    # and filling in the properties dictionary
+    for i,halo in enumerate(halo_list):
+        halo_properties['particle_identifier'][0][i] = i
+        halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
+        halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
+
+        com = halo.center_of_mass().in_cgs()
+        halo_properties['particle_position_x'][0][i] = com[0]
+        halo_properties['particle_position_y'][0][i] = com[1]
+        halo_properties['particle_position_z'][0][i] = com[2]
+
+    # Define a bounding box based on original data pf
+    bbox = np.array([data_pf.domain_left_edge.in_cgs(),
+            data_pf.domain_right_edge.in_cgs()]).T
+
+    # Create a pf with the halos as particles
+    particle_pf = load_particles(halo_properties, 
+            bbox=bbox, length_unit = 1, mass_unit=1)
+
+    # Create the field info dictionary so we can reference those fields
+    particle_pf.create_field_info()
+
+    for attr in ["current_redshift", "current_time",
+                 "domain_dimensions",
+                 "cosmological_simulation", "omega_lambda",
+                 "omega_matter", "hubble_constant"]:
+        attr_val = getattr(data_pf, attr)
+        setattr(particle_pf, attr, attr_val)
+    particle_pf.current_time = particle_pf.current_time.in_cgs()
+    
+    return particle_pf
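
The add_finding_method/HaloFindingMethod pair above makes the finder pluggable. A hedged sketch of registering a user-defined method in the same style as the built-ins (the linking-length value is illustrative):

    def _my_fof_method(pf):
        # Run FoF with a custom linking length, then convert the old-style
        # halo list into a loaded particle dataset, as the built-ins do.
        halo_list = FOFHaloFinder(pf, link=0.25)
        return _parse_old_halo_list(pf, halo_list)

    add_finding_method("my_fof", _my_fof_method)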

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ b/yt/analysis_modules/halo_analysis/operator_registry.py
@@ -27,5 +27,5 @@
 
 callback_registry = OperatorRegistry()
 filter_registry = OperatorRegistry()
-hf_registry = OperatorRegistry()
+finding_method_registry = OperatorRegistry()
 quantity_registry = OperatorRegistry()
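
For context, the renamed registry is consumed via its find() lookup (see the halo_catalog.py hunk above); a sketch, assuming a data_pf is already loaded:

    from yt.analysis_modules.halo_analysis.operator_registry import \
        finding_method_registry

    method = finding_method_registry.find("hop")   # HaloFindingMethod registered in halo_finding_methods.py
    halos_pf = method(data_pf)                      # the same call the HaloCatalog hunk makes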

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -112,7 +112,9 @@
         if self._name == "RockstarHalo":
             ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
         elif self._name == "LoadedHalo":
-            ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
+            ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
         sp_pid = ds['particle_index']
         self._ds_sort = sp_pid.argsort()
         sp_pid = sp_pid[self._ds_sort]
@@ -219,7 +221,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return np.array([vx, vy, vz]) / pm.sum()
+        return self.pf.arr([vx, vy, vz], vx.units) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -333,9 +335,11 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
+                   + ["particle_index"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
-        if 'creation_time' in self.data.pf.field_list:
+	handle.create_dataset("/%s/particle_mass" % gn,
+		data=self["particle_mass"].in_units('Msun'))
+        if ('io','creation_time') in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
                 data=self['creation_time'])
         n = handle["/%s" % gn]
@@ -850,6 +854,7 @@
         self._saved_fields = {}
         self._ds_sort = None
         self._particle_mask = None
+	self._pid_sort = None
 
 
     def __getitem__(self, key):
@@ -867,14 +872,28 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                field_data = field_data[field_data.argsort()]
+                #this is an index for turning data sorted by particle index 
+		#into the same order as the fields on disk
+		self._pid_sort = field_data.argsort().argsort()
+	    #convert to YTArray using the data from disk
+	    if key == 'particle_mass':
+		field_data = self.pf.arr(field_data, 'Msun')
+	    else:
+	        field_data = self.pf.arr(field_data, 
+		    self.pf._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
         # We won't store this field below in saved_fields because
         # that would mean keeping two copies of it, one in the yt
         # machinery and one here.
-        ds = self.pf.sphere(self.CoM, 1.05 * self.max_radius)
-        return np.take(ds[key][self._ds_sort], self.particle_mask)
+        ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
+	# If particle_mask hasn't been called once then _ds_sort won't have
+	# the proper values set yet
+        if self._particle_mask is None:
+	    self.particle_mask
+        return ds[key][self._ds_sort][self.particle_mask][self._pid_sort]
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1089,10 +1108,10 @@
         gc.collect()
 
     def _get_dm_indices(self):
-        if 'creation_time' in self._data_source.index.field_list:
+        if ('io','creation_time') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on creation time")
             return (self._data_source["creation_time"] <= 0)
-        elif 'particle_type' in self._data_source.index.field_list:
+        elif ('io','particle_type') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
         else:
@@ -2143,7 +2162,7 @@
         elif fancy_padding and self._distributed:
             LE_padding = np.empty(3, dtype='float64')
             RE_padding = np.empty(3, dtype='float64')
-            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            avg_spacing = (vol / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
             for dim in xrange(3):
@@ -2390,7 +2409,7 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2414,7 +2433,7 @@
             sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
+                self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)
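
One detail worth unpacking: the argsort().argsort() line in the __getitem__ hunk above computes, for each particle, its rank in particle_index order, which is what lets data sorted by particle index be mapped back to the on-disk ordering. A small worked example:

    import numpy as np

    pid = np.array([42, 7, 19])          # particle_index in on-disk order
    rank = pid.argsort().argsort()       # -> array([2, 0, 1])
    sorted_by_pid = pid[pid.argsort()]   # data sorted by particle index
    assert (sorted_by_pid[rank] == pid).all()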

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -53,7 +53,7 @@
         self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
         self.index = particle_fields.pop("particle_index")
-        self.mass = particle_fields.pop("ParticleMassMsun")
+        self.mass = particle_fields.pop("particle_mass")
         self.padded_particles = []
         self.nMerge = 4
         self.tree = tree

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -6,8 +6,6 @@
 from libc.stdlib cimport malloc, free
 import sys
 
-
-
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
@@ -15,6 +13,10 @@
         np.int64_t id
         float pos[6]
 
+cdef import from "rockstar.h":
+    particle *global_particles "p"
+    void rockstar_cleanup()
+
 cdef import from "fof.h":
     struct fof:
         np.int64_t num_p
@@ -23,13 +25,34 @@
 cdef import from "halo.h":
     struct halo:
         np.int64_t id
-        float pos[6], corevel[3], bulkvel[3]
+        float pos[6]
+        float corevel[3]
+        float bulkvel[3]
         float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
-        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float J[3]
+        float energy, spin
+        float alt_m[4]
+        float Xoff, Voff, b_to_a, c_to_a
+        float A[3]
         float bullock_spin, kin_to_pot
         np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
         float min_pos_err, min_vel_err, min_bulkvel_err
 
+ctypedef packed struct haloflat:
+    np.int64_t id
+    float pos_x, pos_y, pos_z, pos_v, pos_u, pos_w
+    float corevel_x, corevel_y, corevel_z
+    float bulkvel_x, bulkvel_y, bulkvel_z
+    float m, r, child_r, vmax_r, mgrav,    vmax, rvmax, rs, klypin_rs, vrms
+    float J1, J2, J3
+    float energy, spin
+    float alt_m1, alt_m2, alt_m3, alt_m4
+    float Xoff, Voff, b_to_a, c_to_a
+    float A1, A2, A3
+    float bullock_spin, kin_to_pot
+    np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+    float min_pos_err, min_vel_err, min_bulkvel_err
+
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
@@ -38,6 +61,9 @@
     halo *halos
     np.int64_t num_halos
     void calc_mass_definition() nogil
+    void free_particle_copies() nogil
+    void alloc_particle_copies(np.int64_t total_copies) nogil
+    void free_halos() nogil
 
 # For outputing halos, rockstar style
 
@@ -48,6 +74,7 @@
 
 cdef import from "config.h":
     void setup_config() nogil
+    void output_config(char *fn) nogil
 
 cdef import from "config_vars.h":
     # Rockstar cleverly puts all of the config variables inside a templated
@@ -197,46 +224,87 @@
     def output_halos(self):
         output_halos(0, 0, 0, NULL) 
 
+    def return_halos(self):
+        cdef haloflat[:] haloview = <haloflat[:num_halos]> (<haloflat*> halos)
+        rv = np.asarray(haloview).copy()
+        rockstar_cleanup()
+        free_halos()
+        return rv
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pind,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
                                 np.ndarray[np.float64_t, ndim=2] pos,
-                                np.ndarray[np.float64_t, ndim=2] vel,
-                                np.ndarray[np.int64_t, ndim=1] fof_tags,
-                                np.int64_t nfof,
-                                np.int64_t npart_max):
+                                np.ndarray[np.float64_t, ndim=2] vel):
 
         # Define fof object
 
         # Find number of particles
-        cdef np.int64_t i, j
-        cdef np.int64_t num_particles = pid.shape[0]
+        cdef np.int64_t i, j, k, ind, offset
+        cdef np.int64_t num_particles = pind.shape[0]
+        global global_particles
 
         # Allocate space for correct number of particles
-        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
         cdef fof fof_obj
-        fof_obj.particles = particles
 
-        cdef np.int64_t last_fof_tag = 1
-        cdef np.int64_t k = 0
-        for i in range(num_particles):
-            if fof_tags[i] == 0:
+        cdef np.int64_t max_count = 0
+        cdef np.int64_t next_tag, local_tag, last_fof_tag = -1
+        fof_obj.num_p = 0
+        j = 0
+        # We're going to do one iteration to get the most frequent value.
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Don't count the null group
+            if local_tag == -1: continue
+            if local_tag != last_fof_tag:
+                if j > max_count:
+                    max_count = j
+                last_fof_tag = local_tag
+                j = 1
+            else:
+                j += 1
+        if j > max_count:
+            max_count = j
+        #print >> sys.stderr, "Most frequent occurrence: %s" % max_count
+        fof_obj.particles = <particle*> malloc(max_count * sizeof(particle))
+        j = 0
+        cdef int counter = 0, ndone = 0
+        cdef np.ndarray[np.int64_t, ndim=1] pcounts 
+        pcounts = np.zeros(np.unique(fof_tags).size, dtype="int64")
+        cdef np.int64_t frac = <np.int64_t> (pcounts.shape[0] / 20.0)
+        free_halos()
+        for i in range(pind.shape[0]):
+            ind = pind[i]
+            local_tag = fof_tags[ind]
+            # Skip this one -- it means no group.
+            if local_tag == -1:
                 continue
-            if fof_tags[i] != last_fof_tag:
-                last_fof_tag = fof_tags[i]
-                if k > 16:
-                    print "Finding subs", k, i
-                    fof_obj.num_p = k
-                    find_subs(&fof_obj)
-                k = 0
-            particles[k].id = pid[i]
-
-            # fill in locations & velocities
-            for j in range(3):
-                particles[k].pos[j] = pos[i,j]
-                particles[k].pos[j+3] = vel[i,j]
-            k += 1
-        free(particles)
-
-
-
+            if i == pind.shape[0] - 1:
+                next_tag = local_tag + 1
+            else:
+                next_tag = fof_tags[pind[i+1]]
+            for k in range(3):
+                fof_obj.particles[j].pos[k] = pos[ind,k]
+                fof_obj.particles[j].pos[k+3] = vel[ind,k]
+            fof_obj.particles[j].id = j
+            fof_obj.num_p += 1
+            j += 1
+            # Now we check if we're the last one
+            if local_tag != next_tag:
+                pcounts[ndone] = fof_obj.num_p
+                counter += 1
+                ndone += 1
+                if counter == frac:
+                    print >> sys.stderr, "R*-ing % 5.1f%% done (%0.3f -> %0.3f)" % (
+                        (100.0 * ndone)/pcounts.size,
+                        fof_obj.particles[0].pos[2],
+                        halos[num_halos - 1].pos[2])
+                    counter = 0
+                global_particles = &fof_obj.particles[0]
+                find_subs(&fof_obj)
+                # Now we reset
+                fof_obj.num_p = j = 0
+        free(fof_obj.particles)
+        return pcounts
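
The first loop in make_rockstar_fof above only measures the size of the largest FOF group, so a single particle buffer of that size can be allocated and reused; the second loop then feeds each group to find_subs. A pure-Python sketch of that first pass (argument names follow the Cython signature; pind is assumed to order particles so equal tags are contiguous):

    def largest_group_size(pind, fof_tags):
        max_count = run = 0
        last_tag = None
        for i in pind:
            tag = fof_tags[i]
            if tag == -1:          # -1 marks particles outside any group
                continue
            if tag != last_tag:
                max_count = max(max_count, run)
                last_tag, run = tag, 1
            else:
                run += 1
        return max(max_count, run)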

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,7 +204,7 @@
     p[0] = <particle *> malloc(sizeof(particle) * local_parts)
 
     conv[0] = conv[1] = conv[2] = pf.length_unit.in_units("Mpccm/h")
-    conv[3] = conv[4] = conv[5] = 1e-5
+    conv[3] = conv[4] = conv[5] = pf.velocity_unit.in_units("km/s")
     left_edge[0] = pf.domain_left_edge[0]
     left_edge[1] = pf.domain_left_edge[1]
     left_edge[2] = pf.domain_left_edge[2]

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -21,6 +21,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
@@ -28,6 +30,8 @@
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
                          library_dirs=[rd],
                          libraries=["rockstar"],
+                         #define_macros = [("THREADSAFE", "__thread")],
+                         define_macros = [("THREADSAFE", "")],
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -22,7 +22,6 @@
 from yt.fields.local_fields import add_field, derived_field
 from yt.data_objects.image_array import ImageArray
 from yt.funcs import fix_axis, mylog, iterable, get_pbar
-from yt.utilities.definitions import inv_axis_names
 from yt.visualization.volume_rendering.camera import off_axis_projection
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only
@@ -134,7 +133,7 @@
         --------
         >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
         """
-        axis = fix_axis(axis)
+        axis = fix_axis(axis, self.pf)
 
         if center == "c":
             ctr = self.pf.domain_center
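
fix_axis now resolves axis names through the dataset, so the parameter file is a required second argument wherever it is called (here and in the data-object changes below). A one-line sketch, assuming cartesian data:

    from yt.funcs import fix_axis

    ax = fix_axis("y", pf)   # -> 1 for cartesian coordinates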

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -24,6 +24,7 @@
     logfile = 'False',
     coloredlogs = 'False',
     suppressstreamlogging = 'False',
+    stdoutStreamLogging = 'False',
     loglevel = '20',
     inline = 'False',
     numthreads = '-1',
@@ -53,6 +54,7 @@
     answer_testing_bitwise = 'False',
     gold_standard_filename = 'gold311',
     local_standard_filename = 'local001',
+    answer_tests_url = 'http://answers.yt-project.org/%s_%s',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False',
     ignore_invalid_unit_operation_errors = 'False'
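
The new options can also be flipped at runtime through ytcfg, in the same way the test setup functions later in this changeset set __withintesting; the value shown is illustrative:

    from yt.config import ytcfg

    ytcfg["yt", "stdoutStreamLogging"] = "True"   # presumably routes log records to stdout rather than stderr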

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -39,7 +39,6 @@
     march_cubes_grid, march_cubes_grid_flux
 from yt.utilities.data_point_utilities import CombineGrids,\
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
-from yt.utilities.definitions import axis_names, x_dict, y_dict
 from yt.utilities.minimal_representation import \
     MinimalProjectionData
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -180,25 +179,12 @@
         weight value before being integrated, and at the conclusion of the
         projection the resultant values will be divided by the projected
         `weight_field`.
-    max_level : int
-        If supplied, only cells at or below this level will be projected.
     center : array_like, optional
         The 'center' supplied to fields that use it.  Note that this does
         not have to have `coord` as one value.  Strictly optional.
     data_source : `yt.data_objects.api.AMRData`, optional
         If specified, this will be the data source used for selecting
         regions to project.
-    node_name: string, optional
-        The node in the .yt file to find or store this slice at.  Should
-        probably not be used.
-    field_cuts : list of strings, optional
-        If supplied, each of these strings will be evaluated to cut a
-        region of a grid out.  They can be of the form "grid['Temperature']
-        > 100" for instance.
-    preload_style : string
-        Either 'level', 'all', or None (default).  Defines how grids are
-        loaded -- either level by level, or all at once.  Only applicable
-        during parallel runs.
     serialize : bool, optional
         Whether we should store this projection in the .yt file or not.
     kwargs : dict of items
@@ -217,7 +203,7 @@
     _con_args = ('axis', 'field', 'weight_field')
     _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
-                 center = None, pf = None, data_source=None, 
+                 center = None, pf = None, data_source = None,
                  style = "integrate", field_parameters = None):
         YTSelectionContainer2D.__init__(self, axis, pf, field_parameters)
         self.proj_style = style
@@ -250,8 +236,8 @@
         self._mrep.upload()
 
     def _get_tree(self, nvals):
-        xax = x_dict[self.axis]
-        yax = y_dict[self.axis]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         xd = self.pf.domain_dimensions[xax]
         yd = self.pf.domain_dimensions[yax]
         bounds = (self.pf.domain_left_edge[xax],
@@ -290,18 +276,20 @@
         else:
             raise NotImplementedError
         # TODO: Add the combine operation
-        ox = self.pf.domain_left_edge[x_dict[self.axis]]
-        oy = self.pf.domain_left_edge[y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        ox = self.pf.domain_left_edge[xax]
+        oy = self.pf.domain_left_edge[yax]
         px, py, pdx, pdy, nvals, nwvals = tree.get_all(False, merge_style)
         nvals = self.comm.mpi_allreduce(nvals, op=op)
         nwvals = self.comm.mpi_allreduce(nwvals, op=op)
-        np.multiply(px, self.pf.domain_width[x_dict[self.axis]], px)
+        np.multiply(px, self.pf.domain_width[xax], px)
         np.add(px, ox, px)
-        np.multiply(pdx, self.pf.domain_width[x_dict[self.axis]], pdx)
+        np.multiply(pdx, self.pf.domain_width[xax], pdx)
 
-        np.multiply(py, self.pf.domain_width[y_dict[self.axis]], py)
+        np.multiply(py, self.pf.domain_width[yax], py)
         np.add(py, oy, py)
-        np.multiply(pdy, self.pf.domain_width[y_dict[self.axis]], pdy)
+        np.multiply(pdy, self.pf.domain_width[yax], pdy)
         if self.weight_field is not None:
             np.divide(nvals, nwvals[:,None], nvals)
         # We now convert to half-widths and center-points
@@ -346,8 +334,10 @@
 
     def _initialize_chunk(self, chunk, tree):
         icoords = chunk.icoords
-        i1 = icoords[:,x_dict[self.axis]]
-        i2 = icoords[:,y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        i1 = icoords[:,xax]
+        i2 = icoords[:,yax]
         ilevel = chunk.ires * self.pf.ires_factor
         tree.initialize_chunk(i1, i2, ilevel)
 
@@ -368,8 +358,10 @@
         else:
             w = np.ones(chunk.ires.size, dtype="float64")
         icoords = chunk.icoords
-        i1 = icoords[:,x_dict[self.axis]]
-        i2 = icoords[:,y_dict[self.axis]]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        i1 = icoords[:,xax]
+        i2 = icoords[:,yax]
         ilevel = chunk.ires * self.pf.ires_factor
         tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
 
@@ -595,7 +587,7 @@
     ----------
     left_edge : array_like
         The left edge of the region to be extracted
-    rigth_edge : array_like
+    right_edge : array_like
         The right edge of the region to be extracted
     dims : array_like
         Number of cells along each axis of resulting grid.
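
Throughout these hunks the module-level x_dict/y_dict tables are replaced by lookups on the dataset's coordinate handler; a short sketch of the replacement pattern (the numeric results assume cartesian data):

    axis = 0                              # project along x
    xax = pf.coordinates.x_axis[axis]     # -> 1 (y) for cartesian data
    yax = pf.coordinates.y_axis[axis]     # -> 2 (z) for cartesian data
    nx, ny = pf.domain_dimensions[xax], pf.domain_dimensions[yax]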

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -28,7 +28,6 @@
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.lib.marching_cubes import \
     march_cubes_grid, march_cubes_grid_flux
-from yt.utilities.definitions import  x_dict, y_dict
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 from yt.utilities.parameter_file_storage import \
@@ -727,9 +726,10 @@
     _spatial = False
     def __init__(self, axis, pf, field_parameters):
         ParallelAnalysisInterface.__init__(self)
-        self.axis = fix_axis(axis)
         super(YTSelectionContainer2D, self).__init__(
             pf, field_parameters)
+        # We need the pf, which will exist by now, for fix_axis.
+        self.axis = fix_axis(axis, self.pf)
         self.set_field_parameter("axis", axis)
 
     def _convert_field_name(self, field):
@@ -813,17 +813,17 @@
             center = self.pf.arr(center, 'code_length')
         if iterable(width):
             w, u = width
-            width = self.pf.arr(w, input_units = u)
+            width = self.pf.quan(w, input_units = u)
         if height is None:
             height = width
         elif iterable(height):
             h, u = height
-            height = self.pf.arr(w, input_units = u)
+            height = self.pf.quan(h, input_units = u)
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
-        xax = x_dict[self.axis]
-        yax = y_dict[self.axis]
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
                   center[yax] - height*0.5, center[yax] + height*0.5)
         frb = FixedResolutionBuffer(self, bounds, resolution,

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -19,13 +19,11 @@
 import numpy as np
 
 from yt.funcs import *
-from yt.utilities.definitions import x_dict, y_dict
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
     YTDataContainer, \
     YTSelectionContainer
-from yt.utilities.definitions import x_dict, y_dict
 from yt.fields.field_exceptions import \
     NeedsGridType, \
     NeedsOriginalGrid, \
@@ -378,9 +376,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -249,11 +249,11 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask
 
 class ParticleOctreeSubset(OctreeSubset):

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -157,7 +157,7 @@
     """
     def __init__(self, data_source, n_bins, bin_field,
                  lower_bound, upper_bound,
-                 log_space = True, 
+                 log_space = True,
                  end_collect=False):
         BinnedProfile.__init__(self, data_source)
         self.bin_field = bin_field
@@ -212,7 +212,7 @@
         # summing up all of the histograms and dividing by the
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
-        if accumulation: 
+        if accumulation:
             binned_field = np.add.accumulate(binned_field)
         return binned_field, weight_field, q_field, \
             used_field.astype("bool")
@@ -237,12 +237,12 @@
             bin_indices = np.clip(bin_indices, 0, self.n_bins - 1)
         else: #throw away outside values
             bin_indices -= 1
-          
+
         return (mi, bin_indices)
 
     def choose_bins(self, bin_style):
         # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N 
+        # both: 0...N, left: 0...N-1, right: 1...N
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
         x = self.field_data[self.bin_field]
@@ -259,7 +259,7 @@
         return x
 
     def write_out(self, filename, format="%0.16e", bin_style='left'):
-        ''' 
+        '''
         Write out data in ascii file, using *format* and
         *bin_style* (left, right, center, both).
         '''
@@ -268,12 +268,12 @@
         fields.remove(self.bin_field)
         fid.write("\t".join(["#"] + [self.bin_field] + fields + ["\n"]))
 
-        field_data = np.array(self.choose_bins(bin_style)) 
+        field_data = np.array(self.choose_bins(bin_style))
         if bin_style is 'both':
             field_data = np.append([field_data], np.array([self.field_data[field] for field in fields]), axis=0)
-        else: 
+        else:
             field_data = np.append([field_data], np.array([self.field_data[field][:-1] for field in fields]), axis=0)
-        
+
         for line in range(field_data.shape[1]):
             field_data[:,line].tofile(fid, sep="\t", format=format)
             fid.write("\n")
@@ -296,10 +296,10 @@
             name = "%s-1d" % (self.bin_field)
         else:
             name = "%s-%s-1d" % (group_prefix, self.bin_field)
-            
-        if name in fid: 
+
+        if name in fid:
             mylog.info("Profile file is getting larger since you are attempting to overwrite a profile. You may want to repack")
-            del fid[name] 
+            del fid[name]
         group = fid.create_group(name)
         group.attrs["x-axis-%s" % self.bin_field] = self.choose_bins(bin_style)
         for field in fields:
@@ -421,7 +421,7 @@
 
     def choose_bins(self, bin_style):
         # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N 
+        # both: 0...N, left: 0...N-1, right: 1...N
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
@@ -492,9 +492,9 @@
             name = "%s-%s-2d" % (self.y_bin_field, self.x_bin_field)
         else:
             name = "%s-%s-%s-2d" % (group_prefix, self.y_bin_field, self.x_bin_field)
-        if name in fid: 
+        if name in fid:
             mylog.info("Profile file is getting larger since you are attempting to overwrite a profile. You may want to repack")
-            del fid[name] 
+            del fid[name]
         group = fid.create_group(name)
 
         xbins, ybins = self.choose_bins(bin_style)
@@ -520,11 +520,11 @@
     or a straight sum of a field in a bin defined by two other
     fields.  In the case of a weighted average, we have: p_i =
     sum( w_i * v_i ) / sum(w_i)
-    
+
     We accept a *data_source*, which will be binned into
     *(x,y,z)_n_bins* by the field *(x,y,z)_bin_field* between the
     *(x,y,z)_lower_bound* and the *(x,y,z)_upper_bound*.  These bins may or
-    may not be equally divided in log-space as specified by *(x,y,z)_log*. 
+    may not be equally divided in log-space as specified by *(x,y,z)_log*.
     If *end_collect* is True, take all values outside the given bounds and
     store them in the 0 and *n_bins*-1 values.
     """
@@ -641,7 +641,7 @@
 
     def choose_bins(self, bin_style):
         # Depending on the bin_style, choose from bin edges 0...N either:
-        # both: 0...N, left: 0...N-1, right: 1...N 
+        # both: 0...N, left: 0...N-1, right: 1...N
         # center: N bins that are the average (both in linear or log
         # space) of each pair of left/right edges
 
@@ -688,14 +688,14 @@
         attributes.
         """
         fid = h5py.File(filename)
-        fields = [field for field in sorted(self.field_data.keys()) 
+        fields = [field for field in sorted(self.field_data.keys())
                   if (field != "UsedBins" and field != self.x_bin_field and field != self.y_bin_field and field != self.z_bin_field)]
         if group_prefix is None:
             name = "%s-%s-%s-3d" % (self.z_bin_field, self.y_bin_field, self.x_bin_field)
         else:
             name = "%s-%s-%s-%s-3d" % (group_prefix,self.z_bin_field, self.y_bin_field, self.x_bin_field)
 
-        if name in fid: 
+        if name in fid:
             mylog.info("Profile file is getting larger since you are attempting to overwrite a profile. You may want to repack")
             del fid[name]
         group = fid.create_group(name)
@@ -704,7 +704,7 @@
         group.attrs["x-axis-%s" % self.x_bin_field] = xbins
         group.attrs["y-axis-%s" % self.y_bin_field] = ybins
         group.attrs["z-axis-%s" % self.z_bin_field] = zbins
-        
+
         for field in fields:
             dset = group.create_dataset("%s" % field, data=self.field_data[field][:-1,:-1,:-1])
         fid.close()
@@ -826,7 +826,7 @@
             filter &= (data > mi)
             filter &= (data < ma)
         return filter, [data[filter] for data in bin_fields]
-        
+
     def _get_data(self, chunk, fields):
         # We are using chunks now, which will manage the field parameters and
         # the like.
@@ -843,7 +843,7 @@
         else:
             weight_data = np.ones(chunk.ires.size, dtype="float64")
         weight_data = weight_data[filter]
-        # So that we can pass these into 
+        # So that we can pass these into
         return arr, weight_data, bin_fields
 
     def __getitem__(self, field):
@@ -857,7 +857,7 @@
 
     def items(self):
         return [(k,self[k]) for k in self.field_data.keys()]
-    
+
     def __iter__(self):
         return sorted(self.items())
 
@@ -1058,14 +1058,14 @@
         self.z_bins.convert_to_units(new_unit)
         self.z = 0.5*(self.z_bins[1:]+self.z_bins[:-1])
 
-def create_profile(data_source, bin_fields, fields, n_bins = 64,
-                   extrema = None, logs = None,
-                   weight_field = "cell_mass",
-                   accumulation = False, fractional = False):
+def create_profile(data_source, bin_fields, fields, n_bins=64,
+                   extrema=None, logs=None, units=None,
+                   weight_field="cell_mass",
+                   accumulation=False, fractional=False):
     r"""
     Create a 1, 2, or 3D profile object.
 
-    The dimensionality of the profile object is chosen by the number of 
+    The dimensionality of the profile object is chosen by the number of
     fields given in the bin_fields argument.
 
     Parameters
@@ -1077,7 +1077,7 @@
     fields : list of strings
         The fields to be profiled.
     n : int or list of ints
-        The number of bins in each dimension.  If None, 64 bins for 
+        The number of bins in each dimension.  If None, 64 bins for
         each bin are used for each bin field.
         Default: 64.
     extrema : dict of min, max tuples
@@ -1091,24 +1091,24 @@
     units : dict of strings
         The units of the fields in the profiles, including the bin_fields.
     weight_field : str
-        The weight field for computing weighted average for the profile 
-        values.  If None, the profile values are sums of the data in 
+        The weight field for computing weighted average for the profile
+        values.  If None, the profile values are sums of the data in
         each bin.
     accumulation : bool or list of bools
-        If True, the profile values for a bin n are the cumulative sum of 
-        all the values from bin 0 to n.  If -True, the sum is reversed so 
-        that the value for bin n is the cumulative sum from bin N (total bins) 
-        to n.  If the profile is 2D or 3D, a list of values can be given to 
+        If True, the profile values for a bin n are the cumulative sum of
+        all the values from bin 0 to n.  If -True, the sum is reversed so
+        that the value for bin n is the cumulative sum from bin N (total bins)
+        to n.  If the profile is 2D or 3D, a list of values can be given to
         control the summation in each dimension independently.
         Default: False.
-    fractional : If True the profile values are divided by the sum of all 
-        the profile data such that the profile represents a probability 
+    fractional : If True the profile values are divided by the sum of all
+        the profile data such that the profile represents a probability
         distribution function.
 
     Examples
     --------
 
-    Create a 1d profile.  Access bin field from profile.x and field 
+    Create a 1d profile.  Access bin field from profile.x and field
     data from profile.field_data.
 
     >>> pf = load("DD0046/DD0046")
@@ -1118,7 +1118,7 @@
     ...                          fields=["temperature", "velocity_x"]))
     >>> print profile.x
     >>> print profile.field_data["temperature"]
-    
+
     """
     bin_fields = ensure_list(bin_fields)
     fields = ensure_list(fields)
@@ -1139,7 +1139,7 @@
     if not iterable(accumulation):
         accumulation = [accumulation] * len(bin_fields)
     if logs is None:
-        logs = [data_source.pf._get_field_info(f[0],f[1]).take_log 
+        logs = [data_source.pf._get_field_info(f[0],f[1]).take_log
                 for f in bin_fields]
     else:
         logs = [logs[bin_field[-1]] for bin_field in bin_fields]
@@ -1161,7 +1161,7 @@
             ex.append(field_ex)
     args = [data_source]
     for f, n, (mi, ma), l in zip(bin_fields, n_bins, ex, logs):
-        args += [f, n, mi, ma, l] 
+        args += [f, n, mi, ma, l]
     obj = cls(*args, weight_field = weight_field)
     setattr(obj, "accumulation", accumulation)
     setattr(obj, "fractional", fractional)
@@ -1181,6 +1181,16 @@
                 temp = temp[::-1]
             temp = np.rollaxis(temp, axis)
             obj.field_data[field] = temp
-            
+    if units is not None:
+        for field, unit in units.iteritems():
+            field = data_source._determine_fields(field)[0]
+            if field == obj.x_field:
+                obj.set_x_unit(unit)
+            elif field == getattr(obj, "y_field", None):
+                obj.set_y_unit(unit)
+            elif field == getattr(obj, "z_field", None):
+                obj.set_z_unit(unit)
+            else:
+                obj.set_field_unit(field, unit)
     return obj
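
A hedged sketch of the new units keyword to create_profile: any field named in the dict, including the bin fields, is converted after the profile is built. Field names and unit strings here are illustrative.

    ad = pf.h.all_data()
    prof = create_profile(ad, ["density"], ["temperature", "velocity_x"],
                          weight_field="cell_mass",
                          units={"density": "g/cm**3", "velocity_x": "km/s"})
    print prof.x, prof.field_data["temperature"]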
 

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -24,8 +24,6 @@
     YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
 from yt.data_objects.derived_quantities import \
     DerivedQuantityCollection
-from yt.utilities.definitions import \
-    x_dict, y_dict, axis_names
 from yt.utilities.exceptions import YTSphereTooSmall
 from yt.utilities.linear_interpolators import TrilinearFieldInterpolator
 from yt.utilities.minimal_representation import \
@@ -55,9 +53,6 @@
     fields : list of strings, optional
         If you want the object to pre-retrieve a set of fields, supply them
         here.  This is not necessary.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -72,12 +67,15 @@
     def __init__(self, axis, coords, pf=None, field_parameters=None):
         super(YTOrthoRayBase, self).__init__(pf, field_parameters)
         self.axis = axis
-        self.px_ax = x_dict[self.axis]
-        self.py_ax = y_dict[self.axis]
-        self.px_dx = 'd%s'%(axis_names[self.px_ax])
-        self.py_dx = 'd%s'%(axis_names[self.py_ax])
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
+        self.px_ax = xax
+        self.py_ax = yax
+        # Even though we may not be using x,y,z we use them here.
+        self.px_dx = 'd%s'%('xyz'[self.px_ax])
+        self.py_dx = 'd%s'%('xyz'[self.py_ax])
         self.px, self.py = coords
-        self.sort_by = axis_names[self.axis]
+        self.sort_by = 'xyz'[self.axis]
 
     @property
     def coords(self):
@@ -104,9 +102,6 @@
     fields : list of strings, optional
         If you want the object to pre-retrieve a set of fields, supply them
         here.  This is not necessary.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -167,9 +162,6 @@
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived
          fields.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Examples
     --------
@@ -190,16 +182,18 @@
         self.coord = coord
 
     def _generate_container_field(self, field):
+        xax = self.pf.coordinates.x_axis[self.axis]
+        yax = self.pf.coordinates.y_axis[self.axis]
         if self._current_chunk is None:
             self.index._identify_base_chunk(self)
         if field == "px":
-            return self._current_chunk.fcoords[:,x_dict[self.axis]]
+            return self._current_chunk.fcoords[:,xax]
         elif field == "py":
-            return self._current_chunk.fcoords[:,y_dict[self.axis]]
+            return self._current_chunk.fcoords[:,yax]
         elif field == "pdx":
-            return self._current_chunk.fwidth[:,x_dict[self.axis]] * 0.5
+            return self._current_chunk.fwidth[:,xax] * 0.5
         elif field == "pdy":
-            return self._current_chunk.fwidth[:,y_dict[self.axis]] * 0.5
+            return self._current_chunk.fwidth[:,yax] * 0.5
         else:
             raise KeyError(field)
 
@@ -246,9 +240,6 @@
     node_name: string, optional
         The node in the .yt file to find or store this slice at.  Should
         probably not be used.
-    kwargs : dict of items
-        Any additional values are passed as field parameters that can be
-        accessed by generated fields.
 
     Notes
     -----

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -563,6 +563,8 @@
         self.unit_registry.add("code_magnetic", 1.0, dimensions.magnetic_field)
         self.unit_registry.add("code_temperature", 1.0, dimensions.temperature)
         self.unit_registry.add("code_velocity", 1.0, dimensions.velocity)
+        self.unit_registry.add("code_metallicity", 1.0,
+                               dimensions.dimensionless)
 
     def set_units(self):
         """
@@ -617,12 +619,10 @@
                     self.length_unit / self.time_unit)
         self.unit_registry.modify("code_velocity", vel_unit)
         # domain_width does not yet exist
-        if self.domain_left_edge is None or self.domain_right_edge is None:
-            DW = np.zeros(3)
-        else:
+        if None not in (self.domain_left_edge, self.domain_right_edge):
             DW = self.arr(self.domain_right_edge - self.domain_left_edge, "code_length")
-        self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
-                               DW.units.dimensions)
+            self.unit_registry.add("unitary", float(DW.max() * DW.units.cgs_value),
+                                   DW.units.dimensions)
 
     _arr = None
     @property
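
As the guarded branch above implies, the "unitary" unit is only registered once both domain edges are known, and it corresponds to the largest domain width; a sketch of what that means downstream (the Mpc conversion is illustrative):

    w = pf.quan(1.0, "unitary")   # full width of the widest domain axis
    print w.in_units("code_length"), w.in_units("Mpc")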

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/tests/test_chunking.py
--- a/yt/data_objects/tests/test_chunking.py
+++ b/yt/data_objects/tests/test_chunking.py
@@ -3,7 +3,7 @@
 def _get_dobjs(c):
     dobjs = [("sphere", ("center", (1.0, "unitary"))),
              ("sphere", ("center", (0.1, "unitary"))),
-             ("ortho_ray", (0, (c[x_dict[0]], c[y_dict[0]]))),
+             ("ortho_ray", (0, (c[1], c[2]))),
              ("slice", (0, c[0])),
              #("disk", ("center", [0.1, 0.3, 0.6],
              #           (0.2, 'unitary'), (0.1, 'unitary'))),

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -1,11 +1,13 @@
-from yt.testing import *
+from yt.testing import assert_equal, fake_random_pf
 from yt.units.unit_object import Unit
 import os
 import tempfile
 
+
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
+
 
 def teardown_func(fns):
     for fn in fns:
@@ -14,15 +16,15 @@
         except OSError:
             pass
 
+
 def test_cutting_plane():
+    fns = []
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = nprocs)
-        dims = pf.domain_dimensions
-        center = [0.5,0.5,0.5]
-        normal = [1,1,1]
-        fns = []
+        pf = fake_random_pf(64, nprocs=nprocs)
+        center = [0.5, 0.5, 0.5]
+        normal = [1, 1, 1]
         cut = pf.cutting(normal, center)
         yield assert_equal, cut["ones"].sum(), cut["ones"].size
         yield assert_equal, cut["ones"].min(), 1.0
@@ -30,8 +32,9 @@
         pw = cut.to_pw()
         tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
         os.close(tmpfd)
-        fns += pw.save(name=tmpname)
-        frb = cut.to_frb((1.0,'unitary'), 64)
+        pw.save(name=tmpname)
+        fns.append(tmpname)
+        frb = cut.to_frb((1.0, 'unitary'), 64)
         for cut_field in ['ones', 'density']:
             fi = pf._get_field_info("unknown", cut_field)
             yield assert_equal, frb[cut_field].info['data_source'], \
@@ -50,4 +53,4 @@
                 pf.length_unit.in_cgs()
             yield assert_equal, frb[cut_field].info['center'], \
                 cut.center
-        teardown_func(fns)
+    teardown_func(fns)

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,11 +1,15 @@
-from yt.testing import *
+import numpy as np
+from yt.testing import \
+    fake_random_pf, assert_equal, assert_rel_equal
 from yt.units.unit_object import Unit
 import os
 import tempfile
 
+
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
+
 
 def teardown_func(fns):
     for fn in fns:
@@ -14,26 +18,29 @@
         except OSError:
             pass
 
+
 def test_projection():
+    fns = []
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = nprocs)
+        pf = fake_random_pf(64, nprocs=nprocs)
         dims = pf.domain_dimensions
         xn, yn, zn = pf.domain_dimensions
-        xi, yi, zi = pf.domain_left_edge.to_ndarray() + 1.0/(pf.domain_dimensions * 2)
-        xf, yf, zf = pf.domain_right_edge.to_ndarray() - 1.0/(pf.domain_dimensions * 2)
+        xi, yi, zi = pf.domain_left_edge.to_ndarray() + \
+            1.0 / (pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge.to_ndarray() - \
+            1.0 / (pf.domain_dimensions * 2)
         dd = pf.h.all_data()
         rho_tot = dd.quantities["TotalQuantity"]("density")
         coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
         uc = [np.unique(c) for c in coords]
         # Some simple projection tests with single grids
         for ax, an in enumerate("xyz"):
-            xax = x_dict[ax]
-            yax = y_dict[ax]
+            xax = pf.coordinates.x_axis[ax]
+            yax = pf.coordinates.y_axis[ax]
             for wf in ["density", None]:
-                fns = []
-                proj = pf.proj(["ones", "density"], ax, weight_field = wf)
+                proj = pf.proj(["ones", "density"], ax, weight_field=wf)
                 yield assert_equal, proj["ones"].sum(), proj["ones"].size
                 yield assert_equal, proj["ones"].min(), 1.0
                 yield assert_equal, proj["ones"].max(), 1.0
@@ -44,16 +51,17 @@
                 pw = proj.to_pw()
                 tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
                 os.close(tmpfd)
-                fns += pw.save(name=tmpname)
-                frb = proj.to_frb((1.0,'unitary'), 64)
+                pw.save(name=tmpname)
+                fns.append(tmpname)
+                frb = proj.to_frb((1.0, 'unitary'), 64)
                 for proj_field in ['ones', 'density']:
                     fi = pf._get_field_info(proj_field)
                     yield assert_equal, frb[proj_field].info['data_source'], \
-                            proj.__str__()
+                        proj.__str__()
                     yield assert_equal, frb[proj_field].info['axis'], \
-                            ax
+                        ax
                     yield assert_equal, frb[proj_field].info['field'], \
-                            proj_field
+                        proj_field
                     field_unit = Unit(fi.units)
                     if wf is not None:
                         yield assert_equal, frb[proj_field].units, \
@@ -64,22 +72,21 @@
                         else:
                             proj_unit = "cm"
                         if field_unit != '' and field_unit != Unit():
-                            proj_unit = "({0}) * {1}".format(field_unit, proj_unit)
+                            proj_unit = \
+                                "({0}) * {1}".format(field_unit, proj_unit)
                         yield assert_equal, frb[proj_field].units, \
                             Unit(proj_unit, registry=pf.unit_registry)
                     yield assert_equal, frb[proj_field].info['xlim'], \
-                            frb.bounds[:2]
+                        frb.bounds[:2]
                     yield assert_equal, frb[proj_field].info['ylim'], \
-                            frb.bounds[2:]
+                        frb.bounds[2:]
                     yield assert_equal, frb[proj_field].info['center'], \
-                            proj.center
+                        proj.center
                     yield assert_equal, frb[proj_field].info['weight_field'], \
-                            wf
-                teardown_func(fns)
+                        wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["density"].sum()
             v2 = (dd["density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
-
-
+    teardown_func(fns)

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -4,25 +4,21 @@
 
 """
 
-#-----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 # Copyright (c) 2013, yt Development Team.
 #
 # Distributed under the terms of the Modified BSD License.
 #
 # The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
+# ----------------------------------------------------------------------------
 import os
 import numpy as np
 import tempfile
-from nose.tools import raises
 from yt.testing import \
-    fake_random_pf, assert_equal, assert_array_equal, YTArray
-from yt.utilities.definitions import \
-    x_dict, y_dict
-from yt.utilities.exceptions import \
-    YTNoDataInObjectError
+    fake_random_pf, assert_equal
 from yt.units.unit_object import Unit
 
+
 def setup():
     from yt.config import ytcfg
     ytcfg["yt", "__withintesting"] = "True"
@@ -35,7 +31,9 @@
         except OSError:
             pass
 
+
 def test_slice():
+    fns = []
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
@@ -50,10 +48,9 @@
         slc_pos = 0.5
         # Some simple slice tests with single grids
         for ax, an in enumerate("xyz"):
-            xax = x_dict[ax]
-            yax = y_dict[ax]
+            xax = pf.coordinates.x_axis[ax]
+            yax = pf.coordinates.y_axis[ax]
             for wf in ["density", None]:
-                fns = []
                 slc = pf.slice(ax, slc_pos)
                 yield assert_equal, slc["ones"].sum(), slc["ones"].size
                 yield assert_equal, slc["ones"].min(), 1.0
@@ -65,7 +62,8 @@
                 pw = slc.to_pw()
                 tmpfd, tmpname = tempfile.mkstemp(suffix='.png')
                 os.close(tmpfd)
-                fns += pw.save(name=tmpname)
+                pw.save(name=tmpname)
+                fns.append(tmpname)
                 frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['ones', 'density']:
                     fi = pf._get_field_info(slc_field)
@@ -85,9 +83,9 @@
                         slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
                         slc_pos
-                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
+    teardown_func(fns)
 
 
 def test_slice_over_edges():

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -171,9 +171,9 @@
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results
-        count = selector.count_points(x,y,z)
+        count = selector.count_points(x,y,z, 0.0)
         return count
 
     def select_particles(self, selector, x, y, z):
-        mask = selector.select_points(x,y,z)
+        mask = selector.select_points(x,y,z, 0.0)
         return mask

diff -r c95e902758bb3aa8d9df054b4ccdc42aea4db6dc -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f yt/extern/_dummy_thread32.py
--- /dev/null
+++ b/yt/extern/_dummy_thread32.py
@@ -0,0 +1,155 @@
+"""Drop-in replacement for the thread module.
+
+Meant to be used as a brain-dead substitute so that threaded code does
+not need to be rewritten for when the thread module is not present.
+
+Suggested usage is::
+
+    try:
+        import _thread
+    except ImportError:
+        import _dummy_thread as _thread
+
+"""
+# Exports only things specified by thread documentation;
+# skipping obsolete synonyms allocate(), start_new(), exit_thread().
+__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
+           'interrupt_main', 'LockType']
+
+# A dummy value
+TIMEOUT_MAX = 2**31
+
+# NOTE: this module can be imported early in the extension building process,
+# and so top level imports of other modules should be avoided.  Instead, all
+# imports are done when needed on a function-by-function basis.  Since threads
+# are disabled, the import lock should not be an issue anyway (??).
+
+class error(Exception):
+    """Dummy implementation of _thread.error."""
+
+    def __init__(self, *args):
+        self.args = args
+
+def start_new_thread(function, args, kwargs={}):
+    """Dummy implementation of _thread.start_new_thread().
+
+    Compatibility is maintained by making sure that ``args`` is a
+    tuple and ``kwargs`` is a dictionary.  If an exception is raised
+    and it is SystemExit (which can be done by _thread.exit()) it is
+    caught and nothing is done; all other exceptions are printed out
+    by using traceback.print_exc().
+
+    If the executed function calls interrupt_main the KeyboardInterrupt will be
+    raised when the function returns.
+
+    """
+    if type(args) != type(tuple()):
+        raise TypeError("2nd arg must be a tuple")
+    if type(kwargs) != type(dict()):
+        raise TypeError("3rd arg must be a dict")
+    global _main
+    _main = False
+    try:
+        function(*args, **kwargs)
+    except SystemExit:
+        pass
+    except:
+        import traceback
+        traceback.print_exc()
+    _main = True
+    global _interrupt
+    if _interrupt:
+        _interrupt = False
+        raise KeyboardInterrupt
+
+def exit():
+    """Dummy implementation of _thread.exit()."""
+    raise SystemExit
+
+def get_ident():
+    """Dummy implementation of _thread.get_ident().
+
+    Since this module should only be used when _threadmodule is not
+    available, it is safe to assume that the current process is the
+    only thread.  Thus a constant can be safely returned.
+    """
+    return -1
+
+def allocate_lock():
+    """Dummy implementation of _thread.allocate_lock()."""
+    return LockType()
+
+def stack_size(size=None):
+    """Dummy implementation of _thread.stack_size()."""
+    if size is not None:
+        raise error("setting thread stack size not supported")
+    return 0
+
+class LockType(object):
+    """Class implementing dummy implementation of _thread.LockType.
+
+    Compatibility is maintained by maintaining self.locked_status
+    which is a boolean that stores the state of the lock.  Pickling of
+    the lock, though, should not be done since if the _thread module is
+    then used with an unpickled ``lock()`` from here problems could
+    occur from this class not having atomic methods.
+
+    """
+
+    def __init__(self):
+        self.locked_status = False
+
+    def acquire(self, waitflag=None, timeout=-1):
+        """Dummy implementation of acquire().
+
+        For blocking calls, self.locked_status is automatically set to
+        True and returned appropriately based on value of
+        ``waitflag``.  If it is non-blocking, then the value is
+        actually checked and not set if it is already acquired.  This
+        is all done so that threading.Condition's assert statements
+        aren't triggered and throw a little fit.
+
+        """
+        if waitflag is None or waitflag:
+            self.locked_status = True
+            return True
+        else:
+            if not self.locked_status:
+                self.locked_status = True
+                return True
+            else:
+                if timeout > 0:
+                    import time
+                    time.sleep(timeout)
+                return False
+
+    __enter__ = acquire
+
+    def __exit__(self, typ, val, tb):
+        self.release()
+
+    def release(self):
+        """Release the dummy lock."""
+        # XXX Perhaps shouldn't actually bother to test?  Could lead
+        #     to problems for complex, threaded code.
+        if not self.locked_status:
+            raise error
+        self.locked_status = False
+        return True
+
+    def locked(self):
+        return self.locked_status
+
+# Used to signal that interrupt_main was called in a "thread"
+_interrupt = False
+# True when not executing in a "thread"
+_main = True
+
+def interrupt_main():
+    """Set _interrupt flag to True to have start_new_thread raise
+    KeyboardInterrupt upon exiting."""
+    if _main:
+        raise KeyboardInterrupt
+    else:
+        global _interrupt
+        _interrupt = True

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/0a584d53213b/
Changeset:   0a584d53213b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-05 21:41:11
Summary:     Merging with FITS stuff
Affected #:  56 files

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -0,0 +1,237 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk galaxy. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Density: $\\rho(r) \\propto r^{\\alpha}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where for simplicity we won't worry about the normalizations of these profiles. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
+      "nx,ny,nz = (256,256,256)\n",
+      "R = 10. # kpc\n",
+      "r_0 = 3. # kpc\n",
+      "beta = 1.4\n",
+      "alpha = -1.\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates\n",
+      "dens = np.zeros((nx,ny,nz))\n",
+      "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
+      "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
+      "velx = np.zeros((nx,ny,nz))\n",
+      "vely = np.zeros((nx,ny,nz))\n",
+      "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "data[\"density\"] = (dens,\"g/cm**3\")\n",
+      "data[\"velocity_x\"] = (velx, \"km/s\")\n",
+      "data[\"velocity_y\"] = (vely, \"km/s\")\n",
+      "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc.set_log(\"velocity_x\", False)\n",
+      "slc.set_log(\"velocity_y\", False)\n",
+      "slc.set_log(\"velocity_magnitude\", False)\n",
+      "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the y-axis. We'll create a normal vector:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "i = 60.*np.pi/180.\n",
+      "L = [0.0,np.sin(i),np.sin(i)]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to specify the dimensions of the data cube, and optionally we may choose the bounds in line-of-sight velocity that the data will be binned into. Otherwise, the bounds will simply be set to the negative and positive of the largest speed in the dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Following this, we can now write this cube to a FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube.write_fits(\"cube.fits\", clobber=True, length_unit=(5.0,\"deg\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = load(\"cube.fits\")\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# To figure out what the domain center and width is in pixel (code length) units:\n",
+      "print ds.domain_center\n",
+      "print ds.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:874e85c86cd80a516bb61775b566cd46766c60bdf8f865336bf9dd3505f83821"
+  "signature": "sha256:e4b5ea69687eb79452c16385b3a6f795b4572518dfa7f9d8a8125bd75b5fea85"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,9 +21,11 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import ParticleTrajectories\n",
+      "import glob\n",
+      "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
-      "path = ytcfg.get(\"yt\", \"test_data_dir\")"
+      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "import matplotlib.pyplot as plt"
      ],
      "language": "python",
      "metadata": {},
@@ -75,8 +77,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(my_fns[0])\n",
-      "dd = pf.h.all_data()\n",
+      "ds = load(my_fns[0])\n",
+      "dd = ds.all_data()\n",
       "indices = dd[\"particle_index\"].astype(\"int\")\n",
       "print indices"
      ],
@@ -130,8 +132,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+      "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -148,8 +150,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -166,8 +168,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -185,8 +187,8 @@
      "collapsed": false,
      "input": [
       "particle1 = trajs.trajectory_from_index(1)\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -203,8 +205,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "slc = SlicePlot(pf, \"x\", [\"Density\",\"Dark_Matter_Density\"], center=\"max\", width=(3.0, \"mpc\"))\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "slc = SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -222,7 +224,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.sphere(\"max\", (0.5, \"mpc\"))\n",
+      "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
       "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
      ],
      "language": "python",
@@ -240,7 +242,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.index\")\n",
+      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
       "my_fns.sort()\n",
       "trajs = ParticleTrajectories(my_fns, indices)"
      ],
@@ -263,9 +265,12 @@
       "from mpl_toolkits.mplot3d import Axes3D\n",
       "fig = plt.figure(figsize=(8.0, 8.0))\n",
       "ax = fig.add_subplot(111, projection='3d')\n",
-      "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
-      "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
-      "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+      "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -282,9 +287,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -301,7 +306,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.add_fields([\"Density\"])"
+      "trajs.add_fields([\"density\"])"
      ],
      "language": "python",
      "metadata": {},
@@ -311,17 +316,17 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We also could have included `\"Density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+      "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][25])\n",
-      "pylab.yscale(\"log\")"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
+      "plt.yscale(\"log\")"
      ],
      "language": "python",
      "metadata": {},
@@ -338,29 +343,12 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.write_out(\"halo_trajectories.txt\")\n",
-      "trajs.write_out_h5(\"halo_trajectories.h5\")"
+      "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+      "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 2,
-     "metadata": {},
-     "source": [
-      "Important Caveats"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "* Parallelization is not yet implemented.\n",
-      "* For large datasets, constructing trajectories can be very slow. We are working on optimizing the algorithm for a future release. \n",
-      "* At the moment, trajectories are limited for particles that exist in every dataset. Therefore, for codes like FLASH that allow for particles to exit the domain (and hence the simulation) for certain types of boundary conditions, you need to insure that the particles you wish to examine exist in all datasets in the time series from the beginning to the end. If this is not the case, `ParticleTrajectories` will throw an error. This is a limitation we hope to relax in a future release. "
-     ]
     }
    ],
    "metadata": {}

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/analyzing/analysis_modules/ppv_cubes.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/ppv_cubes.rst
@@ -0,0 +1,4 @@
+Creating Position-Position-Velocity FITS Cubes
+-------------------------------------------------
+
+.. notebook:: PPVCube.ipynb

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -18,3 +18,4 @@
    sunyaev_zeldovich
    radial_column_density
    photon_simulator
+   ppv_cubes

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/cookbook/fits_radio_cubes.ipynb
--- /dev/null
+++ b/doc/source/cookbook/fits_radio_cubes.ipynb
@@ -0,0 +1,306 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:ded7d47bf5a74c9ea5431a37b6d371a631909d2b95214cd8053617762f62e2e4"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import yt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This notebook demonstrates some of the capabilties of `yt` on some FITS \"position-position-velocity\" cubes of radio data. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "M33 VLA Image"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The dataset `\"m33_hi.fits\"` has `NaN`s in it, so we'll mask them out by setting `nan_mask` = 0:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"radio_fits/m33_hi.fits\", nan_mask=0.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll take a slice of the data along the z-axis, which is the velocity axis of the FITS cube:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Note that the x and y axes are in units of \"code length\", which in the case of FITS datasets are equal to the width of one pixel. Currently, the `yt` plotting routines don't understand datasets with non-length units on the axes (such as RA, Dec, velocity, etc.), so it defaults to the pixel scale. This will be changed in a future release. When making plots of FITS data, to see the image coordinates as they are in the file, it is helpful to set the keyword `origin = \"native\"`."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can take slices of this dataset at a few different values along the \"z\" axis (corresponding to the velocity), so let's try a few. First, we'll check what the value along the velocity axis at the domain center is, as well as the range of possible values. This is the third value of each array. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print ds.domain_left_edge[2], ds.domain_center[2], ds.domain_right_edge[2]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll choose a few values for the velocity within this range:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center = ds.domain_center \n",
+      "new_center[2] = -250000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center = ds.domain_center \n",
+      "new_center[2] = -100000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "new_center = ds.domain_center \n",
+      "new_center[2] = -150000.\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"intensity\"], center=new_center, origin=\"native\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "These slices demonstrate the intensity of the radio emission at different line-of-sight velocities. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "$^{13}$CO GRS Data"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This next example uses one of the cubes from the [Boston University Galactic Ring Survey](http://www.bu.edu/galacticring/new_index.htm). "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"radio_fits/grs-50-cube.fits\", nan_mask=0.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can use the `quantities` methods to determine derived quantities of the dataset. For example, we could find the maximum and minimum temperature:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dd = ds.all_data() # A region containing the entire dataset\n",
+      "extrema = dd.quantities.extrema(\"temperature\")\n",
+      "print extrema"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can compute the average temperature along the \"velocity\" axis for all positions by making a `ProjectionPlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], origin=\"native\", \n",
+      "                        weight_field=\"ones\") # \"ones\" weights each cell by 1\n",
+      "prj.set_log(\"temperature\", True)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can also make a histogram of the temperature field of this region:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "pplot = yt.ProfilePlot(dd, \"temperature\", [\"ones\"], weight_field=None, n_bins=128)\n",
+      "pplot.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can see from this histogram and our calculation of the dataset's extrema that there is a lot of noise. Suppose we wanted to make a projection, but instead make it only of the cells which had a positive temperature value. We can do this by doing a \"field cut\" on the data:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fc = dd.cut_region([\"obj['temperature'] > 0\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now let's check the extents of this region:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print fc.quantities.extrema(\"temperature\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Looks like we were successful in filtering out the negative temperatures. To compute the average temperature of this new region:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "fc.quantities.weighted_average_quantity(\"temperature\", \"ones\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, let's make a projection of the dataset, using the field cut `fc` as a `data_source`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], data_source=fc, origin=\"native\", \n",
+      "                        weight_field=\"ones\") # \"ones\" weights each cell by 1\n",
+      "prj.set_log(\"temperature\", True)\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/cookbook/fits_radio_cubes.rst
--- /dev/null
+++ b/doc/source/cookbook/fits_radio_cubes.rst
@@ -0,0 +1,6 @@
+.. _radio_cubes:
+
+FITS Radio Cubes in yt
+----------------------
+
+.. notebook:: fits_radio_cubes.ipynb
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/cookbook/fits_xray_images.ipynb
--- /dev/null
+++ b/doc/source/cookbook/fits_xray_images.ipynb
@@ -0,0 +1,259 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:564cb1986609d8bb76397a18219974504231b260f912bed483b87c1f896e92ac"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import yt\n",
+      "import numpy as np"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This notebook shows how to use `yt` to make plots and examine FITS X-ray images and events files. "
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Sloshing, Shocks, and Bubbles in Abell 2052"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example uses data provided by [Scott Randall](http://hea-www.cfa.harvard.edu/~srandall/), presented originally in [Blanton, E.L., Randall, S.W., Clarke, T.E., et al. 2011, ApJ, 737, 99](http://adsabs.harvard.edu/cgi-bin/bib_query?2011ApJ...737...99B). They consist of two files, a \"flux map\" in counts/s/pixel between 0.3 and 2 keV, and a spectroscopic temperature map in keV. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"xray_fits/A2052_merged_0.3-2_match-core_tmap_bgecorr.fits\", \n",
+      "             auxiliary_files=[\"xray_fits/A2052_core_tmap_b1_m2000_.fits\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since the flux and projected temperature images are in two different files, we had to use one of them (in this case the \"flux\" file) as a master file, and pass in the \"temperature\" file with the `auxiliary_files` keyword to `load`. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next, let's derive some new fields for the number of counts, the \"pseudo-pressure\", and the \"pseudo-entropy\":"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.index\n",
+      "def _counts(field, data):\n",
+      "    exposure_time = data.get_field_parameter(\"exposure_time\")\n",
+      "    return data[\"flux\"]*data[\"pixel\"]*exposure_time\n",
+      "ds.field_info.add_field(name=\"counts\", function=_counts, units=\"counts\")\n",
+      "\n",
+      "def _pp(field, data):\n",
+      "    return np.sqrt(data[\"counts\"])*data[\"projected_temperature\"]\n",
+      "ds.field_info.add_field(name=\"pseudo_pressure\", function=_pp, units=\"sqrt(counts)*keV\")\n",
+      "\n",
+      "def _pe(field, data):\n",
+      "    return data[\"projected_temperature\"]*data[\"counts\"]**(-1./3.)\n",
+      "ds.field_info.add_field(name=\"pseudo_entropy\", function=_pe, units=\"keV*(counts)**(-1/3)\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here, we're deriving a \"counts\" field from the \"flux\" field by passing it a `field_parameter` for the exposure time of the time and multiplying by the pixel scale. Second, we use the fact that the surface brightness is strongly dependent on density ($S_X \\propto \\rho^2$) to use the counts in each pixel as a \"stand-in\". Next, we'll grab the exposure time from the primary FITS header of the flux file and create a `YTQuantity` from it, to be used as a `field_parameter`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "exposure_time = ds.quan(ds.primary_header[\"exposure\"], \"s\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we can make the `SlicePlot` object of the fields we want, passing in the `exposure_time` as a `field_parameter`. We'll also set the width of the image to 250 pixels."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds, \"z\", \n",
+      "                   [\"flux\",\"projected_temperature\",\"pseudo_pressure\",\"pseudo_entropy\"], \n",
+      "                   origin=\"native\", field_parameters={\"exposure_time\":exposure_time})\n",
+      "slc.set_log(\"flux\",True)\n",
+      "slc.set_log(\"pseudo_pressure\",False)\n",
+      "slc.set_log(\"pseudo_entropy\",False)\n",
+      "slc.set_width(250.)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "The Bullet Cluster"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "This example uses an events table file from a ~100 ks exposure of the \"Bullet Cluster\" from the [Chandra Data Archive](http://cxc.harvard.edu/cda/). In this case, the individual photon events are treated as particle fields in `yt`. However, you can make images of the object in different energy bands using the `setup_counts_fields` function. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.frontends.fits.api import setup_counts_fields"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`setup_counts_fields` will take a list of energy bounds (emin, emax) in keV and create a new field from each where the photons in that energy range will be deposited onto the image grid. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ebounds = [(0.1,2.0),(2.0,5.0)]\n",
+      "setup_counts_fields(ebounds)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`load` will handle the events file as FITS image files, and will set up a grid using the WCS information in the file. Optionally, the events may be reblocked to a new resolution. by setting the `\"reblock\"` parameter in the `parameters` dictionary in `load`. `\"reblock\"` must be a power of 2. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"xray_fits/acisf05356N003_evt2.fits.gz\", parameters={\"reblock\":2})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The \"x\", \"y\", \"energy\", and \"time\" fields in the events table are loaded as particle fields. Each one has a name given by \"event\\_\" plus the name of the field:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dd = ds2.all_data()\n",
+      "print dd[\"event_x\"]\n",
+      "print dd[\"event_y\"]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll make a plot of the two counts fields we made, and pan and zoom to the bullet:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds2, \"z\", [\"counts_0.1-2.0\",\"counts_2.0-5.0\"], origin=\"native\")\n",
+      "slc.pan((100.,100.))\n",
+      "slc.set_width(500.)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The counts fields can take the field parameter `\"sigma\"` and use [AstroPy's convolution routines](http://astropy.readthedocs.org/en/latest/convolution/) to smooth the data with a Gaussian:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = yt.SlicePlot(ds2, \"z\", [\"counts_0.1-2.0\",\"counts_2.0-5.0\"], origin=\"native\",\n",
+      "                   field_parameters={\"sigma\":2.}) # This value is in pixel scale\n",
+      "slc.pan((100.,100.))\n",
+      "slc.set_width(500.)\n",
+      "slc.set_zlim(\"counts_0.1-2.0\", 0.01, 100.)\n",
+      "slc.set_zlim(\"counts_2.0-5.0\", 0.01, 50.)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/cookbook/fits_xray_images.rst
--- /dev/null
+++ b/doc/source/cookbook/fits_xray_images.rst
@@ -0,0 +1,6 @@
+.. _xray_fits:
+
+FITS X-ray Images in yt
+-------------------------
+
+.. notebook:: fits_xray_images.ipynb
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/cookbook/index.rst
--- a/doc/source/cookbook/index.rst
+++ b/doc/source/cookbook/index.rst
@@ -46,4 +46,5 @@
    embedded_javascript_animation
    embedded_webm_animation
    ../analyzing/analysis_modules/sunyaev_zeldovich
-   
+   fits_radio_cubes
+   fits_xray_images

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -459,6 +459,243 @@
 
    pf = load("/u/cmoody3/data/art_snapshots/SFG1/10MpcBox_csf512_a0.460.d")
 
+.. _loading_athena_data:
+
+Athena Data
+-----------
+
+Athena 4.x VTK data is *mostly* supported and cared for by John
+ZuHone. Both uniform grid and SMR datasets are supported.
+
+Loading Athena datasets is slightly different depending on whether
+your dataset came from a serial or a parallel run. If the data came
+from a serial run or you have joined the VTK files together using the
+Athena tool ``join_vtk``, you can load the data like this:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("kh.0010.vtk")
+
+The filename corresponds to the file on SMR level 0; if there
+are multiple levels, the corresponding files will be picked up
+automatically, assuming they are laid out in ``lev*`` subdirectories
+under the directory where the base file is located.
+
+For parallel datasets, yt assumes that they are laid out in
+directories named ``id*``, one for each processor number, each with
+``lev*`` subdirectories for additional refinement levels. To load this
+data, call ``load`` with the base file in the ``id0`` directory:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("id0/kh.0010.vtk")
+
+which will pick up all of the files in the different ``id*`` directories for
+the entire dataset.
+
+yt works in cgs ("Gaussian") units by default, but Athena data is not
+normally stored in these units. If you would like to convert data to
+cgs units, you may supply conversions for length, time, and mass to ``load``:
+
+.. code-block:: python
+
+   from yt.mods import *
+   pf = load("id0/cluster_merger.0250.vtk",
+             parameters={"length_unit":(1.0,"Mpc"),
+                         "time_unit":(1.0,"Myr"),
+                         "mass_unit":(1.0e14,"Msun")})
+
+This means that the yt fields, e.g. ``("gas","density")``, ``("gas","x-velocity")``,
+``("gas","magnetic_field_x")``, will be in cgs units, but the Athena fields, e.g.,
+``("athena","density")``, ``("athena","velocity_x")``, ``("athena","cell_centered_B_x")``, will be
+in code units.
+
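+As a quick check (an illustrative sketch only, reusing the ``pf`` object loaded
+above), the two flavors of a field can be compared directly:
+
+.. code-block:: python
+
+   dd = pf.h.all_data()
+   print dd["gas", "density"]      # converted to cgs units (g/cm**3)
+   print dd["athena", "density"]   # left in code units
+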
+.. rubric:: Caveats
+
+* yt primarily works with primitive variables. If the Athena
+  dataset contains conservative variables, the yt primitive fields will be generated from the
+  conserved variables on disk.
+* Domains may be visualized assuming periodicity.
+* Particle list data is currently unsupported.
+
+.. _loading-fits-data:
+
+FITS Data
+---------
+
+FITS data is *mostly* supported and cared for by John ZuHone. In order to
+read FITS data, `AstroPy <http://www.astropy.org>`_ must be installed. FITS
+data cubes can be loaded in the same way by yt as other datasets. yt
+can read FITS image files that have the following (case-insensitive) suffixes:
+
+* fits
+* fts
+* fits.gz
+* fts.gz
+
+yt can read two kinds of FITS files: FITS image files and FITS binary table files containing
+positions, times, and energies of X-ray events.
+
+.. note::
+
+  AstroPy is necessary due to the requirements of both FITS file reading and
+  WCS coordinates. Since new releases of
+  `PyFITS <http://www.stsci.edu/institute/software_hardware/pyfits>`_ are to be
+  discontinued, individual installations of this package and the
+  `PyWCS <http://stsdas.stsci.edu/astrolib/pywcs/>`_ package are not supported.
+
+Though a FITS image is composed of one data cube in the FITS file,
+upon being loaded into yt it is automatically decomposed into grids:
+
+.. code-block:: python
+
+  from yt.mods import *
+  ds = load("m33_hi.fits")
+  ds.print_stats()
+
+.. parsed-literal::
+
+  level	  # grids	    # cells	   # cells^3
+  ----------------------------------------------
+    0	     512	  981940800	         994
+  ----------------------------------------------
+             512	  981940800
+
+yt will generate its own domain decomposition, but the number of grids can be
+set manually by passing the ``nprocs`` parameter to the ``load`` call:
+
+.. code-block:: python
+
+  ds = load("m33_hi.fits", nprocs=1024)
+
+Making the Most of `yt` for FITS Data
++++++++++++++++++++++++++++++++++++++
+
+yt will load data that lacks WCS information and/or some header keywords, but the resulting
+field information will necessarily be incomplete. For example, field names may not be descriptive,
+and units will not be correct. To get the full use out of yt for FITS files, make sure that for
+each image the following header keywords have sensible values:
+
+* ``CDELTx``: The pixel width along axis ``x``
+* ``CRVALx``: The coordinate value at the reference position along axis ``x``
+* ``CRPIXx``: The reference pixel along axis ``x``
+* ``CTYPEx``: The projection type of axis ``x``
+* ``CUNITx``: The units of the coordinate along axis ``x``
+* ``BTYPE``: The type of the image
+* ``BUNIT``: The units of the image
+
+FITS header keywords can easily be updated using AstroPy. For example,
+to set the ``BTYPE`` and ``BUNIT`` keywords:
+
+.. code-block:: python
+
+    import astropy.io.fits as pyfits
+    f = pyfits.open("xray_flux_image.fits", mode="update")
+    f[0].header["BUNIT"] = "cts/s/pixel"
+    f[0].header["BTYPE"] = "flux"
+    f.flush()
+    f.close()
+
+FITS Coordinates
+++++++++++++++++
+
+For FITS datasets, the unit of ``code_length`` is always the width of one
+pixel. yt will attempt to use the WCS information in the FITS header to
+construct information about the coordinate system, and provides support for
+the following dataset types:
+
+1. Rectilinear 2D/3D images with length units (e.g., Mpc, AU,
+   etc.) defined in the ``CUNITx`` keywords
+2. 2D images in some celestial coordinate systems (RA/Dec,
+   galactic latitude/longitude, defined in the ``CTYPEx``
+   keywords), and X-ray binary table event files
+3. 3D images with celestial coordinates and a third axis for another
+   quantity, such as velocity, frequency, wavelength, etc.
+4. 4D images with the first three axes like Case 3, where the slices
+   along the 4th axis are interpreted as different fields.
+
+If your data is of the first case, yt will determine the length units based
+on the information in the header. If your data is of the second or third
+case, no length units will be assigned, but the world coordinate information
+about the axes will be stored in separate fields. If your data is of the fourth
+type, the coordinates of the first three axes will be determined according to
+cases 1-3.
+
+.. note::
+
+  Linear length-based coordinates (Case 1 above) are only supported if all dimensions
+  have the same value for ``CUNITx``. WCS coordinates are only supported for Cases 2-4.
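+
+As a quick illustration of the pixel-based ``code_length`` unit, here is a
+minimal sketch (assuming a hypothetical image ``my_image.fits`` whose
+``CUNITx`` keywords are in kiloparsecs, i.e. Case 1 above):
+
+.. code-block:: python
+
+  ds = load("my_image.fits")
+  # one pixel width, converted to the physical length unit read from the header
+  print ds.quan(1.0, "code_length").in_units("kpc")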
+
+Fields in FITS Datasets
++++++++++++++++++++++++
+
+Multiple fields can be included in a FITS dataset in several different ways.
+The first way, and the simplest, is if more than one image HDU is
+contained within the same file. The field names will be determined by the
+value of ``BTYPE`` in the header, and the field units will be determined by
+the value of ``BUNIT``. The second way is if a dataset has a fourth axis,
+with each slice along this axis corresponding to a different field. In this
+case, the field names will be determined by the value of the ``CTYPE4`` keyword
+and the index of the slice. So, for example, if ``BTYPE`` = ``"intensity"`` and
+``CTYPE4`` = ``"stokes"``, then the fields will be named
+``"intensity_stokes_1"``, ``"intensity_stokes_2"``, and so on.
+
+The third way is if auxiliary files are included along with the main file, like so:
+
+.. code-block:: python
+
+    ds = load("flux.fits", auxiliary_files=["temp.fits","metal.fits"])
+
+The image blocks in each of these files will be loaded as separate fields,
+provided they have the same dimensions as the image blocks in the main file.
+
+Additionally, fields corresponding to the WCS coordinates will be generated
+based on the corresponding ``CTYPEx`` keywords. When queried, these fields
+will be generated from the pixel coordinates in the file using the WCS
+transformations provided by AstroPy.
+
+X-ray event data will be loaded as particle fields in yt, but a grid will be constructed from the
+WCS information in the FITS header. There is a helper function, ``setup_counts_fields``,
+which may be used to make deposited image fields from the event data for different energy bands
+(for an example see :ref:`xray_fits`).
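+
+As a rough sketch of how this might look (the import path, the hypothetical
+file name ``events.fits``, and the form of the energy-band argument are
+assumptions here, not a definitive reference; see :ref:`xray_fits` for a
+complete example):
+
+.. code-block:: python
+
+  from yt.frontends.fits.misc import setup_counts_fields
+
+  ds = load("events.fits")
+  # hypothetical energy bands in keV: one deposited counts field per (emin, emax) pair
+  ebounds = [(0.1, 2.0), (2.0, 5.0)]
+  setup_counts_fields(ds, ebounds)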
+
+.. note::
+
+  Each FITS image from a single dataset, whether from one file or from one of
+  multiple files, must have the same dimensions and WCS information as the
+  first image in the primary file. If this is not the case,
+  yt will raise a warning and will not load this field.
+
+Additional Options
+++++++++++++++++++
+
+FITS image data may include ``NaNs``. If you wish to mask this data out,
+you may supply a ``nan_mask`` parameter to ``load``, which may either be a
+single floating-point number (applies to all fields) or a Python dictionary
+containing different mask values for different fields:
+
+.. code-block:: python
+
+  # passing a single float
+  ds = load("m33_hi.fits", nan_mask=0.0)
+
+  # passing a dict
+  ds = load("m33_hi.fits", nan_mask={"intensity":-1.0,"temperature":0.0})
+
+AstroPy may generate a large number of warnings about individual FITS files,
+many of which you will probably want to ignore. These warnings are suppressed
+by default; if you want to see them, pass ``suppress_astropy_warnings=False``
+in the call to ``load``.
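+
+For example, using the ``m33_hi.fits`` image from above:
+
+.. code-block:: python
+
+  ds = load("m33_hi.fits", suppress_astropy_warnings=False)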
+
+Examples of Using FITS Data
++++++++++++++++++++++++++++
+
+The following IPython notebooks show examples of working with FITS data in yt:
+
+* :ref:`radio_cubes`
+* :ref:`xray_fits`
+
 .. _loading-moab-data:
 
 MOAB Data

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/visualizing/FITSImageBuffer.ipynb
--- /dev/null
+++ b/doc/source/visualizing/FITSImageBuffer.ipynb
@@ -0,0 +1,205 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:872f7525edd3c1ee09c67f6ecdd8552218df05ebe5ab73bcab55654edf0ac2bb"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "yt has capabilities for writing 2D and 3D uniformly gridded data generated from datasets to FITS files. This is via the `FITSImageBuffer` class, which has subclasses `FITSSlice` and `FITSProjection` to write slices and projections directly to FITS. We'll test this out on an Athena dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import yt\n",
+      "from yt.utilities.fits_image import FITSImageBuffer, FITSSlice, FITSProjection"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = yt.load(\"MHDSloshing/virgo_low_res.0054.vtk\", parameters={\"length_unit\":(1.0,\"Mpc\"),\n",
+      "                                                               \"mass_unit\":(1.0e14,\"Msun\"),\n",
+      "                                                               \"time_unit\":(1.0,\"Myr\")})"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate a useful example of creating a FITS file, let's first make a `ProjectionPlot`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"temperature\"], weight_field=\"density\", width=(500.,\"kpc\"))\n",
+      "prj.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Suppose that we wanted to write this projection to a FITS file for analysis and visualization in other programs, such as ds9. We can do that using `FITSProjection`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits = FITSProjection(ds, \"z\", [\"temperature\"], weight_field=\"density\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "which took the same parameters as `ProjectionPlot` except the width, because `FITSProjection` and `FITSSlice` always make slices and projections of the width of the domain size, at the finest resolution available in the simulation, in a unit determined to be appropriate for the physical size of the dataset. `prj_fits` is a full-fledged FITS file in memory, specifically an [AstroPy `HDUList`](http://astropy.readthedocs.org/en/latest/io/fits/api/hdulists.html) object. This means that we can use all of the methods inherited from `HDUList`:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.info()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`info` shows us the contents of the virtual FITS file. We can also look at the header for the `\"temperature\"` image, like so:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits[\"temperature\"].header"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where we can see that the temperature units are in Kelvin and the cell widths are in kiloparsecs. The projection can be written to disk using the `writeto` method:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj_fits.writeto(\"sloshing.fits\", clobber=True)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since yt can read FITS image files, it can be loaded up just like any other dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds2 = yt.load(\"sloshing.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and we can make a `SlicePlot` of the 2D image, which shows the same data as the previous image:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc2 = yt.SlicePlot(ds2, \"z\", [\"temperature\"], width=(500.,\"kpc\"))\n",
+      "slc2.set_log(\"temperature\", True)\n",
+      "slc2.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If you want more fine-grained control over what goes into the FITS file, you can call `FITSImageBuffer` directly, with various kinds of inputs. For example, you could use a `FixedResolutionBuffer`, and specify you want the units in parsecs instead:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc3 = ds.slice(0, 0.0)\n",
+      "frb = slc3.to_frb((500.,\"kpc\"), 800)\n",
+      "fib = FITSImageBuffer(frb, fields=[\"density\",\"temperature\"], units=\"pc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, a 3D FITS cube can be created from a covering grid:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cvg = ds.covering_grid(ds.index.max_level, [-0.5,-0.5,-0.5], [64, 64, 64], fields=[\"density\",\"temperature\"])\n",
+      "fib = FITSImageBuffer(cvg, fields=[\"density\",\"temperature\"], units=\"Mpc\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/visualizing/index.rst
--- a/doc/source/visualizing/index.rst
+++ b/doc/source/visualizing/index.rst
@@ -14,3 +14,4 @@
    mapserver
    streamlines
    colormaps/index
+   writing_fits_images

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e doc/source/visualizing/writing_fits_images.rst
--- /dev/null
+++ b/doc/source/visualizing/writing_fits_images.rst
@@ -0,0 +1,6 @@
+.. _writing_fits_images:
+
+Writing FITS Images
+==========================
+
+.. notebook:: FITSImageBuffer.ipynb
\ No newline at end of file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -114,3 +114,6 @@
      TableAbsorbModel, \
      PhotonModel, \
      ThermalPhotonModel
+
+from .ppv_cube.api import \
+    PPVCube

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- a/yt/analysis_modules/particle_trajectories/particle_trajectories.py
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -13,7 +13,12 @@
 from yt.data_objects.data_containers import YTFieldData
 from yt.data_objects.time_series import DatasetSeries
 from yt.utilities.lib.CICDeposit import CICSample_3
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
 from yt.funcs import *
+from yt.units.yt_array import array_like_field
+from yt.config import ytcfg
+from collections import OrderedDict
 
 import numpy as np
 import h5py
@@ -47,30 +52,21 @@
     >>> fields = ["particle_position_x", "particle_position_y",
     >>>           "particle_position_z", "particle_velocity_x",
     >>>           "particle_velocity_y", "particle_velocity_z"]
-    >>> pf = load(my_fns[0])
-    >>> init_sphere = pf.sphere(pf.domain_center, (.5, "unitary"))
+    >>> ds = load(my_fns[0])
+    >>> init_sphere = ds.sphere(ds.domain_center, (.5, "unitary"))
     >>> indices = init_sphere["particle_index"].astype("int")
     >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
     >>> for t in trajs :
     >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
-
-    Notes
-    -----
-    As of this time only particle trajectories that are complete over the
-    set of specified parameter files are supported. If any particle's history
-    ends for some reason (e.g. leaving the simulation domain or being actively
-    destroyed), the whole trajectory collection of which it is a set must end
-    at or before the particle's last timestep. This is a limitation we hope to
-    lift at some point in the future.     
     """
     def __init__(self, filenames, indices, fields=None) :
 
         indices.sort() # Just in case the caller wasn't careful
-        
         self.field_data = YTFieldData()
-        self.pfs = DatasetSeries.from_filenames(filenames)
+        self.data_series = DatasetSeries.from_filenames(filenames)
         self.masks = []
         self.sorts = []
+        self.array_indices = []
         self.indices = indices
         self.num_indices = len(indices)
         self.num_steps = len(filenames)
@@ -79,54 +75,44 @@
         # Default fields 
         
         if fields is None: fields = []
+        fields.append("particle_position_x")
+        fields.append("particle_position_y")
+        fields.append("particle_position_z")
+        fields = list(OrderedDict.fromkeys(fields))
 
-        # Must ALWAYS have these fields
-        
-        fields = fields + ["particle_position_x",
-                           "particle_position_y",
-                           "particle_position_z"]
-
-        # Set up the derived field list and the particle field list
-        # so that if the requested field is a particle field, we'll
-        # just copy the field over, but if the field is a grid field,
-        # we will first interpolate the field to the particle positions
-        # and then return the field. 
-
-        pf = self.pfs[0]
-        self.derived_field_list = pf.derived_field_list
-        self.particle_fields = [field for field in self.derived_field_list
-                                if pf.field_info[field].particle_type]
-
-        """
-        The following loops through the parameter files
-        and performs two tasks. The first is to isolate
-        the particles with the correct indices, and the
-        second is to create a sorted list of these particles.
-        We also make a list of the current time from each file. 
-        Right now, the code assumes (and checks for) the
-        particle indices existing in each dataset, a limitation I
-        would like to lift at some point since some codes
-        (e.g., FLASH) destroy particles leaving the domain.
-        """
-        
-        for pf in self.pfs:
-            dd = pf.h.all_data()
-            newtags = dd["particle_index"].astype("int")
-            if not np.all(np.in1d(indices, newtags, assume_unique=True)):
-                print "Not all requested particle ids contained in this dataset!"
-                raise IndexError
+        old_level = int(ytcfg.get("yt","loglevel"))
+        mylog.setLevel(40)
+        my_storage = {}
+        pbar = get_pbar("Constructing trajectory information", len(self.data_series))
+        for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+            dd = ds.all_data()
+            idx_field = dd._determine_fields("particle_index")[0]
+            newtags = dd[idx_field].ndarray_view().astype("int64")
             mask = np.in1d(newtags, indices, assume_unique=True)
             sorts = np.argsort(newtags[mask])
-            self.masks.append(mask)            
+            self.array_indices.append(np.where(np.in1d(indices, newtags, assume_unique=True))[0])
+            self.masks.append(mask)
             self.sorts.append(sorts)
-            self.times.append(pf.current_time)
+            sto.result_id = ds.parameter_filename
+            sto.result = ds.current_time
+            pbar.update(i)
+        pbar.finish()
 
-        self.times = np.array(self.times)
+        mylog.setLevel(old_level)
 
-        # Now instantiate the requested fields 
+        times = []
+        for fn, time in sorted(my_storage.items()):
+            times.append(time)
+
+        self.times = self.data_series[0].arr([time for time in times], times[0].units)
+
+        self.particle_fields = []
+
+        # Instantiate fields the caller requested
+
         for field in fields:
             self._get_data(field)
-            
+
     def has_key(self, key):
         return (key in self.field_data)
     
@@ -135,8 +121,7 @@
 
     def __getitem__(self, key):
         """
-        Get the field associated with key,
-        checking to make sure it is a particle field.
+        Get the field associated with key.
         """
         if key == "particle_time":
             return self.times
@@ -203,33 +188,48 @@
         with shape (num_indices, num_steps)
         """
         if not self.field_data.has_key(field):
-            particles = np.empty((0))
+            old_level = int(ytcfg.get("yt","loglevel"))
+            mylog.setLevel(40)
+            dd_first = self.data_series[0].all_data()
+            fd = dd_first._determine_fields(field)[0]
+            if field not in self.particle_fields:
+                if self.data_series[0].field_info[fd].particle_type:
+                    self.particle_fields.append(field)
+            particles = np.empty((self.num_indices,self.num_steps)) * np.nan
             step = int(0)
-            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+            pbar = get_pbar("Generating field %s in trajectories." % (field), self.num_steps)
+            my_storage={}
+            for i, (sto, ds) in enumerate(self.data_series.piter(storage=my_storage)):
+                mask = self.masks[i]
+                sort = self.sorts[i]
                 if field in self.particle_fields:
                     # This is easy... just get the particle fields
-                    dd = pf.h.all_data()
-                    pfield = dd[field][mask]
-                    particles = np.append(particles, pfield[sort])
+                    dd = ds.all_data()
+                    pfield = dd[fd].ndarray_view()[mask][sort]
                 else:
                     # This is hard... must loop over grids
                     pfield = np.zeros((self.num_indices))
-                    x = self["particle_position_x"][:,step]
-                    y = self["particle_position_y"][:,step]
-                    z = self["particle_position_z"][:,step]
-                    particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+                    x = self["particle_position_x"][:,step].ndarray_view()
+                    y = self["particle_position_y"][:,step].ndarray_view()
+                    z = self["particle_position_z"][:,step].ndarray_view()
+                    particle_grids, particle_grid_inds = ds.index.find_points(x,y,z)
                     for grid in particle_grids:
-                        cube = grid.retrieve_ghost_zones(1, [field])
+                        cube = grid.retrieve_ghost_zones(1, [fd])
                         CICSample_3(x,y,z,pfield,
                                     self.num_indices,
-                                    cube[field],
+                                    cube[fd],
                                     np.array(grid.LeftEdge).astype(np.float64),
                                     np.array(grid.ActiveDimensions).astype(np.int32),
-                                    np.float64(grid['dx']))
-                    particles = np.append(particles, pfield)
+                                    grid.dds[0])
+                sto.result_id = ds.parameter_filename
+                sto.result = (self.array_indices[i], pfield)
+                pbar.update(step)
                 step += 1
-            self[field] = particles.reshape(self.num_steps,
-                                            self.num_indices).transpose()
+            pbar.finish()
+            for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())):
+                particles[indices,i] = pfield
+            self.field_data[field] = array_like_field(dd_first, particles, fd)
+            mylog.setLevel(old_level)
         return self.field_data[field]
 
     def trajectory_from_index(self, index):
@@ -269,6 +269,7 @@
             traj[field] = self[field][mask,:][0]
         return traj
 
+    @parallel_root_only
     def write_out(self, filename_base):
         """
         Write out particle trajectories to tab-separated ASCII files (one
@@ -299,7 +300,8 @@
             fid.writelines(outlines)
             fid.close()
             del fid
-            
+
+    @parallel_root_only
     def write_out_h5(self, filename):
         """
         Write out all the particle trajectories to a single HDF5 file

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/ppv_cube/api.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/api.py
@@ -0,0 +1,12 @@
+"""
+API for ppv_cube
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from ppv_cube import PPVCube

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/ppv_cube/ppv_cube.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -0,0 +1,164 @@
+"""
+Generating PPV FITS cubes
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.frontends.fits.data_structures import ap
+from yt.utilities.orientation import Orientation
+from yt.utilities.fits_image import FITSImageBuffer
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.funcs import get_pbar
+
+def create_intensity(vmin, vmax, ifield):
+    def _intensity(field, data):
+        idxs = (data["v_los"] >= vmin) & (data["v_los"] < vmax)
+        f = np.zeros(data[ifield].shape)
+        f[idxs] = data[ifield][idxs]
+        return f
+    return _intensity
+
+def create_vlos(z_hat):
+    def _v_los(field, data):
+        vz = data["velocity_x"]*z_hat[0] + \
+             data["velocity_y"]*z_hat[1] + \
+             data["velocity_z"]*z_hat[2]
+        return -vz
+    return _v_los
+
+class PPVCube(object):
+    def __init__(self, ds, normal, field, width=(1.0,"unitary"),
+                 dims=(100,100,100), velocity_bounds=None):
+        r""" Initialize a PPVCube object.
+
+        Parameters
+        ----------
+        ds : dataset
+            The dataset.
+        normal : array_like
+            The normal vector along which to make the projections.
+        field : string
+            The field to project.
+        width : float or tuple, optional
+            The width of the projection in length units. Specify a float
+            for code_length units or a tuple (value, units).
+        dims : tuple, optional
+            A 3-tuple of dimensions (nx,ny,nv) for the cube.
+        velocity_bounds : tuple, optional
+            A 3-tuple of (vmin, vmax, units) for the velocity bounds to
+            integrate over. If None, the largest velocity of the
+            dataset will be used, e.g. velocity_bounds = (-v.max(), v.max())
+
+        Examples
+        --------
+        >>> i = 60*np.pi/180.
+        >>> L = [0.0,np.sin(i),np.cos(i)]
+        >>> cube = PPVCube(ds, L, "density", width=(10.,"kpc"),
+        ...                velocity_bounds=(-5.,4.,"km/s"))
+        """
+        self.ds = ds
+        self.field = field
+        self.width = width
+
+        self.nx = dims[0]
+        self.ny = dims[1]
+        self.nv = dims[2]
+
+        normal = np.array(normal)
+        normal /= np.sqrt(np.dot(normal, normal))
+        vecs = np.identity(3)
+        t = np.cross(normal, vecs).sum(axis=1)
+        ax = t.argmax()
+        north = np.cross(normal, vecs[ax,:]).ravel()
+        orient = Orientation(normal, north_vector=north)
+
+        dd = ds.all_data()
+
+        fd = dd._determine_fields(field)[0]
+
+        self.field_units = ds._get_field_info(fd).units
+
+        if velocity_bounds is None:
+            vmin, vmax = dd.quantities.extrema("velocity_magnitude")
+            self.v_bnd = -vmax, vmax
+        else:
+            self.v_bnd = (ds.quan(velocity_bounds[0], velocity_bounds[2]),
+                     ds.quan(velocity_bounds[1], velocity_bounds[2]))
+
+        vbins = np.linspace(self.v_bnd[0], self.v_bnd[1], num=self.nv+1)
+
+        _vlos = create_vlos(orient.unit_vectors[2])
+        ds.field_info.add_field(("gas","v_los"), function=_vlos, units="cm/s")
+
+        self.data = ds.arr(np.zeros((self.nx,self.ny,self.nv)), self.field_units)
+        pbar = get_pbar("Generating cube.", self.nv)
+        for i in xrange(self.nv):
+            v1 = vbins[i]
+            v2 = vbins[i+1]
+            _intensity = create_intensity(v1, v2, field)
+            ds.field_info.add_field(("gas","intensity"),
+                                    function=_intensity, units=self.field_units)
+            prj = off_axis_projection(ds, ds.domain_center, normal, width,
+                                      (self.nx, self.ny), "intensity")
+            self.data[:,:,i] = prj[:,:]
+            ds.field_info.pop(("gas","intensity"))
+            pbar.update(i)
+
+        pbar.finish()
+
+    def write_fits(self, filename, clobber=True, length_unit=(10.0, "kpc"),
+                   sky_center=(30.,45.)):
+        r""" Write the PPVCube to a FITS file.
+
+        Parameters
+        ----------
+        filename : string
+            The name of the file to write.
+        clobber : boolean
+            Whether or not to clobber an existing file with the same name.
+        length_unit : tuple, optional
+            The length that corresponds to the width of the projection in
+            (value, unit) form. Accepts a length unit or 'deg'.
+        sky_center : tuple, optional
+            The (RA, Dec) coordinate in degrees of the central pixel if
+            *length_unit* is 'deg'.
+
+        Examples
+        --------
+        >>> cube.write_fits("my_cube.fits", clobber=False, length_unit=(5,"deg"))
+        """
+        if length_unit[1] == "deg":
+            center = sky_center
+            types = ["RA---SIN","DEC--SIN"]
+        else:
+            center = [0.0,0.0]
+            types = ["LINEAR","LINEAR"]
+
+        v_center = 0.5*(self.v_bnd[0]+self.v_bnd[1]).in_units("m/s").value
+
+        dx = length_unit[0]/self.nx
+        dy = length_unit[0]/self.ny
+        dv = (self.v_bnd[1]-self.v_bnd[0]).in_units("m/s").value/self.nv
+
+        if length_unit[1] == "deg":
+            dx *= -1.
+
+        w = ap.pywcs.WCS(naxis=3)
+        w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
+        w.wcs.cdelt = [dx,dy,dv]
+        w.wcs.crval = [center[0], center[1], v_center]
+        w.wcs.cunit = [length_unit[1],length_unit[1],"m/s"]
+        w.wcs.ctype = [types[0],types[1],"VELO-LSR"]
+
+        fib = FITSImageBuffer(self.data.transpose(), fields=self.field, wcs=w)
+        fib[0].header["bunit"] = self.field_units
+        fib[0].header["btype"] = self.field
+
+        fib.writeto(filename, clobber=clobber)

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/ppv_cube/setup.py
--- /dev/null
+++ b/yt/analysis_modules/ppv_cube/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('ppv_cube', parent_package, top_path)
+    #config.add_subpackage("tests")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 83131baac0e26fa102ecf878aa4a2a4905d5fb2f -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -23,4 +23,5 @@
     config.add_subpackage("sunyaev_zeldovich")
     config.add_subpackage("particle_trajectories")
     config.add_subpackage("photon_simulator")
+    config.add_subpackage("ppv_cube")
     return config

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/513b0df9cdaf/
Changeset:   513b0df9cdaf
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-06 15:29:30
Summary:     Merging from mainline, fixing conflict
Affected #:  17 files

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
+  "signature": "sha256:b7541e0167001c6dd74306c8490385ace7bdb0533a829286f0505c0b24c67f16"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -296,6 +296,166 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Round-Trip Conversions to and from AstroPy's Units System"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some examples of converting from AstroPy units to yt:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from astropy import units as u\n",
+      "x = 42.0 * u.meter\n",
+      "y = YTQuantity(x)\n",
+      "y2 = YTQuantity.from_astropy(x) # Another way to create the quantity"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print x, type(x)\n",
+      "print y, type(y)\n",
+      "print y2, type(y2)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = np.random.random(size=10) * u.km/u.s\n",
+      "b = YTArray(a)\n",
+      "b2 = YTArray.from_astropy(a) # Another way to create the quantity"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print a, type(a)\n",
+      "print b, type(b)\n",
+      "print b2, type(b2)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "temp = dd[\"temperature\"]\n",
+      "atemp = temp.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print atemp, type(atemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import kboltz\n",
+      "kb = kboltz.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print kboltz, type(kboltz)\n",
+      "print kb, type(kb)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As a sanity check, you can show that it works round-trip:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "k1 = kboltz.to_astropy()\n",
+      "k2 = YTQuantity(kb)\n",
+      "print k1 == k2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "c = YTArray(a)\n",
+      "d = c.to_astropy()\n",
+      "print a == d"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -390,6 +390,11 @@
 
 These will be used to set the units, if they are specified.
 
+Using yt to view and analyze Tipsy outputs from Gasoline
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+.. notebook:: tipsy_and_yt.ipynb
+
 .. _loading-artio-data:
 
 ARTIO Data

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f doc/source/examining/tipsy_and_yt.ipynb
--- /dev/null
+++ b/doc/source/examining/tipsy_and_yt.ipynb
@@ -0,0 +1,195 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:a80c1b224c121c67e57acfa9183c5660a332a37556a492e230476b424827885f"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 1,
+     "metadata": {},
+     "source": [
+      "Using yt to view and analyze Tipsy outputs from Gasoline"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Loading Files"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Alright, let's start with some basics.  Before we do anything, we will need to load a snapshot.  You can do this using the ```load``` convenience function.  yt will autodetect that you have a tipsy snapshot, and automatically set itself up appropriately."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.mods import *"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We will be looking at a fairly low resolution dataset.  In the next cell, the `ds` object has an atribute called `n_ref` that tells the oct-tree how many particles to refine on.  The default is 64, but we'll get prettier plots (at the expense of a deeper tree) with 8.  Just passing the argument `n_ref=8` to load does this for us."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      ">This dataset is available for download at http://yt-project.org/data/TipsyGalaxy.tar.gz (10 MB)."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = load('TipsyGalaxy/galaxy.00300', n_ref=8)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We now have a `TipsyDataset` object called `ds`.  Let's see what fields it has."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`yt` also defines so-called \"derived\" fields.  These fields are functions of the on-disk fields that live in the `field_list`.  There is a `derived_field_list` attribute attached to the `Dataset` object - let's take look at the derived fields in this dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds.derived_field_list"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "All of the field in the `field_list` are arrays containing the values for the associated particles.  These haven't been smoothed or gridded in any way. We can grab the array-data for these particles using `ds.all_data()`. For example, let's take a look at a temperature-colored scatterplot of the gas particles in this output."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import matplotlib.pyplot as plt"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dd = ds.all_data()\n",
+      "xcoord = dd['Gas','Coordinates'][:,0].v\n",
+      "ycoord = dd['Gas','Coordinates'][:,1].v\n",
+      "logT = np.log10(dd['Gas','Temperature'])\n",
+      "plt.scatter(xcoord, ycoord, c=logT, s=2*logT, marker='o', edgecolor='none', vmin=2, vmax=6)\n",
+      "plt.xlim(-20,20)\n",
+      "plt.ylim(-20,20)\n",
+      "cb = plt.colorbar()\n",
+      "cb.set_label('$\\log_{10}$ Temperature')\n",
+      "plt.gcf().set_size_inches(15,10)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 2,
+     "metadata": {},
+     "source": [
+      "Making Smoothed Images"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`yt` will automatically generate smoothed versions of these fields that you can use to plot.  Let's make a temperature slice and a density projection."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "SlicePlot(ds, 'z', ('gas','density'), width=(40, 'kpc'), center='m')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ProjectionPlot(ds, 'z', ('gas','density'), width=(40, 'kpc'), center='m')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Not only are the values in the tipsy snapshot read and automatically smoothed, the auxiliary files that have physical significance are also smoothed.  Let's look at a slice of Iron mass fraction."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "SlicePlot(ds, 'z', ('gas', 'FeMassFrac'), width=(40, 'kpc'), center='m')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -21,22 +21,19 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from numpy.testing import assert_allclose
 from yt.funcs import *
-from yt.utilities.physical_constants import clight, \
-     cm_per_km, erg_per_keV
+from yt.utilities.physical_constants import clight
 from yt.utilities.cosmology import Cosmology
 from yt.utilities.orientation import Orientation
-from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      communication_system, parallel_root_only, get_mpi_type, \
-     op_names, parallel_capable
+     parallel_capable
 from yt import units
 from yt.units.yt_array import YTQuantity
 import h5py
-from yt.frontends.fits.data_structures import ap
-pyfits = ap.pyfits
-pywcs = ap.pywcs
+from yt.utilities.on_demand_imports import _astropy
+pyfits = _astropy.pyfits
+pywcs = _astropy.pywcs
 
 comm = communication_system.communicators[-1]
 
@@ -471,7 +468,9 @@
                         "but not both!")
         
         if sky_center is None:
-             sky_center = YTArray([30.,45.], "degree")
+            sky_center = YTArray([30.,45.], "degree")
+        else:
+            sky_center = YTArray(sky_center, "degree")
 
         dx = self.photons["dx"].ndarray_view()
         nx = self.parameters["Dimension"]
@@ -897,39 +896,42 @@
         tbhdu = pyfits.new_table(coldefs)
         tbhdu.update_ext_name("EVENTS")
 
-        tbhdu.header.update("MTYPE1", "sky")
-        tbhdu.header.update("MFORM1", "x,y")        
-        tbhdu.header.update("MTYPE2", "EQPOS")
-        tbhdu.header.update("MFORM2", "RA,DEC")
-        tbhdu.header.update("TCTYP2", "RA---TAN")
-        tbhdu.header.update("TCTYP3", "DEC--TAN")
-        tbhdu.header.update("TCRVL2", float(self.parameters["sky_center"][0]))
-        tbhdu.header.update("TCRVL3", float(self.parameters["sky_center"][1]))
-        tbhdu.header.update("TCDLT2", -float(self.parameters["dtheta"]))
-        tbhdu.header.update("TCDLT3", float(self.parameters["dtheta"]))
-        tbhdu.header.update("TCRPX2", self.parameters["pix_center"][0])
-        tbhdu.header.update("TCRPX3", self.parameters["pix_center"][1])
-        tbhdu.header.update("TLMIN2", 0.5)
-        tbhdu.header.update("TLMIN3", 0.5)
-        tbhdu.header.update("TLMAX2", 2.*self.parameters["pix_center"][0]-0.5)
-        tbhdu.header.update("TLMAX3", 2.*self.parameters["pix_center"][1]-0.5)
-        tbhdu.header.update("EXPOSURE", float(self.parameters["ExposureTime"]))
-        tbhdu.header.update("AREA", float(self.parameters["Area"]))
-        tbhdu.header.update("D_A", float(self.parameters["AngularDiameterDistance"]))
-        tbhdu.header.update("REDSHIFT", self.parameters["Redshift"])
-        tbhdu.header.update("HDUVERS", "1.1.0")
-        tbhdu.header.update("RADECSYS", "FK5")
-        tbhdu.header.update("EQUINOX", 2000.0)
+        tbhdu.header["MTYPE1"] = "sky"
+        tbhdu.header["MFORM1"] = "x,y"
+        tbhdu.header["MTYPE2"] = "EQPOS"
+        tbhdu.header["MFORM2"] = "RA,DEC"
+        tbhdu.header["TCTYP2"] = "RA---TAN"
+        tbhdu.header["TCTYP3"] = "DEC--TAN"
+        tbhdu.header["TCRVL2"] = float(self.parameters["sky_center"][0])
+        tbhdu.header["TCRVL3"] = float(self.parameters["sky_center"][1])
+        tbhdu.header["TCDLT2"] = -float(self.parameters["dtheta"])
+        tbhdu.header["TCDLT3"] = float(self.parameters["dtheta"])
+        tbhdu.header["TCRPX2"] = self.parameters["pix_center"][0]
+        tbhdu.header["TCRPX3"] = self.parameters["pix_center"][1]
+        tbhdu.header["TLMIN2"] = 0.5
+        tbhdu.header["TLMIN3"] = 0.5
+        tbhdu.header["TLMAX2"] = 2.*self.parameters["pix_center"][0]-0.5
+        tbhdu.header["TLMAX3"] = 2.*self.parameters["pix_center"][1]-0.5
+        tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+        if isinstance(self.parameters["Area"], basestring):
+            tbhdu.header["AREA"] = self.parameters["Area"]
+        else:
+            tbhdu.header["AREA"] = float(self.parameters["Area"])
+        tbhdu.header["D_A"] = float(self.parameters["AngularDiameterDistance"])
+        tbhdu.header["REDSHIFT"] = self.parameters["Redshift"]
+        tbhdu.header["HDUVERS"] = "1.1.0"
+        tbhdu.header["RADECSYS"] = "FK5"
+        tbhdu.header["EQUINOX"] = 2000.0
         if "RMF" in self.parameters:
-            tbhdu.header.update("RMF", self.parameters["RMF"])
+            tbhdu.header["RMF"] = self.parameters["RMF"]
         if "ARF" in self.parameters:
-            tbhdu.header.update("ARF", self.parameters["ARF"])
+            tbhdu.header["ARF"] = self.parameters["ARF"]
         if "ChannelType" in self.parameters:
-            tbhdu.header.update("CHANTYPE", self.parameters["ChannelType"])
+            tbhdu.header["CHANTYPE"] = self.parameters["ChannelType"]
         if "Telescope" in self.parameters:
-            tbhdu.header.update("TELESCOP", self.parameters["Telescope"])
+            tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
         if "Instrument" in self.parameters:
-            tbhdu.header.update("INSTRUME", self.parameters["Instrument"])
+            tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
             
         tbhdu.writeto(fitsfile, clobber=clobber)
 
@@ -978,15 +980,15 @@
         tbhdu = pyfits.new_table(coldefs)
         tbhdu.update_ext_name("PHLIST")
 
-        tbhdu.header.update("HDUCLASS", "HEASARC/SIMPUT")
-        tbhdu.header.update("HDUCLAS1", "PHOTONS")
-        tbhdu.header.update("HDUVERS", "1.1.0")
-        tbhdu.header.update("EXTVER", 1)
-        tbhdu.header.update("REFRA", 0.0)
-        tbhdu.header.update("REFDEC", 0.0)
-        tbhdu.header.update("TUNIT1", "keV")
-        tbhdu.header.update("TUNIT2", "deg")
-        tbhdu.header.update("TUNIT3", "deg")                
+        tbhdu.header["HDUCLASS"] = "HEASARC/SIMPUT"
+        tbhdu.header["HDUCLAS1"] = "PHOTONS"
+        tbhdu.header["HDUVERS"] = "1.1.0"
+        tbhdu.header["EXTVER"] = 1
+        tbhdu.header["REFRA"] = 0.0
+        tbhdu.header["REFDEC"] = 0.0
+        tbhdu.header["TUNIT1"] = "keV"
+        tbhdu.header["TUNIT2"] = "deg"
+        tbhdu.header["TUNIT3"] = "deg"
 
         phfile = prefix+"_phlist.fits"
 
@@ -1006,17 +1008,17 @@
         wrhdu = pyfits.new_table(coldefs)
         wrhdu.update_ext_name("SRC_CAT")
                                 
-        wrhdu.header.update("HDUCLASS", "HEASARC")
-        wrhdu.header.update("HDUCLAS1", "SIMPUT")
-        wrhdu.header.update("HDUCLAS2", "SRC_CAT")        
-        wrhdu.header.update("HDUVERS", "1.1.0")
-        wrhdu.header.update("RADECSYS", "FK5")
-        wrhdu.header.update("EQUINOX", 2000.0)
-        wrhdu.header.update("TUNIT2", "deg")
-        wrhdu.header.update("TUNIT3", "deg")
-        wrhdu.header.update("TUNIT4", "keV")
-        wrhdu.header.update("TUNIT5", "keV")
-        wrhdu.header.update("TUNIT6", "erg/s/cm**2")
+        wrhdu.header["HDUCLASS"] = "HEASARC"
+        wrhdu.header["HDUCLAS1"] = "SIMPUT"
+        wrhdu.header["HDUCLAS2"] = "SRC_CAT"
+        wrhdu.header["HDUVERS"] = "1.1.0"
+        wrhdu.header["RADECSYS"] = "FK5"
+        wrhdu.header["EQUINOX"] = 2000.0
+        wrhdu.header["TUNIT2"] = "deg"
+        wrhdu.header["TUNIT3"] = "deg"
+        wrhdu.header["TUNIT4"] = "keV"
+        wrhdu.header["TUNIT5"] = "keV"
+        wrhdu.header["TUNIT6"] = "erg/s/cm**2"
 
         simputfile = prefix+"_simput.fits"
                 
@@ -1100,19 +1102,19 @@
         
         hdu = pyfits.PrimaryHDU(H.T)
         
-        hdu.header.update("MTYPE1", "EQPOS")
-        hdu.header.update("MFORM1", "RA,DEC")
-        hdu.header.update("CTYPE1", "RA---TAN")
-        hdu.header.update("CTYPE2", "DEC--TAN")
-        hdu.header.update("CRPIX1", 0.5*(nx+1))
-        hdu.header.update("CRPIX2", 0.5*(nx+1))                
-        hdu.header.update("CRVAL1", float(self.parameters["sky_center"][0]))
-        hdu.header.update("CRVAL2", float(self.parameters["sky_center"][1]))
-        hdu.header.update("CUNIT1", "deg")
-        hdu.header.update("CUNIT2", "deg")
-        hdu.header.update("CDELT1", -float(self.parameters["dtheta"]))
-        hdu.header.update("CDELT2", float(self.parameters["dtheta"]))
-        hdu.header.update("EXPOSURE", float(self.parameters["ExposureTime"]))
+        hdu.header["MTYPE1"] = "EQPOS"
+        hdu.header["MFORM1"] = "RA,DEC"
+        hdu.header["CTYPE1"] = "RA---TAN"
+        hdu.header["CTYPE2"] = "DEC--TAN"
+        hdu.header["CRPIX1"] = 0.5*(nx+1)
+        hdu.header["CRPIX2"] = 0.5*(nx+1)
+        hdu.header["CRVAL1"] = float(self.parameters["sky_center"][0])
+        hdu.header["CRVAL2"] = float(self.parameters["sky_center"][1])
+        hdu.header["CUNIT1"] = "deg"
+        hdu.header["CUNIT2"] = "deg"
+        hdu.header["CDELT1"] = -float(self.parameters["dtheta"])
+        hdu.header["CDELT2"] = float(self.parameters["dtheta"])
+        hdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
         
         hdu.writeto(imagefile, clobber=clobber)
                                     
@@ -1183,41 +1185,41 @@
         tbhdu.update_ext_name("SPECTRUM")
 
         if not energy_bins:
-            tbhdu.header.update("DETCHANS", spec.shape[0])
-            tbhdu.header.update("TOTCTS", spec.sum())
-            tbhdu.header.update("EXPOSURE", float(self.parameters["ExposureTime"]))
-            tbhdu.header.update("LIVETIME", float(self.parameters["ExposureTime"]))
-            tbhdu.header.update("CONTENT", spectype)
-            tbhdu.header.update("HDUCLASS", "OGIP")
-            tbhdu.header.update("HDUCLAS1", "SPECTRUM")
-            tbhdu.header.update("HDUCLAS2", "TOTAL")
-            tbhdu.header.update("HDUCLAS3", "TYPE:I")
-            tbhdu.header.update("HDUCLAS4", "COUNT")
-            tbhdu.header.update("HDUVERS", "1.1.0")
-            tbhdu.header.update("HDUVERS1", "1.1.0")
-            tbhdu.header.update("CHANTYPE", spectype)
-            tbhdu.header.update("BACKFILE", "none")
-            tbhdu.header.update("CORRFILE", "none")
-            tbhdu.header.update("POISSERR", True)
+            tbhdu.header["DETCHANS"] = spec.shape[0]
+            tbhdu.header["TOTCTS"] = spec.sum()
+            tbhdu.header["EXPOSURE"] = float(self.parameters["ExposureTime"])
+            tbhdu.header["LIVETIME"] = float(self.parameters["ExposureTime"])
+            tbhdu.header["CONTENT"] = spectype
+            tbhdu.header["HDUCLASS"] = "OGIP"
+            tbhdu.header["HDUCLAS1"] = "SPECTRUM"
+            tbhdu.header["HDUCLAS2"] = "TOTAL"
+            tbhdu.header["HDUCLAS3"] = "TYPE:I"
+            tbhdu.header["HDUCLAS4"] = "COUNT"
+            tbhdu.header["HDUVERS"] = "1.1.0"
+            tbhdu.header["HDUVERS1"] = "1.1.0"
+            tbhdu.header["CHANTYPE"] = spectype
+            tbhdu.header["BACKFILE"] = "none"
+            tbhdu.header["CORRFILE"] = "none"
+            tbhdu.header["POISSERR"] = True
             if self.parameters.has_key("RMF"):
-                 tbhdu.header.update("RESPFILE", self.parameters["RMF"])
+                 tbhdu.header["RESPFILE"] = self.parameters["RMF"]
             else:
-                 tbhdu.header.update("RESPFILE", "none")
+                 tbhdu.header["RESPFILE"] = "none"
             if self.parameters.has_key("ARF"):
-                tbhdu.header.update("ANCRFILE", self.parameters["ARF"])
+                tbhdu.header["ANCRFILE"] = self.parameters["ARF"]
             else:        
-                tbhdu.header.update("ANCRFILE", "none")
+                tbhdu.header["ANCRFILE"] = "none"
             if self.parameters.has_key("Telescope"):
-                tbhdu.header.update("TELESCOP", self.parameters["Telescope"])
+                tbhdu.header["TELESCOP"] = self.parameters["Telescope"]
             else:
-                tbhdu.header.update("TELESCOP", "none")
+                tbhdu.header["TELESCOP"] = "none"
             if self.parameters.has_key("Instrument"):
-                tbhdu.header.update("INSTRUME", self.parameters["Instrument"])
+                tbhdu.header["INSTRUME"] = self.parameters["Instrument"]
             else:
-                tbhdu.header.update("INSTRUME", "none")
-            tbhdu.header.update("AREASCAL", 1.0)
-            tbhdu.header.update("CORRSCAL", 0.0)
-            tbhdu.header.update("BACKSCAL", 1.0)
+                tbhdu.header["INSTRUME"] = "none"
+            tbhdu.header["AREASCAL"] = 1.0
+            tbhdu.header["CORRSCAL"] = 0.0
+            tbhdu.header["BACKSCAL"] = 1.0
                                 
         hdulist = pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu])
         

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -28,8 +28,8 @@
     from scipy import stats        
 except ImportError:
     pass
-from yt.frontends.fits.data_structures import ap
-pyfits = ap.pyfits
+from yt.utilities.on_demand_imports import _astropy
+pyfits = _astropy.pyfits
 
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/analysis_modules/ppv_cube/ppv_cube.py
--- a/yt/analysis_modules/ppv_cube/ppv_cube.py
+++ b/yt/analysis_modules/ppv_cube/ppv_cube.py
@@ -11,7 +11,7 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-from yt.frontends.fits.data_structures import ap
+from yt.utilities.on_demand_imports import _astropy
 from yt.utilities.orientation import Orientation
 from yt.utilities.fits_image import FITSImageBuffer
 from yt.visualization.volume_rendering.camera import off_axis_projection
@@ -150,7 +150,7 @@
         if length_unit[1] == "deg":
             dx *= -1.
 
-        w = ap.pywcs.WCS(naxis=3)
+        w = _astropy.pywcs.WCS(naxis=3)
         w.wcs.crpix = [0.5*(self.nx+1), 0.5*(self.ny+1), 0.5*(self.nv+1)]
         w.wcs.cdelt = [dx,dy,dv]
         w.wcs.crval = [center[0], center[1], v_center]

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -642,6 +642,15 @@
                 registry = self.unit_registry)
         return self._quan
 
+    def add_field(self, name, function=None, **kwargs):
+        """
+        Dataset-specific call to add_field
+        """
+        self.index
+        self.field_info.add_field(name, function=function, **kwargs)
+        deps, _ = self.field_info.check_derived_fields([name])
+        self.field_dependencies.update(deps)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -39,58 +39,7 @@
     unit_prefixes
 from yt.units import dimensions
 from yt.units.yt_array import YTQuantity
-
-class astropy_imports:
-    _pyfits = None
-    @property
-    def pyfits(self):
-        if self._pyfits is None:
-            try:
-                import astropy.io.fits as pyfits
-                self.log
-            except ImportError:
-                pyfits = None
-            self._pyfits = pyfits
-        return self._pyfits
-
-    _pywcs = None
-    @property
-    def pywcs(self):
-        if self._pywcs is None:
-            try:
-                import astropy.wcs as pywcs
-                self.log
-            except ImportError:
-                pywcs = None
-            self._pywcs = pywcs
-        return self._pywcs
-
-    _log = None
-    @property
-    def log(self):
-        if self._log is None:
-            try:
-                from astropy import log
-                if log.exception_logging_enabled():
-                    log.disable_exception_logging()
-            except ImportError:
-                log = None
-            self._log = log
-        return self._log
-
-    _conv = None
-    @property
-    def conv(self):
-        if self._conv is None:
-            try:
-                import astropy.convolution as conv
-                self.log
-            except ImportError:
-                conv = None
-            self._conv = conv
-        return self._conv
-
-ap = astropy_imports()
+from yt.utilities.on_demand_imports import _astropy
 
 lon_prefixes = ["X","RA","GLON"]
 lat_prefixes = ["Y","DEC","GLAT"]
@@ -145,7 +94,8 @@
             # FITS units always return upper-case, so we need to get
             # the right case by comparing against known units. This
             # only really works for common units.
-            units = re.split(regex_pattern, field_units)
+            units = set(re.split(regex_pattern, field_units))
+            units.remove('')
             n = int(0)
             for unit in units:
                 if unit in known_units:
@@ -262,7 +212,7 @@
                                                        pf.domain_right_edge)])
             dims = np.array(pf.domain_dimensions)
             # If we are creating a dataset of lines, only decompose along the position axes
-            if pf.line_database is not None:
+            if len(pf.line_database) > 0:
                 dims[pf.vel_axis] = 1
             psize = get_psize(dims, pf.nprocs)
             gle, gre, shapes, slices = decompose_array(dims, psize, bbox)
@@ -270,7 +220,7 @@
             self.grid_right_edge = self.pf.arr(gre, "code_length")
             self.grid_dimensions = np.array([shape for shape in shapes], dtype="int32")
             # If we are creating a dataset of lines, only decompose along the position axes
-            if pf.line_database is not None:
+            if len(pf.line_database) > 0:
                 self.grid_left_edge[:,pf.vel_axis] = pf.domain_left_edge[pf.vel_axis]
                 self.grid_right_edge[:,pf.vel_axis] = pf.domain_right_edge[pf.vel_axis]
                 self.grid_dimensions[:,pf.vel_axis] = pf.domain_dimensions[pf.vel_axis]
@@ -373,7 +323,7 @@
         elif isinstance(nan_mask, dict):
             self.nan_mask = nan_mask
         self.nprocs = nprocs
-        self._handle = ap.pyfits.open(self.filenames[0],
+        self._handle = _astropy.pyfits.open(self.filenames[0],
                                       memmap=True,
                                       do_not_scale_image_data=True,
                                       ignore_blank=True)
@@ -384,7 +334,7 @@
                     fn = fits_file
                 else:
                     fn = os.path.join(ytcfg.get("yt","test_data_dir"),fits_file)
-                f = ap.pyfits.open(fn, memmap=True,
+                f = _astropy.pyfits.open(fn, memmap=True,
                                    do_not_scale_image_data=True,
                                    ignore_blank=True)
                 self._fits_files.append(f)
@@ -394,7 +344,7 @@
             self.first_image = 1
             self.primary_header = self._handle[self.first_image].header
             self.naxis = 2
-            self.wcs = ap.pywcs.WCS(naxis=2)
+            self.wcs = _astropy.pywcs.WCS(naxis=2)
             self.events_info = {}
             for k,v in self.primary_header.items():
                 if k.startswith("TTYP"):
@@ -428,7 +378,7 @@
             self.events_data = False
             self.first_image = 0
             self.primary_header = self._handle[self.first_image].header
-            self.wcs = ap.pywcs.WCS(header=self.primary_header)
+            self.wcs = _astropy.pywcs.WCS(header=self.primary_header)
             self.naxis = self.primary_header["naxis"]
             self.axis_names = [self.primary_header["ctype%d" % (i+1)]
                                for i in xrange(self.naxis)]
@@ -531,6 +481,8 @@
                                     32**self.dimensionality).astype("int")
             self.nprocs = max(min(self.nprocs, 512), 1)
 
+        self.reversed = False
+
         # Check to see if this data is in some kind of (Lat,Lon,Vel) format
         self.ppv_data = False
         x = 0
@@ -572,7 +524,7 @@
             self.vel_axis = np.where(self.vel_axis)[0][0]
             self.vel_name = ctypes[self.vel_axis].split("-")[0].lower()
 
-            self.wcs_2d = ap.pywcs.WCS(naxis=2)
+            self.wcs_2d = _astropy.pywcs.WCS(naxis=2)
             self.wcs_2d.wcs.crpix = self.wcs.wcs.crpix[[self.lon_axis, self.lat_axis]]
             self.wcs_2d.wcs.cdelt = self.wcs.wcs.cdelt[[self.lon_axis, self.lat_axis]]
             self.wcs_2d.wcs.crval = self.wcs.wcs.crval[[self.lon_axis, self.lat_axis]]
@@ -591,7 +543,6 @@
                 le = self.dims[self.vel_axis]+0.5
                 re = 0.5
             else:
-                self.reversed = False
                 le = 0.5
                 re = self.dims[self.vel_axis]+0.5
             self.domain_left_edge[self.vel_axis] = (le-x0)*dz + z0
@@ -632,7 +583,7 @@
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = ap.pyfits.open(args[0])
+                fileh = _astropy.pyfits.open(args[0])
             valid = fileh[0].header["naxis"] >= 2
             if len(fileh) > 1 and fileh[1].name == "EVENTS":
                 valid = fileh[1].header["naxis"] >= 2
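
The unit-parsing hunk above switches the re.split result to a set and strips the empty string: re.split leaves '' entries when the pattern matches at the start or end of the unit string, and a set also collapses repeated unit symbols. A small illustration with a simplified stand-in for regex_pattern (the real pattern is built from the known units table):

    import re

    # Simplified stand-in for regex_pattern: split on '*', '/' and digits
    # so that only the unit symbols themselves remain.
    regex_pattern = r"[*/\d]+"

    field_units = "ERG/CM**2"
    units = set(re.split(regex_pattern, field_units))
    # The trailing "**2" match leaves an empty string in the split result.
    units.remove('')
    print(units)   # {'ERG', 'CM'} (set order is arbitrary)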

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/frontends/fits/fields.py
--- a/yt/frontends/fits/fields.py
+++ b/yt/frontends/fits/fields.py
@@ -25,17 +25,17 @@
 
     def _setup_ppv_fields(self):
 
-        def _get_2d_wcs(self, data, axis):
+        def _get_2d_wcs(data, axis):
             w_coords = data.pf.wcs_2d.wcs_pix2world(data["x"], data["y"], 1)
             return w_coords[axis]
 
         def world_f(axis, unit):
             def _world_f(field, data):
-                return data.pf.arr(self._get_2d_wcs(data, axis), unit)
+                return data.pf.arr(_get_2d_wcs(data, axis), unit)
             return _world_f
 
         for (i, axis), name in zip(enumerate([self.pf.lon_axis, self.pf.lat_axis]),
-                             [self.pf.lon_name, self.pf.lat_name]):
+                                             [self.pf.lon_name, self.pf.lat_name]):
             unit = str(self.pf.wcs_2d.wcs.cunit[i])
             if unit.lower() == "deg": unit = "degree"
             if unit.lower() == "rad": unit = "radian"
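
The fields.py change drops a spurious self from the nested helper: _get_2d_wcs is a closure inside _setup_ppv_fields, not a method, so it is defined and called without self. A reduced, self-contained sketch of the same closure-factory pattern (the names and data are toy stand-ins, not yt API):

    def setup_world_fields(scale):
        def _get_coord(data, axis):            # plain closure: no self
            return [scale * v for v in data[axis]]

        def world_f(axis):
            def _world_f(field, data):
                return _get_coord(data, axis)  # called without self
            return _world_f

        return world_f

    world_x = setup_world_fields(2.0)("x")
    print(world_x(None, {"x": [1.0, 3.0]}))    # [2.0, 6.0]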

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/frontends/fits/misc.py
--- a/yt/frontends/fits/misc.py
+++ b/yt/frontends/fits/misc.py
@@ -14,7 +14,7 @@
 from yt.fields.api import add_field
 from yt.fields.derived_field import ValidateSpatial
 from yt.funcs import mylog
-from .data_structures import ap
+from yt.utilities.on_demand_imports import _astropy
 
 def _make_counts(emin, emax):
     def _counts(field, data):
@@ -30,31 +30,34 @@
         else:
             sigma = None
         if sigma is not None and sigma > 0.0:
-            kern = ap.conv.Gaussian2DKernel(stddev=sigma)
-            img[:,:,0] = ap.conv.convolve(img[:,:,0], kern)
+            kern = _astropy.conv.Gaussian2DKernel(stddev=sigma)
+            img[:,:,0] = _astropy.conv.convolve(img[:,:,0], kern)
         return data.pf.arr(img, "counts/pixel")
     return _counts
 
-def setup_counts_fields(ebounds):
+def setup_counts_fields(ds, ebounds, ftype="gas"):
     r"""
     Create deposited image fields from X-ray count data in energy bands.
 
     Parameters
     ----------
+    ds : Dataset
+        The FITS events file dataset to add the counts fields to.
     ebounds : list of tuples
         A list of tuples, one for each field, with (emin, emax) as the
         energy bounds for the image.
 
     Examples
     --------
+    >>> ds = yt.load("evt.fits")
     >>> ebounds = [(0.1,2.0),(2.0,3.0)]
-    >>> setup_counts_fields(ebounds)
+    >>> setup_counts_fields(ds, ebounds)
     """
     for (emin, emax) in ebounds:
         cfunc = _make_counts(emin, emax)
         fname = "counts_%s-%s" % (emin, emax)
         mylog.info("Creating counts field %s." % fname)
-        add_field(("gas",fname), function=cfunc,
-                  units="counts/pixel",
-                  validators = [ValidateSpatial()],
-                  display_name="Counts (%s-%s keV)" % (emin, emax))
\ No newline at end of file
+        ds.add_field((ftype,fname), function=cfunc,
+                     units="counts/pixel",
+                     validators = [ValidateSpatial()],
+                     display_name="Counts (%s-%s keV)" % (emin, emax))
\ No newline at end of file

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/units/tests/test_ytarray.py
--- a/yt/units/tests/test_ytarray.py
+++ b/yt/units/tests/test_ytarray.py
@@ -26,7 +26,7 @@
     unary_operators, binary_operators
 from yt.utilities.exceptions import \
     YTUnitOperationError, YTUfuncUnitError
-from yt.testing import fake_random_pf
+from yt.testing import fake_random_pf, requires_module
 from yt.funcs import fix_length
 import numpy as np
 import copy
@@ -650,3 +650,28 @@
 
     for op in [operator.abs, operator.neg, operator.pos]:
         yield unary_op_registry_comparison, op
+
+@requires_module("astropy")
+def test_astropy():
+    from yt.utilities.on_demand_imports import _astropy
+
+    ap_arr = np.arange(10)*_astropy.units.km/_astropy.units.hr
+    yt_arr = YTArray(np.arange(10), "km/hr")
+    yt_arr2 = YTArray.from_astropy(ap_arr)
+
+    ap_quan = 10.*_astropy.units.Msun**0.5/(_astropy.units.kpc**3)
+    yt_quan = YTQuantity(10.,"sqrt(Msun)/kpc**3")
+    yt_quan2 = YTQuantity.from_astropy(ap_quan)
+
+    yield assert_array_equal, ap_arr, yt_arr.to_astropy()
+    yield assert_array_equal, yt_arr, YTArray(ap_arr)
+    yield assert_array_equal, yt_arr, yt_arr2
+
+    yield assert_equal, ap_quan, yt_quan.to_astropy()
+    yield assert_equal, yt_quan, YTQuantity(ap_quan)
+    yield assert_equal, yt_quan, yt_quan2
+
+    yield assert_array_equal, yt_arr, YTArray(yt_arr.to_astropy())
+    yield assert_equal, yt_quan, YTQuantity(yt_quan.to_astropy())
+
+

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -59,12 +59,12 @@
     "yr":  (sec_per_year, dimensions.time),
 
     # Solar units
-    "Msun": ( mass_sun_grams, dimensions.mass),
-    "msun": ( mass_sun_grams, dimensions.mass),
-    "Rsun": ( cm_per_rsun, dimensions.length),
-    "rsun": ( cm_per_rsun, dimensions.length),
-    "Lsun": ( luminosity_sun_ergs_per_sec, dimensions.power),
-    "Tsun": ( temp_sun_kelvin, dimensions.temperature),
+    "Msun": (mass_sun_grams, dimensions.mass),
+    "msun": (mass_sun_grams, dimensions.mass),
+    "Rsun": (cm_per_rsun, dimensions.length),
+    "rsun": (cm_per_rsun, dimensions.length),
+    "Lsun": (luminosity_sun_ergs_per_sec, dimensions.power),
+    "Tsun": (temp_sun_kelvin, dimensions.temperature),
     "Zsun": (metallicity_sun, dimensions.dimensionless),
     "Mjup": (mass_jupiter_grams, dimensions.mass),
     "Mearth": (mass_earth_grams, dimensions.mass),
@@ -89,6 +89,20 @@
     "angstrom": (cm_per_ang, dimensions.length),
     "Jy": (jansky_cgs, dimensions.specific_flux),
     "counts": (1.0, dimensions.dimensionless),
+
+    # for AstroPy compatibility
+    "solMass": (mass_sun_grams, dimensions.mass),
+    "solRad": (cm_per_rsun, dimensions.length),
+    "solLum": (luminosity_sun_ergs_per_sec, dimensions.power),
+    "dyn": (1.0, dimensions.force),
+    "sr": (1.0, dimensions.solid_angle),
+    "rad": (1.0, dimensions.solid_angle),
+    "deg": (np.pi/180., dimensions.angle),
+    "Fr":  (1.0, dimensions.charge),
+    "G": (1.0, dimensions.magnetic_field),
+    "d": (1.0, dimensions.time),
+    "Angstrom": (cm_per_ang, dimensions.length),
+
 }
 
 # Add LaTeX representations for units with trivial representations.
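
The entries added above let unit strings in AstroPy's spelling ("solMass", "solRad", "solLum", "Angstrom", "d", and so on) parse directly in yt, which the from_astropy conversion introduced below relies on. A quick sketch, assuming yt with these symbols is importable:

    from yt.units.yt_array import YTQuantity

    m = YTQuantity(1.0, "solMass")      # AstroPy-style solar mass
    print(m.in_units("Msun"))           # 1.0 Msun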

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -35,6 +35,8 @@
     YTUnitOperationError, YTUnitConversionError, \
     YTUfuncUnitError
 from numbers import Number as numeric_type
+from yt.utilities.on_demand_imports import _astropy
+from sympy import Rational
 
 # redefine this here to avoid a circular import from yt.funcs
 def iterable(obj):
@@ -249,6 +251,9 @@
                     "Perhaps you meant to do something like this instead: \n"
                     "ds.arr(%s, \"%s\")" % (input_array, input_units)
                     )
+        if _astropy.units is not None:
+            if isinstance(input_array, _astropy.units.quantity.Quantity):
+                return cls.from_astropy(input_array)
         if isinstance(input_array, YTArray):
             if input_units is None:
                 if registry is None:
@@ -423,6 +428,38 @@
 
         """
         return np.array(self)
+
+    @classmethod
+    def from_astropy(cls, arr):
+        """
+        Creates a new YTArray with the same unit information from an
+        AstroPy quantity *arr*.
+        """
+        # Converting from AstroPy Quantity
+        u = arr.unit
+        ap_units = []
+        for base, power in zip(u.bases, u.powers):
+            unit_str = base.to_string()
+            # we have to do this because AstroPy is silly and defines
+            # hour as "h"
+            if unit_str == "h": unit_str = "hr"
+            ap_units.append("%s**(%s)" % (unit_str, Rational(power)))
+        ap_units = "*".join(ap_units)
+        if isinstance(arr.value, np.ndarray):
+            return YTArray(arr.value, ap_units)
+        else:
+            return YTQuantity(arr.value, ap_units)
+
+
+    def to_astropy(self, **kwargs):
+        """
+        Creates a new AstroPy quantity with the same unit information.
+        """
+        if _astropy.units is None:
+            raise ImportError("You don't have AstroPy installed, so you can't convert to " +
+                              "an AstroPy quantity.")
+        return self.value*_astropy.units.Unit(str(self.units), **kwargs)
+
     #
     # End unit conversion methods
     #
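
from_astropy and to_astropy, together with the Quantity check in __new__, give a round trip between AstroPy quantities and YTArray/YTQuantity, with fractional powers handled through sympy.Rational. A short sketch, assuming AstroPy is installed (otherwise to_astropy raises the ImportError shown above):

    import numpy as np
    from astropy import units as u
    from yt.units.yt_array import YTArray, YTQuantity

    ap_arr = np.arange(3) * u.km / u.hr
    yt_arr = YTArray.from_astropy(ap_arr)    # km/hr (AstroPy's 'h' mapped to 'hr')
    back = yt_arr.to_astropy()               # back to an AstroPy Quantity

    ap_q = 10.0 * u.Msun**0.5 / u.kpc**3
    yt_q = YTQuantity.from_astropy(ap_q)     # sqrt(Msun)/kpc**3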

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -14,11 +14,11 @@
 from yt.funcs import mylog, iterable, fix_axis, ensure_list
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
 from yt.data_objects.construction_data_containers import YTCoveringGridBase
-from yt.frontends.fits.data_structures import ap
+from yt.utilities.on_demand_imports import _astropy
 from yt.units.yt_array import YTQuantity
 
-pyfits = ap.pyfits
-pywcs = ap.pywcs
+pyfits = _astropy.pyfits
+pywcs = _astropy.pywcs
 
 class FITSImageBuffer(pyfits.HDUList):
 
@@ -295,7 +295,7 @@
     """
     def __init__(self, ds, axis, fields, coord, **kwargs):
         fields = ensure_list(fields)
-        axis = fix_axis(axis)
+        axis = fix_axis(axis, ds)
         if isinstance(coord, tuple):
             coord = ds.quan(coord[0], coord[1]).in_units("code_length").value
         elif isinstance(coord, YTQuantity):
@@ -303,6 +303,8 @@
         slc = ds.slice(axis, coord, **kwargs)
         w, frb = construct_image(slc)
         super(FITSSlice, self).__init__(frb, fields=fields, wcs=w)
+        for i, field in enumerate(fields):
+            self[i].header["bunit"] = str(frb[field].units)
 
 class FITSProjection(FITSImageBuffer):
     r"""
@@ -321,10 +323,12 @@
     """
     def __init__(self, ds, axis, fields, weight_field=None, **kwargs):
         fields = ensure_list(fields)
-        axis = fix_axis(axis)
+        axis = fix_axis(axis, ds)
         prj = ds.proj(fields[0], axis, weight_field=weight_field, **kwargs)
         w, frb = construct_image(prj)
         super(FITSProjection, self).__init__(frb, fields=fields, wcs=w)
+        for i, field in enumerate(fields):
+            self[i].header["bunit"] = str(frb[field].units)
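
FITSSlice and FITSProjection now stamp each image HDU with a BUNIT keyword recording the field's units. A minimal sketch of creating a slice and reading the keyword back, with a hypothetical dataset and field name:

    import yt
    from yt.utilities.fits_image import FITSSlice

    ds = yt.load("my_data/output_0010")              # hypothetical dataset
    slc = FITSSlice(ds, "z", ["density"], ds.domain_center[2])
    print(slc[0].header["bunit"])                    # e.g. g/cm**3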
 
 
 

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/utilities/on_demand_imports.py
--- /dev/null
+++ b/yt/utilities/on_demand_imports.py
@@ -0,0 +1,74 @@
+"""
+A set of convenient on-demand imports
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            try:
+                import astropy.io.fits as pyfits
+                self.log
+            except ImportError:
+                pyfits = None
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            try:
+                import astropy.wcs as pywcs
+                self.log
+            except ImportError:
+                pywcs = None
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            try:
+                from astropy import log
+                if log.exception_logging_enabled():
+                    log.disable_exception_logging()
+            except ImportError:
+                log = None
+            self._log = log
+        return self._log
+
+    _units = None
+    @property
+    def units(self):
+        if self._units is None:
+            try:
+                from astropy import units
+            except ImportError:
+                units = None
+            self._units = units
+        return self._units
+
+    _conv = None
+    @property
+    def conv(self):
+        if self._conv is None:
+            try:
+                import astropy.convolution as conv
+                self.log
+            except ImportError:
+                conv = None
+            self._conv = conv
+        return self._conv
+
+_astropy = astropy_imports()
\ No newline at end of file
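
Each property in on_demand_imports imports its AstroPy submodule on first access and caches None if AstroPy is missing, so consumers guard with a None check (as YTArray.__new__ and to_astropy do above) instead of wrapping every import in try/except. A hedged sketch of the consumer side, with a hypothetical filename:

    from yt.utilities.on_demand_imports import _astropy

    if _astropy.pyfits is None:
        # AstroPy is not installed; fail with a clear message.
        raise ImportError("AstroPy is required to read FITS files.")

    hdulist = _astropy.pyfits.open("events.fits")    # hypothetical file
    print(hdulist[0].header)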

diff -r 0a584d53213b1881bdcb722c0f3f52ebfea3ef7e -r 513b0df9cdaf57c16feafa25521a00f93fa6d46f yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -19,6 +19,7 @@
 import sys
 import os
 from yt.extern.six.moves import builtins, StringIO
+import warnings
 
 from matplotlib.delaunay.triangulate import Triangulation as triang
 from matplotlib.mathtext import MathTextParser
@@ -519,7 +520,7 @@
             if unit is None:
                 width = (width, 'code_length')
             else:
-                width = (width, unit)
+                width = (width, fix_unitary(unit))
 
         axes_unit = get_axes_unit(width, self.pf)
 
@@ -651,6 +652,14 @@
         self._axes_unit_names = unit_name
         return self
 
+    @property
+    def _frb(self):
+        # Note we use SyntaxWarning because DeprecationWarning is not shown
+        # by default
+        warnings.warn("_frb is deprecated, use frb instead.",
+                      SyntaxWarning)
+        return self.frb
+
 class PWViewerMPL(PlotWindow):
     """Viewer using matplotlib as a backend via the WindowPlotMPL.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


