[yt-svn] commit/yt: 4 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Feb 14 11:56:21 PST 2012


4 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/26e506c79f83/
changeset:   26e506c79f83
branch:      yt
user:        ngoldbaum
date:        2012-02-01 04:53:37
summary:     First stab at open dialog
affected #:  3 files

diff -r 24b60f64c4aab5d88f0d14881164f0c830962ee0 -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -40,6 +40,7 @@
 import imp
 import threading
 import Queue
+import glob
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger, ufstring
@@ -387,6 +388,11 @@
             return {'status': 'FAIL', 'filename': filename,
                     'error': 'Unexpected error.'}
         return {'status': 'SUCCESS', 'filename': filename}
+    
+    @lockit
+    def get_directory_listing(self):
+        filenames = glob.glob('./*')
+        return {'status': 'SUCCESS', 'filenames': filenames}
 
     @lockit
     def paste_session(self):
@@ -746,6 +752,7 @@
         self.execute(funccall, hide = False)
         pf = self.locals['_tpf']
 
+
 class ExtDirectParameterFileList(BottleDirectRouter):
     my_name = "ExtDirectParameterFileList"
     api_url = "pflist"

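A note on the new endpoint: the JavaScript handler in the next hunk sends {filename: text}, but get_directory_listing() as committed takes no argument, so that payload is ignored (or rejected, depending on how the router maps arguments) and the listing always comes from the server's working directory. A minimal sketch of a variant that honors the requested path (base_dir is a hypothetical parameter, not part of this commit):

    import glob
    import os

    def get_directory_listing(base_dir='.'):
        # Hypothetical variant (base_dir is not in the committed code):
        # glob under the directory the client asked for instead of
        # always the server's current working directory.
        filenames = glob.glob(os.path.join(base_dir, '*'))
        return {'status': 'SUCCESS', 'filenames': filenames}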

diff -r 24b60f64c4aab5d88f0d14881164f0c830962ee0 -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 yt/gui/reason/html/js/menu_items.js
--- a/yt/gui/reason/html/js/menu_items.js
+++ b/yt/gui/reason/html/js/menu_items.js
@@ -32,8 +32,29 @@
 var main_menu = {
     text: 'Menu',
     id: 'main_menu',
-    menu: [
-           {xtype:'menuitem', text: 'Open', disabled: true},
+    menu: [//{xtype:'menuitem', text: 'Open', disabled: true},
+           {xtype:'menuitem', text: 'Open', 
+	    handler: function(b,e) {
+		   Ext.Msg.prompt("We have somewhat less important work to do.",
+				  "Enter directory path.",
+		   function(btn, text) {
+		   if (btn == 'ok'){
+		       yt_rpc.ExtDirectREPL.get_directory_listing({filename:text},
+		       function(f, a) {
+			   if (a.result['status'] == 'SUCCESS') {
+			       var alert_text = 'List of files: ' + a.result['filenames']
+			       Ext.Msg.alert('Success! ', alert_text);
+			       var record = new logging_store.recordType({record: alert_text });
+			       logging_store.add(record, number_log_records++);
+			   } else {
+			       Ext.Msg.alert('Always naysaying!',
+					     'Failed to get list of files');
+			   }
+		       });
+		   }
+				  });
+	       }
+	   },
            {xtype:'menuitem', text: 'Open Directory', disabled: true},
            {xtype: 'menuseparator'},
            {xtype:'menuitem', text: 'Save Script',


diff -r 24b60f64c4aab5d88f0d14881164f0c830962ee0 -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 yt/gui/reason/html/js/reason.js
--- a/yt/gui/reason/html/js/reason.js
+++ b/yt/gui/reason/html/js/reason.js
@@ -91,7 +91,7 @@
                     },
                     notifyDrop  : function(ddSource, e, data){
 
-                        var varname = data.node.attributes.objdata.varname;
+			var varname = data.node.attributes.objdata.varname;
                         /* There is possibly a better way to do this, where it's also inserted correctly. */
                         var line = repl_input.get("input_line");
                         line.setValue(line.getValue() + varname);



https://bitbucket.org/yt_analysis/yt/changeset/5a0df3750ca3/
changeset:   5a0df3750ca3
branch:      yt
user:        ngoldbaum
date:        2012-02-14 20:48:48
summary:     Pulling in changes from yt_analysis/yt
affected #:  82 files

diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -283,14 +283,30 @@
     export GETFILE="curl -sSO"
 fi
 
+if type -P sha512sum &> /dev/null
+then
+    echo "Using sha512sum"
+    export SHASUM="sha512sum"
+elif type -P shasum &> /dev/null
+then
+    echo "Using shasum -a 512"
+    export SHASUM="shasum -a 512"
+else
+    echo
+    echo "I am unable to locate any shasum-like utility."
+    echo "ALL FILE INTEGRITY IS NOT VERIFIABLE."
+    echo "THIS IS PROBABLY A BIG DEAL."
+    echo
+    echo "(I'll hang out for a minute for you to consider this.)"
+    sleep 60
+fi
+
 function get_enzotools
 {
     echo "Downloading $1 from yt-project.org"
     [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
-    ( which md5sum &> /dev/null ) || return # return if we don't have md5sum
-    ( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
 ORIG_PWD=`pwd`
@@ -304,6 +320,27 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+# Now we dump all our SHA512 files out.
+
+echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo '2564011f64cd7ea24d49c6103603ced857bcb79a3837032b959005b64f9da226a08c95d920ae59034ca2c5957a45c99949811649de9e5e73cdbb23396e11f756  Forthon-0.8.5.tar.gz' > Forthon-0.8.5.tar.gz.sha512
+echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
+echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
+echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'de73b14727c2a6623c19896d4c034ad0f705bf5ccbb8501c786a9d074cce97a7760db9246ae7da3db47dd2de29a1707a8a0ee17ab41a6d9140f2a7dbf455af0f  ext-3.3.2.zip' > ext-3.3.2.zip.sha512
+echo '6d65dcbb77978d4f4a9711062f11ae9d61133ca086f9207a8c1ecea8807dc9612cc8c3b2428157d2fb00dea8e0958f61e35cce4e07987c80bc808bbda3608a6c  ext-slate-110328.zip' > ext-slate-110328.zip.sha512
+echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
+echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
+echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo '2c883d64886e5d595775dde497f101ff2ecec0786eabcdc69861c20e7d081e67b5e97551194236933b78f1ff7b119fcba0a9ce3aa4851440fc58f84d2094177b  ipython-0.10.tar.gz' > ipython-0.10.tar.gz.sha512
+echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
+echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
+echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
+echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
+echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
+echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+
 # Individual processes
 if [ -z "$HDF5_DIR" ]
 then

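The .sha512 files dumped above use the standard two-column sha512sum format (hex digest, two spaces, filename), which is what ${SHASUM} -c consumes inside get_enzotools. The check amounts to the following minimal Python sketch (verify_sha512 is an illustrative helper, not part of the install script):

    import hashlib

    def verify_sha512(listing_file):
        # Each line is "HEXDIGEST  FILENAME", the sha512sum -c format.
        with open(listing_file) as f:
            for line in f:
                expected, name = line.split(None, 1)
                name = name.strip()
                h = hashlib.sha512()
                with open(name, 'rb') as data:
                    for chunk in iter(lambda: data.read(1 << 20), b''):
                        h.update(chunk)
                if h.hexdigest() != expected:
                    raise ValueError('%s: checksum mismatch' % name)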

diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,6 @@
-import os, os.path, glob
+import os
+import os.path
+import glob
 import sys
 import time
 import subprocess
@@ -9,12 +11,12 @@
 from numpy.distutils import log
 
 DATA_FILES_HTML = glob.glob('yt/gui/reason/html/*.html')
-DATA_FILES_JS   = glob.glob('yt/gui/reason/html/js/*.js')
-DATA_FILES_PNG  = glob.glob('yt/gui/reason/html/images/*.png') \
+DATA_FILES_JS = glob.glob('yt/gui/reason/html/js/*.js')
+DATA_FILES_PNG = glob.glob('yt/gui/reason/html/images/*.png') \
                 + glob.glob('yt/gui/reason/html/images/*.ico')
-DATA_FILES_LL   = glob.glob('yt/gui/reason/html/leaflet/*.js') \
+DATA_FILES_LL = glob.glob('yt/gui/reason/html/leaflet/*.js') \
                 + glob.glob('yt/gui/reason/html/leaflet/*.css')
-DATA_FILES_LLI  = glob.glob('yt/gui/reason/html/leaflet/images/*.png')
+DATA_FILES_LLI = glob.glob('yt/gui/reason/html/leaflet/images/*.png')
 
 # Verify that we have Cython installed
 try:
@@ -59,7 +61,7 @@
         options = Cython.Compiler.Main.CompilationOptions(
             defaults=Cython.Compiler.Main.default_options,
             include_path=extension.include_dirs,
-            language=extension.language, cplus = cplus,
+            language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
                                                    options=options)
@@ -80,7 +82,8 @@
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
     config = Configuration(None, parent_package, top_path)
@@ -88,47 +91,49 @@
                        assume_default_configuration=True,
                        delegate_options_to_subpackages=True,
                        quiet=True)
-    
+
     config.make_config_py()
     #config.make_svn_version_py()
-    config.add_subpackage('yt','yt')
+    config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
     return config
 
+
 def setup_package():
 
     from numpy.distutils.core import setup
 
     setup(
-        name = "yt",
-        version = VERSION,
-        description = "An analysis and visualization toolkit for Astrophysical "
+        name="yt",
+        version=VERSION,
+        description="An analysis and visualization toolkit for Astrophysical "
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
-        classifiers = [ "Development Status :: 5 - Production/Stable",
-                        "Environment :: Console",
-                        "Intended Audience :: Science/Research",
-                        "License :: OSI Approved :: GNU General Public License (GPL)",
-                        "Operating System :: MacOS :: MacOS X",
-                        "Operating System :: POSIX :: AIX",
-                        "Operating System :: POSIX :: Linux",
-                        "Programming Language :: C",
-                        "Programming Language :: Python",
-                        "Topic :: Scientific/Engineering :: Astronomy",
-                        "Topic :: Scientific/Engineering :: Physics",
-                        "Topic :: Scientific/Engineering :: Visualization", ],
-        keywords='astronomy astrophysics visualization amr adaptivemeshrefinement',
-        entry_points = { 'console_scripts' : [
+        classifiers=["Development Status :: 5 - Production/Stable",
+            "Environment :: Console",
+            "Intended Audience :: Science/Research",
+            "License :: OSI Approved :: GNU General Public License (GPL)",
+            "Operating System :: MacOS :: MacOS X",
+            "Operating System :: POSIX :: AIX",
+            "Operating System :: POSIX :: Linux",
+            "Programming Language :: C",
+            "Programming Language :: Python",
+            "Topic :: Scientific/Engineering :: Astronomy",
+            "Topic :: Scientific/Engineering :: Physics",
+            "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' + \
+            'amr adaptivemeshrefinement',
+        entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
                        ]},
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
-        url = "http://yt-project.org/",
+        url="http://yt-project.org/",
         license="GPL-3",
         configuration=configuration,
         zip_safe=False,
-        data_files = [('yt/gui/reason/html/', DATA_FILES_HTML),
+        data_files=[('yt/gui/reason/html/', DATA_FILES_HTML),
                       ('yt/gui/reason/html/js/', DATA_FILES_JS),
                       ('yt/gui/reason/html/images/', DATA_FILES_PNG),
                       ('yt/gui/reason/html/leaflet/', DATA_FILES_LL),


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ b/tests/boolean_regions.py
@@ -15,4 +15,4 @@
 
 create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
 
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")
\ No newline at end of file
+create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ b/tests/fields_to_test.py
@@ -1,9 +1,10 @@
-# We want to test several things.  We need to be able to run the 
+# We want to test several things.  We need to be able to run the
 
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
-                  # Now some derived fields
-                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
-                  # Ghost zones
-                  "AveragedDensity", "DivV"]
+field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
+    "z-velocity",
+    # Now some derived fields
+    "Pressure", "SoundSpeed", "particle_density", "Entropy",
+    # Ghost zones
+    "AveragedDensity", "DivV"]
 
 particle_field_list = ["particle_position_x", "ParticleMassMsun"]


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/halos.py
--- a/tests/halos.py
+++ b/tests/halos.py
@@ -1,7 +1,7 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
 
 create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
 


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ b/tests/hierarchy_consistency.py
@@ -4,50 +4,60 @@
     YTStaticOutputTest, RegressionTestException
 from yt.funcs import ensure_list
 
+
 class HierarchyInconsistent(RegressionTestException):
     pass
 
+
 class HierarchyConsistency(YTStaticOutputTest):
     name = "hierarchy_consistency"
+
     def run(self):
         self.result = \
-            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
-                                            for c in g.Children )
+            all(g in ensure_list(c.Parent) for g in self.pf.h.grids
+                                            for c in g.Children)
 
     def compare(self, old_result):
         if not(old_result and self.result): raise HierarchyInconsistent()
 
+
 class GridLocationsProperties(YTStaticOutputTest):
     name = "level_consistency"
+
     def run(self):
-        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
-                           grid_right_edge = self.pf.h.grid_right_edge,
-                           grid_levels = self.pf.h.grid_levels,
-                           grid_particle_count = self.pf.h.grid_particle_count,
-                           grid_dimensions = self.pf.h.grid_dimensions)
+        self.result = dict(grid_left_edge=self.pf.h.grid_left_edge,
+                           grid_right_edge=self.pf.h.grid_right_edge,
+                           grid_levels=self.pf.h.grid_levels,
+                           grid_particle_count=self.pf.h.grid_particle_count,
+                           grid_dimensions=self.pf.h.grid_dimensions)
 
     def compare(self, old_result):
         # We allow no difference between these values
         self.compare_data_arrays(self.result, old_result, 0.0)
 
+
 class GridRelationshipsChanged(RegressionTestException):
     pass
 
+
 class GridRelationships(YTStaticOutputTest):
 
     name = "grid_relationships"
+
     def run(self):
-        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
-                        for g in self.pf.h.grids ]
+        self.result = [[p.id for p in ensure_list(g.Parent) \
+            if g.Parent is not None]
+            for g in self.pf.h.grids]
 
     def compare(self, old_result):
         if len(old_result) != len(self.result):
             raise GridRelationshipsChanged()
         for plist1, plist2 in zip(old_result, self.result):
             if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
+            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
                 raise GridRelationshipsChanged()
 
+
 class GridGlobalIndices(YTStaticOutputTest):
     name = "global_startindex"
 


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -6,48 +6,57 @@
 from yt.funcs import ensure_list, iterable
 from fields_to_test import field_list, particle_field_list
 
+
 class FieldHashesDontMatch(RegressionTestException):
     pass
 
 known_objects = {}
 
+
 def register_object(func):
     known_objects[func.func_name] = func
     return func
 
+
 @register_object
 def centered_sphere(tobj):
-    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    center = 0.5 * (tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(center, width / 0.25)
+
 
 @register_object
 def off_centered_sphere(tobj):
-    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    center = 0.5 * (tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width / 0.25)
+
 
 @register_object
 def corner_sphere(tobj):
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width / 0.25)
+
 
 @register_object
 def disk(self):
-    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
-    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
-    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
-    normal = na.array([1.]*3)
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge) / 2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max() / 10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max() / 10.
+    normal = na.array([1.] * 3)
     self.data_object = self.pf.h.disk(center, normal, radius, height)
-    
+
+
 @register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
 _new_known_objects = {}
-for field in ["Density"]:#field_list:
+for field in ["Density"]:  # field_list:
     for object_name in known_objects:
+
         def _rfunc(oname, fname):
+
             def func(tobj):
                 known_objects[oname](tobj)
                 tobj.orig_data_object = tobj.data_object
@@ -60,7 +69,9 @@
                 _rfunc(object_name, field)
 known_objects.update(_new_known_objects)
 
+
 class YTFieldValuesTest(YTStaticOutputTest):
+
     def run(self):
         vals = self.data_object[self.field].copy()
         vals.sort()
@@ -73,12 +84,14 @@
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
+
 class YTExtractIsocontoursTest(YTFieldValuesTest):
+
     def run(self):
         val = self.data_object.quantities["WeightedAverageQuantity"](
             "Density", "Density")
         rset = self.data_object.extract_isocontours("Density",
-            val, rescale = False, sample_values = "Temperature")
+            val, rescale=False, sample_values="Temperature")
         self.result = rset
 
     def compare(self, old_result):
@@ -88,7 +101,9 @@
                                  old_result[0].ravel(), 1e-7)
         self.compare_array_delta(self.result[1], old_result[1], 1e-7)
 
+
 class YTIsocontourFluxTest(YTFieldValuesTest):
+
     def run(self):
         val = self.data_object.quantities["WeightedAverageQuantity"](
             "Density", "Density")
@@ -104,13 +119,15 @@
         if "cut_region" in object_name and field in particle_field_list:
             continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field = field, object_name = object_name)
+                    field=field, object_name=object_name)
     create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name = object_name)
+                object_name=object_name)
     create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name = object_name)
-    
+                object_name=object_name)
+
+
 class YTDerivedQuantityTest(YTStaticOutputTest):
+
     def setup(self):
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
@@ -144,9 +161,11 @@
             "TotalMass" in dq):
             continue
         create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name = dq, object_name = object_name)
+                    dq_name=dq, object_name=object_name)
+
 
 class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+
     def run(self):
         self.result = self.data_object.quantities[self.dq_name](
             self.field_name)
@@ -156,10 +175,12 @@
         for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
             create_test(YTDerivedQuantityTestField,
                         "%s_%s" % (object_name, field),
-                        field_name = field, dq_name = dq,
-                        object_name = object_name)
+                        field_name=field, dq_name=dq,
+                        object_name=object_name)
+
 
 class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+
     def run(self):
         self.result = self.data_object.quantities["WeightedAverageQuantity"](
             self.field_name, weight="CellMassMsun")
@@ -168,5 +189,5 @@
     for field in field_list:
         create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
                     "%s_%s" % (object_name, field),
-                    field_name = field, 
-                    object_name = object_name)
+                    field_name=field,
+                    object_name=object_name)

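The _rfunc(oname, fname) indirection in the object_field_values.py hunk above exists because Python closures bind loop variables late: without the extra function call, every generated test function would see the final (object_name, field) pair. A minimal standalone illustration (not from the yt source):

    # Late binding: both closures share the loop variable and see "b".
    funcs = [lambda: name for name in ["a", "b"]]
    print [f() for f in funcs]      # ['b', 'b']

    # Passing the value through a factory call freezes it per iteration,
    # which is exactly what _rfunc does for oname and fname.
    def make(n):
        return lambda: n
    funcs = [make(name) for name in ["a", "b"]]
    print [f() for f in funcs]      # ['a', 'b']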

diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -7,29 +7,29 @@
 from fields_to_test import field_list
 
 for field in field_list:
-    create_test(TestRay, "%s" % field, field = field)
+    create_test(TestRay, "%s" % field, field=field)
 
 for axis in range(3):
     for field in field_list:
         create_test(TestSlice, "%s_%s" % (axis, field),
-                    field = field, axis = axis)
+                    field=field, axis=axis)
 
 for axis in range(3):
     for field in field_list:
         create_test(TestProjection, "%s_%s" % (axis, field),
-                    field = field, axis = axis)
+                    field=field, axis=axis)
         create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field = field, axis = axis, weight_field = "Density")
+                    field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
     create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field = field, axis = axis)
+                field=field, axis=axis)
     create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field = field, axis = axis, weight_field = "Density")
+                field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
     create_test(TestGasDistribution, "density_%s" % field,
-                field_x = "Density", field_y = field)
+                field_x="Density", field_y=field)
     create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                field_x = "Density", field_y = "x-velocity", field_z = field, 
-                weight = "CellMassMsun")
+                field_x="Density", field_y="x-velocity", field_z=field,
+                weight="CellMassMsun")


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,4 +1,5 @@
-import matplotlib; matplotlib.use('Agg')
+import matplotlib
+matplotlib.use('Agg')
 from yt.config import ytcfg
 ytcfg["yt", "loglevel"] = "50"
 ytcfg["yt", "serialize"] = "False"
@@ -29,14 +30,16 @@
 
 cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
 
+
 def load_tests(iname, idir):
     f, filename, desc = imp.find_module(iname, [idir])
     tmod = imp.load_module(iname, f, filename, desc)
     return tmod
 
+
 def find_and_initialize_tests():
     mapping = {}
-    for f in glob.glob(os.path.join(cwd,"*.py")):
+    for f in glob.glob(os.path.join(cwd, "*.py")):
         clear_registry()
         iname = os.path.basename(f[:-3])
         try:
@@ -51,28 +54,28 @@
 if __name__ == "__main__":
     clear_registry()
     mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt","test_storage_dir")
+    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
     try:
         my_hash = get_yt_version()
     except:
         my_hash = "UNKNOWN%s" % (time.time())
     parser = optparse.OptionParser()
     parser.add_option("-f", "--parameter-file", dest="parameter_file",
-                      default=os.path.join(cwd, "DD0010/moving7_0010"),
-                      help="The parameter file value to feed to 'load' to test against")
+        default=os.path.join(cwd, "DD0010/moving7_0010"),
+        help="The parameter file value to feed to 'load' to test against")
     parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-                      default=False, help="List all tests and then exit")
+        default=False, help="List all tests and then exit")
     parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-                      help="The test name pattern to match.  Can include wildcards.")
+        help="The test name pattern to match.  Can include wildcards.")
     parser.add_option("-o", "--output", dest="storage_dir",
-                      default=test_storage_directory,
-                      help="Base directory for storing test output.")
+        default=test_storage_directory,
+        help="Base directory for storing test output.")
     parser.add_option("-c", "--compare", dest="compare_name",
-                      default=None,
-                      help="The name against which we will compare")
+        default=None,
+        help="The name against which we will compare")
     parser.add_option("-n", "--name", dest="this_name",
-                      default=my_hash,
-                      help="The name we'll call this set of tests")
+        default=my_hash,
+        help="The name we'll call this set of tests")
     opts, args = parser.parse_args()
 
     if opts.list_tests:


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ b/tests/volume_rendering.py
@@ -5,34 +5,38 @@
     YTStaticOutputTest, RegressionTestException
 from yt.funcs import ensure_list
 
+
 class VolumeRenderingInconsistent(RegressionTestException):
     pass
 
+
 class VolumeRenderingConsistency(YTStaticOutputTest):
     name = "volume_rendering_consistency"
+
     def run(self):
-        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
-        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        c = (self.pf.domain_right_edge + self.pf.domain_left_edge) / 2.
+        W = na.sqrt(3.) * (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
         N = 512
-        n_contours=5
+        n_contours = 5
         cmap = 'algae'
         field = 'Density'
         mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
         mi, ma = na.log10(mi), na.log10(ma)
-        contour_width=(ma-mi)/100.
-        L = na.array([1.]*3)
-        tf = ColorTransferFunction((mi-2, ma+2))
-        tf.add_layers(n_contours,w=contour_width,
-                      col_bounds = (mi*1.001,ma*0.999), 
-                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
-        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        contour_width = (ma - mi) / 100.
+        L = na.array([1.] * 3)
+        tf = ColorTransferFunction((mi - 2, ma + 2))
+        tf.add_layers(n_contours, w=contour_width,
+                      col_bounds=(mi * 1.001, ma * 0.999),
+                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
+        cam = self.pf.h.camera(c, L, W, (N, N), transfer_function=tf,
+            no_ghost=True)
         image = cam.snapshot()
         # image = cam.snapshot('test_rendering_%s.png'%field)
         self.result = image
 
     def compare(self, old_result):
         # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax( na.abs(self.result - old_result) /
-                                 (self.result + old_result) )
+        delta = na.nanmax(na.abs(self.result - old_result) /
+                                 (self.result + old_result))
         if delta > 1e-9: raise VolumeRenderingInconsistent()
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/absorption_spectrum/__init__.py
--- a/yt/analysis_modules/absorption_spectrum/__init__.py
+++ b/yt/analysis_modules/absorption_spectrum/__init__.py
@@ -22,4 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/coordinate_transformation/setup.py
--- a/yt/analysis_modules/coordinate_transformation/setup.py
+++ b/yt/analysis_modules/coordinate_transformation/setup.py
@@ -1,12 +1,16 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('coordinate_transformation',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('coordinate_transformation',
+        parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_finding/fof/setup.py
--- a/yt/analysis_modules/halo_finding/fof/setup.py
+++ b/yt/analysis_modules/halo_finding/fof/setup.py
@@ -1,16 +1,16 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('fof',parent_package,top_path)
-    config.add_extension("EnzoFOF", sources=
-                                    ["EnzoFOF.c",
+    config = Configuration('fof', parent_package, top_path)
+    config.add_extension("EnzoFOF", sources=["EnzoFOF.c",
                                      "kd.c"],
                                     libraries=["m"])
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -41,6 +41,7 @@
 from yt.utilities.performance_counters import \
     yt_counters, time_function
 from yt.utilities.math_utils import periodic_dist
+from yt.utilities.physical_constants import rho_crit_now, mass_sun_cgs
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -57,12 +58,13 @@
 
 TINY = 1.e-40
 
+
 class Halo(object):
     """
     A data source that returns particle information about the members of a
     HOP-identified halo.
     """
-    __metaclass__ = ParallelDummy # This will proxy up our methods
+    __metaclass__ = ParallelDummy  # This will proxy up our methods
     _distributed = False
     _processing = False
     _owner = 0
@@ -70,9 +72,9 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
-    def __init__(self, halo_list, id, indices = None, size=None, CoM=None,
-        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
-        tasks=None, rms_vel=None):
+    def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
+        max_dens_point=None, group_total_mass=None, max_radius=None,
+        bulk_vel=None, tasks=None, rms_vel=None):
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -98,18 +100,18 @@
         r"""Calculate and return the center of mass.
 
         The center of mass of the halo is directly calculated and returned.
-        
+
         Examples
         --------
         >>> com = halos[0].center_of_mass()
         """
-        c_vec = self.maximum_density_location() - na.array([0.5,0.5,0.5])
+        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
         pm = self["ParticleMassMsun"]
         cx = (self["particle_position_x"] - c_vec[0])
         cy = (self["particle_position_y"] - c_vec[1])
         cz = (self["particle_position_z"] - c_vec[2])
-        com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
-        return (com*pm).sum(axis=1)/pm.sum() + c_vec
+        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -127,7 +129,7 @@
         r"""Return the location HOP identified as maximally dense. Not
         applicable to FOF halos.
 
-        Return the location HOP identified as maximally dense.  
+        Return the location HOP identified as maximally dense.
 
         Examples
         --------
@@ -140,7 +142,7 @@
 
     def total_mass(self):
         r"""Returns the total mass in solar masses of the halo.
-        
+
         Returns the total mass in solar masses of just the particles in the
         halo.
 
@@ -155,7 +157,7 @@
 
         This calculates and returns the mass-weighted average velocity of just
         the particles in the halo in cm/s.
-        
+
         Examples
         --------
         >>> bv = halos[0].bulk_velocity()
@@ -164,7 +166,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx,vy,vz])/pm.sum()
+        return na.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -173,7 +175,7 @@
         Calculate and return the mass-weighted RMS velocity for just the
         particles in the halo.  The bulk velocity of the halo is subtracted
         before computation.
-        
+
         Examples
         --------
         >>> rms_vel = halos[0].rms_velocity()
@@ -181,10 +183,10 @@
         bv = self.bulk_velocity()
         pm = self["ParticleMassMsun"]
         sm = pm.sum()
-        vx = (self["particle_velocity_x"] - bv[0]) * pm/sm
-        vy = (self["particle_velocity_y"] - bv[1]) * pm/sm
-        vz = (self["particle_velocity_z"] - bv[2]) * pm/sm
-        s = vx**2. + vy**2. + vz**2.
+        vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
+        vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
+        vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
+        s = vx ** 2. + vy ** 2. + vz ** 2.
         ms = na.mean(s)
         return na.sqrt(ms) * pm.size
 
@@ -195,32 +197,35 @@
 
         The maximum radius from the most dense point is calculated.  This
         accounts for periodicity.
-        
+
         Parameters
         ----------
         center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Examples
         --------
         >>> radius = halos[0].maximum_radius()
         """
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"]-center[0])
-        ry = na.abs(self["particle_position_y"]-center[1])
-        rz = na.abs(self["particle_position_z"]-center[2])
+        if center_of_mass:
+            center = self.center_of_mass()
+        else:
+            center = self.maximum_density_location()
+        rx = na.abs(self["particle_position_x"] - center[0])
+        ry = na.abs(self["particle_position_y"] - center[1])
+        rz = na.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0]-rx)**2.0
-                +   na.minimum(ry, DW[1]-ry)**2.0
-                +   na.minimum(rz, DW[2]-rz)**2.0)
+        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
+                + na.minimum(ry, DW[1] - ry) ** 2.0
+                + na.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
-        if ytcfg.getboolean("yt","inline") == False:
+        if ytcfg.getboolean("yt", "inline") == False:
             return self.data[key][self.indices]
         else:
             return self.data[key][self.indices]
@@ -231,15 +236,16 @@
         This will generate a new, empty sphere source centered on this halo,
         with the maximum radius of the halo. This can be used like any other
         data container in yt.
-        
+
         Parameters
         ----------
         center_of_mass : bool, optional
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Returns
         -------
         sphere : `yt.data_objects.api.AMRSphereBase`
@@ -249,8 +255,10 @@
         --------
         >>> sp = halos[0].get_sphere()
         """
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
+        if center_of_mass:
+            center = self.center_of_mass()
+        else:
+            center = self.maximum_density_location()
         radius = self.maximum_radius()
         # A bit of a long-reach here...
         sphere = self.data.hierarchy.sphere(
@@ -276,12 +284,13 @@
         self._processing = False
 
     def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo in Msun, using only the particles
-        in the halo (no baryonic information used). 
+        r"""Return the virial mass of the halo in Msun,
+        using only the particles
+        in the halo (no baryonic information used).
 
         The virial mass is calculated, using the built in `Halo.virial_info`
         functionality.  The mass is then returned.
-        
+
         Parameters
         ----------
         virial_overdensity : float
@@ -296,22 +305,22 @@
         mass : float
             The virial mass in solar masses of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vm = halos[0].virial_mass()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.mass_bins[vir_bin]
         else:
             return -1
-        
-    
+
     def virial_radius(self, virial_overdensity=200., bins=300):
         r"""Return the virial radius of the halo in code units.
-        
+
         The virial radius of the halo is calculated, using only the particles
         in the halo (no baryonic information used). Returns -1 if the halo is
         not virialized.
@@ -330,13 +339,14 @@
         radius : float
             The virial radius in code units of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vr = halos[0].virial_radius()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.radial_bins[vir_bin]
         else:
@@ -350,11 +360,11 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins+1)[over])
+            vir_bin = max(na.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
-    
+
     def virial_info(self, bins=300):
         r"""Calculates the virial information for the halo. Generally, it is
         better to call virial_radius or virial_mass instead, which calls this
@@ -372,11 +382,11 @@
             self.pf.domain_left_edge
         cm = self.pf["cm"]
         thissize = max(self.size, self.indices.size)
-        rho_crit_now = 1.8788e-29 * h**2.0 * Om_matter # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + z)**3.0)
+        rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
+        Msun2g = mass_sun_cgs
+        rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count+1, dtype='float64')
+        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
         dist = na.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
@@ -389,30 +399,32 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist)*.99 + TINY), 
-            math.log10(max(dist)*1.01 + 2*TINY), num=self.bin_count+1)
+        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+            math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
         inds = na.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
             for index in na.unique(inds):
-                self.mass_bins[index] += sum(self["ParticleMassMsun"][inds==index])
+                self.mass_bins[index] += \
+                na.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
-            self.mass_bins[i+1] += self.mass_bins[i]
+            self.mass_bins[i + 1] += self.mass_bins[i]
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
-        (4./3. * math.pi * rho_crit * \
-        (self.radial_bins * cm)**3.0)
-        
+        (4. / 3. * math.pi * rho_crit * \
+        (self.radial_bins * cm) ** 3.0)
+
 
 class HOPHalo(Halo):
     pass
 
-class parallelHOPHalo(Halo,ParallelAnalysisInterface):
-    dont_wrap = ["maximum_density","maximum_density_location",
-        "center_of_mass","total_mass","bulk_velocity","maximum_radius",
-        "get_size","get_sphere", "write_particle_list","__getitem__", 
+
+class parallelHOPHalo(Halo, ParallelAnalysisInterface):
+    dont_wrap = ["maximum_density", "maximum_density_location",
+        "center_of_mass", "total_mass", "bulk_velocity", "maximum_radius",
+        "get_size", "get_sphere", "write_particle_list", "__getitem__",
         "virial_info", "virial_bin", "virial_mass", "virial_radius",
         "rms_velocity"]
 
@@ -432,7 +444,7 @@
 
     def maximum_density_location(self):
         r"""Return the location HOP identified as maximally dense.
-        
+
         Return the location HOP identified as maximally dense.
 
         Examples
@@ -449,7 +461,7 @@
                 self._max_dens[self.id][2],
                 self._max_dens[self.id][3]])
         else:
-            value = na.array([0,0,0])
+            value = na.array([0, 0, 0])
         # This works, and isn't appropriate but for now will be fine...
         value = self.comm.mpi_allreduce(value, op='sum')
         return value
@@ -458,7 +470,7 @@
         r"""Calculate and return the center of mass.
 
         The center of mass of the halo is directly calculated and returned.
-        
+
         Examples
         --------
         >>> com = halos[0].center_of_mass()
@@ -468,25 +480,25 @@
             return self.CoM
         # This need to be called by all tasks, but not all will end up using
         # it.
-        c_vec = self.maximum_density_location() - na.array([0.5,0.5,0.5])
+        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
         if self.indices is not None:
             pm = self["ParticleMassMsun"]
             cx = (self["particle_position_x"] - c_vec[0])
             cy = (self["particle_position_y"] - c_vec[1])
             cz = (self["particle_position_z"] - c_vec[2])
-            com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
+            com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
             my_mass = pm.sum()
-            my_com = ((com*pm).sum(axis=1)/my_mass + c_vec) * my_mass
+            my_com = ((com * pm).sum(axis=1) / my_mass + c_vec) * my_mass
         else:
             my_mass = 0.
-            my_com = na.array([0.,0.,0.])
+            my_com = na.array([0., 0., 0.])
         global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
         global_com = self.comm.mpi_allreduce(my_com, op='sum')
         return global_com / global_mass
 
     def total_mass(self):
         r"""Returns the total mass in solar masses of the halo.
-        
+
         Returns the total mass in solar masses of just the particles in the
         halo.
 
@@ -508,7 +520,7 @@
 
         This calculates and returns the mass-weighted average velocity of just
         the particles in the halo in cm/s.
-        
+
         Examples
         --------
         >>> bv = halos[0].bulk_velocity()
@@ -528,9 +540,9 @@
             vx = 0.
             vy = 0.
             vz = 0.
-        bv = na.array([vx,vy,vz,pm])
+        bv = na.array([vx, vy, vz, pm])
         global_bv = self.comm.mpi_allreduce(bv, op='sum')
-        return global_bv[:3]/global_bv[3]
+        return global_bv[:3] / global_bv[3]
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -539,7 +551,7 @@
         Calculate and return the mass-weighted RMS velocity for just the
         particles in the halo.  The bulk velocity of the halo is subtracted
         before computation.
-        
+
         Examples
         --------
         >>> rms_vel = halos[0].rms_velocity()
@@ -550,15 +562,15 @@
         pm = self["ParticleMassMsun"]
         sm = pm.sum()
         if self.indices is not None:
-            vx = (self["particle_velocity_x"] - bv[0]) * pm/sm
-            vy = (self["particle_velocity_y"] - bv[1]) * pm/sm
-            vz = (self["particle_velocity_z"] - bv[2]) * pm/sm
-            s = vx**2 + vy**2 + vz**2
+            vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
+            vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
+            vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
+            s = vx ** 2 + vy ** 2 + vz ** 2
             s = na.sum(s)
             size = vx.size
             ss = na.array([s, float(size)])
         else:
-            ss = na.array([0.,0.])
+            ss = na.array([0., 0.])
         global_ss = self.comm.mpi_allreduce(ss, op='sum')
         ms = global_ss[0] / global_ss[1]
         return na.sqrt(ms) * global_ss[1]
@@ -570,33 +582,36 @@
 
         The maximum radius from the most dense point is calculated.  This
         accounts for periodicity.
-        
+
         Parameters
         ----------
         center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Examples
         --------
         >>> radius = halos[0].maximum_radius()
         """
         if self.max_radius is not None:
             return self.max_radius
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
+        if center_of_mass:
+            center = self.center_of_mass()
+        else:
+            center = self.maximum_density_location()
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
         if self.indices is not None:
-            rx = na.abs(self["particle_position_x"]-center[0])
-            ry = na.abs(self["particle_position_y"]-center[1])
-            rz = na.abs(self["particle_position_z"]-center[2])
-            r = na.sqrt(na.minimum(rx, DW[0]-rx)**2.0
-                    +   na.minimum(ry, DW[1]-ry)**2.0
-                    +   na.minimum(rz, DW[2]-rz)**2.0)
+            rx = na.abs(self["particle_position_x"] - center[0])
+            ry = na.abs(self["particle_position_y"] - center[1])
+            rz = na.abs(self["particle_position_z"] - center[2])
+            r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
+                    + na.minimum(ry, DW[1] - ry) ** 2.0
+                    + na.minimum(rz, DW[2] - rz) ** 2.0)
             my_max = r.max()
-            
+
         else:
             my_max = 0.
         return self.comm.mpi_allreduce(my_max, op='max')
@@ -612,18 +627,19 @@
         return global_size
 
     def __getitem__(self, key):
-        if ytcfg.getboolean("yt","inline") == False:
+        if ytcfg.getboolean("yt", "inline") == False:
             return self.data[key][self.indices]
         else:
             return self.data[key][self.indices]
 
     def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo in Msun, using only the particles
-        in the halo (no baryonic information used). 
+        r"""Return the virial mass of the halo
+        in Msun, using only the particles
+        in the halo (no baryonic information used).
 
         The virial mass is calculated, using the built in `Halo.virial_info`
         functionality.  The mass is then returned.
-        
+
         Parameters
         ----------
         virial_overdensity : float
@@ -638,22 +654,22 @@
         mass : float
             The virial mass in solar masses of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vm = halos[0].virial_mass()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.mass_bins[vir_bin]
         else:
             return -1
-        
-    
+
     def virial_radius(self, virial_overdensity=200., bins=300):
         r"""Return the virial radius of the halo in code units.
-        
+
         The virial radius of the halo is calculated, using only the particles
         in the halo (no baryonic information used). Returns -1 if the halo is
         not virialized.
@@ -672,13 +688,14 @@
         radius : float
             The virial radius in code units of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vr = halos[0].virial_radius()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.radial_bins[vir_bin]
         else:
@@ -692,7 +709,7 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins+1)[over])
+            vir_bin = max(na.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
@@ -709,23 +726,24 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count+1, dtype='float64')
+        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
         Om_matter = self.data.pf.omega_matter
         z = self.data.pf.current_redshift
-        rho_crit_now = 1.8788e-29 * h**2.0 * Om_matter # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + z)**3.0)
+        rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
+        Msun2g = mass_sun_cgs
+        rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
             dist = na.empty(self.indices.size, dtype='float64')
             mark = 0
-            # Find the distances to the particles. I don't like this much, but I
+            # Find the distances to the particles.
+            # I don't like this much, but I
             # can't see a way to eliminate a loop like this, either here or in
-            # yt.math.
+            # yt.math_utils.
             for pos in itertools.izip(self["particle_position_x"],
                     self["particle_position_y"], self["particle_position_z"]):
                 dist[mark] = periodic_dist(cen, pos, period)
@@ -741,23 +759,24 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
-            math.log10(dist_max*1.01 + 2*TINY), num=self.bin_count+1)
+        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+            math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
             inds = na.digitize(dist, self.radial_bins) - 1
             for index in na.unique(inds):
-                self.mass_bins[index] += sum(self["ParticleMassMsun"][inds==index])
+                self.mass_bins[index] += \
+                    na.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
-                self.mass_bins[i+1] += self.mass_bins[i]
+                self.mass_bins[i + 1] += self.mass_bins[i]
         # Sum up the mass_bins globally
         self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
-        (4./3. * math.pi * rho_crit * \
-        (self.radial_bins * self.data.pf["cm"])**3.0)
+        (4. / 3. * math.pi * rho_crit * \
+        (self.radial_bins * self.data.pf["cm"]) ** 3.0)
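
The overdensity above is just enclosed mass over the critical density at z
times the bin volume; a stand-alone arithmetic check with made-up,
cluster-scale numbers (none taken from a real dataset):

    import math
    rho_crit_now = 1.8788e-29     # g cm^-3 for h = 1, as in the old inline value
    mass_sun_cgs = 1.989e33       # g
    h, Om_matter, z = 0.7, 0.27, 0.0
    rho_crit = rho_crit_now * h ** 2.0 * Om_matter * (1.0 + z) ** 3.0
    M_enc = 1.0e14                # hypothetical enclosed mass, Msun
    r_cm = 3.086e24               # hypothetical radius, 1 Mpc in cm
    overdensity = M_enc * mass_sun_cgs / \
        (4. / 3. * math.pi * rho_crit * r_cm ** 3.0)
    # ~650, comfortably above the default virial_overdensity of 200
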
 
 
 class FOFHalo(Halo):
@@ -766,7 +785,7 @@
         r"""Calculate and return the center of mass.
 
         The center of mass of the halo is directly calculated and returned.
-        
+
         Examples
         --------
         >>> com = halos[0].center_of_mass()
@@ -775,12 +794,12 @@
         cx = self["particle_position_x"]
         cy = self["particle_position_y"]
         cz = self["particle_position_z"]
-        c_vec = na.array([cx[0],cy[0],cz[0]]) - na.array([0.5,0.5,0.5])
+        c_vec = na.array([cx[0], cy[0], cz[0]]) - na.array([0.5, 0.5, 0.5])
         cx = cx - c_vec[0]
         cy = cy - c_vec[1]
         cz = cz - c_vec[2]
-        com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
-        com = (pm * com).sum(axis=1)/pm.sum() + c_vec
+        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        com = (pm * com).sum(axis=1) / pm.sum() + c_vec
         return com
 
     def maximum_density(self):
@@ -791,9 +810,11 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
+
 class LoadedHalo(Halo):
     def __init__(self, pf, id, size=None, CoM=None,
-        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        max_dens_point=None, group_total_mass=None,
+        max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None):
         self.pf = pf
         self.id = id
@@ -811,7 +832,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([]) # Never used for a LoadedHalo.
+        self.indices = na.array([])  # Never used for a LoadedHalo.
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -861,8 +882,9 @@
         f = h5py.File(fnames[0], 'r')
         fields = f["Halo%08d" % halo].keys()
         # If we don't have this field, we can give up right now.
-        if field not in fields: return None
-        if field == 'particle_index' or field == 'particle_type':
+        if field not in fields:
+            return None
+        elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
             field_data = na.empty(size, dtype='int64')
         else:
@@ -877,26 +899,26 @@
             f = h5py.File(fname, 'r')
             this = f["Halo%08d" % halo][field][:]
             s = this.size
-            field_data[offset:offset+s] = this
+            field_data[offset:offset + s] = this
             offset += s
             f.close()
             del f
         return field_data
-        
+
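
The file loop above stitches one halo's field across several per-task HDF5
files; the same pattern in isolation (the function name and its arguments
are hypothetical, mirroring the method above):

    import h5py
    import numpy as na  # this module aliases numpy as `na`

    def read_halo_field(fnames, halo, field, size):
        field_data = na.empty(size, dtype='float64')
        offset = 0
        for fname in fnames:
            f = h5py.File(fname, 'r')
            this = f["Halo%08d" % halo][field][:]
            field_data[offset:offset + this.size] = this
            offset += this.size
            f.close()
        return field_data
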
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
 
         The center of mass of the halo is directly calculated and returned.
-        
+
         Examples
         --------
         >>> com = halos[0].center_of_mass()
         """
         return self.CoM
-    
+
     def maximum_density_location(self):
         r"""Return the location HOP identified as maximally dense.
-        
+
         Return the location HOP identified as maximally dense.
 
         Examples
@@ -918,7 +940,7 @@
 
     def total_mass(self):
         r"""Returns the total mass in solar masses of the halo.
-        
+
         Returns the total mass in solar masses of just the particles in the
         halo.
 
@@ -933,7 +955,7 @@
 
         This calculates and returns the mass-weighted average velocity of just
         the particles in the halo in cm/s.
-        
+
         Examples
         --------
         >>> bv = halos[0].bulk_velocity()
@@ -947,7 +969,7 @@
         Calculate and return the mass-weighted RMS velocity for just the
         particles in the halo.  The bulk velocity of the halo is subtracted
         before computation.
-        
+
         Examples
         --------
         >>> rms_vel = halos[0].rms_velocity()
@@ -961,15 +983,16 @@
 
         The maximum radius from the most dense point is calculated.  This
         accounts for periodicity.
-        
+
         Parameters
         ----------
         center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Examples
         --------
         >>> radius = halos[0].maximum_radius()
@@ -982,15 +1005,16 @@
         This will generate a new, empty sphere source centered on this halo,
         with the maximum radius of the halo. This can be used like any other
         data container in yt.
-        
+
         Parameters
         ----------
         center_of_mass : bool, optional
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Returns
         -------
         sphere : `yt.data_objects.api.AMRSphereBase`
@@ -1004,11 +1028,12 @@
         r = self.maximum_radius()
         return self.pf.h.sphere(cen, r)
 
+
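
A short sketch of using the returned sphere as an ordinary data container;
"CellMassMsun" is an illustrative standard-field name, not one this module
defines:

    sp = halos[0].get_sphere(center_of_mass=True)
    baryon_mass = sp["CellMassMsun"].sum()  # any ordinary yt field works here
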
 class HaloList(object):
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only = True):
+    def __init__(self, data_source, dm_only=True):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1025,15 +1050,19 @@
         mylog.debug("Finished. (%s)", len(self))
 
     def __obtain_particles(self):
-        if self.dm_only: ii = self._get_dm_indices()
-        else: ii = slice(None)
+        if self.dm_only:
+            ii = self._get_dm_indices()
+        else:
+            ii = slice(None)
         self.particle_fields = {}
         for field in self._fields:
             tot_part = self._data_source[field].size
             if field == "particle_index":
-                self.particle_fields[field] = self._data_source[field][ii].astype('int64')
+                self.particle_fields[field] = \
+                    self._data_source[field][ii].astype('int64')
             else:
-                self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+                self.particle_fields[field] = \
+                    self._data_source[field][ii].astype('float64')
             del self._data_source[field]
         self._base_indices = na.arange(tot_part)[ii]
         gc.collect()
@@ -1048,44 +1077,46 @@
         else:
             mylog.warning("No particle_type, no creation_time, so not distinguishing.")
             return slice(None)
-    
 
     def _parse_output(self):
         unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags+1)
+        counts = na.bincount(self.tags + 1)
         sort_indices = na.argsort(self.tags)
         grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
-            cp_c = cp + counts[i+1]
+            cp_c = cp + counts[i + 1]
             if i == -1:
-                cp += counts[i+1]
+                cp += counts[i + 1]
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
             md_i = na.argmax(dens[cp:cp_c])
-            px, py, pz = [self.particle_fields['particle_position_%s'%ax][group_indices]
+            px, py, pz = \
+                [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
-            self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i], py[md_i], pz[md_i])
-            cp += counts[i+1]
+            self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i],
+                py[md_i], pz[md_i])
+            cp += counts[i + 1]
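
The bincount/argsort bookkeeping in _parse_output is easiest to see on a toy
tag array; a stand-alone sketch:

    import numpy as na
    tags = na.array([1, -1, 0, 1, 0, -1, 1])  # -1 marks unassigned particles
    counts = na.bincount(tags + 1)            # [2, 2, 3]: unassigned, group 0, group 1
    sort_indices = na.argsort(tags)
    grab_indices = na.indices(tags.shape).ravel()[sort_indices]
    # after the sort, each group's particle indices occupy one contiguous
    # slice of grab_indices, which is what the cp/cp_c offsets walk
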
 
     def __len__(self):
         return len(self._groups)
- 
+
     def __iter__(self):
-        for i in self._groups: yield i
+        for i in self._groups:
+            yield i
 
     def __getitem__(self, key):
         return self._groups[key]
 
     def nearest_neighbors_3D(self, haloID, num_neighbors=7, search_radius=.2):
         r"""For a halo its nearest neighbors in 3D using the kd tree.
-        
+
         This will calculate the nearest neighbors of a halo, using the kD tree.
         Returns a list of the neighbors' distances and IDs with format
         [distance,haloID].
-        
+
         Parameters
         ----------
         haloID : integer
@@ -1094,7 +1125,7 @@
             How many neighbors to search for. Default = 7.
         search_radius : float
             How far away to look for neighbors in code units. Default = 0.2.
-        
+
         Examples
         --------
         >>> neighbors = halos.nearest_neighbors_3D(0)
@@ -1108,19 +1139,20 @@
             p.haloID = group.id
             dataset.append(p)
         mylog.info('Building kd tree...')
-        kd = buildKdHyperRectTree(dataset[:],2*num_neighbors)
+        kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
         # make the neighbors object
         neighbors = Neighbors()
         neighbors.k = num_neighbors
         neighbors.points = []
         neighbors.minDistanceSquared = search_radius * search_radius
         mylog.info('Finding nearest neighbors...')
-        getKNN(self[haloID].center_of_mass().tolist(), kd, neighbors,0., period.tolist())
+        getKNN(self[haloID].center_of_mass().tolist(), kd, neighbors, 0.,
+            period.tolist())
         # convert the data in order to return something less perverse than a
         # Neighbors object, also root the distances
         n_points = []
         for n in neighbors.points:
-            n_points.append([math.sqrt(n[0]),n[1].haloID])
+            n_points.append([math.sqrt(n[0]), n[1].haloID])
         return n_points
 
     def nearest_neighbors_2D(self, haloID, num_neighbors=7, search_radius=.2,
@@ -1130,7 +1162,7 @@
         This will strip a dimension from consideration in the kD-tree, and then
         calculate all the nearest projected neighbors of a halo.  Returns a
         list of the neighbors' distances and IDs with format [distance,haloID].
-        
+
         Parameters
         ----------
         haloID : int
@@ -1142,13 +1174,14 @@
         proj_dim : int
             Which dimension (0, 1, or 2) to project the halos into 2D.
             Default = 0.
-        
+
         Examples
         --------
         >>> neighbors = halos.nearest_neighbors_2D(0)
         """
-        # Set up a vector to multiply other vectors by to project along proj_dim
-        vec = na.array([1.,1.,1.])
+        # Set up a vector to multiply other
+        # vectors by to project along proj_dim
+        vec = na.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1161,7 +1194,7 @@
             p.haloID = group.id
             dataset.append(p)
         mylog.info('Building kd tree...')
-        kd = buildKdHyperRectTree(dataset[:],2*num_neighbors)
+        kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
         # make the neighbors object
         neighbors = Neighbors()
         neighbors.k = num_neighbors
@@ -1169,22 +1202,22 @@
         neighbors.minDistanceSquared = search_radius * search_radius
         mylog.info('Finding nearest neighbors...')
         cm = self[haloID].center_of_mass() * vec
-        getKNN(cm.tolist(), kd, neighbors,0., period.tolist())
+        getKNN(cm.tolist(), kd, neighbors, 0., period.tolist())
         # convert the data in order to return something less perverse than a
         # Neighbors object, also root the distances
         n_points = []
         for n in neighbors.points:
-            n_points.append([math.sqrt(n[0]),n[1].haloID])
+            n_points.append([math.sqrt(n[0]), n[1].haloID])
         return n_points
 
     def write_out(self, filename):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1192,18 +1225,19 @@
         if hasattr(filename, 'write'):
             f = filename
         else:
-            f = open(filename,"w")
+            f = open(filename, "w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
-        f.write("\t".join(["# Group","Mass","# part","max dens"
-                           "x","y","z", "center-of-mass",
-                           "x","y","z",
-                           "vx","vy","vz","max_r","rms_v","\n"]))
+        f.write("\t".join(["# Group", "Mass", "# part", "max dens"
+                           "x", "y", "z", "center-of-mass",
+                           "x", "y", "z",
+                           "vx", "vy", "vz", "max_r", "rms_v", "\n"]))
         for group in self:
             f.write("%10i\t" % group.id)
             f.write("%0.9e\t" % group.total_mass())
             f.write("%10i\t" % group.get_size())
             f.write("%0.9e\t" % group.maximum_density())
-            f.write("\t".join(["%0.9e" % v for v in group.maximum_density_location()]))
+            f.write("\t".join(["%0.9e" % v for v in \
+                group.maximum_density_location()]))
             f.write("\t")
             f.write("\t".join(["%0.9e" % v for v in group.center_of_mass()]))
             f.write("\t")
@@ -1218,12 +1252,12 @@
     def write_particle_lists_txt(self, prefix, fp=None):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file. Needed in particular for parallel analysis output.
-        
+
         Parameters
         ----------
         prefix : String
             The prefix for the name of the file.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
@@ -1231,14 +1265,15 @@
         if hasattr(fp, 'write'):
             f = fp
         else:
-            f = open("%s.txt" % prefix,"w")
+            f = open("%s.txt" % prefix, "w")
         for group in self:
             if group.tasks is not None:
                 fn = ""
                 for task in group.tasks:
                     fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
             elif self._distributed:
-                fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
+                fn = "%s.h5" % self.comm.get_filename(prefix,
+                    rank=group._owner)
             else:
                 fn = "%s.h5" % self.comm.get_filename(prefix)
             gn = "Halo%08i" % (group.id)
@@ -1246,6 +1281,7 @@
             f.flush()
         f.close()
 
+
 class HOPHaloList(HaloList):
 
     _name = "HOP"
@@ -1275,18 +1311,19 @@
 
     def write_out(self, filename="HopAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "HopAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class FOFHaloList(HaloList):
     _name = "FOF"
     _halo_class = FOFHalo
@@ -1298,38 +1335,39 @@
 
     def _run_finder(self):
         self.tags = \
-            RunFOF(self.particle_fields["particle_position_x"] / self.period[0],
-                   self.particle_fields["particle_position_y"] / self.period[1],
-                   self.particle_fields["particle_position_z"] / self.period[2],
-                   self.link)
+        RunFOF(self.particle_fields["particle_position_x"] / self.period[0],
+               self.particle_fields["particle_position_y"] / self.period[1],
+               self.particle_fields["particle_position_z"] / self.period[2],
+               self.link)
         self.densities = na.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
     def write_out(self, filename="FOFAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "FOFAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("FOFAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class LoadedHaloList(HaloList):
     _name = "Loaded"
-    
+
     def __init__(self, pf, basename):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._groups = []
         self.basename = basename
         self._retrieve_halos()
-    
+
     def _retrieve_halos(self):
         # First get the halo particulars.
         lines = file("%s.out" % self.basename)
@@ -1356,7 +1394,7 @@
                 max_dens_point, group_total_mass, max_radius, bulk_vel,
                 rms_vel, fnames))
             halo += 1
-    
+
     def _collect_halo_data_locations(self):
         # The halos are listed in order in the file.
         lines = file("%s.txt" % self.basename)
@@ -1375,7 +1413,8 @@
         lines.close()
         return locations
 
-class parallelHOPHaloList(HaloList,ParallelAnalysisInterface):
+
+class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
@@ -1383,7 +1422,7 @@
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
-        tree = 'F'):
+        tree='F'):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1397,7 +1436,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.]*3)
+        self.period = na.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1430,7 +1469,7 @@
             self.num_neighbors, self.bounds,
             self.particle_fields,
             self.threshold, rearrange=self.rearrange, premerge=self.premerge,
-            tree = self.tree)
+            tree=self.tree)
         self.densities, self.tags = obj.density, obj.chainID
         # I'm going to go ahead and delete self.densities because it's not
         # actually being used. I'm not going to remove it altogether because
@@ -1447,7 +1486,7 @@
         self.max_radius = obj.max_radius
         for dd in range(3):
             self.CoM[:, dd] *= self.old_period[dd]
-            self.max_dens_point[:, dd+1] *= self.old_period[dd]
+            self.max_dens_point[:, dd + 1] *= self.old_period[dd]
         # This is wrong, below, with uneven boundaries. We'll cross that bridge
         # when we get there.
         self.max_radius *= self.old_period[0]
@@ -1469,9 +1508,9 @@
         if calc:
             vel = na.empty((calc, 3), dtype='float64')
             ms = pm[select]
-            vel[:,0] = xv[select] * ms
-            vel[:,1] = yv[select] * ms
-            vel[:,2] = zv[select] * ms
+            vel[:, 0] = xv[select] * ms
+            vel[:, 1] = yv[select] * ms
+            vel[:, 2] = zv[select] * ms
             subchain = self.tags[select]
             sort = subchain.argsort()
             vel = vel[sort]
@@ -1482,31 +1521,32 @@
             marks = na.arange(calc)[marks] + 1
             marks = na.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i+1]], axis=0)
+                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
         self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
-            self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
+            self.bulk_vel[groupID] = \
+                self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count,2), dtype='float64')
+        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
         if calc:
             vel = na.empty((calc, 3), dtype='float64')
-            vel[:,0] = xv[select] * ms
-            vel[:,1] = yv[select] * ms
-            vel[:,2] = zv[select] * ms
+            vel[:, 0] = xv[select] * ms
+            vel[:, 1] = yv[select] * ms
+            vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i+1]] - \
-                    self.bulk_vel[u]) / self.Tot_M[u])**2.)
+                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                    self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
-                rms_vel_temp[u][1] = marks[i+1] - marks[i]
+                rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
@@ -1519,7 +1559,7 @@
         del rms_vel_temp
         yt_counters("rms vel computing")
         self.taskID = obj.mine
-        self.halo_taskmap = obj.halo_taskmap # A defaultdict.
+        self.halo_taskmap = obj.halo_taskmap  # A defaultdict.
         del obj
         gc.collect()
         yt_counters("Precomp bulk vel.")
@@ -1530,7 +1570,7 @@
         Each task will make an entry for all groups, but it may be empty.
         """
         unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags+1).tolist())
+        counts = na.bincount((self.tags + 1).tolist())
         sort_indices = na.argsort(self.tags)
         grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
@@ -1544,23 +1584,27 @@
             return
         for i in unique_ids:
             if i == -1:
-                cp += counts[i+1]
+                cp += counts[i + 1]
                 continue
-            # If there is a gap in the unique_ids, make empty groups to 
+            # If there is a gap in the unique_ids, make empty groups to
             # fill it in.
             while index < i:
                 self._groups[index] = self._halo_class(self, index, \
                     size=self.group_sizes[index], CoM=self.CoM[index], \
                     max_dens_point=self.max_dens_point[index], \
-                    group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
-                    bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
+                    group_total_mass=self.Tot_M[index],
+                    max_radius=self.max_radius[index],
+                    bulk_vel=self.bulk_vel[index],
+                    tasks=self.halo_taskmap[index],
                     rms_vel=self.rms_vel[index])
                 # I don't own this halo
                 self.comm.do_not_claim_object(self._groups[index])
-                self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
-                    self.max_dens_point[index][2], self.max_dens_point[index][3]]
+                self._max_dens[index] = [self.max_dens_point[index][0],
+                    self.max_dens_point[index][1], \
+                    self.max_dens_point[index][2],
+                    self.max_dens_point[index][3]]
                 index += 1
-            cp_c = cp + counts[i+1]
+            cp_c = cp + counts[i + 1]
             group_indices = grab_indices[cp:cp_c]
             self._groups[index] = self._halo_class(self, i, group_indices, \
                 size=self.group_sizes[i], CoM=self.CoM[i], \
@@ -1570,20 +1614,23 @@
                 rms_vel=self.rms_vel[i])
             # This halo may be owned by many, including this task
             self.comm.claim_object(self._groups[index])
-            self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
+            self._max_dens[index] = [self.max_dens_point[i][0],
+                self.max_dens_point[i][1], \
                 self.max_dens_point[i][2], self.max_dens_point[i][3]]
-            cp += counts[i+1]
+            cp += counts[i + 1]
             index += 1
         # If there are missing groups at the end, add them.
         while index < self.group_count:
             self._groups[index] = self._halo_class(self, index, \
                 size=self.group_sizes[index], CoM=self.CoM[index], \
                 max_dens_point=self.max_dens_point[i], \
-                group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
+                group_total_mass=self.Tot_M[index],
+                max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[index])
             self.comm.do_not_claim_object(self._groups[index])
-            self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
+            self._max_dens[index] = [self.max_dens_point[index][0],
+                self.max_dens_point[index][1], \
                 self.max_dens_point[index][2], self.max_dens_point[index][3]]
             index += 1
         # Clean up
@@ -1600,28 +1647,31 @@
 
     def write_out(self, filename="parallelHopAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
             Default = "parallelHopAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("parallelHopAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0
+        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
-        groups, max_dens, hi  = [], {}, 0
+        groups = []
+        max_dens = {}
+        hi = 0
         LE, RE = self.bounds
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
@@ -1629,15 +1679,17 @@
             if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
-                # We need to mock up the HOPHaloList thingie, so we need to set:
-                #     self._max_dens
-                max_dens_temp = list(self._max_dens[halo.id])[0] / threshold_adjustment
-                max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
+                # We need to mock up the HOPHaloList thingie, so we need to
+                #     set self._max_dens
+                max_dens_temp = list(self._max_dens[halo.id])[0] / \
+                    threshold_adjustment
+                max_dens[hi] = [max_dens_temp] + \
+                    list(self._max_dens[halo.id])[1:4]
                 groups.append(self._halo_class(self, hi))
                 groups[-1].indices = halo.indices
                 self.comm.claim_object(groups[-1])
                 hi += 1
-        del self._groups, self._max_dens # explicit >> implicit
+        del self._groups, self._max_dens  # explicit >> implicit
         self._groups = groups
         self._max_dens = max_dens
 
@@ -1651,10 +1703,11 @@
         mine, halo_info = self.comm.mpi_info_dict(len(self))
         nhalos = sum(halo_info.values())
         # Figure out our offset
-        my_first_id = sum([v for k,v in halo_info.items() if k < mine])
+        my_first_id = sum([v for k, v in halo_info.items() if k < mine])
         # Fix our max_dens
         max_dens = {}
-        for i,m in self._max_dens.items(): max_dens[i+my_first_id] = m
+        for i, m in self._max_dens.items():
+            max_dens[i + my_first_id] = m
         self._max_dens = max_dens
         for halo in self._groups:
             halo._max_dens = self._max_dens
@@ -1668,17 +1721,18 @@
                        [self._halo_class(self, i) for i in range(after, nhalos)]
         id = 0
         for proc in sorted(halo_info.keys()):
-            for halo in self._groups[id:id+halo_info[proc]]:
+            for halo in self._groups[id:id + halo_info[proc]]:
                 halo.id = id
                 halo._distributed = self._distributed
                 halo._owner = proc
                 id += 1
-        def haloCmp(h1,h2):
-            c = cmp(h1.total_mass(),h2.total_mass())
+
+        def haloCmp(h1, h2):
+            c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
-                return cmp(h1.center_of_mass()[0],h2.center_of_mass()[0])
+                return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
         self._groups.sort(haloCmp)
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
@@ -1688,25 +1742,25 @@
         self._max_dens = sorted_max_dens
         for i, halo in enumerate(self._groups):
             halo._max_dens = self._max_dens
-        
+
     def _reposition_particles(self, bounds):
         # This only does periodicity.  We do NOT want to deal with anything
-        # else.  The only reason we even do periodicity is the 
+        # else.  The only reason we even do periodicity is the
         LE, RE = bounds
         dw = self.pf.domain_right_edge - self.pf.domain_left_edge
         for i, ax in enumerate('xyz'):
             arr = self._data_source["particle_position_%s" % ax]
-            arr[arr < LE[i]-self.padding] += dw[i]
-            arr[arr > RE[i]+self.padding] -= dw[i]
+            arr[arr < LE[i] - self.padding] += dw[i]
+            arr[arr > RE[i] + self.padding] -= dw[i]
 
     def write_out(self, filename):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1725,7 +1779,7 @@
         ----------
         prefix : String
             The prefix for the name of the file.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
@@ -1743,12 +1797,12 @@
         is run in parallel, halos will only be written out on the processors to
         which they belong.  See `Halo.write_particle_lists_txt` for how to
         track these halos globally across files.
-        
+
         Parameters
         ----------
         prefix : String
             The prefix for the name(s) of the HDF5 files.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists("halo-parts")
@@ -1762,22 +1816,22 @@
 
     def dump(self, basename="HopAnalysis"):
         r"""Save the full halo data to disk.
-        
+
         This function will save the halo data in such a manner that it can be
         easily re-loaded later using `GenericHaloFinder.load`.
         This is similar in concept to
         pickling the data, but outputs the data in the already-established
         data formats. The simple halo data is written to a text file
-        (e.g. "HopAnalysis.out") using
-        write_out(), and the particle data to hdf5 files (e.g. "HopAnalysis.h5")
+        (e.g. "HopAnalysis.out") using write_out(), and the particle data
+        to hdf5 files (e.g. "HopAnalysis.h5")
         using write_particle_lists().
-        
+
         Parameters
         ----------
         basename : String
-            The base name for the files the data will be written to. Default = 
+            The base name for the files the data will be written to. Default =
             "HopAnalysis".
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
@@ -1786,29 +1840,30 @@
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
 
+
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
-    def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
+    def __init__(self, pf, subvolume=None, threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
         fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
-        total_mass=None, num_particles=None, tree = 'F'):
+        total_mass=None, num_particles=None, tree='F'):
         r"""Parallel HOP halo finder.
-        
+
         Halos are built by:
         1. Calculating a density for each particle based on a smoothing kernel.
         2. Recursively linking particles to other particles from lower density
         particles to higher.
         3. Geometrically proximate chains are identified and
         4. merged into final halos following merging rules.
-        
+
         Lower thresholds generally produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         This is very similar to HOP, but it does not produce precisely the
         same halos due to unavoidable numerical differences.
-        
+
         Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
         Cosmological Data Sets." arXiv (2010) 1001.3411
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -1863,7 +1918,7 @@
             fancy_padding.
             Default = None, which means the number of particles is
             automatically calculated.
-        
+
         Examples
         --------
         >>> pf = load("RedshiftOutput0000")
@@ -1892,8 +1947,9 @@
         yt_counters("Reading Data")
         # Adaptive subregions by bisection. We do not load balance if we are
         # analyzing a subvolume.
-        ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
-        if ytcfg.getboolean("yt","inline") == False and \
+        ds_names = ["particle_position_x", "particle_position_y",
+            "particle_position_z"]
+        if ytcfg.getboolean("yt", "inline") == False and \
             resize and self.comm.size != 1 and subvolume is None:
             random.seed(self.comm.rank)
             cut_list = self.partition_hierarchy_3d_bisection_list()
@@ -1901,16 +1957,18 @@
             self.bucket_bounds = []
             if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
-            self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
+            self.bucket_bounds = \
+                self.comm.mpi_bcast_pickled(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
+            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
+            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+                LE, RE)
         # get the average spacing between particles for this region
-        # The except is for the serial case, where the full box is what we want.
+        # The except is for the serial case where the full box is what we want.
         if num_particles is None:
             data = self._data_source["particle_position_x"]
         try:
@@ -1920,33 +1978,42 @@
         vol = l[0] * l[1] * l[2]
         full_vol = vol
         # We will use symmetric padding when a subvolume is being used.
-        if not fancy_padding or subvolume is not None or num_particles is not None:
+        if not fancy_padding or subvolume is not None or \
+                num_particles is not None:
             if num_particles is None:
                 num_particles = data.size
-            avg_spacing = (float(vol) / num_particles)**(1./3.)
+            avg_spacing = (float(vol) / num_particles) ** (1. / 3.)
             # padding is a function of inter-particle spacing, this is an
             # approximation, but it's OK with the safety factor
-            padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
-            self.padding = (na.ones(3,dtype='float64')*padding, na.ones(3,dtype='float64')*padding)
+            padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
+                avg_spacing
+            self.padding = (na.ones(3, dtype='float64') * padding,
+                na.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding, RE_padding = na.empty(3,dtype='float64'), na.empty(3,dtype='float64')
-            avg_spacing = (float(vol) / data.size)**(1./3.)
-            base_padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+            LE_padding = na.empty(3, dtype='float64')
+            RE_padding = na.empty(3, dtype='float64')
+            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
+                avg_spacing
             for dim in xrange(3):
-                if ytcfg.getboolean("yt","inline") == False:
+                if ytcfg.getboolean("yt", "inline") == False:
                     data = self._data_source[ds_names[dim]]
                 else:
                     data = self._data_source[ds_names[dim]]
                 num_bins = 1000
-                width = self._data_source.right_edge[dim] - self._data_source.left_edge[dim]
-                area = (self._data_source.right_edge[(dim+1)%3] - self._data_source.left_edge[(dim+1)%3]) * \
-                    (self._data_source.right_edge[(dim+2)%3] - self._data_source.left_edge[(dim+2)%3])
+                width = self._data_source.right_edge[dim] - \
+                    self._data_source.left_edge[dim]
+                area = (self._data_source.right_edge[(dim + 1) % 3] - \
+                    self._data_source.left_edge[(dim + 1) % 3]) * \
+                    (self._data_source.right_edge[(dim + 2) % 3] - \
+                    self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins+1, dtype='float64') * bin_width + self._data_source.left_edge[dim]
+                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                    self._data_source.left_edge[dim]
                 counts, bins = na.histogram(data, bins)
                 # left side.
                 start = 0
@@ -1955,32 +2022,37 @@
                     start += 1
                     count += counts[start]
                 # Get the avg spacing in just this boundary.
-                vol = area * (bins[start+1] - bins[0])
-                avg_spacing = (float(vol) / count)**(1./3.)
-                LE_padding[dim] = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+                vol = area * (bins[start + 1] - bins[0])
+                avg_spacing = (float(vol) / count) ** (1. / 3.)
+                LE_padding[dim] = (self.num_neighbors) ** (1. / 3.) * \
+                    self.safety * avg_spacing
                 # right side.
                 start = -1
                 count = counts[-1]
                 while count < self.num_neighbors:
                     start -= 1
                     count += counts[start]
-                vol = area * (bins[-1] - bins[start-1])
-                avg_spacing = (float(vol) / count)**(1./3.)
-                RE_padding[dim] = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+                vol = area * (bins[-1] - bins[start - 1])
+                avg_spacing = (float(vol) / count) ** (1. / 3.)
+                RE_padding[dim] = (self.num_neighbors) ** (1. / 3.) * \
+                    self.safety * avg_spacing
             self.padding = (LE_padding, RE_padding)
             del bins, counts
             mylog.info('fancy_padding %s avg_spacing %f full_vol %f local_parts %d %s' % \
-                (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
+                (str(self.padding), avg_spacing, full_vol,
+                data.size, str(self._data_source)))
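
Both padding branches use the same estimate,
padding = num_neighbors ** (1./3.) * safety * avg_spacing; the fancy branch
just recomputes avg_spacing per boundary. A worked check with made-up
numbers:

    num_neighbors, safety = 64, 1.5
    vol, num_particles = 1.0, 128 ** 3
    avg_spacing = (float(vol) / num_particles) ** (1. / 3.)      # 1/128
    padding = num_neighbors ** (1. / 3.) * safety * avg_spacing  # ~0.047
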
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
+            self.padding = (na.zeros(3, dtype='float64'),
+                na.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+                ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -1989,8 +2061,8 @@
         (LE_padding, RE_padding) = self.padding
         parallelHOPHaloList.__init__(self, self._data_source, self.padding, \
         self.num_neighbors, self.bounds, total_mass, period, \
-        threshold=threshold, dm_only=dm_only, rearrange=rearrange, premerge=premerge,
-        tree = self.tree)
+        threshold=threshold, dm_only=dm_only, rearrange=rearrange,
+            premerge=premerge, tree=self.tree)
         self._join_halolists()
         yt_counters("Final Grouping")
 
@@ -2003,19 +2075,19 @@
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self.comm.size)
+        adjust = float(local_parts) / (float(n_parts) / self.comm.size)
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')
         uni = na.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
-        my_points[:,0] = xp[uni]
+        my_points[:, 0] = xp[uni]
         del xp
         self._data_source.clear_data()
-        my_points[:,1] = self._data_source["particle_position_y"][uni]
+        my_points[:, 1] = self._data_source["particle_position_y"][uni]
         self._data_source.clear_data()
-        my_points[:,2] = self._data_source["particle_position_z"][uni]
+        my_points[:, 2] = self._data_source["particle_position_z"][uni]
         self._data_source.clear_data()
         del uni
         # Collect them on the root task.
@@ -2023,10 +2095,10 @@
         if mine == 0:
             tot_random = sum(sizes.values())
             root_points = na.empty((tot_random, 3), dtype='float64')
-            root_points.shape = (1, 3*tot_random)
+            root_points.shape = (1, 3 * tot_random)
         else:
             root_points = na.empty([])
-        my_points.shape = (1, n_random*3)
+        my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
         del my_points
@@ -2040,31 +2112,34 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins+1, dtype='float64') * bin_width + bounds[0][dim]
-        counts, bins = na.histogram(points[:,dim], bins)
+        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+            bounds[0][dim]
+        counts, bins = na.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
         bin = 0
-        for step in xrange(1,cut_list[level][1]):
-            while sum < ((parts*step)/cut_list[level][1]):
+        for step in xrange(1, cut_list[level][1]):
+            while sum < ((parts * step) / cut_list[level][1]):
                 lastsum = sum
                 sum += counts[bin]
                 bin += 1
             # Bin edges
-            left_edge = bins[bin-1]
+            left_edge = bins[bin - 1]
             right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
+            # Find a better approx of the midpoint cut
+            # line using a linear approx.
             a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
+            midpoints.append(left_edge + (0.5 - \
+                (float(lastsum) / parts / 2)) / a)
         midpoints.append(bounds[1][dim])
 
         # Split the points & update the bounds.
         subpoints = []
         subbounds = []
-        for pair in zip(midpoints[:-1],midpoints[1:]):
-            select = na.bitwise_and(points[:,dim] >= pair[0],
-                points[:,dim] < pair[1])
+        for pair in zip(midpoints[:-1], midpoints[1:]):
+            select = na.bitwise_and(points[:, dim] >= pair[0],
+                points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
             nb[0][dim] = pair[0]
@@ -2076,7 +2151,7 @@
             if level == maxlevel:
                 self.bucket_bounds.append(pair[1])
             else:
-                self._recursive_divide(pair[0], pair[1], level+1, cut_list)
+                self._recursive_divide(pair[0], pair[1], level + 1, cut_list)
 
     def _join_halolists(self):
         if self.group_count == 0:
@@ -2084,7 +2159,7 @@
             return
         ms = -self.Tot_M.copy()
         del self.Tot_M
-        Cx = self.CoM[:,0].copy()
+        Cx = self.CoM[:, 0].copy()
         sorted = na.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
@@ -2098,20 +2173,20 @@
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
             padding=0.02, total_mass=None):
         r"""HOP halo finder.
-        
+
         Halos are built by:
         1. Calculating a density for each particle based on a smoothing kernel.
         2. Recursively linking particles to other particles from lower density
         particles to higher.
         3. Geometrically proximate chains are identified and
         4. merged into final halos following merging rules.
-        
+
         Lower thresholds generally produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
         Simulations." ApJ (1998) vol. 498 pp. 137-142
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -2142,7 +2217,6 @@
             Default = None, which means the total mass is automatically
             calculated.
 
-        
         Examples
         --------
         >>> pf = load("RedshiftOutput0000")
@@ -2153,13 +2227,15 @@
             ds_RE = na.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
-        GenericHaloFinder.__init__(self, pf, self._data_source, dm_only, padding)
+        GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
+            padding)
         # do it once with no padding so the total_mass is correct
         # (no duplicated particles), and on the entire volume, even if only
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.partition_hierarchy_3d(ds=self._data_source,
+                padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
         if total_mass is None:
             if dm_only:
@@ -2173,10 +2249,10 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
-        self.padding = padding #* pf["unitary"] # This should be clevererer
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+        self.padding = padding  # * pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self.partition_hierarchy_3d(ds = self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
         # reflect particles around the periodic boundary
@@ -2185,26 +2261,29 @@
             select = self._get_dm_indices()
             sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
         else:
-            sub_mass = self._data_source["ParticleMassMsun"].sum(dtype='float64')
+            sub_mass = \
+                self._data_source["ParticleMassMsun"].sum(dtype='float64')
         HOPHaloList.__init__(self, self._data_source,
-            threshold*total_mass/sub_mass, dm_only)
-        self._parse_halolist(total_mass/sub_mass)
+            threshold * total_mass / sub_mass, dm_only)
+        self._parse_halolist(total_mass / sub_mass)
         self._join_halolists()
 
+
 class FOFHaloFinder(GenericHaloFinder, FOFHaloList):
-    def __init__(self, pf, subvolume=None, link=0.2, dm_only=True, padding=0.02):
+    def __init__(self, pf, subvolume=None, link=0.2, dm_only=True,
+        padding=0.02):
         r"""Friends-of-friends halo finder.
-        
+
         Halos are found by linking together all pairs of particles closer than
         some distance from each other. Particles may have multiple links,
         and halos are found by recursively linking together all such pairs.
-        
+
         Larger linking lengths produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         Davis et al. "The evolution of large-scale structure in a universe
         dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -2217,7 +2296,7 @@
             If positive, the interparticle distance (compared to the overall
             average) used to build the halos. If negative, this is taken to be
             the *actual* linking length, and no other calculations will be
-            applied.  Default = 0.2.  
+            applied.  Default = 0.2.
         dm_only : bool
             If True, only dark matter particles are used when building halos.
             Default = True.
@@ -2226,7 +2305,7 @@
             with duplicated particles for halo finding to work. This number
             must be no smaller than the radius of the largest halo in the box
             in code units. Default = 0.02.
-        
+
         Examples
         --------
         >>> pf = load("RedshiftOutput0000")
@@ -2241,7 +2320,7 @@
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding)
-        self.padding = 0.0 #* pf["unitary"] # This should be clevererer
+        self.padding = 0.0  # * pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2254,13 +2333,14 @@
             # Because we are now allowing for datasets with non 1-periodicity,
             # but symmetric, vol is always 1.
             vol = 1.
-            avg_spacing = (float(vol) / n_parts)**(1./3.)
+            avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
             linking_length = na.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+                ds_RE)
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
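
The linking-length rule above, checked by hand: a positive link scales the
mean interparticle spacing, while a negative link is used verbatim (its
absolute value). Illustrative particle count:

    n_parts = 128 ** 3
    avg_spacing = (1. / n_parts) ** (1. / 3.)  # = 1/128
    linking_length = 0.2 * avg_spacing         # ~0.0016 code units
    # equivalently: FOFHaloFinder(pf, link=-0.0015625) skips the scaling
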
@@ -2275,24 +2355,25 @@
 
 HaloFinder = HOPHaloFinder
 
+
 class LoadHaloes(GenericHaloFinder, LoadedHaloList):
     def __init__(self, pf, basename):
         r"""Load the full halo data into memory.
-        
+
         This function takes the output of `GenericHaloFinder.dump` and
         re-establishes the list of halos in memory. This enables the full set
         of halo analysis features without running the halo finder again. To
         be precise, the particle data for each halo is only read in when
         necessary, so examining a single halo will not require as much memory
         as is required for halo finding.
-        
+
         Parameters
         ----------
         basename : String
             The base name of the files that will be read in. This should match
             what was used when `GenericHaloFinder.dump` was called. Default =
             "HopAnalysis".
-        
+
         Examples
         --------
         >>> pf = load("data0005")
@@ -2300,6 +2381,3 @@
         """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
-
-
-        
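
The handling of `link` above deserves a brief gloss: a positive value is a
fraction of the mean interparticle spacing, while a negative value is taken as
the linking length itself. A minimal sketch of that logic, with `n_parts` and
`vol` as hypothetical stand-ins for the values computed in __init__:

    n_parts = 128 ** 3       # total particle count across all processors
    vol = 1.0                # periodic domains are normalized to unit volume
    link = 0.2               # the FOFHaloFinder default
    if link > 0:
        # mean interparticle spacing in code units
        avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
        linking_length = link * avg_spacing
    else:
        # negative link: use |link| directly as the linking length
        linking_length = abs(link)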


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_finding/hop/setup.py
--- a/yt/analysis_modules/halo_finding/hop/setup.py
+++ b/yt/analysis_modules/halo_finding/hop/setup.py
@@ -1,19 +1,19 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('hop',parent_package,top_path)
-    config.add_extension("EnzoHop", sources=
-                                    ["EnzoHop.c",
+    config = Configuration('hop', parent_package, top_path)
+    config.add_extension("EnzoHop", sources=["EnzoHop.c",
                                      "hop_hop.c",
                                      "hop_kd.c",
                                      "hop_regroup.c",
                                      "hop_slice.c",
-                                     "hop_smooth.c",])
-    config.make_config_py() # installs __config__.py
+                                     "hop_smooth.c"])
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_finding/parallel_hop/setup.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/setup.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('parallel_hop',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('parallel_hop', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,15 +1,16 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_finding',parent_package,top_path)
+    config = Configuration('halo_finding', parent_package, top_path)
     config.add_subpackage("fof")
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_mass_function/setup.py
--- a/yt/analysis_modules/halo_mass_function/setup.py
+++ b/yt/analysis_modules/halo_mass_function/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_mass_function',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_mass_function', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -295,6 +295,7 @@
             Maximum number of child halos each leaf can have.
         """
         self.halonum = halonum
+        self.max_children = max_children
         self.output_numbers = sorted(self.relationships, reverse=True)
         self.levels = {}
         trunk = self.output_numbers[0]
@@ -376,7 +377,7 @@
                 print "--> Most massive progenitor == Halo %d" % \
                       (br.progenitor)
                 for i,c in enumerate(br.children):
-                    if i > max_child: break
+                    if i > self.max_children: break
                     print "-->    Halo %8.8d :: fraction = %g" % (c[0], c[1])
 
     def write_dot(self, filename=None):
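
The hunk above fixes a latent NameError: print_tree compared against a bare
`max_child` that was never defined, so the constructor now stores the
`max_children` argument on the instance and the loop reads it back. A minimal
illustration of the pattern (class and names are hypothetical):

    class Tree(object):
        def __init__(self, max_children=5):
            # stash the limit so later methods can see it
            self.max_children = max_children

        def print_children(self, children):
            for i, c in enumerate(children):
                if i > self.max_children:
                    break
                print c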


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_merger_tree/setup.py
--- a/yt/analysis_modules/halo_merger_tree/setup.py
+++ b/yt/analysis_modules/halo_merger_tree/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_merger_tree',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_merger_tree', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/halo_profiler/setup.py
--- a/yt/analysis_modules/halo_profiler/setup.py
+++ b/yt/analysis_modules/halo_profiler/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_profiler',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_profiler', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ b/yt/analysis_modules/hierarchy_subset/api.py
@@ -33,4 +33,3 @@
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \
     ExtractedParameterFile
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('hierarchy_subset',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('hierarchy_subset', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/level_sets/setup.py
--- a/yt/analysis_modules/level_sets/setup.py
+++ b/yt/analysis_modules/level_sets/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('level_sets',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('level_sets', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/light_cone/__init__.py
--- a/yt/analysis_modules/light_cone/__init__.py
+++ b/yt/analysis_modules/light_cone/__init__.py
@@ -22,4 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/light_cone/setup.py
--- a/yt/analysis_modules/light_cone/setup.py
+++ b/yt/analysis_modules/light_cone/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_cone',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('light_cone', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/light_ray/setup.py
--- a/yt/analysis_modules/light_ray/setup.py
+++ b/yt/analysis_modules/light_ray/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_ray',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('light_ray', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/radial_column_density/api.py
--- a/yt/analysis_modules/radial_column_density/api.py
+++ b/yt/analysis_modules/radial_column_density/api.py
@@ -25,4 +25,3 @@
 """
 
 from .radial_column_density import RadialColumnDensity
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('analysis_modules',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('analysis_modules', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/simulation_handler/setup.py
--- a/yt/analysis_modules/simulation_handler/setup.py
+++ b/yt/analysis_modules/simulation_handler/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('simulation_handler',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('simulation_handler', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/spectral_integrator/setup.py
--- a/yt/analysis_modules/spectral_integrator/setup.py
+++ b/yt/analysis_modules/spectral_integrator/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('spectral_integrator',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('spectral_integrator', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/star_analysis/api.py
--- a/yt/analysis_modules/star_analysis/api.py
+++ b/yt/analysis_modules/star_analysis/api.py
@@ -32,4 +32,3 @@
     StarFormationRate, \
     SpectrumBuilder, \
     Zsun
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/star_analysis/setup.py
--- a/yt/analysis_modules/star_analysis/setup.py
+++ b/yt/analysis_modules/star_analysis/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('star_analysis',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('star_analysis', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/analysis_modules/two_point_functions/setup.py
--- a/yt/analysis_modules/two_point_functions/setup.py
+++ b/yt/analysis_modules/two_point_functions/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('two_point_functions',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('two_point_functions', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/astro_objects/api.py
--- a/yt/astro_objects/api.py
+++ b/yt/astro_objects/api.py
@@ -26,9 +26,9 @@
 
 from .astrophysical_object import \
     AstrophysicalObject, identification_method, correlation_method
-    
+
 from .simulation_volume import \
     SimulationVolume
-    
+
 from .clumped_region import \
     ClumpedRegion


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/astro_objects/setup.py
--- a/yt/astro_objects/setup.py
+++ b/yt/astro_objects/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('astro_objects',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('astro_objects', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,6 +52,8 @@
     pasteboard_repo = '',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
+    hub_url = 'https://127.0.0.1:5000/',
+    hub_api_key = '',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
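
With these two defaults in place, pointing yt at a different Hub instance goes
through the usual configuration mechanism; a hedged sketch of reading the new
options back (values shown are the shipped defaults):

    from yt.config import ytcfg
    print ytcfg.get("yt", "hub_url")      # https://127.0.0.1:5000/ unless overridden
    print ytcfg.get("yt", "hub_api_key")  # empty until the user supplies a key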


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -82,3 +82,6 @@
     ValidateGridType, \
     add_field, \
     derived_field
+
+from particle_trajectories import \
+    ParticleTrajectoryCollection


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3483,16 +3483,62 @@
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
+        self.global_endindex = None
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
-                 / self.pf.domain_dimensions).max()
-        AMRCoveringGridBase._get_list_of_grids(self, buffer)
-        # We reverse the order to ensure that coarse grids are first
-        self._grids = self._grids[::-1]
+        # Check for ill-behaved AMR schemes (Enzo) where we may have
+        # root-tile-boundary issues.  This is specific to the root tiles not
+        # allowing grids to cross them and also allowing > 1 level of
+        # difference between neighboring areas.
+        nz = 0
+        buf = 0.0
+        self.min_level = 0
+        dl = ((self.global_startindex.astype("float64") + 1)
+           / (self.pf.refine_by**self.level))
+        dr = ((self.global_startindex.astype("float64")
+              + self.ActiveDimensions - 1)
+           / (self.pf.refine_by**self.level))
+        if na.any(dl == na.rint(dl)) or na.any(dr == na.rint(dr)):
+            nz = 2 * self.pf.refine_by**self.level
+            buf = self._base_dx
+        if nz <= self.pf.refine_by**3: # delta level of 3
+            last_buf = [None,None,None]
+            count = 0
+            # Repeat until no more grids are covered (up to a delta level of 3)
+            while na.any(buf != last_buf) or count == 3:
+                cg = self.pf.h.covering_grid(self.level,
+                     self.left_edge - buf, self.ActiveDimensions + nz)
+                cg._use_pbar = False
+                count = cg.ActiveDimensions.prod()
+                for g in cg._grids:
+                    count -= cg._get_data_from_grid(g, [])
+                    if count <= 0:
+                        self.min_level = g.Level
+                        break
+                last_buf = buf
+                # Increase box by 2 cell widths at the min covering level
+                buf = 2*self._base_dx / self.pf.refine_by**self.min_level
+                nz += 4 * self.pf.refine_by**(self.level-self.min_level)
+                count += 1
+        else:
+            nz = buf = 0
+            self.min_level = 0
+        # This should not cost substantial additional time.
+        BLE = self.left_edge - buf
+        BRE = self.right_edge + buf
+        if na.any(BLE < self.pf.domain_left_edge) or \
+           na.any(BRE > self.pf.domain_right_edge):
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
+                            BLE, BRE, self.level, self.min_level)
+        else:
+            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
+                BLE, BRE, self.level,
+                min(self.level, self.min_level))
+        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)]
 
     def get_data(self, field=None):
         self._get_list_of_grids()
@@ -3508,11 +3554,11 @@
         # We jump-start our task here
         mylog.debug("Getting fields %s from %s possible grids",
                    fields_to_get, len(self._grids))
-        self._update_level_state(0, fields_to_get)
+        self._update_level_state(self.min_level, fields_to_get, initialize=True)
         if self._use_pbar: pbar = \
                 get_pbar('Searching grids for values ', len(self._grids))
         # The grids are assumed to be pre-sorted
-        last_level = 0
+        last_level = self.min_level
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
@@ -3530,27 +3576,31 @@
                     raise KeyError(n_bad)
         if self._use_pbar: pbar.finish()
 
-    def _update_level_state(self, level, fields = None):
+    def _update_level_state(self, level, fields = None, initialize=False):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
+        RL = self.right_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
+        self._old_global_endindex = self.global_endindex
+        # We use one grid cell at LEAST, plus one buffer on all sides
+        self.global_startindex = na.floor(LL / dx).astype('int64') - 1
+        self.global_endindex = na.ceil(RL / dx).astype('int64') + 1
         self.domain_width = na.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
-        if level == 0 and self.level > 0:
-            # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
+        if (level == 0 or initialize) and self.level > 0:
+            idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
-        elif level == 0 and self.level == 0:
+        elif (level == 0 or initialize) and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
+            #idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
@@ -3559,15 +3609,16 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.right_edge-self.left_edge)/dx).astype('int32') + 2
+        input_left = (self._old_global_startindex + 0.5) * rf
+        input_right = (self._old_global_endindex - 0.5) * rf
+        output_left = self.global_startindex + 0.5
+        output_right = self.global_endindex - 0.5
+        output_dims = (output_right - output_left + 1).astype('int32')
 
         self._cur_dims = output_dims
 
         for field in fields:
             output_field = na.zeros(output_dims, dtype="float64")
-            output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
             self.field_data[field] = output_field
@@ -3641,7 +3692,8 @@
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
-        for region in self._all_regions:
+        pbar = get_pbar("Building boolean", len(self._all_regions))
+        for i, region in enumerate(self._all_regions):
             try:
                 region._get_list_of_grids()
                 alias = region
@@ -3668,6 +3720,8 @@
                     # Some of local is in overall
                     self._some_overlap.append(grid)
                     continue
+            pbar.update(i)
+        pbar.finish()
     
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
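
The reworked selection above changes how the smoothed covering grid picks
grids and where interpolation starts (self.min_level rather than level 0),
but the public interface is untouched. A hedged usage sketch, mirroring the
positional covering_grid call in the hunk (dataset name is a placeholder):

    from yt.mods import *
    pf = load("RedshiftOutput0000")
    # level-2 covering grid, 64^3 cells, anchored at the domain quarter-point
    cg = pf.h.smoothed_covering_grid(2, [0.25] * 3, [64] * 3)
    print cg["Density"].shape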


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/particle_trajectories.py
@@ -0,0 +1,387 @@
+"""
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/GSFC
+License:
+  Copyright (C) 2012 John ZuHone All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+  
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+  
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.amr_utils import sample_field_at_positions
+from yt.funcs import *
+
+import numpy as na
+import h5py
+
+class ParticleTrajectoryCollection(object) :
+
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectoryCollection object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must
+    end at or before the particle's last timestep. This is a limitation we
+    hope to lift at some point in the future.
+    """
+    def __init__(self, filenames, indices, fields = None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None : fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each file, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs :
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+                print "Not all requested particle ids contained in this file!"
+                raise IndexError
+            mask = na.in1d(newtags, indices, assume_unique=True)
+            sorts = na.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = na.array(self.times)
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first copy the field over to the particle positions
+        # and then return the field. 
+
+        self.derived_field_list = self.pfs[0].h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if self.pfs[0].field_info[field].particle_type]
+
+        # Now instantiate the requested fields 
+        for field in fields :
+
+            self._get_data(field)
+            
+    def has_key(self, key) :
+
+        return (key in self.field_data)
+    
+    def keys(self) :
+
+        return self.field_data.keys()
+
+    def __getitem__(self, key) :
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+
+        if not self.field_data.has_key(key) :
+
+            self._get_data(key)
+
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key) :
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self) :
+
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices) :
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys() :
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self) :
+
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields) :
+
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        
+        for field in fields :
+
+            if not self.field_data.has_key(field):
+
+                self._get_data(field)
+                
+    def _get_data(self, field) :
+
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        
+        if not self.field_data.has_key(field):
+            
+            particles = na.empty((0))
+
+            step = int(0)
+                
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts) :
+                                    
+                if field in self.particle_fields :
+
+                    # This is easy... just get the particle fields
+
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = na.append(particles, pfield[sort])
+
+                else :
+
+                    # This is hard... must loop over grids
+
+                    pfield = na.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+
+                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
+                        
+                    for grid in leaf_grids :
+
+                        pfield += sample_field_at_positions(grid[field],
+                                                            grid.LeftEdge,
+                                                            grid.RightEdge,
+                                                            x, y, z)
+
+                    particles = na.append(particles, pfield)
+
+                step += 1
+                
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index) :
+
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectoryCollection object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        
+        mask = na.in1d(self.indices, (index,), assume_unique=True)
+
+        if not na.any(mask) :
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+
+        fields = [field for field in sorted(self.field_data.keys())]
+                                
+        traj = {}
+
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        
+        for field in fields :
+
+            traj[field] = self[field][mask,:][0]
+
+        return traj
+
+    def write_out(self, filename_base) :
+
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        
+        fields = [field for field in sorted(self.field_data.keys())]
+
+        num_fields = len(fields)
+
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        
+        template_str = "%g\t"*num_fields+"%g\n"
+        
+        for ix in xrange(self.num_indices) :
+
+            outlines = [first_str]
+
+            for it in xrange(self.num_steps) :
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename) :
+
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        
+        fid = h5py.File(filename, "w")
+
+        fields = [field for field in sorted(self.field_data.keys())]
+        
+        fid.create_dataset("particle_indices", dtype=na.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        
+        for field in fields :
+
+            fid.create_dataset("%s" % field, data=self[field])
+                        
+        fid.close()
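
For completeness, a hedged sketch of reading back the file that write_out_h5
produces (the filename is the one from the docstring example above):

    import h5py
    fid = h5py.File("orbit_trajectories", "r")
    indices = fid["particle_indices"][:]   # the tracked particle ids
    times = fid["particle_time"][:]        # one entry per parameter file
    x = fid["particle_position_x"][:]      # shape (num_indices, num_steps)
    fid.close()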


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('data_objects',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('data_objects', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -91,6 +91,7 @@
 
         self._parse_parameter_file()
         self._set_units()
+        self._set_derived_attrs()
 
         # Because we need an instantiated class to check the pf's existence in
         # the cache, we move that check to here from __new__.  This avoids
@@ -103,6 +104,10 @@
 
         self.create_field_info()
 
+    def _set_derived_attrs(self):
+        self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
+        self.domain_width = self.domain_right_edge - self.domain_left_edge
+
     def __reduce__(self):
         args = (self._hash(),)
         return (_reconstruct_pf, args)
@@ -200,16 +205,23 @@
                 v = getattr(self, a)
                 mylog.info("Parameters: %-25s = %s", a, v)
 
+    _field_info = None
     def create_field_info(self):
-        if getattr(self, "field_info", None) is None:
+        if getattr(self, "_field_info", None) is None:
             # The setting up of fields occurs in the hierarchy, which is only
             # instantiated once.  So we have to double check to make sure that,
             # in the event of double-loads of a parameter file, we do not blow
             # away the exising field_info.
-            self.field_info = FieldInfoContainer.create_with_fallback(
+            self._field_info = FieldInfoContainer.create_with_fallback(
                                 self._fieldinfo_fallback)
 
-        
+    _get_hierarchy = True
+    @property
+    def field_info(self):
+        if self._get_hierarchy:
+            self._get_hierarchy=False
+            self.hierarchy
+        return self._field_info
 
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
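
Since _set_derived_attrs is called from __init__, every parameter file now
exposes the two derived attributes directly; a minimal sketch (dataset name
is a placeholder):

    from yt.mods import *
    pf = load("RedshiftOutput0000")
    print pf.domain_center   # 0.5 * (domain_left_edge + domain_right_edge)
    print pf.domain_width    # domain_right_edge - domain_left_edge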


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -701,7 +701,7 @@
     return data.convert("kpch")
 add_field("ParticleRadiuskpch", function=_ParticleRadius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}/\rm{h}",
+          convert_function = _ConvertRadiuskpch, units=r"\rm{kpc}/\rm{h}",
           particle_type=True,
           display_name = "Particle Radius")
 add_field("Radiuskpch", function=_Radius,


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,10 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('art', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/castro/setup.py
--- a/yt/frontends/castro/setup.py
+++ b/yt/frontends/castro/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-import os.path
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('castro', parent_package, top_path)
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/chombo/setup.py
--- a/yt/frontends/chombo/setup.py
+++ b/yt/frontends/chombo/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('chombo',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('chombo', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/enzo/setup.py
--- a/yt/frontends/enzo/setup.py
+++ b/yt/frontends/enzo/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('enzo',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('enzo', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -148,6 +148,9 @@
 add_flash_field("temp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("temp"),
                 units=r"\rm{K}")
+add_flash_field("tele", function=NullFunc, take_log=True,
+                convert_function=_get_convert("tele"),
+                units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
                 units=r"\rm{erg}\//\/\rm{cm}^{3}")
@@ -196,9 +199,11 @@
         else :
             dname = f                    
         ff = KnownFLASHFields[v]
+        pfield = f.startswith("particle")
         add_field(f, TranslationFunc(v),
                   take_log=KnownFLASHFields[v].take_log,
-                  units = ff._units, display_name=dname)
+                  units = ff._units, display_name=dname,
+                  particle_type = pfield)
 
 def _convertParticleMassMsun(data):
     return 1.0/1.989e33
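
The `pfield` test above flags translated FLASH fields as particle fields
purely by their on-disk name; a minimal illustration of the check:

    for name in ("velx", "temp", "particle_posx", "particle_mass"):
        print name, name.startswith("particle")
    # velx False, temp False, particle_posx True, particle_mass True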


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/flash/setup.py
--- a/yt/frontends/flash/setup.py
+++ b/yt/frontends/flash/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('flash',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('flash', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/gadget/setup.py
--- a/yt/frontends/gadget/setup.py
+++ b/yt/frontends/gadget/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gadget',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('gadget', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -40,5 +40,3 @@
 
 from .io import \
       IOHandlerGDFHDF5
-
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -43,6 +43,11 @@
     FieldInfoContainer, NullFunc
 import pdb
 
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
 class GDFGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
@@ -174,10 +179,18 @@
         # This should be improved.
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
+            current_field = self._handle["/field_types/%s" % field_name]
             try:
-                self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+                self.units[field_name] = current_field.attrs['field_to_cgs']
             except:
                 self.units[field_name] = 1.0
+            try:
+                current_fields_unit = current_field.attrs['field_units'][0]
+            except:
+                current_fields_unit = ""
+            self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
+                   units=current_fields_unit, projected_units="", 
+                   convert_function=_get_convert(field_name))
 
         self._handle.close()
         del self._handle
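
The _get_convert factory added at the top of this file is the standard fix
for Python's late-binding closures: a function defined inline in the field
loop would capture the loop variable itself, so every converter would end up
using the last field_name seen. A minimal illustration (names hypothetical):

    funcs = [lambda: name for name in ("dens", "pres")]
    print [f() for f in funcs]   # ['pres', 'pres'] -- late binding

    def make(n):
        return lambda: n         # n is bound per factory call
    funcs = [make(name) for name in ("dens", "pres")]
    print [f() for f in funcs]   # ['dens', 'pres']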


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/gdf/setup.py
--- a/yt/frontends/gdf/setup.py
+++ b/yt/frontends/gdf/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gdf',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('gdf', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/maestro/setup.py
--- a/yt/frontends/maestro/setup.py
+++ b/yt/frontends/maestro/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('maestro',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('maestro', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/nyx/setup.py
--- a/yt/frontends/nyx/setup.py
+++ b/yt/frontends/nyx/setup.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-import os.path
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/orion/setup.py
--- a/yt/frontends/orion/setup.py
+++ b/yt/frontends/orion/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('orion',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('orion', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/ramses/setup.py
--- a/yt/frontends/ramses/setup.py
+++ b/yt/frontends/ramses/setup.py
@@ -1,10 +1,14 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('ramses',parent_package,top_path)
+    config = Configuration('ramses', parent_package, top_path)
     config.add_extension("_ramses_reader",
         ["yt/frontends/ramses/_ramses_reader.pyx"],
         language="c++",
@@ -12,6 +16,6 @@
         libraries=["stdc++"],
         depends=glob.glob("yt/frontends/ramses/ramses_headers/*.hh")
         )
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('frontends',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('frontends', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/stream/setup.py
--- a/yt/frontends/stream/setup.py
+++ b/yt/frontends/stream/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('stream',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('stream', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/frontends/tiger/setup.py
--- a/yt/frontends/tiger/setup.py
+++ b/yt/frontends/tiger/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('tiger',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('tiger', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/gui/opengl_widgets/setup.py
--- a/yt/gui/opengl_widgets/setup.py
+++ b/yt/gui/opengl_widgets/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('opengl_widgets',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('opengl_widgets', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/gui/reason/setup.py
--- a/yt/gui/reason/setup.py
+++ b/yt/gui/reason/setup.py
@@ -1,10 +1,14 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('reason',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('reason', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/gui/setup.py
--- a/yt/gui/setup.py
+++ b/yt/gui/setup.py
@@ -1,12 +1,15 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gui',parent_package,top_path)
+    config = Configuration('gui', parent_package, top_path)
     config.add_subpackage('opengl_widgets')
     config.add_subpackage('reason')
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -60,7 +60,8 @@
     derived_field, add_field, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
-    TimeSeriesData, AnalysisTask, analysis_task
+    TimeSeriesData, AnalysisTask, analysis_task, \
+    ParticleTrajectoryCollection
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
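
The mods.py hunk just re-exports ParticleTrajectoryCollection through
the top-level convenience namespace. A hedged sketch of what that
enables (the output filenames, particle indices, and constructor
signature below are illustrative assumptions, not taken from this
changeset):

    from yt.mods import ParticleTrajectoryCollection

    my_fns = ["DD%04i/DD%04i" % (i, i) for i in range(5)]  # hypothetical outputs
    indices = [1, 2, 3]                      # hypothetical particle indices
    trajs = ParticleTrajectoryCollection(my_fns, indices)  # assumed signature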


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys
+import os
+import sys
+
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -78,3 +78,32 @@
         field[i1  ,j1-1,k1  ] += mass[n] * dx2 * dy  * dz2
         field[i1-1,j1  ,k1  ] += mass[n] * dx  * dy2 * dz2
         field[i1  ,j1  ,k1  ] += mass[n] * dx2 * dy2 * dz2
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def sample_field_at_positions(np.ndarray[np.float64_t, ndim=3] arr,
+                              np.ndarray[np.float64_t, ndim=1] left_edge,
+                              np.ndarray[np.float64_t, ndim=1] right_edge,
+                              np.ndarray[np.float64_t, ndim=1] pos_x,
+                              np.ndarray[np.float64_t, ndim=1] pos_y,
+                              np.ndarray[np.float64_t, ndim=1] pos_z):
+    cdef np.float64_t idds[3], pp[3]
+    cdef int dims[3], npart, ind[3]
+    cdef int i, j
+    npart = pos_x.shape[0]
+    cdef np.ndarray[np.float64_t, ndim=1] sample 
+    sample = np.zeros(npart, dtype='float64')
+    for i in range(3):
+        dims[i] = arr.shape[i]
+        idds[i] = (<np.float64_t> dims[i]) / (right_edge[i] - left_edge[i])
+    for i in range(npart):
+        if not ((left_edge[0] <= pos_x[i] <= right_edge[0]) and 
+                (left_edge[1] <= pos_y[i] <= right_edge[1]) and
+                (left_edge[2] <= pos_z[i] <= right_edge[2])):
+            continue
+        ind[0] = <int> ((pos_x[i] - left_edge[0]) * idds[0])
+        ind[1] = <int> ((pos_y[i] - left_edge[1]) * idds[1])
+        ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
+        sample[i] = arr[ind[0], ind[1], ind[2]]
+    return sample
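
The new sample_field_at_positions routine does nearest-grid-point
sampling: each particle inside [left_edge, right_edge] receives the
value of the cell containing it, and particles outside are left at
zero. A minimal usage sketch, assuming the routine is exposed through
the compiled amr_utils module like the deposition routines above (the
field values and positions are hypothetical):

    import numpy as np
    from yt.utilities.amr_utils import sample_field_at_positions

    arr = np.random.random((16, 16, 16))     # hypothetical grid field
    left_edge = np.zeros(3, dtype='float64')
    right_edge = np.ones(3, dtype='float64')
    pos_x = np.array([0.25, 0.75])           # hypothetical positions
    pos_y = np.array([0.50, 0.50])
    pos_z = np.array([0.10, 0.90])
    samples = sample_field_at_positions(arr, left_edge, right_edge,
                                        pos_x, pos_y, pos_z)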


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/amr_kdtree/__init__.py
--- a/yt/utilities/amr_kdtree/__init__.py
+++ b/yt/utilities/amr_kdtree/__init__.py
@@ -1,4 +1,3 @@
 """
 Initialize amr_kdtree
 """
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -23,7 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import runner, output_tests
+import runner
+import output_tests
 from runner import RegressionTestRunner
 
 from output_tests import RegressionTest, SingleOutputTest, \


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,4 +57,3 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/answer_testing/setup.py
--- a/yt/utilities/answer_testing/setup.py
+++ b/yt/utilities/answer_testing/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('answer_testing',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('answer_testing', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1383,20 +1383,31 @@
                     port=int(args.port), repl=hr)
 
 class YTStatsCmd(YTCommand):
-    args = ('outputfn','bn','skip','pf')
+    args = ('outputfn','bn','skip','pf', 'field',
+            dict(long="--max", action="store_true", default=False,
+                 dest='max', help="Display maximum of requested field."),
+            dict(long="--min", action="store_true", default=False,
+                 dest='min', help="Display minimum of requested field."))
     name = "stats"
     description = \
         """
-        Print stats and max density for one or more datasets
+        Print stats and max/min value of a given field (if requested),
+        for one or more datasets
+
+        (default field is density)
 
         """
 
     def __call__(self, args):
         pf = args.pf
         pf.h.print_stats()
-        if "Density" in pf.h.field_list:
-            v, c = pf.h.find_max("Density")
-            print "Maximum density: %0.5e at %s" % (v, c)
+        if args.field in pf.h.field_list:
+            if args.max == True:
+                v, c = pf.h.find_max(args.field)
+                print "Maximum %s: %0.5e at %s" % (args.field, v, c)
+            if args.min == True:
+                v, c = pf.h.find_min(args.field)
+                print "Minimum %s: %0.5e at %s" % (args.field, v, c)
         if args.output is not None:
             t = pf.current_time * pf['years']
             open(args.output, "a").write(
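
With these flags, stats can report extrema of any field instead of
hard-coding Density. A hedged invocation sketch (the dataset path is
hypothetical, and --field assumes the shared 'field' argument exposes
an option of that name defaulting to density):

    $ yt stats --field Density --max --min DD0010/DD0010

Per the print statements above, each requested extremum comes out as
"Maximum <field>: <value> at <center>", and likewise for the minimum.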


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/delaunay/setup.py
--- a/yt/utilities/delaunay/setup.py
+++ b/yt/utilities/delaunay/setup.py
@@ -2,6 +2,7 @@
 from numpy.distutils.core import setup
 from numpy.distutils.misc_util import Configuration
 
+
 def configuration(parent_package='', top_path=None):
 
     config = Configuration('delaunay', parent_package, top_path)


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,29 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as na
 import abc
+import json
+import urllib2
+from tempfile import TemporaryFile
+from yt.config import ytcfg
+from yt.funcs import *
+
+try:
+    from poster.streaminghttp import register_openers
+    from poster.encode import multipart_encode
+    register_openers()
+except ImportError:
+    pass
+
+class UploaderBar(object):
+    pbar = None
+    def __call__(self, name, prog, total):
+        if self.pbar is None:
+            self.pbar = get_pbar("Uploading %s" % name, total)
+        self.pbar.update(prog)
+        if prog == total:
+            self.pbar.finish()
 
 class ContainerClass(object):
     pass
@@ -67,6 +89,45 @@
             setattr(cc, a, v)
         return cls(cc)
 
+    def upload(self):
+        api_key = ytcfg.get("yt","hub_api_key")
+        url = ytcfg.get("yt","hub_url")
+        metadata, (final_name, chunks) = self._generate_post()
+        for i in metadata:
+            if isinstance(metadata[i], na.ndarray):
+                metadata[i] = metadata[i].tolist()
+        metadata['obj_type'] = self.type
+        if len(chunks) == 0:
+            chunk_info = {'chunks': []}
+        else:
+            chunk_info = {'final_name' : final_name, 'chunks': []}
+            for cn, cv in chunks:
+                chunk_info['chunks'].append((cn, cv.size * cv.itemsize))
+        metadata = json.dumps(metadata)
+        chunk_info = json.dumps(chunk_info)
+        datagen, headers = multipart_encode({'metadata' : metadata,
+                                             'chunk_info' : chunk_info,
+                                             'api_key' : api_key})
+        request = urllib2.Request(url, datagen, headers)
+        # Actually do the request, and get the response
+        rv = urllib2.urlopen(request).read()
+        uploader_info = json.loads(rv)
+        new_url = url + "/handler/%s" % uploader_info['handler_uuid']
+        for cn, cv in chunks:
+            remaining = cv.size * cv.itemsize
+            f = TemporaryFile()
+            na.save(f, cv)
+            f.seek(0)
+            pbar = UploaderBar()
+            datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
+            request = urllib2.Request(new_url, datagen, headers)
+            rv = urllib2.urlopen(request).read()
+
+        datagen, headers = multipart_encode({'status' : 'FINAL'})
+        request = urllib2.Request(new_url, datagen, headers)
+        rv = urllib2.urlopen(request).read()
+        return json.loads(rv)
+
 class FilteredRepresentation(MinimalRepresentation):
     def _generate_post(self):
         raise RuntimeError
@@ -77,6 +138,7 @@
                   "unique_identifier", "current_redshift", "output_hash",
                   "cosmological_simulation", "omega_matter", "omega_lambda",
                   "hubble_constant", "name")
+    type = 'simulation_output'
 
     def __init__(self, obj):
         super(MinimalStaticOutput, self).__init__(obj)
@@ -86,7 +148,7 @@
     def _generate_post(self):
         metadata = self._attrs
         chunks = []
-        return metadata, chunks
+        return (metadata, (None, chunks))
 
 class MinimalMappableData(MinimalRepresentation):
 
@@ -97,10 +159,7 @@
         nobj = self._return_filtered_object(("field_data",))
         metadata = nobj._attrs
         chunks = [(arr, self.field_data[arr]) for arr in self.field_data]
-        return (metadata, chunks)
+        return (metadata, ('field_data', chunks))
 
 class MinimalProjectionData(MinimalMappableData):
-
-    def __init__(self, obj):
-        super(MinimalProjectionData, self).__init__(obj)
-        self.type = "proj"
+    type = 'proj'
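
The new upload() method is a three-step handshake with the Hub: POST
the JSON metadata and per-chunk sizes to hub_url, read back a
handler_uuid, stream each array chunk to hub_url + "/handler/<uuid>"
(with a progress bar via the poster callback), then POST a FINAL
status and return the decoded reply. A hedged usage sketch, assuming
the poster package is installed and the yt.hub_api_key / yt.hub_url
config entries are set (the dataset and projection are hypothetical):

    from yt.mods import load
    from yt.utilities.minimal_representation import MinimalProjectionData

    pf = load("DD0010/DD0010")        # hypothetical dataset
    proj = pf.h.proj(0, "Density")    # Density projected along x
    reply = MinimalProjectionData(proj).upload()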


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -3,23 +3,24 @@
 #
 
 # Masses
-mass_hydrogen_cgs = 1.67e-24 # g
-mass_electron_cgs = 9.11e-28 # g
-amu_cgs           = 1.66053886e-24 # g
+mass_hydrogen_cgs = 1.67e-24  # g
+mass_electron_cgs = 9.11e-28  # g
+amu_cgs           = 1.66053886e-24  # g
+mass_sun_cgs = 1.9891e33  # g
 # Velocities
-speed_of_light_cgs = 2.99792458e10 # cm/s, exact
+speed_of_light_cgs = 2.99792458e10  # cm/s, exact
 
 # Cross Sections
-cross_section_thompson_cgs = 6.65e-25 # cm^2
+cross_section_thompson_cgs = 6.65e-25  # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10  # esu = 1.602e-19  Coulombs
 
 # Physical Constants
-boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1
-gravitational_constant_cgs  = 6.67428e-8 # cm^3 g^-1 s^-2
-planck_constant_cgs   = 6.62606896e-27 # erg s
-rho_crit_now = 1.8788e-29 # g times h^2 (critical mass for closure, Cosmology)
+boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
+gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
+planck_constant_cgs   = 6.62606896e-27  # erg s
+rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22
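
Aside from the comment-spacing cleanup, the one substantive addition
here is mass_sun_cgs. A quick conversion sketch (the gas mass is
hypothetical):

    from yt.utilities.physical_constants import mass_sun_cgs

    m_cell = 3.9782e33            # g, hypothetical gas mass
    print m_cell / mass_sun_cgs   # -> 2.0 solar masses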


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,6 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
+
 
 def check_for_png():
     # First up: HDF5_DIR in environment
@@ -45,24 +49,28 @@
                 print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
                 return png_inc, png_lib
     print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
+    print "Please place the base directory of your png"
+    print "install in png.cfg and restart."
     print "(ex: \"echo '/usr/local/' > png.cfg\" )"
     sys.exit(1)
 
+
 def check_for_freetype():
     # First up: environment
     if "FTYPE_DIR" in os.environ:
         freetype_dir = os.environ["FTYPE_DIR"]
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
+        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc,
+            freetype_lib)
         return (freetype_inc, freetype_lib)
     # Next up, we try freetype.cfg
     elif os.path.exists("freetype.cfg"):
         freetype_dir = open("freetype.cfg").read().strip()
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc,
+            freetype_lib)
         return (freetype_inc, freetype_lib)
     # Now we see if ctypes can help us:
     try:
@@ -74,10 +82,12 @@
             # better way to pull off two directory names.
             freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
             if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                os.path.isfile(os.path.join(freetype_dir, "include",
+                    "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % \
+                    (freetype_inc, freetype_lib)
                 return freetype_inc, freetype_lib
     except ImportError:
         pass
@@ -86,17 +96,21 @@
     for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
         if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
             if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                os.path.isfile(os.path.join(freetype_dir, "include",
+                    "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % \
+                    (freetype_inc, freetype_lib)
                 return freetype_inc, freetype_lib
     print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
+    print "Please place the base directory of your freetype"
+    print "install in freetype.cfg and restart."
     print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
     print "You can locate this by looking for the file ft2build.h"
     sys.exit(1)
 
+
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
     if "HDF5_DIR" in os.environ:
@@ -125,46 +139,51 @@
                os.path.isfile(os.path.join(hdf5_dir, "include", "hdf5.h")):
                 hdf5_inc = os.path.join(hdf5_dir, "include")
                 hdf5_lib = os.path.join(hdf5_dir, "lib")
-                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc, hdf5_lib)
+                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
+                    hdf5_lib)
                 return hdf5_inc, hdf5_lib
     except ImportError:
         pass
     print "Reading HDF5 location from hdf5.cfg failed."
-    print "Please place the base directory of your HDF5 install in hdf5.cfg and restart."
+    print "Please place the base directory of your"
+    print "HDF5 install in hdf5.cfg and restart."
     print "(ex: \"echo '/usr/local/' > hdf5.cfg\" )"
     sys.exit(1)
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('utilities',parent_package,top_path)
+    config = Configuration('utilities', parent_package, top_path)
     png_inc, png_lib = check_for_png()
     freetype_inc, freetype_lib = check_for_freetype()
-    # Because setjmp.h is included by lots of things, and because libpng hasn't
+    # Because setjmp.h is included by lots of things,
+    # and because libpng hasn't
     # always properly checked its header files (see
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
     # support for setjmp.
     config.add_subpackage("amr_kdtree")
     config.add_subpackage("answer_testing")
-    config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
+    config.add_subpackage("delaunay")  # From SciPy, written by Robert Kern
     config.add_subpackage("kdtree")
-    config.add_data_files(('kdtree', ['kdtree/fKDpy.so',]))
+    config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
     config.add_subpackage("parallel_tools")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
     hdf5_inc, hdf5_lib = check_for_hdf5()
-    include_dirs=[hdf5_inc]
-    library_dirs=[hdf5_lib]
-    config.add_extension("hdf5_light_reader", "yt/utilities/hdf5_light_reader.c",
-                         define_macros=[("H5_USE_16_API",True)],
-                         libraries=["m","hdf5"],
+    include_dirs = [hdf5_inc]
+    library_dirs = [hdf5_lib]
+    config.add_extension("hdf5_light_reader",
+                        "yt/utilities/hdf5_light_reader.c",
+                         define_macros=[("H5_USE_16_API", True)],
+                         libraries=["m", "hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
-    config.add_extension("amr_utils", 
+    config.add_extension("amr_utils",
         ["yt/utilities/amr_utils.pyx",
          "yt/utilities/_amr_utils/FixedInterpolator.c",
          "yt/utilities/_amr_utils/kdtree.c",
          "yt/utilities/_amr_utils/union_find.c"] +
-         glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
+         glob.glob("yt/utilities/_amr_utils/healpix_*.c"),
         define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
         include_dirs=["yt/utilities/_amr_utils/", png_inc,
                       freetype_inc, os.path.join(freetype_inc, "freetype2")],
@@ -178,12 +197,12 @@
     #    ["yt/utilities/voropp.pyx"],
     #    language="c++",
     #    include_dirs=["yt/utilities/voro++"])
-    config.add_extension("libconfig_wrapper", 
+    config.add_extension("libconfig_wrapper",
         ["yt/utilities/libconfig_wrapper.pyx"] +
-         glob.glob("yt/utilities/_libconfig/*.c"), 
-        include_dirs = ["yt/utilities/_libconfig/"],
-        define_macros = [("HAVE_XLOCALE_H", True)]
+         glob.glob("yt/utilities/_libconfig/*.c"),
+        include_dirs=["yt/utilities/_libconfig/"],
+        define_macros=[("HAVE_XLOCALE_H", True)]
         )
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config
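
These helpers look for each dependency first in an environment
variable, then in a one-line .cfg file, then via ctypes and common
install prefixes. As the error messages suggest, a cfg file unblocks
the build; for example (prefix hypothetical):

    $ echo '/usr/local/' > hdf5.cfg
    $ python setup.py build_ext -i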


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/spatial/__init__.py
--- a/yt/utilities/spatial/__init__.py
+++ b/yt/utilities/spatial/__init__.py
@@ -26,7 +26,7 @@
 from ckdtree import *
 #from qhull import *
 
-__all__ = filter(lambda s:not s.startswith('_'),dir())
+__all__ = filter(lambda s: not s.startswith('_'), dir())
 __all__ += ['distance']
 
 import distance


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
-
 from os.path import join
 
-def configuration(parent_package = '', top_path = None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
     from numpy.distutils.system_info import get_info
     from distutils.sysconfig import get_python_inc
@@ -36,21 +36,21 @@
 
     config.add_extension('ckdtree', sources=['ckdtree.pyx'],
         libraries=["m"],
-        include_dirs = [get_numpy_include_dirs()])
+        include_dirs=[get_numpy_include_dirs()])
 
     config.add_extension('_distance_wrap',
         sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')],
-        include_dirs = [get_numpy_include_dirs()])
+        include_dirs=[get_numpy_include_dirs()])
 
     return config
 
 if __name__ == '__main__':
     from numpy.distutils.core import setup
-    setup(maintainer = "SciPy Developers",
-          author = "Anne Archibald",
-          maintainer_email = "scipy-dev@scipy.org",
-          description = "Spatial algorithms and data structures",
-          url = "http://www.scipy.org",
-          license = "SciPy License (BSD Style)",
+    setup(maintainer="SciPy Developers",
+          author="Anne Archibald",
+          maintainer_email="scipy-dev@scipy.org",
+          description="Spatial algorithms and data structures",
+          url="http://www.scipy.org",
+          license="SciPy License (BSD Style)",
           **configuration(top_path='').todict()
           )


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -293,6 +293,7 @@
                 _xlabel = ""
                 _ylabel = ""
             else:
+                units = units.replace('mpc', 'Mpc')
                 _xlabel = '%s (%s)' % (x_names[plot.data.axis], units)
                 _ylabel = '%s (%s)' % (y_names[plot.data.axis], units)
             _tickcolor = pyx.color.cmyk.white


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/visualization/image_panner/setup.py
--- a/yt/visualization/image_panner/setup.py
+++ b/yt/visualization/image_panner/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('image_panner',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('image_panner', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('visualization',parent_package,top_path)
+    config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])
     return config


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/visualization/volume_rendering/__init__.py
--- a/yt/visualization/volume_rendering/__init__.py
+++ b/yt/visualization/volume_rendering/__init__.py
@@ -27,4 +27,3 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-


diff -r 26e506c79f839e62abc86b377c9c1e41c57e2e88 -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 yt/visualization/volume_rendering/setup.py
--- a/yt/visualization/volume_rendering/setup.py
+++ b/yt/visualization/volume_rendering/setup.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
 #os.system("cython -a yt/extensions/volume_rendering/VolumeIntegrator.pyx")
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('volume_rendering',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('volume_rendering', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config



https://bitbucket.org/yt_analysis/yt/changeset/38f76bfdb291/
changeset:   38f76bfdb291
branch:      yt
user:        ngoldbaum
date:        2012-02-14 20:50:50
summary:     Fixes to FLASH frontend so the sound speed can be calculated
affected #:  1 file

diff -r 5a0df3750ca30f7e210fbbd7305e38a5291262d0 -r 38f76bfdb2917f30b69c56469ee279bad36de39c yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -226,6 +226,8 @@
         if len(self.parameters) == 0:
             self._parse_parameter_file()
         self.conversion_factors = defaultdict(lambda: 1.0)
+        if "EOSType" not in self.parameters:
+            self.parameters["EOSType"] = -1
         if self.cosmological_simulation == 1:
             self._setup_comoving_units()
         else:
@@ -332,6 +334,8 @@
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 
+        self.parameters['Gamma'] = self._find_parameter("real", "gamma")
+
         if self._flash_version == 7:
             self.current_time = float(
                 self._handle["simulation parameters"][:]["time"])
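
Both defaults exist so the ideal-gas sound speed can be evaluated:
c_s = sqrt(Gamma * P / rho), with Gamma read from the FLASH 'gamma'
runtime parameter and EOSType given a -1 sentinel when the file does
not record one. A minimal sketch of the relation (cgs values are
hypothetical):

    from math import sqrt
    gamma = 5.0 / 3.0                       # FLASH 'gamma' parameter
    pressure, density = 1.0e-12, 1.0e-24    # dyne/cm^2, g/cm^3
    c_s = sqrt(gamma * pressure / density)  # ~1.3e6 cm/s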



https://bitbucket.org/yt_analysis/yt/changeset/782b3dd8bb92/
changeset:   782b3dd8bb92
branch:      yt
user:        ngoldbaum
date:        2012-02-14 20:52:20
summary:     Backing out changes to reason
affected #:  3 files

diff -r 38f76bfdb2917f30b69c56469ee279bad36de39c -r 782b3dd8bb92b1697124d6b05416b943679096a7 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -40,7 +40,6 @@
 import imp
 import threading
 import Queue
-import glob
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger, ufstring
@@ -388,11 +387,6 @@
             return {'status': 'FAIL', 'filename': filename,
                     'error': 'Unexpected error.'}
         return {'status': 'SUCCESS', 'filename': filename}
-    
-    @lockit
-    def get_directory_listing(self):
-        filenames = glob.glob('./*')
-        return {'status': 'SUCCESS', 'filenames': filenames}
 
     @lockit
     def paste_session(self):
@@ -752,7 +746,6 @@
         self.execute(funccall, hide = False)
         pf = self.locals['_tpf']
 
-
 class ExtDirectParameterFileList(BottleDirectRouter):
     my_name = "ExtDirectParameterFileList"
     api_url = "pflist"


diff -r 38f76bfdb2917f30b69c56469ee279bad36de39c -r 782b3dd8bb92b1697124d6b05416b943679096a7 yt/gui/reason/html/js/menu_items.js
--- a/yt/gui/reason/html/js/menu_items.js
+++ b/yt/gui/reason/html/js/menu_items.js
@@ -32,29 +32,8 @@
 var main_menu = {
     text: 'Menu',
     id: 'main_menu',
-    menu: [//{xtype:'menuitem', text: 'Open', disabled: true},
-           {xtype:'menuitem', text: 'Open', 
-	    handler: function(b,e) {
-		   Ext.Msg.prompt("We have somewhat less important work to do.",
-				  "Enter directory path.",
-		   function(btn, text) {
-		   if (btn == 'ok'){
-		       yt_rpc.ExtDirectREPL.get_directory_listing({filename:text},
-		       function(f, a) {
-			   if (a.result['status'] == 'SUCCESS') {
-			       var alert_text = 'List of files: ' + a.result['filenames']
-			       Ext.Msg.alert('Success! ', alert_text);
-			       var record = new logging_store.recordType({record: alert_text });
-			       logging_store.add(record, number_log_records++);
-			   } else {
-			       Ext.Msg.alert('Always naysaying!',
-					     'Failed to get list of files');
-			   }
-		       });
-		   }
-				  });
-	       }
-	   },
+    menu: [
+           {xtype:'menuitem', text: 'Open', disabled: true},
            {xtype:'menuitem', text: 'Open Directory', disabled: true},
            {xtype: 'menuseparator'},
            {xtype:'menuitem', text: 'Save Script',


diff -r 38f76bfdb2917f30b69c56469ee279bad36de39c -r 782b3dd8bb92b1697124d6b05416b943679096a7 yt/gui/reason/html/js/reason.js
--- a/yt/gui/reason/html/js/reason.js
+++ b/yt/gui/reason/html/js/reason.js
@@ -91,7 +91,7 @@
                     },
                     notifyDrop  : function(ddSource, e, data){
 
-			var varname = data.node.attributes.objdata.varname;
+                        var varname = data.node.attributes.objdata.varname;
                         /* There is possibly a better way to do this, where it's also inserted correctly. */
                         var line = repl_input.get("input_line");
                         line.setValue(line.getValue() + varname);

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and it is addressed to the
recipient of this email.


