[yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Nov 13 07:27:15 PST 2012


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/7f0e191c6869/
changeset:   7f0e191c6869
branch:      yt
user:        jsoishi
date:        2012-11-13 16:22:18
summary:     added stefan-boltzmann constant in cgs
affected #:  1 file

diff -r 189d2eace2e91e9ac7aeae9593fe465b50b6cf92 -r 7f0e191c686919d4932ec0c178f41521f75a90c2 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -20,6 +20,7 @@
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
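
For illustration only (not part of the changeset): the new constant gives the
radiated blackbody flux F = sigma * T^4 directly in cgs units.

    # Minimal sketch, assuming yt is importable; T is roughly the solar
    # effective temperature in Kelvin.
    from yt.utilities.physical_constants import stefan_boltzmann_constant_cgs
    T = 5778.0  # K
    flux = stefan_boltzmann_constant_cgs * T**4  # erg cm^-2 s^-1
    print "%.3e erg cm^-2 s^-1" % flux           # ~6.3e10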



https://bitbucket.org/yt_analysis/yt/changeset/8cee6c380933/
changeset:   8cee6c380933
branch:      yt
user:        jsoishi
date:        2012-11-13 16:22:44
summary:     merged.
affected #:  30 files

diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,6 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
+0000000000000000000000000000000000000000 hop callback


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -245,7 +245,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -411,28 +411,31 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8  mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -444,14 +447,16 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.4.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
 get_ytproject Forthon-0.8.10.tar.gz
-
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -535,11 +540,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,7 +592,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.2.2
+    do_setup_py mercurial-2.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -655,10 +660,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -685,10 +690,13 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
 do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -84,6 +84,7 @@
     ValidateSpatial, \
     ValidateGridType, \
     add_field, \
+    add_grad, \
     derived_field
 
 from particle_trajectories import \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -709,7 +709,7 @@
     _type_name = "streamline"
     _con_args = ('positions')
     sort_by = 't'
-    def __init__(self, positions, fields=None, pf=None, **kwargs):
+    def __init__(self, positions, length = 1.0, fields=None, pf=None, **kwargs):
         """
         This is a streamline, which is a set of points defined as
         being parallel to some vector field.
@@ -725,6 +725,8 @@
         ----------
         positions : array-like
             List of streamline positions
+        length : float
+            The total length of the streamline; dts will be divided by this
         fields : list of strings, optional
             If you want the object to pre-retrieve a set of fields, supply them
             here.  This is not necessary.
@@ -749,7 +751,9 @@
         self.dts = np.empty_like(positions[:,0])
         self.dts[:-1] = np.sqrt(np.sum((self.positions[1:]-
                                         self.positions[:-1])**2,axis=1))
-        self.dts[-1] = self.dts[-1]
+        self.dts[-1] = self.dts[-2]
+        self.length = length
+        self.dts /= length
         self.ts = np.add.accumulate(self.dts)
         self._set_center(self.positions[0])
         self.set_field_parameter('center', self.positions[0])
@@ -768,31 +772,30 @@
 
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
-        mask = np.logical_and(self._get_cut_mask(grid),
-                              grid.child_mask)
-        if field == 'dts': return self._dts[grid.id][mask]
-        if field == 't': return self._ts[grid.id][mask]
-        return grid[field][mask]
+        # No child masking here; it happens inside the mask cut
+        mask = self._get_cut_mask(grid) 
+        if field == 'dts': return self._dts[grid.id]
+        if field == 't': return self._ts[grid.id]
+        return grid[field].flat[mask]
         
     @cache_mask
     def _get_cut_mask(self, grid):
-        mask = np.zeros(grid.ActiveDimensions, dtype='int')
-        dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-        ts = np.zeros(grid.ActiveDimensions, dtype='float64')
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
                          np.all(self.positions <= grid.RightEdge, axis=1) 
         pids = np.where(points_in_grid)[0]
-        for i, pos in zip(pids, self.positions[points_in_grid]):
+        mask = np.zeros(points_in_grid.sum(), dtype='int')
+        dts = np.zeros(points_in_grid.sum(), dtype='float64')
+        ts = np.zeros(points_in_grid.sum(), dtype='float64')
+        for mi, (i, pos) in enumerate(zip(pids, self.positions[points_in_grid])):
             if not points_in_grid[i]: continue
             ci = ((pos - grid.LeftEdge)/grid.dds).astype('int')
+            if grid.child_mask[ci[0], ci[1], ci[2]] == 0: continue
             for j in range(3):
                 ci[j] = min(ci[j], grid.ActiveDimensions[j]-1)
-            if mask[ci[0], ci[1], ci[2]]:
-                continue
-            mask[ci[0], ci[1], ci[2]] = 1
-            dts[ci[0], ci[1], ci[2]] = self.dts[i]
-            ts[ci[0], ci[1], ci[2]] = self.ts[i]
+            mask[mi] = np.ravel_multi_index(ci, grid.ActiveDimensions)
+            dts[mi] = self.dts[i]
+            ts[mi] = self.ts[i]
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
         return mask
@@ -3884,10 +3887,21 @@
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
-            fields_to_get = self.fields[:]
+            fields = self.fields[:]
         else:
-            fields_to_get = ensure_list(field)
-        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+            fields = ensure_list(field)
+        fields_to_get = []
+        for field in fields:
+            if self.field_data.has_key(field): continue
+            if field not in self.hierarchy.field_list:
+                try:
+                    #print "Generating", field
+                    self._generate_field(field)
+                    continue
+                except NeedsOriginalGrid, ngt_exception:
+                    pass
+            fields_to_get.append(field)
+        if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
         # on the field than one might think from looking at the dx and the
         # L/R edges.
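
For illustration (not part of the changeset), the new dts bookkeeping in
isolation: segment lengths are padded at the end and expressed as a fraction
of the supplied streamline length.

    # Minimal numpy sketch of the normalization added above.
    import numpy as np

    positions = np.array([[0.0, 0.0, 0.0],
                          [0.1, 0.0, 0.0],
                          [0.2, 0.0, 0.0]])
    length = 0.2
    dts = np.empty_like(positions[:, 0])
    dts[:-1] = np.sqrt(np.sum((positions[1:] - positions[:-1])**2, axis=1))
    dts[-1] = dts[-2]   # pad the final entry (was a no-op self-assignment)
    dts /= length       # each dt is now a fraction of the total length
    ts = np.add.accumulate(dts)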


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -58,6 +58,66 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+        
+    def add_grad(self, field, **kwargs):
+        """
+        Creates partial-derivative fields for a given field along x, y, and
+        z, plus a gradient-magnitude field, with autogenerated field names.
+
+        """
+        sl = slice(2,None,None)
+        sr = slice(None,-2,None)
+        
+        def _gradx(f, data):
+            grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
+            grad /= 2.0*data["dx"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _grady(f, data):
+            grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
+            grad /= 2.0*data["dy"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+            
+        def _gradz(f, data):
+            grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
+            grad /= 2.0*data["dz"].flat[0]
+            g = np.zeros(data[field].shape, dtype='float64')
+            g[1:-1,1:-1,1:-1] = grad
+            return g
+        
+        d_kwargs = kwargs.copy()
+        if "display_name" in kwargs: del d_kwargs["display_name"]
+        
+        for ax in "xyz":
+            if "display_name" in kwargs:
+                disp_name = r"%s\_%s" % (kwargs["display_name"], ax)
+            else:
+                disp_name = r"\partial %s/\partial %s" % (field, ax)
+            name = "Grad_%s_%s" % (field, ax)
+            self[name] = DerivedField(name, function=eval('_grad%s' % ax),
+                         take_log=False, validators=[ValidateSpatial(1,[field])],
+                         display_name = disp_name, **d_kwargs)
+        
+        def _grad(f, data) :
+            a = np.power(data["Grad_%s_x" % field],2)
+            b = np.power(data["Grad_%s_y" % field],2)
+            c = np.power(data["Grad_%s_z" % field],2)
+            norm = np.sqrt(a+b+c)
+            return norm
+
+        if "display_name" in kwargs:
+            disp_name = kwargs["display_name"]
+        else:
+            disp_name = r"\Vert\nabla %s\Vert" % (field)   
+        name = "Grad_%s" % field           
+        self[name] = DerivedField(name, function=_grad, take_log=False,
+                                  display_name = disp_name, **d_kwargs)
+        mylog.info("Added new fields: Grad_%s_x, Grad_%s_y, Grad_%s_z, Grad_%s" \
+                   % (field, field, field, field))
 
     def has_key(self, key):
         # This gets used a lot
@@ -96,6 +156,7 @@
 
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
+add_grad = FieldInfo.add_grad
 
 def derived_field(**kwargs):
     def inner_decorator(function):
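
For illustration (not part of the changeset), the new add_grad hook in use:

    # Minimal sketch, assuming a loadable dataset; registers
    # Grad_Density_x/y/z and the magnitude field Grad_Density.
    from yt.mods import load, add_grad

    add_grad("Density")
    # pf = load("mydata0010")       # path is hypothetical
    # dd = pf.h.all_data()
    # dd["Grad_Density_x"]          # central-difference d(rho)/dx
    # dd["Grad_Density"]            # ||grad rho||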


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -382,17 +382,19 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s\t%14s" % ("level","# grids", "# cells",
+                                           "# cells^3")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 14i" % \
+            print "% 3i\t% 6i\t% 14i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
-                   self.level_stats['numcells'][level])
+                   self.level_stats['numcells'][level],
+                   self.level_stats['numcells'][level]**(1./3))
             dx = self.select_grids(level)[0].dds[0]
-        print "-" * 28
+        print "-" * 46
         print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_covering_grid.py
--- a/yt/data_objects/tests/test_covering_grid.py
+++ b/yt/data_objects/tests/test_covering_grid.py
@@ -25,3 +25,24 @@
                                       dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
                                       dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
                     yield assert_equal, f, g["Density"]
+
+def test_smoothed_covering_grid():
+    # We decompose in different ways
+    for level in [0, 1, 2]:
+        for nprocs in [1, 2, 4, 8]:
+            pf = fake_random_pf(16, nprocs = nprocs)
+            dn = pf.refine_by**level 
+            cg = pf.h.smoothed_covering_grid(level, [0.0, 0.0, 0.0],
+                    dn * pf.domain_dimensions)
+            assert_equal( cg["Ones"].max(), 1.0)
+            assert_equal( cg["Ones"].min(), 1.0)
+            assert_equal( cg["CellVolume"].sum(), pf.domain_width.prod())
+            for g in pf.h.grids:
+                if level != g.Level: continue
+                di = g.get_global_startindex()
+                dd = g.ActiveDimensions
+                for i in range(dn):
+                    f = cg["Density"][dn*di[0]+i:dn*(di[0]+dd[0])+i:dn,
+                                      dn*di[1]+i:dn*(di[1]+dd[1])+i:dn,
+                                      dn*di[2]+i:dn*(di[2]+dd[2])+i:dn]
+                    yield assert_equal, f, g["Density"]


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,6 +1,4 @@
 from yt.testing import *
-from yt.data_objects.profiles import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
 
 def setup():
     from yt.config import ytcfg
@@ -32,8 +30,30 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = proj.to_frb((1.0,'unitary'), 64)
+                for proj_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[proj_field].info['data_source'], \
+                            proj.__str__()
+                    yield assert_equal, frb[proj_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[proj_field].info['field'], \
+                            proj_field
+                    yield assert_equal, frb[proj_field].info['units'], \
+                            pf.field_info[proj_field].get_units()
+                    yield assert_equal, frb[proj_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[proj_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[proj_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[proj_field].info['center'], \
+                            proj.center
+                    yield assert_equal, frb[proj_field].info['weight_field'], \
+                            wf
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
             v2 = (dd["Density"] * dd["d%s" % an]).sum()
             yield assert_rel_equal, v1, v2, 10
+
+


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_slice.py
--- /dev/null
+++ b/yt/data_objects/tests/test_slice.py
@@ -0,0 +1,55 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_slice():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        xn, yn, zn = pf.domain_dimensions
+        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        uc = [np.unique(c) for c in coords]
+        slc_pos = 0.5
+        # Some simple slice tests with single grids
+        for ax, an in enumerate("xyz"):
+            xax = x_dict[ax]
+            yax = y_dict[ax]
+            for wf in ["Density", None]:
+                slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
+                yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
+                yield assert_equal, slc["Ones"].min(), 1.0
+                yield assert_equal, slc["Ones"].max(), 1.0
+                yield assert_equal, np.unique(slc["px"]), uc[xax]
+                yield assert_equal, np.unique(slc["py"]), uc[yax]
+                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
+                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                frb = slc.to_frb((1.0,'unitary'), 64)
+                for slc_field in ['Ones', 'Density']:
+                    yield assert_equal, frb[slc_field].info['data_source'], \
+                            slc.__str__()
+                    yield assert_equal, frb[slc_field].info['axis'], \
+                            ax
+                    yield assert_equal, frb[slc_field].info['field'], \
+                            slc_field
+                    yield assert_equal, frb[slc_field].info['units'], \
+                            pf.field_info[slc_field].get_units()
+                    yield assert_equal, frb[slc_field].info['xlim'], \
+                            frb.bounds[:2]
+                    yield assert_equal, frb[slc_field].info['ylim'], \
+                            frb.bounds[2:]
+                    yield assert_equal, frb[slc_field].info['length_to_cm'], \
+                            pf['cm']
+                    yield assert_equal, frb[slc_field].info['center'], \
+                            slc.center
+                    yield assert_equal, frb[slc_field].info['coord'], \
+                            slc_pos
+            # wf == None
+            yield assert_equal, wf, None
+
+


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/data_objects/tests/test_streamlines.py
--- /dev/null
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -0,0 +1,22 @@
+from yt.testing import *
+from yt.visualization.api import Streamlines
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+_fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
+
+def test_streamlines():
+    # We decompose in different ways
+    cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
+    cs = np.array([a.ravel() for a in cs]).T
+    length = (1.0/128) * 16 # 16 half-widths of a cell
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
+        streams = Streamlines(pf, cs, length=length)
+        streams.integrate_through_volume()
+        for path in (streams.path(i) for i in range(8)):
+            yield assert_rel_equal, path['dts'].sum(), 1.0, 14
+            yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            path["Density"]


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/enzo/answer_testing_support.py
--- /dev/null
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -0,0 +1,113 @@
+"""
+Answer Testing support for Enzo.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.config import ytcfg
+from yt.mods import *
+
+from yt.utilities.answer_testing.framework import \
+     AnswerTestingTest, \
+     can_run_pf, \
+     FieldValuesTest, \
+     GridHierarchyTest, \
+     GridValuesTest, \
+     ProjectionValuesTest, \
+     ParentageRelationshipsTest, \
+     temp_cwd
+
+def requires_outputlog(path = ".", prefix = ""):
+    def ffalse(func):
+        return lambda: None
+    def ftrue(func):
+        @wraps(func)
+        def fyielder(*args, **kwargs):
+            with temp_cwd(path):
+                for t in func(*args, **kwargs):
+                    if isinstance(t, AnswerTestingTest):
+                        t.prefix = prefix
+                    yield t
+        return fyielder
+    if os.path.exists("OutputLog"):
+        return ftrue
+    with temp_cwd(path):
+        if os.path.exists("OutputLog"):
+            return ftrue
+    return ffalse
+     
+def standard_small_simulation(pf_fn, fields):
+    if not can_run_pf(pf_fn): return
+    dso = [None]
+    yield GridHierarchyTest(pf_fn)
+    yield ParentageRelationshipsTest(pf_fn)
+    for field in fields:
+        yield GridValuesTest(pf_fn, field)
+        if 'particle' in field: continue
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                for weight_field in [None, "Density"]:
+                    yield ProjectionValuesTest(
+                        pf_fn, axis, field, weight_field,
+                        ds)
+                yield FieldValuesTest(
+                        pf_fn, field, ds)
+                    
+class ShockTubeTest(object):
+    def __init__(self, data_file, solution_file, fields, 
+                 left_edges, right_edges, rtol, atol):
+        self.solution_file = solution_file
+        self.data_file = data_file
+        self.fields = fields
+        self.left_edges = left_edges
+        self.right_edges = right_edges
+        self.rtol = rtol
+        self.atol = atol
+
+    def __call__(self):
+        # Read in the pf
+        pf = load(self.data_file)  
+        exact = self.get_analytical_solution() 
+
+        ad = pf.h.all_data()
+        position = ad['x']
+        for k in self.fields:
+            field = ad[k]
+            for xmin, xmax in zip(self.left_edges, self.right_edges):
+                mask = (position >= xmin)*(position <= xmax)
+                exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                # yield test vs analytical solution 
+                yield assert_allclose, field[mask], exact_field, \
+                    self.rtol, self.atol
+
+    def get_analytical_solution(self):
+        # Reads in from file 
+        pos, dens, vel, pres, inte = \
+                np.loadtxt(self.solution_file, unpack=True)
+        exact = {}
+        exact['pos'] = pos
+        exact['Density'] = dens
+        exact['x-velocity'] = vel
+        exact['Pressure'] = pres
+        exact['ThermalEnergy'] = inte
+        return exact
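
For illustration (not part of the changeset), ShockTubeTest wired up as a
nose-style yielding test; the data and solution file names here are
hypothetical.

    # __call__ yields (assert_allclose, actual, expected, rtol, atol)
    # tuples, so a test function can simply re-yield them.
    def test_sod_shock_tube():
        test = ShockTubeTest("DD0001/data0001", "sod_exact.txt",
                             ["Density", "x-velocity", "Pressure"],
                             left_edges=[0.0], right_edges=[1.0],
                             rtol=1e-5, atol=1e-7)
        for t in test():
            yield t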


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -241,9 +241,13 @@
             if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
             my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
 
-        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
-                                parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+        init_outputs = []
+        for output in my_outputs:
+            if os.path.exists(output['filename']):
+                init_outputs.append(output['filename'])
+            
+        TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(init_outputs))
 
     def _parse_parameter_file(self):
         """
@@ -478,7 +482,7 @@
         self.parameters['TopGridRank'] = 3
         self.parameters['DomainLeftEdge'] = np.zeros(self.parameters['TopGridRank'])
         self.parameters['DomainRightEdge'] = np.ones(self.parameters['TopGridRank'])
-        self.parameters['Refineby'] = 2 # technically not the enzo default
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
         self.parameters['StopCycle'] = 100000
         self.parameters['dtDataDump'] = 0.
         self.parameters['CycleSkipDataDump'] = 0.
@@ -585,6 +589,8 @@
         if outputs is None:
             outputs = self.all_outputs
         my_outputs = []
+        if not outputs:
+            return my_outputs
         for value in values:
             outputs.sort(key=lambda obj:np.fabs(value - obj[key]))
             if (tolerance is None or np.abs(value - outputs[0][key]) <= tolerance) \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -274,6 +274,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -99,8 +99,8 @@
         add_field(fn1.split("_")[0] + "_Density",
                   function=_get_density(fn1), take_log=True,
                   display_name="%s\/Density" % fn1.split("_")[0],
-                  units = r"\rm{g}/\rm{cm}^3",
-                  projected_units = r"\rm{g}/\rm{cm}^2",
+                  units = r"\rm{g}/\rm{cm}^{3}",
+                  projected_units = r"\rm{g}/\rm{cm}^{2}",
                   )
 
 def _get_convert(fname):
@@ -110,8 +110,8 @@
 
 add_flash_field("dens", function=NullFunc, take_log=True,
                 convert_function=_get_convert("dens"),
-                units=r"\rm{g}/\rm{cm}^3",
-                projected_units = r"\rm{g}/\rm{cm}^2"),
+                units=r"\rm{g}/\rm{cm}^{3}",
+                projected_units = r"\rm{g}/\rm{cm}^{2}"),
 add_flash_field("velx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("velx"),
                 units=r"\rm{cm}/\rm{s}")
@@ -159,10 +159,10 @@
                 units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
-                units=r"\rm{erg}\//\/\rm{cm}^{3}")
+                units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
-                units=r"\rm{g}/\rm{cm}^3")
+                units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -174,7 +174,7 @@
                 units = r"\mathrm{Gau\ss}")
 add_flash_field("magp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("magp"),
-                units = r"\rm{erg}\//\/\rm{cm}^{3}")
+                units = r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("divb", function=NullFunc, take_log=False,
                 convert_function=_get_convert("divb"),
                 units = r"\mathrm{Gau\ss}\/\rm{cm}")
@@ -186,10 +186,10 @@
                 units=r"\rm{ratio\/of\/specific\/heats}")
 add_flash_field("gpot", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpot"),
-                units=r"\rm{ergs\//\/g}")
+                units=r"\rm{ergs}/\rm{g}")
 add_flash_field("gpol", function=NullFunc, take_log=False,
                 convert_function=_get_convert("gpol"),
-                units = r"\rm{ergs\//\/g}")
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
 


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -29,7 +29,9 @@
       StreamHierarchy, \
       StreamStaticOutput, \
       StreamHandler, \
-      load_uniform_grid
+      load_uniform_grid, \
+      load_amr_grids, \
+      refine_amr
 
 from .fields import \
       KnownStreamFields, \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -44,6 +44,8 @@
     decompose_array, get_psize
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
+from yt.utilities.flagging_methods import \
+    FlaggingGrid
 
 from .fields import \
     StreamFieldInfo, \
@@ -395,3 +397,168 @@
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
     return spf
+
+def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
+                   sim_time=0.0, number_of_particles=0):
+    r"""Load a set of grids of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a sequence of grids of varying resolution of data to be
+    loaded directly into yt and analyzed as would any others.  This comes with
+    several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+        * No consistency checks are performed on the hierarchy
+
+    Parameters
+    ----------
+    grid_data : list of dicts
+        This is a list of dicts.  Each dict must have entries "left_edge",
+        "right_edge", "dimensions", "level", and then any remaining entries are
+        assumed to be fields.  This will be modified in place and can't be
+        assumed to be static.
+    domain_dimensions : array_like
+        This is the domain dimensions of the grid
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    number_of_particles : int, optional
+        If particle fields are included, set this to the number of particles
+
+    Examples
+    --------
+
+    >>> grid_data = [
+    ...     dict(left_edge = [0.0, 0.0, 0.0],
+    ...          right_edge = [1.0, 1.0, 1.],
+    ...          level = 0,
+    ...          dimensions = [32, 32, 32]),
+    ...     dict(left_edge = [0.25, 0.25, 0.25],
+    ...          right_edge = [0.75, 0.75, 0.75],
+    ...          level = 1,
+    ...          dimensions = [32, 32, 32])
+    ... ]
+    ... 
+    >>> for g in grid_data:
+    ...     g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    ...
+    >>> pf = load_amr_grids(grid_data, [32, 32, 32], 1.0)
+    """
+
+    domain_dimensions = np.array(domain_dimensions)
+    ngrids = len(grid_data)
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros((ngrids, 1), dtype='int32')
+    grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
+    grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    sfh = StreamDictFieldHandler()
+    for i, g in enumerate(grid_data):
+        grid_left_edges[i,:] = g.pop("left_edge")
+        grid_right_edges[i,:] = g.pop("right_edge")
+        grid_dimensions[i,:] = g.pop("dimensions")
+        grid_levels[i,:] = g.pop("level")
+        sfh[i] = g
+
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        None, # parent_ids is none
+        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        np.zeros(ngrids).reshape((ngrids,1)),
+        sfh,
+    )
+
+    handler.name = "AMRGridData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+    return spf
+
+def refine_amr(base_pf, refinement_criteria, fluid_operators, max_level,
+               callback = None):
+    r"""Given a base parameter file, repeatedly apply refinement criteria and
+    fluid operators until a maximum level is reached.
+
+    Parameters
+    ----------
+    base_pf : StaticOutput
+        This is any static output.  It can also be a stream static output, for
+        instance as returned by load_uniform_grid.
+    refinement_criteria : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod`
+        These criteria will be applied in sequence to identify cells that need
+        to be refined.
+    fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator`
+        These fluid operators will be applied in sequence to all resulting
+        grids.
+    max_level : int
+        The maximum level to which the data will be refined
+    callback : function, optional
+        A function that will be called at the beginning of each refinement
+        cycle, with the current parameter file.
+
+    Examples
+    --------
+    >>> domain_dims = (32, 32, 32)
+    >>> data = np.zeros(domain_dims) + 0.25
+    >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+    >>> rc = [fm.flagging_method_registry["overdensity"](8.0)]
+    >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
+    >>> pf = refine_amr(ug, rc, fo, 5)
+    """
+    last_gc = base_pf.h.num_grids
+    cur_gc = -1
+    pf = base_pf    
+    while pf.h.max_level < max_level and last_gc != cur_gc:
+        mylog.info("Refining another level.  Current max level: %s",
+                  pf.h.max_level)
+        last_gc = pf.h.grids.size
+        for m in fluid_operators: m.apply(pf)
+        if callback is not None: callback(pf)
+        grid_data = []
+        for g in pf.h.grids:
+            gd = dict( left_edge = g.LeftEdge,
+                       right_edge = g.RightEdge,
+                       level = g.Level,
+                       dimensions = g.ActiveDimensions )
+            for field in pf.h.field_list:
+                gd[field] = g[field]
+            grid_data.append(gd)
+            if g.Level < pf.h.max_level: continue
+            fg = FlaggingGrid(g, refinement_criteria)
+            nsg = fg.find_subgrids()
+            for sg in nsg:
+                LE = sg.left_index * g.dds
+                dims = sg.dimensions * pf.refine_by
+                grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
+                gd = dict(left_edge = LE, right_edge = grid.right_edge,
+                          level = g.Level + 1, dimensions = dims)
+                for field in pf.h.field_list:
+                    gd[field] = grid[field]
+                grid_data.append(gd)
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        cur_gc = pf.h.num_grids
+    return pf


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import __builtin__
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
@@ -310,7 +311,7 @@
     maxval = max(maxval, 1)
     from yt.config import ytcfg
     if ytcfg.getboolean("yt", "suppressStreamLogging") or \
-       ytcfg.getboolean("yt", "ipython_notebook") or \
+       "__IPYTHON__" in dir(__builtin__) or \
        ytcfg.getboolean("yt", "__withintesting"):
         return DummyProgressBar()
     elif ytcfg.getboolean("yt", "__withinreason"):
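
For illustration (not part of the changeset), the detection used above in
isolation: IPython injects an __IPYTHON__ flag into __builtin__, so its
presence distinguishes IPython sessions from plain Python.

    # Minimal sketch of the same check.
    import __builtin__

    def within_ipython():
        return "__IPYTHON__" in dir(__builtin__)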


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -58,7 +58,7 @@
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
-    derived_field, add_field, FieldInfo, \
+    derived_field, add_field, add_grad, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
     TimeSeriesData, AnalysisTask, analysis_task, \


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -26,9 +26,10 @@
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
-    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp
+    assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
+    assert_allclose
 
-def assert_rel_equal(a1, a2, decimals):
+def assert_rel_equal(a1, a2, decimals, err_msg=''):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -38,7 +39,7 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -34,7 +34,9 @@
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
+from yt.data_objects.static_output import StaticOutput
 import cPickle
+import shelve
 
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -60,6 +62,8 @@
             help="The name we'll call this set of tests")
         parser.add_option("--answer-store", dest="store_results",
             default=False, action="store_true")
+        parser.add_option("--local-store", dest="store_local_results",
+            default=False, action="store_true", help="Store/Load local results?")
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
@@ -75,40 +79,47 @@
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name is not None:
-            # Now we grab from our S3 store
-            if options.compare_name == "latest":
-                options.compare_name = _latest
+        if options.compare_name == "SKIP":
+            options.compare_name = None
+        elif options.compare_name == "latest":
+            options.compare_name = _latest
+
+        # We only either store or test.
+        if options.store_local_results:
+            if options.compare_name is not None:
+                options.compare_name = "%s/%s" % \
+                        (os.path.realpath(options.output_dir), 
+                         options.compare_name)
             AnswerTestingTest.reference_storage = \
-                AnswerTestOpener(options.compare_name)
-        self.answer_name = options.this_name
+                self.storage = \
+                    AnswerTestLocalStorage(options.compare_name, 
+                                           not options.store_results)
+        else:
+            AnswerTestingTest.reference_storage = \
+                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+
         self.store_results = options.store_results
+        self.store_local_results = options.store_local_results
         global run_big_data
         run_big_data = options.big_data
 
-    def finalize(self, result):
-        # This is where we dump our result storage up to Amazon, if we are able
-        # to.
+    def finalize(self, result=None):
         if self.store_results is False: return
-        import boto
-        from boto.s3.key import Key
-        c = boto.connect_s3()
-        bucket = c.get_bucket("yt-answer-tests")
-        for pf_name in self.result_storage:
-            rs = cPickle.dumps(self.result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
-            if tk is not None: tk.delete()
-            k = Key(bucket)
-            k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
-            k.set_acl("public-read")
+        self.storage.dump(self.result_storage)        
 
-class AnswerTestOpener(object):
-    def __init__(self, reference_name):
+class AnswerTestStorage(object):
+    def __init__(self, reference_name, read=True):
         self.reference_name = reference_name
         self.cache = {}
+        self.read = read
+    def dump(self, result_storage):
+        raise NotImplementedError 
+    def get(self, pf_name, default=None):
+        raise NotImplementedError 
 
+class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
+        if not self.read: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
@@ -123,6 +134,47 @@
         self.cache[pf_name] = rv
         return rv
 
+    def dump(self, result_storage):
+        if self.read: return
+        # This is where we dump our result storage up to Amazon, if we are able
+        # to.
+        import boto
+        from boto.s3.key import Key
+        c = boto.connect_s3()
+        bucket = c.get_bucket("yt-answer-tests")
+        for pf_name in result_storage:
+            rs = cPickle.dumps(result_storage[pf_name])
+            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            if tk is not None: tk.delete()
+            k = Key(bucket)
+            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.set_contents_from_string(rs)
+            k.set_acl("public-read")
+
+class AnswerTestLocalStorage(AnswerTestStorage):
+    def dump(self, result_storage):
+        if self.read: return 
+        # Store data using shelve
+        ds = shelve.open(self.reference_name, protocol=-1)
+        for pf_name in result_storage:
+            answer_name = "%s" % pf_name
+            if answer_name in ds:
+                mylog.info("Overwriting %s", answer_name)
+            ds[answer_name] = result_storage[pf_name]
+        ds.close()
+
+    def get(self, pf_name, default=None):
+        if not self.read: return default
+        # Read data using shelve
+        answer_name = "%s" % pf_name
+        ds = shelve.open(self.reference_name, protocol=-1)
+        try:
+            result = ds[answer_name]
+        except KeyError:
+            result = default
+        ds.close()
+        return result
+
 @contextlib.contextmanager
 def temp_cwd(cwd):
     oldcwd = os.getcwd()
@@ -132,6 +184,10 @@
 
 def can_run_pf(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -141,26 +197,48 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if not os.path.isdir(path):
+        return False
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
         return pf
 
+def sim_dir_load(sim_fn, path = None, sim_type = "Enzo",
+                 find_outputs=False):
+    if path is None and not os.path.exists(sim_fn):
+        raise IOError
+    if os.path.exists(sim_fn) or not path:
+        path = "."
+    with temp_cwd(path):
+        return simulation(sim_fn, sim_type,
+                          find_outputs=find_outputs)
+
 class AnswerTestingTest(object):
     reference_storage = None
+    result_storage = None
+    prefix = ""
     def __init__(self, pf_fn):
         self.pf = data_dir_load(pf_fn)
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage is not None:
-            dd = self.reference_storage.get(str(self.pf))
-            if dd is None: raise YTNoOldAnswer()
+        if self.reference_storage.read and \
+           self.reference_storage.reference_name is not None:
+            dd = self.reference_storage.get(self.storage_name)
+            if dd is None: raise YTNoOldAnswer(self.storage_name)
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
             ov = None
-        self.result_storage[str(self.pf)][self.description] = nv
+        self.result_storage[self.storage_name][self.description] = nv
+
+    @property
+    def storage_name(self):
+        if self.prefix != "":
+            return "%s_%s" % (self.prefix, self.pf)
+        return str(self.pf)
 
     def compare(self, new_result, old_result):
         raise RuntimeError
@@ -211,10 +289,12 @@
     _type_name = "FieldValues"
     _attrs = ("field", )
 
-    def __init__(self, pf_fn, field, obj_type = None):
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
         super(FieldValuesTest, self).__init__(pf_fn)
         self.obj_type = obj_type
         self.field = field
+        self.decimals = decimals
 
     def run(self):
         obj = self.create_obj(self.pf, self.obj_type)
@@ -224,19 +304,50 @@
         return np.array([avg, mi, ma])
 
     def compare(self, new_result, old_result):
-        assert_equal(new_result, old_result)
+        err_msg = "Field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg)
 
+class AllFieldValuesTest(AnswerTestingTest):
+    _type_name = "AllFieldValues"
+    _attrs = ("field", )
+
+    def __init__(self, pf_fn, field, obj_type = None,
+                 decimals = None):
+        super(AllFieldValuesTest, self).__init__(pf_fn)
+        self.obj_type = obj_type
+        self.field = field
+        self.decimals = decimals
+
+    def run(self):
+        obj = self.create_obj(self.pf, self.obj_type)
+        return obj[self.field]
+
+    def compare(self, new_result, old_result):
+        err_msg = "All field values for %s not equal." % self.field
+        if self.decimals is None:
+            assert_equal(new_result, old_result, 
+                         err_msg=err_msg, verbose=True)
+        else:
+            assert_rel_equal(new_result, old_result, self.decimals,
+                             err_msg=err_msg)
+            
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
 
     def __init__(self, pf_fn, axis, field, weight_field = None,
-                 obj_type = None):
+                 obj_type = None, decimals = None):
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
         self.weight_field = weight_field
         self.obj_type = obj_type
+        self.decimals = decimals
 
     def run(self):
         if self.obj_type is not None:
@@ -253,7 +364,14 @@
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
-            assert_equal(new_result[k], old_result[k])
+            err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
+              (k, self.field, self.weight_field, self.axis)
+            if self.decimals is None:
+                assert_equal(new_result[k], old_result[k],
+                             err_msg=err_msg)
+            else:
+                assert_rel_equal(new_result[k], old_result[k], 
+                                 self.decimals, err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -312,6 +430,26 @@
         for k in new_result:
             assert_equal(new_result[k], old_result[k])
 
+class VerifySimulationSameTest(AnswerTestingTest):
+    _type_name = "VerifySimulationSame"
+    _attrs = ()
+
+    def __init__(self, simulation_obj):
+        self.pf = simulation_obj
+
+    def run(self):
+        result = [ds.current_time for ds in self.pf]
+        return result
+
+    def compare(self, new_result, old_result):
+        assert_equal(len(new_result), len(old_result),
+                     err_msg="Number of outputs not equal.",
+                     verbose=True)
+        for i in range(len(new_result)):
+            assert_equal(new_result[i], old_result[i],
+                         err_msg="Output times not equal.",
+                         verbose=True)
+        
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()


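For context, a minimal sketch of how the new decimals keyword is meant to be
used; the dataset path and prefix below are placeholders, and in practice the
answer-testing harness constructs and invokes these objects itself:

    # Hedged sketch: with decimals set, compare() uses assert_rel_equal
    # instead of exact assert_equal.
    test = FieldValuesTest("IsolatedGalaxy/galaxy0030/galaxy0030",
                           "Density", decimals=8)
    test.prefix = "run42"   # results keyed as "run42_<pf>" via storage_name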
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -25,6 +25,7 @@
 
 # We don't need to import 'exceptions'
 #import exceptions
+import os.path
 
 class YTException(Exception):
     def __init__(self, pf = None):
@@ -172,3 +173,12 @@
 
     def __str__(self):
         return "Must have A>=B>=C"
+
+class EnzoTestOutputFileNonExistent(YTException):
+    def __init__(self, filename):
+        self.filename = filename
+        self.testname = os.path.basename(os.path.dirname(filename))
+
+    def __str__(self):
+        return "Enzo test output file (OutputLog) not generated for: " + \
+            "'%s'" % (self.testname) + ".\nTest did not complete."


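A quick sketch of the new exception in use; the OutputLog path is
hypothetical:

    from yt.utilities.exceptions import EnzoTestOutputFileNonExistent
    try:
        raise EnzoTestOutputFileNonExistent("runs/GravityTest/OutputLog")
    except EnzoTestOutputFileNonExistent as err:
        print err   # Enzo test output file (OutputLog) not generated for:
                    # 'GravityTest'. Test did not complete.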
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -24,15 +24,10 @@
 """
 
 import numpy as np # For modern purposes
+from yt.utilities.lib import grow_flagging_field
 
 flagging_method_registry = {}
 
-def flag_cells(grid, methods):
-    flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
-    for method in methods:
-        flagged |= method(grid)
-    return flagged
-
 class FlaggingMethod(object):
     _skip_add = False
     class __metaclass__(type):
@@ -46,6 +41,144 @@
     def __init__(self, over_density):
         self.over_density = over_density
 
-    def __call__(self, pf, grid):
-        rho = grid["Density"] / (pf.refine_by**grid.Level)
+    def __call__(self, grid):
+        rho = grid["Density"] / (grid.pf.refine_by**grid.Level)
         return (rho > self.over_density)
+
+class FlaggingGrid(object):
+    def __init__(self, grid, methods):
+        self.grid = grid
+        flagged = np.zeros(grid.ActiveDimensions, dtype="bool")
+        for method in methods:
+            flagged |= method(self.grid)
+        self.flagged = grow_flagging_field(flagged)
+        self.subgrids = []
+        self.left_index = grid.get_global_startindex()
+        self.dimensions = grid.ActiveDimensions.copy()
+
+    def find_subgrids(self):
+        if not np.any(self.flagged): return []
+        psg = ProtoSubgrid(self.flagged, self.left_index, self.dimensions)
+        sgl = [psg]
+        index = 0
+        while index < len(sgl):
+            psg = sgl[index]
+            psg.shrink()
+            if psg.dimensions.prod() == 0:
+                sgl[index] = None
+                index += 1
+                continue
+            while not psg.acceptable:
+                new_psgs = []
+                for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]):
+                    new_psgs = psg.find_by_zero_signature(dim)
+                    if len(new_psgs) > 1:
+                        break
+                if len(new_psgs) <= 1:
+                    new_psgs = psg.find_by_second_derivative()
+                psg = new_psgs[0]
+                sgl[index] = psg 
+                sgl.extend(new_psgs[1:])
+                psg.shrink()
+            index += 1
+        return [s for s in sgl if s is not None]
+
+
+# Much or most of this is directly translated from Enzo
+class ProtoSubgrid(object):
+
+    def __init__(self, flagged_base, left_index, dimensions, offset = (0,0,0)):
+        self.left_index = left_index.copy()
+        self.dimensions = dimensions.copy()
+        self.flagged = flagged_base[offset[0]:offset[0]+dimensions[0],
+                                    offset[1]:offset[1]+dimensions[1],
+                                    offset[2]:offset[2]+dimensions[2]]
+        self.compute_signatures()
+
+    def compute_signatures(self):
+        self.sigs = []
+        for dim in range(3):
+            # Sum over the two axes other than dim; after the first sum the
+            # remaining axes shift down, so the second axis index is 1 only
+            # when dim == 0.
+            d1 = (dim + 1) % 3
+            d2 = (dim == 0)
+            self.sigs.append(self.flagged.sum(axis=d1).sum(axis=d2))
+
+    @property
+    def acceptable(self):
+        return float(self.flagged.sum()) / self.flagged.size > 0.2
+
+    def shrink(self):
+        new_ind = []
+        for dim in range(3):
+            sig = self.sigs[dim]
+            new_start = 0
+            while sig[new_start] == 0:
+                new_start += 1
+            new_end = sig.size 
+            while sig[new_end - 1] == 0:
+                new_end -= 1
+            self.dimensions[dim] = new_end - new_start
+            self.left_index[dim] += new_start
+            new_ind.append((new_start, new_end))
+        self.flagged = self.flagged[new_ind[0][0]:new_ind[0][1],
+                                    new_ind[1][0]:new_ind[1][1],
+                                    new_ind[2][0]:new_ind[2][1]]
+        self.compute_signatures()
+
+    def find_by_zero_signature(self, dim):
+        sig = self.sigs[dim]
+        grid_ends = np.zeros((sig.size, 2), dtype="int64")
+        ng = 0
+        i = 0
+        while i < sig.size:
+            if sig[i] != 0:
+                grid_ends[ng, 0] = i
+                while i < sig.size and sig[i] != 0:
+                    i += 1
+                grid_ends[ng, 1] = i - 1
+                ng += 1
+            i += 1
+        new_grids = []
+        for si, ei in grid_ends[:ng,:]:
+            li = self.left_index.copy()
+            dims = self.dimensions.copy()
+            li[dim] += si
+            dims[dim] = ei - si + 1  # grid_ends stores inclusive endpoints
+            offset = [0,0,0]
+            offset[dim] = si
+            new_grids.append(ProtoSubgrid(self.flagged, li, dims, offset))
+        return new_grids
+
+    def find_by_second_derivative(self):
+        max_strength = 0
+        max_axis = -1
+        max_ind = -1
+        for dim in range(3):
+            sig = self.sigs[dim]
+            sd = sig[:-2] - 2.0*sig[1:-1] + sig[2:]
+            center = int((self.flagged.shape[dim] - 1) / 2)
+            strength = zero_strength = 0
+            zero_cross = -1
+            for i in range(1, sig.size-2):
+                # Note that sd is offset by one
+                if sd[i-1] * sd[i] < 0:
+                    strength = np.abs(sd[i-1] - sd[i])
+                    if strength > zero_strength or \
+                       (strength == zero_strength and np.abs(center - i) < np.abs(zero_cross - i)):
+                        zero_strength = strength
+                        zero_cross = i
+            if zero_strength > max_strength:
+                max_strength = zero_strength
+                max_axis = dim
+                max_ind = zero_cross
+        dims = self.dimensions.copy()
+        li = self.left_index.copy()
+        dims[max_axis] = max_ind
+        psg1 = ProtoSubgrid(self.flagged, li, dims)
+        li[max_axis] += max_ind
+        dims[max_axis] = self.dimensions[max_axis] - max_ind
+        offset = np.zeros(3, dtype="int64")
+        offset[max_axis] = max_ind
+        psg2 = ProtoSubgrid(self.flagged, li, dims, offset)
+        return [psg1, psg2]
+
+    def __str__(self):
+        return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions)


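A hedged sketch of the refactored flagging flow, assuming pf is an
already-loaded dataset:

    from yt.utilities.flagging_methods import \
        flagging_method_registry, FlaggingGrid
    method = flagging_method_registry["overdensity"](0.75)
    fg = FlaggingGrid(pf.h.grids[0], [method])  # flags, then grows by one zone
    for sg in fg.find_subgrids():               # list of ProtoSubgrid objects
        print sg                                # "LI: (...) DIMS: (...)"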
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/initial_conditions.py
--- /dev/null
+++ b/yt/utilities/initial_conditions.py
@@ -0,0 +1,80 @@
+"""
+Painting zones in a grid
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+class FluidOperator(object):
+    def apply(self, pf):
+        for g in pf.h.grids: self(g)
+
+class TopHatSphere(FluidOperator):
+    def __init__(self, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r, r)
+        ind = (r <= self.radius)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, val in self.fields.iteritems():
+            grid[field][ind] = val
+
+class CoredSphere(FluidOperator):
+    def __init__(self, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.maximum(r, cr2, r)
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select
+        for field, (outer_val, inner_val) in self.fields.iteritems():
+            val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
+            grid[field][ind] = val + inner_val
+
+class RandomFluctuation(FluidOperator):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __call__(self, grid, sub_select = None):
+        if sub_select is None:
+            sub_select = Ellipsis
+        for field, mag in self.fields.iteritems():
+            vals = grid[field][sub_select]
+            rc = 1.0 + (np.random.random(vals.shape) - 0.5) * mag
+            grid[field][sub_select] *= rc


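A short sketch of painting zones with the new operators; the field names and
values are illustrative, and pf is assumed to be a loaded dataset whose grid
field arrays accept in-place assignment:

    from yt.utilities.initial_conditions import \
        TopHatSphere, RandomFluctuation
    TopHatSphere(0.1, [0.5, 0.5, 0.5], {"Density": 1.0}).apply(pf)
    RandomFluctuation({"Density": 0.01}).apply(pf)  # +/- 0.5% multiplicative noise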
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -291,3 +291,25 @@
     # Return out unique values
     return best_dim, split, less_ids.view("bool"), greater_ids.view("bool")
 
+
+def grow_flagging_field(oofield):
+    # One-cell dilation: flag every zone whose 3x3x3 neighborhood contains a
+    # flagged zone, clipping at the domain edges.
+    cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8")
+    cdef np.ndarray[np.uint8_t, ndim=3] nfield
+    nfield = np.zeros_like(ofield)
+    cdef int i, j, k, ni, nj, nk
+    cdef int oi, oj, ok
+    for ni in range(ofield.shape[0]):
+        for nj in range(ofield.shape[1]):
+            for nk in range(ofield.shape[2]):
+                for oi in range(3):
+                    i = ni + (oi - 1)
+                    if i < 0 or i >= ofield.shape[0]: continue
+                    for oj in range(3):
+                        j = nj + (oj - 1)
+                        if j < 0 or j >= ofield.shape[1]: continue
+                        for ok in range(3):
+                            k = nk + (ok - 1)
+                            if k < 0 or k >= ofield.shape[2]: continue
+                            if ofield[i, j, k] == 1:
+                                nfield[ni, nj, nk] = 1
+    return nfield.astype("bool")


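To make the dilation concrete, a tiny sketch of what grow_flagging_field
returns for a single flagged zone:

    import numpy as np
    from yt.utilities.lib import grow_flagging_field
    field = np.zeros((8, 8, 8), dtype="bool")
    field[4, 4, 4] = True
    grown = grow_flagging_field(field)
    assert grown.sum() == 27  # the zone itself plus its 26 neighbors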
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -279,12 +279,12 @@
     ranks = None
     available_ranks = None
     tasks = None
-    workgroups = []
     def __init__(self):
         self.comm = communication_system.communicators[-1]
         self.size = self.comm.size
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
+        self.workgroups = []
     
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
@@ -296,7 +296,7 @@
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
@@ -304,16 +304,20 @@
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
     
     def free_workgroup(self, workgroup):
+        # If you want to actually delete the workgroup you will need to
+        # pop it out of the self.workgroups list so you don't have references
+        # that are left dangling, e.g. see free_all() below.
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
             self.available_ranks.append(i) 
-        del workgroup
         self.available_ranks.sort()
 
     def free_all(self):
         for wg in self.workgroups:
             self.free_workgroup(wg)
+        for i in range(len(self.workgroups)):
+            self.workgroups.pop(0)
 
     @classmethod
     def from_sizes(cls, sizes):
@@ -581,7 +585,9 @@
                     ncols, size = data.shape
             ncols = self.comm.allreduce(ncols, op=MPI.MAX)
             if ncols == 0:
-                    data = np.zeros(0, dtype=dtype) # This only works for
+                data = np.zeros(0, dtype=dtype) # This only works for
+            elif data is None:
+                data = np.zeros((ncols, 0), dtype=dtype)
             size = data.shape[-1]
             sizes = np.zeros(self.comm.size, dtype='int64')
             outsize = np.array(size, dtype='int64')
@@ -1058,3 +1064,49 @@
                 nextdim = (nextdim + 1) % 3
         return cuts
     
+class GroupOwnership(ParallelAnalysisInterface):
+    def __init__(self, items):
+        ParallelAnalysisInterface.__init__(self)
+        self.num_items = len(items)
+        self.items = items
+        assert(self.num_items >= self.comm.size)
+        self.owned = range(self.comm.size)
+        self.pointer = 0
+        if parallel_capable:
+            communication_system.push_with_ids([self.comm.rank])
+
+    def __del__(self):
+        if parallel_capable:
+            communication_system.pop()
+
+    def inc(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer >= self.num_items - self.comm.size: break
+            self.owned[self.pointer % self.comm.size] += self.comm.size
+            self.pointer += 1
+        if self.item is not old_item:
+            self.switch()
+            
+    def dec(self, n = -1):
+        old_item = self.item
+        if n == -1: n = self.comm.size
+        for i in range(n):
+            if self.pointer == 0: break
+            self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size
+            self.pointer -= 1
+        if self.item is not old_item:
+            self.switch()
+
+    _last = None
+    @property
+    def item(self):
+        own = self.owned[self.comm.rank]
+        if self._last != own:
+            self._item = self.items[own]
+            self._last = own
+        return self._item
+
+    def switch(self):
+        pass


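A hedged sketch of the workgroup bookkeeping after this change; it assumes an
MPI-enabled run (e.g. under mpirun) with enough ranks available:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool
    pool = ProcessorPool()
    pool.add_workgroup(size=2, name="io")
    pool.add_workgroup(size=2)  # default name is now str(len(self.workgroups))
    # ... do per-workgroup work ...
    pool.free_all()             # frees the ranks and empties pool.workgroups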
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/utilities/tests/test_flagging_methods.py
--- a/yt/utilities/tests/test_flagging_methods.py
+++ b/yt/utilities/tests/test_flagging_methods.py
@@ -9,4 +9,4 @@
 def test_over_density():
     od_flag = flagging_method_registry["overdensity"](0.75) 
     criterion = (pf.h.grids[0]["Density"] > 0.75)
-    assert( np.all( od_flag(pf, pf.h.grids[0]) == criterion) )
+    assert( np.all( od_flag(pf.h.grids[0]) == criterion) )


diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -29,6 +29,7 @@
     y_dict, \
     axis_names
 from .volume_rendering.api import off_axis_projection
+from yt.data_objects.image_array import ImageArray
 import _MPL
 import numpy as np
 import weakref
@@ -133,8 +134,9 @@
                              self.bounds, int(self.antialias),
                              self._period, int(self.periodic),
                              ).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
     def __setitem__(self, item, val):
         self.data[item] = val
@@ -145,6 +147,28 @@
             if f not in exclude:
                 self[f]
 
+    def _get_info(self, item):
+        info = {}
+        info['data_source'] = str(self.data_source)
+        info['axis'] = self.data_source.axis
+        info['field'] = str(item)
+        info['units'] = self.data_source.pf.field_info[item].get_units()
+        info['xlim'] = self.bounds[:2]
+        info['ylim'] = self.bounds[2:]
+        info['length_to_cm'] = self.data_source.pf['cm']
+        info['projected_units'] = \
+                self.data_source.pf.field_info[item].get_projected_units()
+        info['center'] = self.data_source.center
+        try:
+            info['coord'] = self.data_source.coord
+        except AttributeError:
+            pass
+        try:
+            info['weight_field'] = self.data_source.weight_field
+        except AttributeError:
+            pass
+        return info
+
     def convert_to_pixel(self, coords):
         r"""This function converts coordinates in code-space to pixel-space.
 
@@ -383,8 +407,9 @@
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
                                self.bounds).transpose()
-        self[item] = buff
-        return buff
+        ia = ImageArray(buff, info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 
 class OffAxisProjectionFixedResolutionBuffer(FixedResolutionBuffer):
@@ -406,7 +431,8 @@
                                    weight=ds.weight_field, volume=ds.volume,
                                    no_ghost=ds.no_ghost, interpolated=ds.interpolated,
                                    north_vector=ds.north_vector)
-        self[item] = buff.swapaxes(0,1)
-        return buff
+        ia = ImageArray(buff.swapaxes(0,1), info=self._get_info(item))
+        self[item] = ia
+        return ia 
 
 


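A sketch of the new return type; the slice construction and bounds are
illustrative, and pf is assumed to be a loaded dataset:

    from yt.visualization.fixed_resolution import FixedResolutionBuffer
    sl = pf.h.slice(2, 0.5, fields=["Density"])
    frb = FixedResolutionBuffer(sl, (0.0, 1.0, 0.0, 1.0), (800, 800))
    im = frb["Density"]                       # now an ImageArray, not a bare ndarray
    print im.info["units"], im.info["xlim"]   # metadata gathered by _get_info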
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -375,12 +375,10 @@
 class StreamlineCallback(PlotCallback):
     _type_name = "streamlines"
     def __init__(self, field_x, field_y, factor = 16,
-                 density = 1, arrowsize = 1, arrowstyle = None,
-                 color = None, normalize = False):
+                 density = 1, plot_args=None):
         """
-        annotate_streamlines(field_x, field_y, factor = 16, density = 1,
-                             arrowsize = 1, arrowstyle = None,
-                             color = None, normalize = False):
+        annotate_streamlines(field_x, field_y, factor = 16,
+                             density = 1, plot_args=None):
 
         Add streamlines to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints like
@@ -392,12 +390,8 @@
         self.bv_x = self.bv_y = 0
         self.factor = factor
         self.dens = density
-        self.arrowsize = arrowsize
-        if arrowstyle is None : arrowstyle='-|>'
-        self.arrowstyle = arrowstyle
-        if color is None : color = "#000000"
-        self.color = color
-        self.normalize = normalize
+        if plot_args is None: plot_args = {}
+        self.plot_args = plot_args
         
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -421,15 +415,10 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X,Y = (na.linspace(xx0,xx1,nx,endpoint=True),
-                          na.linspace(yy0,yy1,ny,endpoint=True))
-        if self.normalize:
-            nn = na.sqrt(pixX**2 + pixY**2)
-            pixX /= nn
-            pixY /= nn
-        plot._axes.streamplot(X,Y, pixX, pixY, density=self.dens,
-                              arrowsize=self.arrowsize, arrowstyle=self.arrowstyle,
-                              color=self.color, norm=self.normalize)
+        X,Y = (np.linspace(xx0,xx1,nx,endpoint=True),
+                          np.linspace(yy0,yy1,ny,endpoint=True))
+        plot._axes.streamplot(X,Y, pixX, pixY, density = self.dens,
+                              **self.plot_args)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
@@ -644,6 +633,9 @@
         y0, y1 = plot.ylim
         xx0, xx1 = plot._axes.get_xlim()
         yy0, yy1 = plot._axes.get_ylim()
+
+        extent = [xx0,xx1,yy0,yy1]
+
         plot._axes.hold(True)
 
         px_index = x_dict[plot.data.axis]
@@ -673,7 +665,7 @@
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, len(self.clumps)+1,
-                                     **self.plot_args)
+                                     extent=extent,**self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
@@ -743,13 +735,18 @@
         self.plot_args = plot_args
 
     def __call__(self, plot):
-        if len(self.pos) == 3:
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        if np.array(self.pos).shape == (3,):
             pos = (self.pos[x_dict[plot.data.axis]],
                    self.pos[y_dict[plot.data.axis]])
-        else: pos = self.pos
+        elif np.array(self.pos).shape == (2,):
+            pos = self.pos
         x,y = self.convert_to_plot(plot, pos)
         plot._axes.hold(True)
-        plot._axes.plot((x,),(y,),self.marker, **self.plot_args)
+        plot._axes.scatter(x,y, marker = self.marker, **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
 
 class SphereCallback(PlotCallback):
@@ -815,6 +812,7 @@
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
+        num = len(self.hop_output[:self.max_number])
         for halo in self.hop_output[:self.max_number]:
             size = halo.get_size()
             if size < self.min_size or size > self.max_size: continue
@@ -831,18 +829,19 @@
             (xi, yi) = (x_dict[plot.data.axis], y_dict[plot.data.axis])
 
             (center_x,center_y) = self.convert_to_plot(plot,(center[xi], center[yi]))
-            cir = Circle((center_x, center_y), radius, fill=False)
+            color = np.ones(3) * (0.4 * (num - halo.id)/ num) + 0.6
+            cir = Circle((center_x, center_y), radius, fill=False, color=color)
             plot._axes.add_patch(cir)
             if self.annotate:
                 if self.print_halo_size:
-                    plot._axes.text(center_x, center_y, "%s" % size,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % size,
+                    fontsize=self.font_size, color=color)
                 elif self.print_halo_mass:
-                    plot._axes.text(center_x, center_y, "%s" % halo.total_mass(),
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.total_mass(),
+                    fontsize=self.font_size, color=color)
                 else:
-                    plot._axes.text(center_x, center_y, "%s" % halo.id,
-                    fontsize=self.font_size)
+                    plot._axes.text(center_x+radius, center_y+radius, "%s" % halo.id,
+                    fontsize=self.font_size, color=color)
 
 class HopParticleCallback(PlotCallback):
     _type_name = "hop_particles"
@@ -1174,41 +1173,59 @@
           'kev': 1e-12 * 7.6e-8 / 6.03,
           'mev': 1e-15 * 7.6e-8 / 6.03,
           }
+    _bbox_dict = {'boxstyle': 'square,pad=0.6', 'fc': 'white', 'ec': 'black', 'alpha': 1.0}
 
-    def __init__(self, x, y, units=None, format="{time:.3G} {units}", **kwargs):
+    def __init__(self, x, y, units=None, format="{time:.3G} {units}", normalized=False, 
+                 bbox_dict=None, **kwargs):
         """ 
-        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}", **kwargs)
+        annotate_timestamp(x, y, units=None, format="{time:.3G} {units}",
+                           normalized=False, bbox_dict=None, **kwargs)
 
         Adds the current time to the plot at point given by *x* and *y*.  If *units* 
         is given ('s', 'ms', 'ns', etc.), it will convert the time to this basis.  If 
         *units* is None, it will attempt to figure out the correct value by which to 
         scale.  The *format* keyword is a template string that will be evaluated and 
-        displayed on the plot.  All other *kwargs* will be passed to the text() 
-        method on the plot axes.  See matplotlib's text() functions for more 
-        information.
+        displayed on the plot.  If *normalized* is true, *x* and *y* are interpreted 
+        as normalized plot coordinates (0,0 is lower-left and 1,1 is upper-right);
+        otherwise *x* and *y* are assumed to be in plot coordinates. The *bbox_dict*
+        is an optional dict of arguments for the bbox that frames the timestamp, see 
+        matplotlib's text annotation guide for more details. All other *kwargs* will 
+        be passed to the text() method on the plot axes.  See matplotlib's text() 
+        functions for more information.
         """
         self.x = x
         self.y = y
         self.format = format
         self.units = units
+        self.normalized = normalized
+        if bbox_dict is not None:
+            self.bbox_dict = bbox_dict
+        else:
+            self.bbox_dict = self._bbox_dict
         self.kwargs = {'color': 'w'}
         self.kwargs.update(kwargs)
 
     def __call__(self, plot):
         if self.units is None:
-            t = plot.data.pf.current_time
+            t = plot.data.pf.current_time * plot.data.pf['Time']
             scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
             self.units = 's'
             for k in scale_keys:
                 if t < self._time_conv[k]:
                     break
                 self.units = k
-        t = plot.data.pf.current_time / self._time_conv[self.units.lower()]
+        t = plot.data.pf.current_time * plot.data.pf['Time'] 
+        t /= self._time_conv[self.units.lower()]
         if self.units == 'us':
             self.units = '$\\mu s$'
         s = self.format.format(time=t, units=self.units)
         plot._axes.hold(True)
-        plot._axes.text(self.x, self.y, s, **self.kwargs)
+        if self.normalized:
+            plot._axes.text(self.x, self.y, s, horizontalalignment='center',
+                            verticalalignment='center', 
+                            transform = plot._axes.transAxes, bbox=self.bbox_dict)
+        else:
+            plot._axes.text(self.x, self.y, s, bbox=self.bbox_dict, **self.kwargs)
         plot._axes.hold(False)
 
 


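Taken together, a hedged sketch of the reworked callbacks; the field names
are illustrative and pf is assumed to be a loaded dataset:

    from yt.mods import SlicePlot
    p = SlicePlot(pf, "z", "Density")
    # streamline styling now flows through plot_args to streamplot()
    p.annotate_streamlines("x-velocity", "y-velocity",
                           plot_args={"color": "k", "arrowsize": 2})
    # timestamp in normalized (0-1) axes coordinates, framed by the bbox
    p.annotate_timestamp(0.05, 0.95, normalized=True)
    p.save()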
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -27,10 +27,20 @@
 import base64
 import matplotlib.figure
 from matplotlib.mathtext import MathTextParser
+from distutils import version
+import matplotlib
+
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
 try:
-    from matplotlib.pyparsing import ParseFatalException
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
 except ImportError:
     from pyparsing import ParseFatalException
+
 import cStringIO
 import types
 import __builtin__
@@ -60,6 +70,10 @@
     axis_labels
 from yt.utilities.math_utils import \
     ortho_find
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    GroupOwnership
+from yt.data_objects.time_series import \
+    TimeSeriesData
 
 def invalidate_data(f):
     @wraps(f)
@@ -209,7 +223,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False):
+                 periodic=True, origin='center-window', oblique=False, fontsize=15):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
         
@@ -239,6 +253,10 @@
             rendering is used during data deposition.
 
         """
+        if not hasattr(self, "pf"):
+            self.pf = data_source.pf
+            ts = self._initialize_dataset(self.pf) 
+            self.ts = ts
         self._initfinished = False
         self.center = None
         self.plots = {}
@@ -249,11 +267,40 @@
         self.antialias = True
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
+        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
             center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
+    def _initialize_dataset(self, ts):
+        if not isinstance(ts, TimeSeriesData):
+            if not iterable(ts): ts = [ts]
+            ts = TimeSeriesData(ts)
+        return ts
+
+    def __iter__(self):
+        for pf in self.ts:
+            mylog.warning("Switching to %s", pf)
+            self._switch_pf(pf)
+            yield self
+
+    def piter(self, *args, **kwargs):
+        for pf in self.ts.piter(*args, **kwargs):
+            self._switch_pf(pf)
+            yield self
+
+    def _switch_pf(self, new_pf):
+        ds = self.data_source
+        name = ds._type_name
+        kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        new_ds = getattr(new_pf.h, name)(**kwargs)
+        self.pf = new_pf
+        self.data_source = new_ds
+        self._data_valid = self._plot_valid = False
+        self._recreate_frb()
+        self._setup_plots()
+
     def __getitem__(self, item):
         return self.plots[item]
 
@@ -273,7 +320,6 @@
             self._frb._get_data_source_fields()
         else:
             for key in old_fields: self._frb[key]
-        self.pf = self._frb.pf
         self._data_valid = True
         
     def _setup_plots(self):
@@ -767,8 +813,10 @@
                 labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
                           r'$\rm{Image\/y'+axes_unit_label+'}$']
 
-            self.plots[f].axes.set_xlabel(labels[0])
-            self.plots[f].axes.set_ylabel(labels[1])
+            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
+            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+
+            self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
             field_name = self.data_source.pf.field_info[f].display_name
 
@@ -783,17 +831,18 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            try:
-                parser.parse(r'$'+md['units']+r'$')
-            except ParseFatalException, err:
-                raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-
             if md['units'] == None or md['units'] == '':
                 label = field_name
             else:
+                try:
+                    parser.parse(r'$'+md['units']+r'$')
+                except ParseFatalException, err:
+                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
                 label = field_name+r'$\/\/('+md['units']+r')$'
 
-            self.plots[f].cb.set_label(label)
+            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+
+            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
             self.run_callbacks(f)
 
@@ -920,7 +969,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window'):
+                 origin='center-window', fontsize=15):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -976,6 +1025,8 @@
              to the bottom-left hand corner of the simulation domain, 'center-domain',
              corresponding the center of the simulation domain, or 'center-window' for 
              the center of the plot window.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
              
         Examples
         --------
@@ -987,8 +1038,12 @@
         >>> p.save('sliceplot')
         
         """
+        # This will handle time series data and controllers
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         slc = pf.h.slice(axis, center[axis], fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
@@ -998,7 +1053,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window'):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1058,6 +1113,8 @@
             The name of the weighting field.  Set to None for no weight.
         max_level: int
             The maximum level to project to.
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         
         Examples
         --------
@@ -1069,8 +1126,11 @@
         >>> p.save('sliceplot')
         
         """
+        ts = self._initialize_dataset(pf) 
+        self.ts = ts
+        pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds,center) = GetBoundsAndCenter(axis,center,width,pf)
+        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
@@ -1080,7 +1140,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 axes_unit=None, north_vector=None):
+                 axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1117,7 +1177,8 @@
             A vector defining the 'up' direction in the plot.  This
             option sets the orientation of the slicing plane.  If not
             set, an arbitrary grid-aligned north-vector is chosen.
-
+        fontsize : integer
+             The size of the fonts for the axis, colorbar, and tick labels.
         """
         (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
         cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
@@ -1156,7 +1217,7 @@
     def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
                  depth=(1,'unitary'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False):
+                 le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
 
         Given a pf object, a normal vector to project along, and
@@ -1492,4 +1553,3 @@
                                       norm = norm, vmin = self.zmin, 
                                       vmax = self.zmax, cmap = cmap)
         self.image.axes.ticklabel_format(scilimits=(-4,3))
-


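A sketch of the new time-series behavior in PlotWindow; the DD????/DD????
glob is a placeholder for a set of Enzo outputs:

    from yt.mods import SlicePlot, TimeSeriesData
    ts = TimeSeriesData.from_filenames("DD????/DD????")
    p = SlicePlot(ts, "x", "Density")  # initializes from the first output
    for frame in p.piter():            # parallel-aware iteration over outputs
        frame.save()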
diff -r 7f0e191c686919d4932ec0c178f41521f75a90c2 -r 8cee6c380933435f0f09218c0e489c73517dff56 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -118,7 +118,9 @@
         if length is None:
             length = np.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
         self.length = length
-        self.steps = int(length/dx)
+        self.steps = int(length/dx)+1
+        # Fix up the dx.
+        self.dx = 1.0*self.length/self.steps
         self.streamlines = np.zeros((self.N,self.steps,3), dtype='float64')
         self.magnitudes = None
         if self.get_magnitude:
@@ -206,5 +208,6 @@
         >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
         
         """
-        return AMRStreamlineBase(self.streamlines[streamline_id], pf=self.pf)
+        return self.pf.h.streamline(self.streamlines[streamline_id],
+                                    length = self.length)

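Finally, a sketch of retrieving an integrated path through the new
h.streamline container; the seed position and fields are illustrative, and
the path(streamline_id) accessor is the method whose return value changes
above:

    import numpy as np
    from yt.visualization.api import Streamlines
    streamlines = Streamlines(pf, np.array([[0.5, 0.5, 0.5]]),
                              "x-velocity", "y-velocity", "z-velocity")
    streamlines.integrate_through_volume()
    stream = streamlines.path(0)   # data container along the streamline
    # matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')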
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list