[yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Sun Dec 9 13:04:25 PST 2012


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/2a0f5a24ed21/
changeset:   2a0f5a24ed21
branch:      yt
user:        jzuhone
date:        2012-12-09 22:03:08
summary:     Passing the 'center' attribute to the slice just as we do for the projection. Otherwise certain callbacks (particles, etc.) are broken.
affected #:  1 file

diff -r fa360d6c81c39378a925cc88cb67ac2b44248ba7 -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1059,7 +1059,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        slc = pf.h.slice(axis, center[axis], fields=fields)
+        slc = pf.h.slice(axis, center[axis], center=center, fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 



https://bitbucket.org/yt_analysis/yt/changeset/7fe902d74ea4/
changeset:   7fe902d74ea4
branch:      yt
user:        jzuhone
date:        2012-12-09 22:04:00
summary:     Merging
affected #:  22 files

diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -434,8 +434,7 @@
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
-echo 'd91fa23d8f86c5f5b3df210731c14c92a2e3d999979ef2c4c67be3dda19a8a8d2363f8cd23611957280c17f61653b0e66f59d3f5515b6d443b22cddeaef86ab6  Rockstar-0.99.tar.gz' > Rockstar-0.99.tar.gz.sha512
-
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -459,7 +458,7 @@
 get_ytproject nose-1.2.1.tar.gz 
 get_ytproject python-hglib-0.2.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
-get_ytproject Rockstar-0.99.tar.gz
+get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -705,14 +704,14 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar-0.99/done ]
+    if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar-0.99 ] && tar xfz Rockstar-0.99.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
-        cd Rockstar-0.99
+        cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar-0.99
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@
 import distribute_setup
 distribute_setup.use_setuptools()
 
+from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils import log
 from distutils import version
@@ -110,6 +111,42 @@
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 
+def get_mercurial_changeset_id(target_dir):
+    """adapted from a script by Jason F. Harris, published at
+
+    http://jasonfharris.com/blog/2010/05/versioning-your-application-with-the-mercurial-changeset-hash/
+
+    """
+    import subprocess
+    import re
+    get_changeset = subprocess.Popen('hg identify -b -i',
+                                     stdout=subprocess.PIPE,
+                                     stderr=subprocess.PIPE,
+                                     shell=True)
+        
+    if (get_changeset.stderr.read() != ""):
+        print "Error in obtaining current changeset of the Mercurial repository"
+        return None
+        
+    changeset = get_changeset.stdout.read().strip()
+    if (not re.search("^[0-9a-f]{12}", changeset)):
+        print "Current changeset of the Mercurial repository is malformed"
+        changeset = None
+
+    return changeset
+
+class my_build_py(build_py):
+    def run(self):
+        # honor the --dry-run flag
+        if not self.dry_run:
+            target_dir = os.path.join(self.build_lib,'yt')
+            src_dir =  os.getcwd() 
+            changeset = get_mercurial_changeset_id(src_dir)
+            self.mkpath(target_dir)
+            with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
+                fobj.write("hg_version = '%s'\n" % changeset)
+
+            build_py.run(self)
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
@@ -166,6 +203,7 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
+        cmdclass = {'build_py': my_build_py},
         )
     return
 

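A hedged sketch of how the generated __hg_version__.py might be consumed at
runtime (the import path assumes the file lands inside the built yt package,
as my_build_py arranges above; the fallback value is illustrative):

    try:
        from yt.__hg_version__ import hg_version
    except ImportError:
        hg_version = "unknown"  # e.g. a source checkout that was never built
    print "yt was built from Mercurial changeset %s" % hg_version
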

diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -75,11 +75,6 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
-    # Used for Loaded and RockstarHalos
-    _saved_fields = {}
-    _ds_sort = None
-    _particle_mask = None
-
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
@@ -111,6 +106,9 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
 
     @property
     def particle_mask(self):
@@ -1323,18 +1321,24 @@
     _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
         ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
         ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+        ('vmax_r', np.float32), 
         ('mgrav', np.float32), ('vmax', np.float32),
         ('rvmax', np.float32), ('rs', np.float32),
+        ('klypin_rs', np.float32), 
         ('vrms', np.float32), ('J', (np.float32, 3)),
         ('energy', np.float32), ('spin', np.float32),
-        ('padding1', np.float32), ('num_p', np.int64),
+        ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
+        ('Voff', np.float32), ('b_to_a', np.float32),
+        ('c_to_a', np.float32), ('A', (np.float32, 3)),
+        ('bullock_spin', np.float32), ('kin_to_pot', np.float32),
+        ('num_p', np.int64),
         ('num_child_particles', np.int64), ('p_start', np.int64),
         ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
         ('min_pos_err', np.float32), ('min_vel_err', np.float32),
         ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
-    # Above, padding1&2 are due to c byte ordering which pads between
+    # Above, padding* are due to c byte ordering which pads between
     # 4 and 8 byte values in the struct as to not overlap memory registers.
-    _tocleanup = ['padding1', 'padding2']
+    _tocleanup = ['padding2']
 
     def __init__(self, pf, out_list):
         ParallelAnalysisInterface.__init__(self)
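
The first two hunks above move _saved_fields, _ds_sort, and _particle_mask
from class scope into __init__. A standalone sketch of the pitfall this
avoids (class names here are illustrative): a mutable declared at class
scope is shared by every instance.

    class Shared(object):
        cache = {}              # one dict, shared by all instances

    class PerInstance(object):
        def __init__(self):
            self.cache = {}     # a fresh dict per instance

    a, b = Shared(), Shared()
    a.cache["x"] = 1
    assert "x" in b.cache       # state leaks between instances
    c, d = PerInstance(), PerInstance()
    c.cache["x"] = 1
    assert "x" not in d.cache   # isolated, as the commit intends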


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -40,61 +40,14 @@
 from os import mkdir
 from os import path
 
-# Get some definitions from Rockstar directly.
-if "ROCKSTAR_DIR" in os.environ:
-    ROCKSTAR_DIR = os.environ["ROCKSTAR_DIR"]
-elif os.path.exists("rockstar.cfg"):
-    ROCKSTAR_DIR = open("rockstar.cfg").read().strip()
-else:
-    print "Reading Rockstar location from rockstar.cfg failed."
-    print "Please place the base directory of your"
-    print "Rockstar install in rockstar.cfg and restart."
-    print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
-    sys.exit(1)
-lines = file(path.join(ROCKSTAR_DIR, 'server.h'))
-READER_TYPE = None
-WRITER_TYPE = None
-for line in lines:
-    if "READER_TYPE" in line:
-        line = line.split()
-        READER_TYPE = int(line[-1])
-    if "WRITER_TYPE" in line:
-        line = line.split()
-        WRITER_TYPE = int(line[-1])
-    if READER_TYPE != None and WRITER_TYPE != None:
-        break
-lines.close()
-
 class InlineRunner(ParallelAnalysisInterface):
-    def __init__(self, num_writers):
+    def __init__(self):
         # If this is being run inline, num_readers == comm.size, always.
-        self.num_readers = ytcfg.getint("yt", "__global_parallel_size")
-        if num_writers is None:
-            self.num_writers =  ytcfg.getint("yt", "__global_parallel_size")
-        else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-
-    def split_work(self, pool):
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're inline, everyone is a reader.
-        self.readers = avail[:]
-        if self.num_writers == pool.comm.size:
-            # And everyone is a writer!
-            self.writers = avail[:]
-        else:
-            # Everyone is not a writer.
-            # Cyclically assign writers which should approximate
-            # memory load balancing (depending on the mpirun call,
-            # but this should do it in most cases).
-            stride = int(ceil(float(pool.comm.size) / self.num_writers))
-            while len(self.writers) < self.num_writers:
-                self.writers.extend(avail[::stride])
-                for r in readers:
-                    avail.pop(avail.index(r))
-
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        self.num_readers = psize
+        # No choice for you, everyone's a writer too!
+        self.num_writers =  psize
+    
     def run(self, handler, pool):
         # If inline, we use forks.
         server_pid = 0
@@ -104,67 +57,66 @@
             if server_pid == 0:
                 handler.start_server()
                 os._exit(0)
-        # Start writers.
+        # Start writers on all.
         writer_pid = 0
-        if pool.comm.rank in self.writers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            writer_pid = os.fork()
-            if writer_pid == 0:
-                handler.start_client(WRITER_TYPE)
-                os._exit(0)
-        # Start readers, not forked.
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        writer_pid = os.fork()
+        if writer_pid == 0:
+            handler.start_writer()
+            os._exit(0)
+        # Everyone's a reader!
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        handler.start_reader()
         # Make sure the forks are done, which they should be.
         if writer_pid != 0:
             os.waitpid(writer_pid, 0)
         if server_pid != 0:
             os.waitpid(server_pid, 0)
 
+    def setup_pool(self):
+        pool = ProcessorPool()
+        # Everyone is a reader, and when we're inline, that's all that matters.
+        readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
+        pool.add_workgroup(ranks=readers, name="readers")
+        return pool, pool.workgroups[0]
+
 class StandardRunner(ParallelAnalysisInterface):
     def __init__(self, num_readers, num_writers):
         self.num_readers = num_readers
+        psize = ytcfg.getint("yt", "__global_parallel_size")
         if num_writers is None:
-            self.num_writers = ytcfg.getint("yt", "__global_parallel_size") \
-                - num_readers - 1
+            self.num_writers =  psize - num_readers - 1
         else:
-            self.num_writers = min(num_writers,
-                ytcfg.getint("yt", "__global_parallel_size"))
-        if self.num_readers + self.num_writers + 1 != ytcfg.getint("yt", \
-                "__global_parallel_size"):
+            self.num_writers = min(num_writers, psize)
+        if self.num_readers + self.num_writers + 1 != psize:
             mylog.error('%i reader + %i writers != %i mpi',
-                    self.num_readers, self.num_writers,
-                    ytcfg.getint("yt", "__global_parallel_size"))
+                    self.num_readers, self.num_writers, psize)
             raise RuntimeError
     
-    def split_work(self, pool):
-        # Who is going to do what.
-        avail = range(pool.comm.size)
-        self.writers = []
-        self.readers = []
-        # If we're not running inline, rank 0 should be removed immediately.
-        avail.pop(0)
-        # Now we assign the rest.
-        for i in range(self.num_readers):
-            self.readers.append(avail.pop(0))
-        for i in range(self.num_writers):
-            self.writers.append(avail.pop(0))
+    def run(self, handler, wg):
+        # Not inline so we just launch them directly from our MPI threads.
+        if wg.name == "server":
+            handler.start_server()
+        if wg.name == "readers":
+            time.sleep(0.05)
+            handler.start_reader()
+        if wg.name == "writers":
+            time.sleep(0.1)
+            handler.start_writer()
     
-    def run(self, handler, pool):
-        # Not inline so we just launch them directly from our MPI threads.
-        if pool.comm.rank == 0:
-            handler.start_server()
-        if pool.comm.rank in self.readers:
-            time.sleep(0.1 + pool.comm.rank/10.0)
-            handler.start_client(READER_TYPE)
-        if pool.comm.rank in self.writers:
-            time.sleep(0.2 + pool.comm.rank/10.0)
-            handler.start_client(WRITER_TYPE)
+    def setup_pool(self):
+        pool = ProcessorPool()
+        pool, workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, ts, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,dm_type=1,force_res=None):
+    def __init__(self, ts, num_readers = 1, num_writers = None,
+            outbase="rockstar_halos", dm_type=1, 
+            force_res=None, total_particles=None, dm_only=False):
         r"""Spawns the Rockstar Halo finder, distributes dark matter
         particles and finds halos.
 
@@ -193,8 +145,6 @@
         outbase: str
             This is where the out*list files that Rockstar makes should be
             placed. Default is 'rockstar_halos'.
-        particle_mass: float
-            This sets the DM particle mass used in Rockstar.
         dm_type: 1
             In order to exclude stars and other particle types, define
             the dm_type. Default is 1, as Enzo has the DM particle type=1.
@@ -206,6 +156,17 @@
             last data snapshot (i.e. the one where time has evolved the
             longest) in the time series:
             ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+        total_particles : int
+            If supplied, this is a pre-calculated total number of dark matter
+            particles present in the simulation. For example, this is useful
+            when analyzing a series of snapshots where the number of dark
+            matter particles should not change and this will save some disk
+            access time. If left unspecified, it will
+            be calculated automatically. Default: ``None``.
+        dm_only : boolean
+            If set to ``True``, it will be assumed that there are only dark
+            matter particles present in the simulation. This can save analysis
+            time if this is indeed the case. Default: ``False``.
             
         Returns
         -------
@@ -222,16 +183,15 @@
         from yt.mods import *
         import sys
 
-        files = glob.glob('/u/cmoody3/data/a*')
-        files.sort()
-        ts = TimeSeriesData.from_filenames(files)
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
         pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts, particle_mass=pm)
+        rh = RockstarHaloFinder(ts)
         rh.run()
         """
+        ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
-            self.runner = InlineRunner(num_writers)
+            self.runner = InlineRunner()
         else:
             self.runner = StandardRunner(num_readers, num_writers)
         self.num_readers = self.runner.num_readers
@@ -241,52 +201,75 @@
         # Note that Rockstar does not support subvolumes.
         # We assume that all of the snapshots in the time series
         # use the same domain info as the first snapshots.
-        if not isinstance(ts,TimeSeriesData):
+        if not isinstance(ts, TimeSeriesData):
             ts = TimeSeriesData([ts])
         self.ts = ts
         self.dm_type = dm_type
-        tpf = ts.__iter__().next()
+        self.outbase = outbase
+        if force_res is None:
+            tpf = ts[-1] # Cache a reference
+            self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
+            # We have to delete now to wipe the hierarchy
+            del tpf
+        else:
+            self.force_res = force_res
+        self.total_particles = total_particles
+        self.dm_only = dm_only
+        # Setup pool and workgroups.
+        self.pool, self.workgroup = self.runner.setup_pool()
+        p = self._setup_parameters(ts)
+        params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
+        self.__dict__.update(params)
+        self.handler = rockstar_interface.RockstarInterface(self.ts)
+
+    def _setup_parameters(self, ts):
+        if self.workgroup.name != "readers": return None
+        tpf = ts[0]
         def _particle_count(field, data):
+            if self.dm_only:
+                return np.prod(data["particle_position_x"].shape)
             try:
-                return (data["particle_type"]==dm_type).sum()
+                return (data["particle_type"]==self.dm_type).sum()
             except KeyError:
                 return np.prod(data["particle_position_x"].shape)
-        add_field("particle_count",function=_particle_count, not_in_all=True,
-            particle_type=True)
-        # Get total_particles in parallel.
+        add_field("particle_count", function=_particle_count,
+                  not_in_all=True, particle_type=True)
         dd = tpf.h.all_data()
-        self.total_particles = int(dd.quantities['TotalQuantity']('particle_count')[0])
-        self.hierarchy = tpf.h
-        self.particle_mass = particle_mass 
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        if outbase is None:
-            outbase = 'rockstar_halos'
-        self.outbase = outbase
-        self.particle_mass = particle_mass
-        if force_res is None:
-            self.force_res = ts[-1].h.get_smallest_dx() * ts[-1]['mpch']
-        else:
-            self.force_res = force_res
-        self.left_edge = tpf.domain_left_edge
-        self.right_edge = tpf.domain_right_edge
-        self.center = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
-        # We set up the workgroups *before* initializing
-        # ParallelAnalysisInterface. Everyone is their own workgroup!
-        self.pool = ProcessorPool()
-        for i in range(ytcfg.getint("yt", "__global_parallel_size")):
-             self.pool.add_workgroup(size=1)
-        ParallelAnalysisInterface.__init__(self)
-        for wg in self.pool.workgroups:
-            if self.pool.comm.rank in wg.ranks:
-                self.workgroup = wg
-        self.handler = rockstar_interface.RockstarInterface(
-                self.ts, dd)
+        # Get DM particle mass.
+        all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
+        for g in tpf.h._get_objs("grids"):
+            if g.NumberOfParticles == 0: continue
+            if self.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
+                iddm = g["particle_type"] == self.dm_type
+            else:
+                iddm = Ellipsis
+            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+            break
+        p = {}
+        if self.total_particles is None:
+            # Get total_particles in parallel.
+            p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        p['left_edge'] = tpf.domain_left_edge
+        p['right_edge'] = tpf.domain_right_edge
+        p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        p['particle_mass'] = particle_mass
+        return p
+
 
     def __del__(self):
-        self.pool.free_all()
+        try:
+            self.pool.free_all()
+        except AttributeError:
+            # This really only acts to cut down on the misleading
+            # error messages when/if this class is called incorrectly
+            # or some other error happens and self.pool hasn't been created
+            # already.
+            pass
 
     def _get_hosts(self):
-        if self.pool.comm.size == 1 or self.pool.comm.rank == 0:
+        if self.comm.rank == 0 or self.comm.size == 1:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -294,7 +277,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.pool.comm.mpi_bcast(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 
@@ -308,19 +291,20 @@
         self.handler.setup_rockstar(self.server_address, self.port,
                     len(self.ts), self.total_particles, 
                     self.dm_type,
-                    parallel = self.pool.comm.size > 1,
+                    parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
-                    force_res=self.force_res,
+                    force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
+                    dm_only = int(self.dm_only),
                     **kwargs)
         # Make the directory to store the halo lists in.
-        if self.pool.comm.rank == 0:
+        if self.comm.rank == 0:
             if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
+                os.makedirs(self.outbase)
             # Make a record of which dataset corresponds to which set of
             # output files because it will be easy to lose this connection.
             fp = open(self.outbase + '/pfs.txt', 'w')
@@ -331,15 +315,13 @@
                 fp.write(line)
             fp.close()
         # This barrier makes sure the directory exists before it might be used.
-        self.pool.comm.barrier()
-        if self.pool.comm.size == 1:
+        self.comm.barrier()
+        if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            # Split up the work.
-            self.runner.split_work(self.pool)
             # And run it!
-            self.runner.run(self.handler, self.pool)
-        self.pool.comm.barrier()
+            self.runner.run(self.handler, self.workgroup)
+        self.comm.barrier()
         self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
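
A minimal sketch of the workgroup layout StandardRunner.setup_pool now
builds (the sizes are illustrative; the call is the one shown in the hunk
above, and it assumes an MPI launch with enough ranks). Each rank lands in
exactly one named workgroup, and run() dispatches on workgroup.name instead
of hand-splitting rank lists:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    # e.g. under "mpirun -np 8": 1 server + 2 readers + 5 writers
    pool, workgroup = ProcessorPool.from_sizes(
        [(1, "server"), (2, "readers"), (5, "writers")])
    print "this rank belongs to", workgroup.name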


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -48,13 +48,15 @@
 
 cdef import from "server.h" nogil:
     int server()
+    np.int64_t READER_TYPE
+    np.int64_t WRITER_TYPE
 
 cdef import from "client.h" nogil:
     void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
-    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, 
 			   np.int64_t chunk, float *bounds)
 
 cdef import from "config_vars.h":
@@ -144,101 +146,9 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
-def print_rockstar_settings():
-    # We have to do the config
-    print "FILE_FORMAT =", FILE_FORMAT
-    print "PARTICLE_MASS =", PARTICLE_MASS
+# Forward declare
+cdef class RockstarInterface
 
-    print "MASS_DEFINITION =", MASS_DEFINITION
-    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
-    print "FORCE_RES =", FORCE_RES
-
-    print "SCALE_NOW =", SCALE_NOW
-    print "h0 =", h0
-    print "Ol =", Ol
-    print "Om =", Om
-
-    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
-    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
-    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
-    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
-    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
-
-    print "PARALLEL_IO =", PARALLEL_IO
-    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
-    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
-    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
-    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
-    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
-
-    print "INBASE =", INBASE
-    print "FILENAME =", FILENAME
-    print "STARTING_SNAP =", STARTING_SNAP
-    print "NUM_SNAPS =", NUM_SNAPS
-    print "NUM_BLOCKS =", NUM_BLOCKS
-    print "NUM_READERS =", NUM_READERS
-    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
-    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
-    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
-    print "BLOCK_NAMES =", BLOCK_NAMES
-
-    print "OUTBASE =", OUTBASE
-    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
-    print "NUM_WRITERS =", NUM_WRITERS
-    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
-    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
-
-    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
-    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
-    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
-    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
-
-    print "BOUND_PROPS =", BOUND_PROPS
-    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
-    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
-    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
-    print "TRIM_OVERLAP =", TRIM_OVERLAP
-    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
-    print "LIGHTCONE =", LIGHTCONE
-    print "PERIODIC =", PERIODIC
-
-    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
-    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
-    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
-    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
-    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
-    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
-
-    print "LIMIT_CENTER =", LIMIT_CENTER[0]
-    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
-    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
-    print "LIMIT_RADIUS =", LIMIT_RADIUS
-
-    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
-    print "GADGET_VARIANT =", GADGET_VARIANT
-
-    print "FOF_FRACTION =", FOF_FRACTION
-    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
-    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
-    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
-    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
-    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
-    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
-
-    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
-    print "BOX_SIZE =", BOX_SIZE
-    print "OUTPUT_HMAD =", OUTPUT_HMAD
-    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
-    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
-    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
-    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
-    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
-
-    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
-    print "SINGLE_SNAP =", SINGLE_SNAP
-
-cdef class RockstarInterface
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
     global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
@@ -250,31 +160,19 @@
     block = int(str(filename).rsplit(".")[-1])
     n = rh.block_ratio
 
-    all_grids = pf.h.grids
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
-    if NUM_BLOCKS == 1:
-        grids = all_grids
-    else:
-        if ytcfg.getboolean("yt", "inline") == False:
-            fnames = np.array([g.filename for g in all_grids])
-            sort = fnames.argsort()
-            grids = np.array_split(all_grids[sort], NUM_BLOCKS)[block]
-        else:
-            # We must be inline, grap only the local grids.
-            grids  = [g for g in all_grids if g.proc_num ==
-                          ytcfg.getint('yt','__topcomm_parallel_rank')]
-    
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
     if NUM_BLOCKS > 1:
         local_parts = 0
-        for g in grids:
+        for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if "particle_type" in all_fields:
-                #iddm = dd._get_data_from_grid(g, "particle_type")==rh.dm_type
+            if rh.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
                 iddm = g["particle_type"] == rh.dm_type
             else:
                 iddm = Ellipsis
@@ -295,9 +193,11 @@
     left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
-    for g in grids:
+    for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
-        if "particle_type" in all_fields:
+        if rh.dm_only:
+            iddm = Ellipsis
+        elif "particle_type" in all_fields:
             iddm = g["particle_type"] == rh.dm_type
         else:
             iddm = Ellipsis
@@ -329,21 +229,22 @@
     cdef public int block_ratio
     cdef public int dm_type
     cdef public int total_particles
+    cdef public int dm_only
 
-    def __cinit__(self, ts, data_source):
+    def __cinit__(self, ts):
         self.ts = ts
         self.tsl = ts.__iter__() #timseries generator used by read
-        self.data_source = data_source
 
     def setup_rockstar(self, char *server_address, char *server_port,
                        int num_snaps, np.int64_t total_particles,
                        int dm_type,
-                       np.float64_t particle_mass = -1.0,
+                       np.float64_t particle_mass,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
-                       int min_halo_size = 25, outbase = "None"):
+                       int min_halo_size = 25, outbase = "None",
+                       int dm_only = 0):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -374,6 +275,7 @@
         MIN_HALO_OUTPUT_SIZE=min_halo_size
         TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
+        self.dm_only = dm_only
         
         tpf = self.ts[0]
         h0 = tpf.hubble_constant
@@ -385,8 +287,6 @@
             #workaround is to make a new directory
             OUTBASE = outbase 
 
-        if particle_mass < 0:
-            particle_mass = tpf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
         BOX_SIZE = (tpf.domain_right_edge[0] -
@@ -400,12 +300,16 @@
     def call_rockstar(self):
         read_particles("generic")
         rockstar(NULL, 0)
-        output_and_free_halos(0, 0, 0, NULL)
+        output_halos(0, 0, 0, NULL)
 
     def start_server(self):
         with nogil:
             server()
 
-    def start_client(self, in_type):
-        in_type = np.int64(in_type)
+    def start_reader(self):
+        cdef np.int64_t in_type = np.int64(READER_TYPE)
         client(in_type)
+
+    def start_writer(self):
+        cdef np.int64_t in_type = np.int64(WRITER_TYPE)
+        client(in_type)


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -61,8 +61,9 @@
     ipython_notebook = 'False',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold003',
-    local_standard_filename = 'local001'
+    gold_standard_filename = 'gold004',
+    local_standard_filename = 'local001',
+    sketchfab_api_key = 'None'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
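
A minimal sketch of the new sketchfab_api_key option (the [yt] section and
the 'None' sentinel match how export_sketchfab reads it back below; the key
value is illustrative):

    # ~/.yt/config would contain, e.g.:
    #   [yt]
    #   sketchfab_api_key = 0123456789abcdef
    from yt.config import ytcfg
    api_key = ytcfg.get("yt", "sketchfab_api_key")
    if api_key == "None":
        print "No SketchFab API key configured."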


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -61,6 +61,12 @@
     valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
             else False for arg in args]
     if not any(valid_file):
+        try:
+            from yt.data_objects.time_series import TimeSeriesData
+            ts = TimeSeriesData.from_filenames(*args, **kwargs)
+            return ts
+        except YTOutputNotIdentified:
+            pass
         mylog.error("None of the arguments provided to load() is a valid file")
         mylog.error("Please check that you have used a correct path")
         raise YTOutputNotIdentified(args, kwargs)
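
A hedged sketch of the new fallback (the glob pattern is illustrative): when
load() is handed arguments that do not name existing files, it now tries to
assemble a time series before raising.

    from yt.mods import load
    ts = load("/u/cmoody3/data/a*")  # returns a TimeSeriesData, not a single pf
    for pf in ts:
        print pf.current_time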


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -35,8 +35,10 @@
 import exceptions
 import itertools
 import shelve
+import cStringIO
 
 from yt.funcs import *
+from yt.config import ytcfg
 
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
@@ -48,7 +50,7 @@
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
+    ParallelAnalysisInterface, parallel_root_only
 from yt.utilities.linear_interpolators import \
     UnilinearFieldInterpolator, \
     BilinearFieldInterpolator, \
@@ -867,9 +869,11 @@
         else:
             self.fields = ensure_list(fields)
         from yt.visualization.plot_window import \
-            GetBoundsAndCenter, PWViewerMPL
+            GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
@@ -3808,7 +3812,9 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
@@ -3979,8 +3985,9 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
@@ -4162,6 +4169,427 @@
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
 
+class AMRSurfaceBase(AMRData, ParallelAnalysisInterface):
+    _type_name = "surface"
+    _con_args = ("data_source", "surface_field", "field_value")
+    vertices = None
+    def __init__(self, data_source, surface_field, field_value):
+        r"""This surface object identifies isocontours on a cell-by-cell basis,
+        with no consideration of global connectedness, and returns the vertices
+        of the Triangles in that isocontour.
+
+        This object simply returns the vertices of all the triangles
+        calculated by the marching cubes algorithm; for more complex
+        operations, such as identifying connected sets of cells above a given
+        threshold, see the extract_connected_sets function.  This is more
+        useful for calculating, for instance, total isocontour area, or
+        visualizing in an external program (such as `MeshLab
+        <http://meshlab.sf.net>`_.)  The object has the properties .vertices
+        and will sample values if a field is requested.  The values are
+        interpolated to the center of a given face.
+        
+        Parameters
+        ----------
+        data_source : AMR3DDataObject
+            This is the object which will used as a source
+        surface_field : string
+            Any field that can be obtained in a data object.  This is the field
+            which will be isocontoured.
+        field_value : float
+            The value at which the isocontour should be calculated.
+
+        References
+        ----------
+
+        .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+        Examples
+        --------
+        This will create a data object, find a nice value in the center, and
+        output the vertices to "triangles.obj" after rescaling them.
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> print surf["Temperature"]
+        >>> print surf.vertices
+        >>> bounds = [(sp.center[i] - 5.0/pf['kpc'],
+        ...            sp.center[i] + 5.0/pf['kpc']) for i in range(3)]
+        >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
+        """
+        ParallelAnalysisInterface.__init__(self)
+        self.data_source = data_source
+        self.surface_field = surface_field
+        self.field_value = field_value
+        self.vertex_samples = YTFieldData()
+        center = data_source.get_field_parameter("center")
+        AMRData.__init__(self, center = center, fields = None, pf =
+                         data_source.pf)
+        self._grids = self.data_source._grids.copy()
+
+    def get_data(self, fields = None, sample_type = "face"):
+        if isinstance(fields, list) and len(fields) > 1:
+            for field in fields: self.get_data(field)
+            return
+        elif isinstance(fields, list):
+            fields = fields[0]
+        # Now we have a "fields" value that is either a string or None
+        pb = get_pbar("Extracting (sampling: %s)" % fields,
+                      len(list(self._get_grid_objs())))
+        verts = []
+        samples = []
+        for i,g in enumerate(self._get_grid_objs()):
+            pb.update(i)
+            my_verts = self._extract_isocontours_from_grid(
+                            g, self.surface_field, self.field_value,
+                            fields, sample_type)
+            if fields is not None:
+                my_verts, svals = my_verts
+                samples.append(svals)
+            verts.append(my_verts)
+        pb.finish()
+        verts = np.concatenate(verts).transpose()
+        verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
+        self.vertices = verts
+        if fields is not None:
+            samples = np.concatenate(samples)
+            samples = self.comm.par_combine_object(samples, op='cat',
+                                datatype='array')
+            if sample_type == "face":
+                self[fields] = samples
+            elif sample_type == "vertex":
+                self.vertex_samples[fields] = samples
+        
+
+    @restore_grid_state
+    def _extract_isocontours_from_grid(self, grid, field, value,
+                                       sample_values = None,
+                                       sample_type = "face"):
+        mask = self.data_source._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field, no_ghost = False)
+        if sample_values is not None:
+            svals = grid.get_vertex_centered_data(sample_values)
+        else:
+            svals = None
+        sample_type = {"face":1, "vertex":2}[sample_type]
+        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
+                                    grid.dds, svals, sample_type)
+        return my_verts
+
+    def calculate_flux(self, field_x, field_y, field_z, fluxing_field = None):
+        r"""This calculates the flux over the surface.
+
+        This function will conduct marching cubes on all the cells in a given
+        data container (grid-by-grid), and then for each identified triangular
+        segment of an isocontour in a given cell, calculate the gradient (i.e.,
+        normal) in the isocontoured field, interpolate the local value of the
+        "fluxing" field, the area of the triangle, and then return:
+
+        area * local_flux_value * (n dot v)
+
+        Where area, local_flux_value, and the vector v are interpolated at the barycenter
+        (weighted by the vertex values) of the triangle.  Note that this
+        specifically allows for the field fluxing across the surface to be
+        *different* from the field being contoured.  If the fluxing_field is
+        not specified, it is assumed to be 1.0 everywhere, and the raw flux
+        with no local-weighting is returned.
+
+        Additionally, the returned flux is defined as flux *into* the surface,
+        not flux *out of* the surface.
+        
+        Parameters
+        ----------
+        field_x : string
+            The x-component field
+        field_y : string
+            The y-component field
+        field_z : string
+            The z-component field
+        fluxing_field : string, optional
+            The field whose passage over the surface is of interest.  If not
+            specified, assumed to be 1.0 everywhere.
+
+        Returns
+        -------
+        flux : float
+            The summed flux.  Note that it is not currently scaled; this is
+            simply the code-unit area times the fields.
+
+        References
+        ----------
+
+        .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+        Examples
+        --------
+
+        This will create a data object, find a nice value in the center, and
+        calculate the metal flux over it.
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> flux = surf.calculate_flux(
+        ...     "x-velocity", "y-velocity", "z-velocity", "Metal_Density")
+        """
+        flux = 0.0
+        pb = get_pbar("Fluxing %s" % fluxing_field,
+                len(list(self._get_grid_objs())))
+        for i, g in enumerate(self._get_grid_objs()):
+            pb.update(i)
+            flux += self._calculate_flux_in_grid(g,
+                    field_x, field_y, field_z, fluxing_field)
+        pb.finish()
+        flux = self.comm.mpi_allreduce(flux, op="sum")
+        return flux
+
+    @restore_grid_state
+    def _calculate_flux_in_grid(self, grid, 
+                    field_x, field_y, field_z, fluxing_field = None):
+        mask = self.data_source._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(self.surface_field)
+        if fluxing_field is None:
+            ff = np.ones(vals.shape, dtype="float64")
+        else:
+            ff = grid.get_vertex_centered_data(fluxing_field)
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+                     [field_x, field_y, field_z]]
+        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
+                    ff, mask, grid.LeftEdge, grid.dds)
+
+    def export_ply(self, filename, bounds = None, color_field = None,
+                   color_map = "algae", color_log = True, sample_type = "face"):
+        r"""This exports the surface to the PLY format, suitable for visualization
+        in many different programs (e.g., MeshLab).
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+        bounds : list of tuples
+            The bounds the vertices will be normalized to.  This is of the format:
+            [(xmin, xmax), (ymin, ymax), (zmin, zmax)].  Defaults to the full
+            domain.
+        color_field : string
+            Should a field be sampled and colormapped?
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> print surf["Temperature"]
+        >>> print surf.vertices
+        >>> bounds = [(sp.center[i] - 5.0/pf['kpc'],
+        ...            sp.center[i] + 5.0/pf['kpc']) for i in range(3)]
+        >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
+        """
+        if self.vertices is None:
+            self.get_data(color_field, sample_type)
+        elif color_field is not None:
+            if sample_type == "face" and \
+                color_field not in self.field_data:
+                self[color_field]
+            elif sample_type == "vertex" and \
+                color_field not in self.vertex_samples:
+                self.get_data(color_field, sample_type)
+        self._export_ply(filename, bounds, color_field, color_map, color_log,
+                         sample_type)
+
+    def _color_samples(self, cs, color_log, color_map, arr):
+            if color_log: cs = np.log10(cs)
+            mi, ma = cs.min(), cs.max()
+            cs = (cs - mi) / (ma - mi)
+            from yt.visualization.image_writer import map_to_colors
+            cs = map_to_colors(cs, color_map)
+            arr["red"][:] = cs[0,:,0]
+            arr["green"][:] = cs[0,:,1]
+            arr["blue"][:] = cs[0,:,2]
+
+    @parallel_root_only
+    def _export_ply(self, filename, bounds = None, color_field = None,
+                   color_map = "algae", color_log = True, sample_type = "face"):
+        if isinstance(filename, file):
+            f = filename
+        else:
+            f = open(filename, "wb")
+        if bounds is None:
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+        nv = self.vertices.shape[1]
+        vs = [("x", "<f"), ("y", "<f"), ("z", "<f"),
+              ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
+        fs = [("ni", "uint8"), ("v1", "<i4"), ("v2", "<i4"), ("v3", "<i4"),
+              ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
+        f.write("ply\n")
+        f.write("format binary_little_endian 1.0\n")
+        f.write("element vertex %s\n" % (nv))
+        f.write("property float x\n")
+        f.write("property float y\n")
+        f.write("property float z\n")
+        if color_field is not None and sample_type == "vertex":
+            f.write("property uchar red\n")
+            f.write("property uchar green\n")
+            f.write("property uchar blue\n")
+            v = np.empty(self.vertices.shape[1], dtype=vs)
+            cs = self.vertex_samples[color_field]
+            self._color_samples(cs, color_log, color_map, v)
+        else:
+            v = np.empty(self.vertices.shape[1], dtype=vs[:3])
+        f.write("element face %s\n" % (nv/3))
+        f.write("property list uchar int vertex_indices\n")
+        if color_field is not None and sample_type == "face":
+            f.write("property uchar red\n")
+            f.write("property uchar green\n")
+            f.write("property uchar blue\n")
+            # Now we get our samples
+            cs = self[color_field]
+            arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
+            self._color_samples(cs, color_log, color_map, arr)
+        else:
+            arr = np.empty(nv/3, np.dtype(fs[:-3]))
+        for i, ax in enumerate("xyz"):
+            # Do the bounds first since we cast to f32
+            tmp = self.vertices[i,:]
+            np.subtract(tmp, bounds[i][0], tmp)
+            w = bounds[i][1] - bounds[i][0]
+            np.divide(tmp, w, tmp)
+            np.subtract(tmp, 0.5, tmp) # Center at origin.
+            v[ax][:] = tmp 
+        f.write("end_header\n")
+        v.tofile(f)
+        arr["ni"][:] = 3
+        vi = np.arange(nv, dtype="<i")
+        vi.shape = (nv/3, 3)
+        arr["v1"][:] = vi[:,0]
+        arr["v2"][:] = vi[:,1]
+        arr["v3"][:] = vi[:,2]
+        arr.tofile(f)
+        if filename is not f:
+            f.close()
+
+    def export_sketchfab(self, title, description, api_key = None,
+                            color_field = None, color_map = "algae",
+                            color_log = True, bounds = None):
+        r"""This exports Surfaces to SketchFab.com, where they can be viewed
+        interactively in a web browser.
+
+        SketchFab.com is a proprietary web service that provides WebGL
+        rendering of models.  This routine will use temporary files to
+        construct a compressed binary representation (in .PLY format) of the
+        Surface and any optional fields you specify and upload it to
+        SketchFab.com.  It requires an API key, which can be found on your
+        SketchFab.com dashboard.  You can either supply the API key to this
+        routine directly or you can place it in the variable
+        "sketchfab_api_key" in your ~/.yt/config file.  This function is
+        parallel-safe.
+
+        Parameters
+        ----------
+        title : string
+            The title for the model on the website
+        description : string
+            How you want the model to be described on the website
+        api_key : string
+            Optional; defaults to using the one in the config file
+        color_field : string
+            If specified, the field by which the surface will be colored
+        color_map : string
+            The name of the color map to use to map the color field
+        color_log : bool
+            Should the field be logged before being mapped to RGB?
+        bounds : list of tuples
+            [ (xmin, xmax), (ymin, ymax), (zmin, zmax) ] within which the model
+            will be scaled and centered.  Defaults to the full domain.
+
+        Returns
+        -------
+        URL : string
+            The URL at which your model can be viewed.
+
+        Examples
+        --------
+
+        from yt.mods import *
+        pf = load("redshift0058")
+        dd = pf.h.sphere("max", (200, "kpc"))
+        rho = 5e-27
+
+        bounds = [(dd.center[i] - 100.0/pf['kpc'],
+                   dd.center[i] + 100.0/pf['kpc']) for i in range(3)]
+
+        surf = pf.h.surface(dd, "Density", rho)
+
+        rv = surf.export_sketchfab(
+            title = "Testing Upload",
+            description = "A simple test of the uploader",
+            color_field = "Temperature",
+            color_map = "hot",
+            color_log = True,
+            bounds = bounds
+        )
+        """
+        api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
+        if api_key in (None, "None"):
+            raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
+        import zipfile, json
+        from tempfile import TemporaryFile
+
+        ply_file = TemporaryFile()
+        self.export_ply(ply_file, bounds, color_field, color_map, color_log,
+                        sample_type = "vertex")
+        ply_file.seek(0)
+        # Greater than ten million vertices and we throw an error but dump
+        # to a file.
+        if self.vertices.shape[1] > 1e7:
+            tfi = 0
+            fn = "temp_model_%03i.ply" % tfi
+            while os.path.exists(fn):
+                fn = "temp_model_%03i.ply" % tfi
+                tfi += 1
+            open(fn, "wb").write(ply_file.read())
+            raise YTTooManyVertices(self.vertices.shape[1], fn)
+
+        zfs = TemporaryFile()
+        with zipfile.ZipFile(zfs, "w", zipfile.ZIP_DEFLATED) as zf:
+            zf.writestr("yt_export.ply", ply_file.read())
+        zfs.seek(0)
+
+        zfs.seek(0)
+        data = {
+            'title': title,
+            'token': api_key,
+            'description': description,
+            'fileModel': zfs,
+            'filenameModel': "yt_export.zip",
+        }
+        upload_id = self._upload_to_sketchfab(data)
+        upload_id = self.comm.mpi_bcast(upload_id, root = 0)
+        return upload_id
+
+    @parallel_root_only
+    def _upload_to_sketchfab(self, data):
+        import urllib2, json
+        from yt.utilities.poster.encode import multipart_encode
+        from yt.utilities.poster.streaminghttp import register_openers
+        register_openers()
+        datamulti, headers = multipart_encode(data)
+        request = urllib2.Request("https://api.sketchfab.com/v1/models",
+                        datamulti, headers)
+        rv = urllib2.urlopen(request).read()
+        rv = json.loads(rv)
+        upload_id = rv.get("result", {}).get("id", None)
+        if upload_id:
+            mylog.info("Model uploaded to: https://sketchfab.com/show/%s",
+                       upload_id)
+        else:
+            mylog.error("Problem uploading.")
+        return upload_id
+
+
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
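
A quick, purely illustrative way to sanity-check the binary PLY file that
_export_ply writes (it echoes only the ASCII header emitted above; the
filename comes from the export_ply docstring example):

    f = open("my_galaxy.ply", "rb")
    header = []
    while True:
        line = f.readline()
        if not line:
            break  # EOF safety for a malformed file
        line = line.strip()
        header.append(line)
        if line == "end_header":
            break
    print "\n".join(header)  # the element/property declarations written above
    f.close()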


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -153,7 +153,7 @@
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.field_data.has_key(key):
+        if key not in self.field_data:
             self.get_data(key)
         return self.field_data[key]
 


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/tests/test_fluxes.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -0,0 +1,21 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_flux_calculation():
+    pf = fake_random_pf(64, nprocs = 4)
+    dd = pf.h.all_data()
+    surf = pf.h.surface(dd, "x", 0.51)
+    yield assert_equal, surf["x"], 0.51
+    flux = surf.calculate_flux("Ones", "Zeros", "Zeros", "Ones")
+    yield assert_almost_equal, flux, 1.0, 12
+
+def test_sampling():
+    pf = fake_random_pf(64, nprocs = 4)
+    dd = pf.h.all_data()
+    for i, ax in enumerate('xyz'):
+        surf = pf.h.surface(dd, ax, 0.51)
+        surf.get_data(ax, "vertex")
+        yield assert_equal, surf.vertex_samples[ax], surf.vertices[i,:]
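
Why the expected flux in test_flux_calculation is exactly 1.0: the call
calculate_flux("Ones", "Zeros", "Zeros", "Ones") integrates the constant
vector field (1, 0, 0) over the x = 0.51 isosurface of the (unit) test
domain, weighted by "Ones", which is just the area of the unit square.
A back-of-envelope check of that surface integral with plain numpy (a
sketch of the idea, not yt's implementation, which samples fields per
triangle):

    import numpy as np

    def constant_field_flux(tri_verts, v = (1.0, 0.0, 0.0)):
        # tri_verts: (ntri, 3, 3) triangle vertex coordinates.  The flux
        # of a constant field v is sum_i v . n_i, where n_i is the
        # area-weighted normal 0.5 * (e1 x e2) of triangle i.
        e1 = tri_verts[:, 1] - tri_verts[:, 0]
        e2 = tri_verts[:, 2] - tri_verts[:, 0]
        normals = 0.5 * np.cross(e1, e2)
        return float(np.dot(normals, np.asarray(v)).sum())

    # Two triangles tiling the unit square at x = 0.51:
    quad = np.array([[[0.51, 0., 0.], [0.51, 1., 0.], [0.51, 1., 1.]],
                     [[0.51, 0., 0.], [0.51, 1., 1.], [0.51, 0., 1.]]])
    assert abs(constant_field_flux(quad) - 1.0) < 1e-12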


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_projection():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -22,6 +27,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
                 yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
                 yield assert_equal, proj["Ones"].min(), 1.0
@@ -30,6 +36,8 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = proj.to_pw()
+                fns += pw.save()
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \
@@ -50,6 +58,7 @@
                             proj.center
                     yield assert_equal, frb[proj_field].info['weight_field'], \
                             wf
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()
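
The new lines in this test round-trip each projection through a
PlotWindow and save it to disk, which is why the teardown hook is
needed; as used above, pw.save() evidently returns the list of
filenames it wrote. The pattern in isolation:

    import os
    fns = []
    pw = proj.to_pw()   # build a PlotWindow from the projection
    fns += pw.save()    # save() returns the filenames it wrote
    for fn in fns:      # remove them so repeated runs start clean
        os.remove(fn)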


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -21,6 +26,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
                 yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
                 yield assert_equal, slc["Ones"].min(), 1.0
@@ -29,6 +35,8 @@
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
                 yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = slc.to_pw()
+                fns += pw.save()
                 frb = slc.to_frb((1.0,'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
@@ -49,7 +57,7 @@
                             slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
                             slc_pos
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
-


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -173,12 +173,12 @@
         This demonstrates how one might store results:
 
         >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
-        >>> storage = {}
-        >>> for sto, pf in ts.piter():
+        >>> my_storage = {}
+        >>> for sto, pf in ts.piter(storage=my_storage):
         ...     v, c = pf.h.find_max("Density")
         ...     sto.result = (v, c)
         ...
-        >>> for i, (v, c) in sorted(storage.items()):
+        >>> for i, (v, c) in sorted(my_storage.items()):
         ...     print "% 4i  %0.3e" % (i, v)
         ...
 


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -131,6 +131,13 @@
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
+def _Zeros(field, data):
+    return np.zeros(data.ActiveDimensions, dtype='float64')
+add_field("Zeros", function=_Zeros,
+          validators=[ValidateSpatial(0)],
+          projection_conversion="unitary",
+          display_field = False)
+
 def _Ones(field, data):
     return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -181,8 +181,10 @@
         mylog.debug("Finished read of %s", sets)
 
     def _read_data_set(self, grid, field):
-        return self.modify(hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field)))
+        tr = hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field))
+        if tr.dtype == "float32": tr = tr.astype("float64")
+        return self.modify(tr)
 
     def _read_data_slice(self, grid, field, axis, coord):
         axis = _axis_ids[axis]


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -466,6 +466,11 @@
     return u.popbuffer()
 
 def get_yt_version():
+    try:
+        from yt.__hg_version__ import hg_version
+        return hg_version
+    except ImportError:
+        pass
     import pkg_resources
     yt_provider = pkg_resources.get_provider("yt")
     path = os.path.dirname(yt_provider.module_path)
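
The new fast path expects a yt/__hg_version__.py module baked in at
install time; the generator is not part of this changeset, but a
hypothetical one (everything below is an assumption, not yt's actual
build code) could be as simple as:

    import subprocess

    def write_hg_version(target = "yt/__hg_version__.py"):
        # Record the current Mercurial changeset id so that installed
        # copies without a .hg directory can still report a version.
        cset = subprocess.check_output(["hg", "identify", "-i"]).strip()
        with open(target, "w") as f:
            f.write('hg_version = "%s"\n' % cset)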


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -339,9 +339,15 @@
     return u.popbuffer()
 
 def get_yt_version():
+    try:
+        from yt.__hg_version__ import hg_version
+        return hg_version
+    except ImportError:
+        pass
     import pkg_resources
     yt_provider = pkg_resources.get_provider("yt")
     path = os.path.dirname(yt_provider.module_path)
+    if not os.path.isdir(os.path.join(path, ".hg")): return None
     version = _get_hg_version(path)[:12]
     return version
 
@@ -1037,9 +1043,8 @@
                 print "The supplemental repositories are located at:"
                 print "    %s" % (spath)
                 update_supp = True
-        vstring = None
-        if "site-packages" not in path:
-            vstring = get_hg_version(path)
+        vstring = get_yt_version()
+        if vstring is not None:
             print
             print "The current version of the code is:"
             print
@@ -1047,10 +1052,11 @@
             print vstring.strip()
             print "---"
             print
-            print "This installation CAN be automatically updated."
-            if opts.update_source:  
-                update_hg(path)
-            print "Updated successfully."
+            if "site-packages" not in path:
+                print "This installation CAN be automatically updated."
+                if opts.update_source:  
+                    update_hg(path)
+                print "Updated successfully."
         elif opts.update_source:
             print
             print "YT site-packages not in path, so you must"


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -189,3 +189,22 @@
     def __str__(self):
         return "Enzo test output file (OutputLog) not generated for: " + \
             "'%s'" % (self.testname) + ".\nTest did not complete."
+
+class YTNoAPIKey(YTException):
+    def __init__(self, service, config_name):
+        self.service = service
+        self.config_name = config_name
+
+    def __str__(self):
+        return "You need to set an API key for %s in ~/.yt/config as %s" % (
+            self.service, self.config_name)
+
+class YTTooManyVertices(YTException):
+    def __init__(self, nv, fn):
+        self.nv = nv
+        self.fn = fn
+
+    def __str__(self):
+        s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
+        s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
+        return s
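
These two exceptions back the Sketchfab path added above. A sketch of
how calling code might handle them (strings illustrative):

    from yt.utilities.exceptions import YTNoAPIKey, YTTooManyVertices

    try:
        surf.export_sketchfab(title = "Testing Upload",
                              description = "A simple test of the uploader")
    except YTNoAPIKey:
        print "Set sketchfab_api_key in ~/.yt/config first."
    except YTTooManyVertices as e:
        print "Too large to upload; model written to %s" % e.fn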


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/utilities/lib/marching_cubes.pyx
--- a/yt/utilities/lib/marching_cubes.pyx
+++ b/yt/utilities/lib/marching_cubes.pyx
@@ -33,7 +33,7 @@
 cdef struct Triangle:
     Triangle *next
     np.float64_t p[3][3]
-    np.float64_t val
+    np.float64_t val[3] # Usually only use one value
 
 cdef struct TriangleCollection:
     int count
@@ -64,12 +64,14 @@
     return count
 
 cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
-                             Triangle *first):
+                             Triangle *first, int nskip = 1):
     cdef Triangle *this = first
     cdef Triangle *last
     cdef int i = 0
+    cdef int j
     while this != NULL:
-        values[i] = this.val
+        for j in range(nskip):
+            values[i*nskip + j] = this.val[j]
         i += 1
         last = this
         this = this.next
@@ -463,7 +465,7 @@
                      np.ndarray[np.int32_t, ndim=3] mask,
                      np.ndarray[np.float64_t, ndim=1] left_edge,
                      np.ndarray[np.float64_t, ndim=1] dxs,
-                     obj_sample = None):
+                     obj_sample = None, int sample_type = 1):
     cdef int dims[3]
     cdef int i, j, k, n, m, nt
     cdef int offset
@@ -478,7 +480,7 @@
     if obj_sample is not None:
         sample = obj_sample
         sdata = <np.float64_t *> sample.data
-        do_sample = 1
+        do_sample = sample_type # 1 for face, 2 for vertex
     else:
         do_sample = 0
     for i in range(3):
@@ -502,13 +504,16 @@
                     offset_fill(dims, intdata, gv)
                     nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
                                 &triangles)
-                    if do_sample == 1 and nt > 0:
+                    if nt == 0 or do_sample == 0:
+                        pos[2] += dds[2]
+                        continue
+                    if last == NULL and triangles.first != NULL:
+                        current = triangles.first
+                        last = NULL
+                    elif last != NULL:
+                        current = last.next
+                    if do_sample == 1:
                         # At each triangle's center, sample our secondary field
-                        if last == NULL and triangles.first != NULL:
-                            current = triangles.first
-                            last = NULL
-                        elif last != NULL:
-                            current = last.next
                         while current != NULL:
                             for n in range(3):
                                 point[n] = 0.0
@@ -517,24 +522,38 @@
                                     point[m] += (current.p[n][m]-pos[m])*idds[m]
                             for n in range(3):
                                 point[n] /= 3.0
-                            current.val = offset_interpolate(dims, point,
+                            current.val[0] = offset_interpolate(dims, point,
                                                              sdata + offset)
                             last = current
                             if current.next == NULL: break
                             current = current.next
+                    elif do_sample == 2:
+                        while current != NULL:
+                            for n in range(3):
+                                for m in range(3):
+                                    point[m] = (current.p[n][m]-pos[m])*idds[m]
+                                current.val[n] = offset_interpolate(dims,
+                                                    point, sdata + offset)
+                            last = current
+                            if current.next == NULL: break
+                            current = current.next
                 pos[2] += dds[2]
             pos[1] += dds[1]
         pos[0] += dds[0]
     # Hallo, we are all done.
     cdef np.ndarray[np.float64_t, ndim=2] vertices 
     vertices = np.zeros((triangles.count*3,3), dtype='float64')
+    cdef int nskip = 0
+    if do_sample == 0:
+        # No sampling field was supplied; return the vertices along with
+        # an empty sample array rather than reading nskip uninitialized
+        # below.
+        FillAndWipeTriangles(vertices, triangles.first)
+        return vertices, np.zeros(0, dtype='float64')
     if do_sample == 1:
-        sampled = np.zeros(triangles.count, dtype='float64')
-        FillTriangleValues(sampled, triangles.first)
-        FillAndWipeTriangles(vertices, triangles.first)
-        return vertices, sampled
+        nskip = 1
+    elif do_sample == 2:
+        nskip = 3
+    sampled = np.zeros(triangles.count * nskip, dtype='float64')
+    FillTriangleValues(sampled, triangles.first, nskip)
     FillAndWipeTriangles(vertices, triangles.first)
-    return vertices
+    return vertices, sampled
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
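
In summary, the marching-cubes sampling now has two modes: do_sample == 1
interpolates the secondary field once at each triangle's center (one
value per triangle, nskip = 1), while the new do_sample == 2
interpolates at each of the three vertices (nskip = 3). At the Python
level this is what test_sampling above exercises; a usage sketch (field
name illustrative):

    surf = pf.h.surface(dd, "Density", 5e-27)
    surf.get_data("Temperature", "vertex")      # per-vertex sampling
    tvals = surf.vertex_samples["Temperature"]  # one value per vertex,
                                                # aligned with surf.vertices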


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -252,9 +252,10 @@
     @wraps(func)
     def root_only(*args, **kwargs):
         comm = _get_comm(args)
+        rv = None
         if comm.rank == 0:
             try:
-                func(*args, **kwargs)
+                rv = func(*args, **kwargs)
                 all_clear = 1
             except:
                 traceback.print_last()
@@ -263,6 +264,7 @@
             all_clear = None
         all_clear = comm.mpi_bcast(all_clear)
         if not all_clear: raise RuntimeError
+        return rv
     if parallel_capable: return root_only
     return func
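
With this change, a @parallel_root_only function's return value is
preserved on the root rank (other ranks still get None), so a caller
that needs the value everywhere broadcasts it afterwards, exactly as
export_sketchfab does above:

    upload_id = self._upload_to_sketchfab(data)          # value on root, None elsewhere
    upload_id = self.comm.mpi_bcast(upload_id, root = 0) # identical on all ranks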
 


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -36,7 +36,6 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, \
     BinnedProfile2D
-from .plot_types import ProfilePlot, PhasePlot
 from .tick_locators import LogLocator, LinearLocator
 from yt.utilities.logger import ytLogger as mylog
 


diff -r 2a0f5a24ed218cf9c11fc5799785a93a6b1ff32e -r 7fe902d74ea4da03d4c1cf13b791f612490bc767 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -1,7 +1,7 @@
 from yt.testing import *
 from yt.mods import SlicePlot, ProjectionPlot, \
     OffAxisSlicePlot, OffAxisProjectionPlot
-import glob, os
+import os
 
 def setup():
     from yt.config import ytcfg

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving it
because you have the commit notification service enabled for this
repository.


