[yt-svn] commit/yt-3.0: 182 new changesets

commits-noreply at bitbucket.org
Thu Jun 6 11:00:39 PDT 2013


182 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/eb57083b1568/
Changeset:   eb57083b1568
Branch:      yt
User:        atmyers
Date:        2013-02-19 01:44:26
Summary:     change to enable parallel computation in the field save function
Affected #:  1 file

diff -r 9823f77dfcbbb2c286da9cc872d63d06a1791a6a -r eb57083b1568b6bb8c417d902b2b77d23a8502f3 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -57,7 +57,7 @@
     # don't forget to close the file.
     f.close()
 
-def save_field(pf, field_name):
+def save_field(pf, field_name, data=None):
     """
     Write a single field associated with the parameter file pf to the
     backup file.
@@ -85,12 +85,12 @@
                        particle_type_name="dark_matter")
 
     # now save the field
-    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter")
+    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter", data=data)
 
     # don't forget to close the file.
     f.close()
         
-def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name):
+def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name, data=None):
 
     # add field info to field_types group
     g = fhandle["field_types"]
@@ -131,7 +131,10 @@
         if field_obj.particle_type:  # particle data
             pt_group[field_name] = grid.get_data(field_name)
         else:  # a field
-            grid_group[field_name] = grid.get_data(field_name)
+            if data is not None:
+                grid_group[field_name] = data[str(grid.id)]
+            else:
+                grid_group[field_name] = grid.get_data(field_name)
 
 def _create_new_gdf(pf, gdf_path, data_author=None, data_comment=None,
                    particle_type_name="dark_matter"):
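
For context, a minimal sketch of how the new data= keyword can be used:
precompute a field on every grid (e.g. in parallel), key the results by
grid id to match the data[str(grid.id)] lookup above, and pass the dict to
save_field so the grids are not re-read. The dataset path and field name
below are assumptions, not part of the commit:

    from yt.mods import load
    from yt.utilities.grid_data_format.writer import save_field

    pf = load("DD0010/moving7_0010")  # assumed example dataset
    # map str(grid.id) -> ndarray, matching the lookup in _write_field_to_gdf
    data = dict((str(g.id), g["Density"]) for g in pf.h.grids)
    save_field(pf, "Density", data=data)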


https://bitbucket.org/yt_analysis/yt-3.0/commits/052135b61bdc/
Changeset:   052135b61bdc
Branch:      yt
User:        atmyers
Date:        2013-02-19 01:44:46
Summary:     merging in new pmods from Matt
Affected #:  1 file

diff -r eb57083b1568b6bb8c417d902b2b77d23a8502f3 -r 052135b61bdc98c611bb8371af4e2722f3b55f74 yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -17,343 +17,381 @@
 #####
 
 
-# This code is derived from knee.py, which was included in the Python
-# 2.6 distribution.
-#
-# The modifications to this code are copyright (c) 2011, Lawrence
-# Livermore National Security, LLC. Produced at the Lawrence Livermore
-# National Laboratory. Written by Tim Kadich and Asher Langton
-# <langton2 at llnl.gov>. Released as LLNL-CODE-522751 under the name
-# SmartImport.py, version 1.0. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# - Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the disclaimer below.
-#
-# - Redistributions in binary form must reproduce the above copyright
-#   notice, this list of conditions and the disclaimer (as noted below)
-#   in the documentation and/or other materials provided with the
-#   distribution.
-#
-# - Neither the name of the LLNS/LLNL nor the names of its contributors
-#   may be used to endorse or promote products derived from this
-#   software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
-# LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Additional BSD Notice
-#
-# 1. This notice is required to be provided under our contract with the
-# U.S. Department of Energy (DOE). This work was produced at Lawrence
-# Livermore National Laboratory under Contract No. DE-AC52-07NA27344
-# with the DOE.
-#
-# 2. Neither the United States Government nor Lawrence Livermore
-# National Security, LLC nor any of their employees, makes any warranty,
-# express or implied, or assumes any liability or responsibility for the
-# accuracy, completeness, or usefulness of any information, apparatus,
-# product, or process disclosed, or represents that its use would not
-# infringe privately-owned rights.
-#
-# 3. Also, reference herein to any specific commercial products,
-# process, or services by trade name, trademark, manufacturer or
-# otherwise does not necessarily constitute or imply its endorsement,
-# recommendation, or favoring by the United States Government or
-# Lawrence Livermore National Security, LLC. The views and opinions of
-# authors expressed herein do not necessarily state or reflect those of
-# the United States Government or Lawrence Livermore National Security,
-# LLC, and shall not be used for advertising or product endorsement
-# purposes.
+"""This is an initial implementation of the finder/loader discussed at:
+http://mail.scipy.org/pipermail/numpy-discussion/2012-March/061160.html
 
-"""MPI_Import defines an mpi-aware import hook. The standard use of
-this module is as follows:
+This is intended to take the place of MPI_Import.py. This version has
+only been tested minimally, and is being made available primarily for
+testing and preliminary benchmarking.
 
-   from MPI_Import import mpi_import
-   with mpi_import():
-      import foo
-      import bar
+Known issues:
+- Modules loaded via the Windows registry may be incorrectly hidden by
+  a module of the same name in sys.path.
+- If a file is added to a directory on sys.path, it won't be cached, so
+  there may be precedence issues. If a file disappears or its permissions
+  change, the import will fail.
 
-Within the with block, the standard import statement is replaced by an
-MPI-aware import statement. The rank 0 process finds the location of
-each module to import, broadcasts the location, then all of the
-processes load that module.
+Update (3/16/12): I've merged in a new version, simple_finder, described
+below.
 
-One CRITICAL detail: any code inside the mpi_import block must be
-executed exactly the same on all of the MPI ranks. For example,
-consider this:
+To use the finder, start a script off with the following:
 
-def foo():
-   import mpi
-   if mpi.rank == 0:
-      bar = someFunction()
-   bar = mpi.bcast(bar,root=0)
+import sys
+from cached_import import finder
+sys.meta_path.append(finder())
 
-def someFunction():
-   import os
-   return os.name
+There are also variants of the finder that use MPI. The rank 0 process
+builds the cache and then broadcasts it. For these, replace finder
+with either pympi_finder or mpi4py_finder.
 
-If foo() is called during the import process, then things may go very
-wrong. If the os module hasn't been loaded, then the rank 0 process
-will find os and broadcast its location. Since there's no
-corresponding bcast for rank > 0, the other processes will receive
-that broadcast instead of the broadcast for bar, resulting in
-undefined behavior. Similarly, if rank >0 process encounters an import
-that rank 0 does not encounter, that process will either hang waiting
-for the bcast, or it will receive an out-of-order bcast.
+This finder works by building a cache mapping module names to
+locations. The expensive parts of this process are the calls that
+result in a stat. For that reason, we don't, by default, check whether
+a module file is readable.
 
-The import hook provides a way to test whether we're using this
-importer, which can be used to disable rank-asymmetric behavior in a
-module import:
-
-import __builtin__
-hasattr(__builtin__.__import__,"mpi_import")
-
-This evaluates to True only when we're in an mpi_import() context
-manager.
-
-There are some situations where rank-dependent code may be necessary.
-One such example is pyMPI's synchronizeQueuedOutput function, which
-tends to cause deadlocks when it is executed inside an mpi_imported
-module. In that case, we provide a hook to execute a function after
-the mpi_import hook has been replaced by the standard import hook.
-Here is an example showing the use of this feature:
-
-# encapsulate the rank-asymmetric code in a function
-def f():
-    if mpi.rank == 0:
-        doOneThing()
-    else:
-        doSomethingElse()
-
-# Either importer is None (standard import) or it's a reference to
-# the mpi_import object that owns the current importer.
-import __builtin__
-importer = getattr(__builtin__.__import__,"mpi_import",None)
-if importer:
-    importer.callAfterImport(f)
-else:
-    # If we're using the standard import, then we'll execute the
-    # code in f immediately
-    f()
-
-WARNING: the callAfterImport feature is not intended for casual use.
-Usually it will be sufficient (and preferable) to either remove the
-rank-asymmetric code or explicitly move it outside of the 'with
-mpi_import' block. callAfterImport is provided for the (hopefully
-rare!) cases where this does not suffice.
-
-
-Some implementation details:
-
--This code is based on knee.py, which is an example of a pure Python
- hierarchical import that was included with Python 2.6 distributions.
-
--Python PEP 302 defines another way to override import by using finder
- and loader objects, which behave similarly to the imp.find_module and
- imp.load_module functions in __import_module__ below. Unfortunately,
- the implementation of PEP 302 is such that the path for the module
- has already been found by the time that the "finder" object is
- constructed, so it's not suitable for our purposes.
-
--This module uses pyMPI. It was originally designed with mpi4py, and
- switching back to mpi4py requires only minor modifications. To
- quickly substitute mpi4py for pyMPI, the 'import mpi' line below can
- be replaced with the following wrapper:
-
-from mpi4py import MPI
-class mpi(object):
-    rank = MPI.COMM_WORLD.Get_rank()
-    @staticmethod
-    def bcast(obj=None,root=0):
-        return MPI.COMM_WORLD.bcast(obj,root)
-
--An alternate version of this module had rank 0 perform all of the
- lookups, and then broadcast the locations all-at-once when that
- process reached the end of the context manager. This was somewhat
- faster than the current implementation, but was prone to deadlock
- when loading modules containing MPI synchronization points.
-
--The 'level' parameter to the import hook is not handled correctly; we
- treat it as if it were -1 (try relative and absolute imports). For
- more information about the level parameter, run 'help(__import__)'.
+Since calls like os.path.isfile are expensive, I've added an alternate
+version called simple_finder. Instead of figuring out where all of the
+modules in sys.path are located, we just cache the contents of
+directories on sys.path and use the standard probing algorithm for the
+imports. This is much cheaper at startup and easier to maintain. It
+appears to be a bit faster than the MPI-enabled finders, though that
+will depend on the number of modules in sys.path as well as the number
+of modules actually imported.
 """
 
-import sys, imp, __builtin__,types
-from mpi4py import MPI
-class mpi(object):
-    rank = MPI.COMM_WORLD.Get_rank()
-    @staticmethod
-    def bcast(obj=None,root=0):
-        return MPI.COMM_WORLD.bcast(obj,root)
+import sys,os,imp
 
-class mpi_import(object):
-    def __enter__(self):
-        imp.acquire_lock()
-        __import_hook__.mpi_import = self
-        self.__funcs = []
-        self.original_import = __builtin__.__import__
-        __builtin__.__import__ = __import_hook__
+class finder(object):
+    def __init__(self,skip_checks=True,build=True):
+        """Build a finder object.
 
-    def __exit__(self,type,value,traceback):
-        __builtin__.__import__ = self.original_import
-        __import_hook__.mpi_import = None
-        imp.release_lock()
-        for f in self.__funcs:
-            f()
+        Arguments:
+        - skip_checks: Don't test whether modules are readable while building
+                       the cache. This improves performance, but can cause an
+                       unreadable file that looks like a Python module to
+                       shadow a readable module with the same name later
+                       in sys.path.
+        - build: if set, build the cache now. This is used in the mpi4py_finder
+                 and pympi_finder extensions.
+        """
+        # Store some suffix and module description information
+        t = imp.get_suffixes()
+        self.skip_checks = skip_checks
+        self._suffixes = [x[0] for x in t] # in order of precedence
+        self._rsuffixes = self._suffixes[::-1] # and in reverse order
+        self._suffix_tuples = dict((x[0],tuple(x)) for x in t)
 
-    def callAfterImport(self,f):
-        "Add f to the list of functions to call on exit"
-        if type(f) != types.FunctionType:
-            raise TypeError("Argument must be a function!")
-        self.__funcs.append(f)
+        # We store the value of sys.path in _syspath so we can keep track
+        # of changes. _cache is a dictionary mapping module names to tuples
+        # containing the information needed to load the module (path and
+        # module description).
+        if build:
+            self._syspath = list(sys.path)
+            self._build_cache()
+        else: # For some subclasses
+            self._syspath = []
+            self._cache = {}
 
+    def _build_cache(self):
+        """Traverse sys.path, building (or re-building) the cache."""
+        import os
+        self._cache = {}
+        for d in self._syspath:
+            self._process_dir(os.path.realpath(d))
 
-# The remaining code is for internal use only. Do not explicitly call
-# call any of the following functions.
+    def find_module(self,fullname,path=None):
+        """Return self if 'fullname' is in sys.path (and isn't a builtin or
+        frozen module)."""
+        # First, make sure our cache is up-to-date. (We could combine
+        # the append/prepend cases and more generally handle the case where
+        # self._syspath is a sublist of the new sys.path, but is that worth
+        # the effort? It's only beneficial if we encounter code where sys.path
+        # is both prepended to and appended to, and there isn't an import
+        # statement in between.)
+        if sys.path != self._syspath:
+            stored_length = len(self._syspath)
+            real_length = len(sys.path)
+            rebuild = False
+            # If sys.path isn't bigger, we need to rebuild the cache
+            # but not before we update self._syspath.
+            if real_length <= stored_length:
+                rebuild = True
+            # Some directories were prepended to the path, so add them.
+            elif self._syspath == sys.path[-stored_length:]:
+                for d in sys.path[real_length-stored_length-1::-1]:
+                    self._process_dir(os.path.realpath(d),prepend=True)
+            # Directories appended to the path.
+            elif self._syspath == sys.path[:len(self._syspath)]:
+                for d in sys.path[stored_length-real_length:]:
+                    self._process_dir(os.path.realpath(d))
+            # Path otherwise modified, so we need to rebuild the cache.
+            else:
+                rebuild = True
 
-# Replacement for __import__(). Taken from knee.py; unmodified except for the
-# (unused) level parameter.
-def __import_hook__(name, globals=None, locals=None, fromlist=None, level=-1):
-    # TODO: handle level parameter correctly. For now, we'll ignore
-    # it and try both absolute and relative imports.
-    parent = __determine_parent__(globals)
-    q, tail = __find_head_package__(parent, name)
-    m = __load_tail__(q, tail)
-    if not fromlist:
-        return q
-    if hasattr(m, "__path__"):
-        __ensure_fromlist__(m, fromlist)
-    return m
+            # Now update self._syspath
+            self._syspath = list(sys.path)
+            if rebuild:
+                self._build_cache()
+            
+        # Don't override builtin/frozen modules. TODO: Windows registry?
+        if (fullname not in sys.builtin_module_names and
+            not imp.is_frozen(fullname) and
+            fullname in self._cache):
+            #print "__IMPORTING ",fullname
+            return self
+        return None
 
-# __import_module__ is the only part of knee.py with non-trivial changes.
-# The MPI rank 0 process handles the lookup and broadcasts the location to
-# the others. This must be called synchronously, at least in the case that
-# 'fqname' is not already in sys.modules.
-def __import_module__(partname, fqname, parent):
-    fqname = fqname.rstrip(".")
-    try:
-        return sys.modules[fqname]
-    except KeyError:
-        pass
-    fp = None         # module's file
-    pathname = None   # module's location
-    stuff = None      # tuple of (suffix,mode,type) for the module
-    ierror = False    # are we propagating an import error from rank 0?
+    def load_module(self,fullname):
+        """Load the module fullname using cached path."""
+        if fullname in self._cache:
+            if fullname in sys.modules:
+                return sys.modules[fullname]
+            pathname,desc = self._cache[fullname]
+            #print "__LOADING ",fullname,pathname
+            if os.path.isfile(pathname):
+                # (If we're loading a PY_SOURCE file, the interpreter will
+                # automatically check for a compiled (.py[c|o]) file.)
+                with open(pathname,desc[1]) as f:
+                    mod = imp.load_module(fullname,f,pathname,desc)
+            # Not a file, so it's a package directory
+            else:
+                mod = imp.load_module(fullname,None,pathname,desc)
+            mod.__loader__ = self
+            return mod
+        raise ImportError("This shouldn't happen!")
 
-    # Start with the lookup on rank 0. The other processes will be waiting
-    # on a broadcast, so we need to send one even if we're bailing out due
-    # to an import error.
-    if mpi.rank == 0:
+
+    # Build up a dict of modules (including package directories) found in a
+    # directory. If this directory has been prepended to the path, we need to
+    # overwrite any conflicting entries in the cache. To make sure precedence
+    # is correct, we'll reverse the list of suffixes when we're prepending.
+    #
+    # Rather than add a lot of checks here to make sure we don't stomp on a
+    # builtin module, we'll just reject these in find_module
+    def _process_dir(self,dir,parent=None,prepend=False,visited=None):
+        """Process a directory dir, looking for valid modules.
+
+        Arguments:
+        dir -- (an absolute, real path to a directory)
+        parent -- parent module, in the case where dir is a package directory
+        prepend -- True if dir has just been prepended to sys.path. In that
+                   case, we'll replace existing cached entries with the same
+                   module name.
+        visited -- list of the real paths of visited directories. Used to
+                   prevent infinite recursion in the case of symlink cycles
+                   in package subdirectories.
+        """
+        import stat
+        
+        # Avoid symlink cycles in a package.
+        if not visited:
+            visited = [dir]
+        elif dir not in visited:
+            visited.append(dir)
+        else:
+            return
+
+        # All files and subdirs. Store the name and the path.
         try:
-            fp, pathname, stuff = imp.find_module(partname,
-                                                  parent and parent.__path__)
-        except ImportError:
-            ierror = True
-            return None
-        finally:
-            pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
-    else:
-        pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
-        if ierror:
-            return None
-        # If imp.find_module returned an open file to rank 0, then we should
-        # open the corresponding file for this process too.
-        if stuff and stuff[1]:
-            fp = open(pathname,stuff[1])
+            contents = dict((x,os.path.join(dir,x))
+                            for x in os.listdir(dir))
+        # Unreadable directory, so skip
+        except OSError:
+            return
 
-    try:
-        m = imp.load_module(fqname, fp, pathname, stuff)
-    finally:
-        if fp: fp.close()
-    if parent:
-        setattr(parent, partname, m)
-    return m
+        # If this is a possible package directory with no __init__.py, bail
+        # out. If __init__.py is there, we need to see if there's an existing
+        # module by that name.
+        if parent:
+            if "__init__.py" not in contents:
+                return
+            if not (self.skip_checks or
+                    os.access(os.path.join(dir,"__init__.py"),os.R_OK)):
+                return
+            if parent in self._cache and not prepend:
+                return
+            # Okay, this is a valid, non-duplicate module.
+            self._cache[parent] = (dir,('','',imp.PKG_DIRECTORY))
+            
+        # Split contents into files & subdirs (only stat each one once)
+        files = {}
+        subdirs = {}
+        for entry in contents:
+            try:
+                mode = os.stat(contents[entry]).st_mode
+            except OSError:
+                continue # couldn't read!
+            if stat.S_ISDIR(mode) and (self.skip_checks or
+                                       os.access(contents[entry],os.R_OK)):
+                subdirs[entry] = contents[entry]
+            elif stat.S_ISREG(mode) and (self.skip_checks or
+                                         os.access(contents[entry],os.R_OK)):
+                files[entry] = contents[entry]
 
+        # Package directories have the highest precedence. But when prepend is
+        # True, we need to reverse the order here. We'll do this with these
+        # nested functions.
+        def process_subdirs():
+            for d in subdirs:
+                fqname = parent+"."+d if parent else d # fully qualified name
+                self._process_dir(os.path.join(dir,d),fqname,prepend,visited)
 
-# The remaining functions are taken unmodified (except for the names)
-# from knee.py.
-def __determine_parent__(globals):
-    if not globals or  not globals.has_key("__name__"):
+        def process_files():
+            ordered_suffixes = self._rsuffixes if prepend else self._suffixes
+            for s in ordered_suffixes:
+                l = len(s)
+                for f in files:
+                    # Check for matching suffix.
+                    if f[-l:] == s:
+                        fqname = parent+"."+f[:-l] if parent else f[:-l]
+                        if fqname not in self._cache or prepend:
+                                self._cache[fqname] = (files[f],
+                                                       self._suffix_tuples[s])
+
+        if prepend:
+            process_files()
+            process_subdirs()
+        else:
+            process_subdirs()
+            process_files()
+
+                                
+"""Finder that lets one MPI process do all of the initial caching.
+"""
+class pympi_finder(finder):        
+    def __init__(self,skip_checks=True):
+        import mpi
+        if mpi.rank == 0:
+            finder.__init__(self,skip_checks)
+        else:
+            finder.__init__(self,skip_checks,False)
+        self._syspath,self._cache = mpi.bcast((self._syspath,self._cache))
+
+"""Finder that lets one MPI process do all of the initial caching.
+"""
+class mpi4py_finder(finder):        
+    def __init__(self,skip_checks=True):
+        from mpi4py import MPI
+        comm = MPI.COMM_WORLD
+        rank = comm.Get_rank()
+        if rank == 0:
+            finder.__init__(self,skip_checks)
+        else:
+            finder.__init__(self,skip_checks,False)
+        self._syspath,self._cache = comm.bcast((self._syspath,self._cache))
+
+"""
+Alternate version of cached_import. Instead of caching locations,
+just cache directory contents. Then mimic the standard import probing
+algorithm.
+
+This has not been thoroughly tested!
+"""
+
+class simple_finder(object):    
+    def __init__(self):
+        # _contents is a representation of the files located in
+        # sys.path (including, in the case of module packages, any
+        # subdirectories that are encountered in the import process).
+        # For each string in sys.path or subdirectory visited,
+        # _contents contains a dict mapping the filenames in the
+        # directory to full paths. If the string doesn't represent a
+        # valid directory, then the dict is empty.
+        self._contents = {}
+        for d in sys.path:
+            self._process_dir(d)
+
+    # Search for a module 'name' in the cached directory listing for 'path'
+    def _search(self,name,path):
+        # If we haven't cached the directory, do so now.
+        if path not in self._contents:
+            self._process_dir(path)
+        listing = self._contents[path]
+        # First check for a package directory.
+        try:
+            if (name in listing and
+                os.path.isdir(listing[name]) and
+                os.path.isfile(os.path.join(listing[name],
+                                            "__init__.py"))):
+                return listing[name],('','',imp.PKG_DIRECTORY)
+        except OSError:
+            pass
+        # Now probe for name.so, namemodule.so, name.py, etc.
+        for suffix in imp.get_suffixes():
+            s = name+suffix[0]
+            if (s in listing and
+                os.path.isfile(listing[s]) and
+                os.access(listing[s],os.R_OK)):
+                return listing[s],suffix
+        return None,None
+    
+    # Don't use this directly. We need more state than the load_module
+    # signature allows, so we'll return a loader object for any module
+    # that we have found.
+    class _loader(object):
+        def __init__(self,fullname,path,desc,finder):
+            self._fullname = fullname
+            self._path = path
+            self._desc = desc
+            self._finder = finder
+
+        def load_module(self,fullname):
+            """Load the module fullname using cached path."""
+            if fullname != self._fullname:
+                raise ImportError 
+            if os.path.isfile(self._path): # check desc instead?
+                with open(self._path,self._desc[1]) as f:
+                    mod = imp.load_module(fullname,f,self._path,self._desc)
+            # Not a file, so it's a package directory
+            else:
+                mod = imp.load_module(fullname,None,self._path,self._desc)
+            mod.__loader__ = self._finder
+            return mod
+
+    # "Loader" for modules that have already been imported.
+    class _null_loader(object):
+        def load_module(self,fullname):
+            return sys.modules[fullname]
+
+    def find_module(self,fullname,path=None):
+        """Return self if 'fullname' is in sys.path (and isn't a builtin or
+        frozen module)."""
+        if fullname in sys.modules:
+            return simple_finder._null_loader()
+        # Don't override builtin/frozen modules. TODO: Windows registry?
+        if (fullname not in sys.builtin_module_names and
+            not imp.is_frozen(fullname)):
+            if path:
+                iterpath = path
+                name = fullname.split('.')[-1]
+            else:
+                iterpath = sys.path
+                name = fullname
+            for dir in iterpath:
+                loadpath,desc = self._search(name,dir)
+                if loadpath:
+                    break
+            #print "__IMPORTING ",fullname
+            if loadpath:
+                return simple_finder._loader(fullname,loadpath,desc,self)
         return None
-    pname = globals['__name__']
-    if globals.has_key("__path__"):
-        parent = sys.modules[pname]
-        assert globals is parent.__dict__
-        return parent
-    if '.' in pname:
-        i = pname.rfind('.')
-        pname = pname[:i]
-        parent = sys.modules[pname]
-        assert parent.__name__ == pname
-        return parent
-    return None
 
-def __find_head_package__(parent, name):
-    if '.' in name:
-        i = name.find('.')
-        head = name[:i]
-        tail = name[i+1:]
-    else:
-        head = name
-        tail = ""
-    if parent:
-        qname = "%s.%s" % (parent.__name__, head)
-    else:
-        qname = head
-    q = __import_module__(head, qname, parent)
-    if q: return q, tail
-    if parent:
-        qname = head
-        parent = None
-        q = __import_module__(head, qname, parent)
-        if q: return q, tail
-    raise ImportError, "No module named " + qname
+    def _process_dir(self,dir):
+        """
+        Arguments:
+        dir -- path of the directory whose contents should be cached
+        """
+        # All files and subdirs. Store the name and the path.
+        try:
+            contents = dict((x,os.path.join(dir,x))
+                            for x in os.listdir(dir))
+        # Unreadable directory, so skip
+        except OSError:
+            contents = {}
 
-def __load_tail__(q, tail):
-    m = q
-    while tail:
-        i = tail.find('.')
-        if i < 0: i = len(tail)
-        head, tail = tail[:i], tail[i+1:]
-        mname = "%s.%s" % (m.__name__, head)
-        m = __import_module__(head, mname, m)
-        if not m:
-            raise ImportError, "No module named " + mname
-    return m
-
-def __ensure_fromlist__(m, fromlist, recursive=0):
-    for sub in fromlist:
-        if sub == "*":
-            if not recursive:
-                try:
-                    all = m.__all__
-                except AttributeError:
-                    pass
-                else:
-                    __ensure_fromlist__(m, all, 1)
-            continue
-        if sub != "*" and not hasattr(m, sub):
-            subname = "%s.%s" % (m.__name__, sub)
-            submod = __import_module__(sub, subname, m)
-            if not submod:
-                raise ImportError, "No module named " + subname
+        self._contents[dir] = contents
 
 # Now we import all the yt.mods items.
-with mpi_import():
-    if MPI.COMM_WORLD.rank == 0: print "Beginning parallel import block."
-    from yt.mods import *
-    if MPI.COMM_WORLD.rank == 0: print "Ending parallel import block."
+import sys
+sys.meta_path.append(mpi4py_finder())
+from yt.mods import *
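
The heart of both MPI-enabled finders above is the rank-0-scans,
broadcast-to-all pattern: only one process pays for walking sys.path, and
every rank ends up with an identical cache, so imports stay synchronous
across the job. A sketch of that pattern in isolation (build_cache is a
hypothetical, trivial stand-in for the real directory scan; requires
mpi4py):

    from mpi4py import MPI

    def build_cache():
        # hypothetical stand-in for the real scan: walk sys.path once,
        # mapping module names to file locations (cheap stub here)
        import sys
        return dict((i, d) for i, d in enumerate(sys.path))

    comm = MPI.COMM_WORLD
    cache = build_cache() if comm.Get_rank() == 0 else None
    cache = comm.bcast(cache, root=0)   # now identical on every rank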


https://bitbucket.org/yt_analysis/yt-3.0/commits/a1ff99289b02/
Changeset:   a1ff99289b02
Branch:      yt
User:        atmyers
Date:        2013-03-07 22:38:49
Summary:     adding a frontend for Pluto
Affected #:  12 files

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -39,8 +39,8 @@
      ST_CTIME
 
 from .definitions import \
-     pluto2enzoDict, \
-     yt2plutoFieldsDict, \
+     chombo2enzoDict, \
+     yt2chomboFieldsDict, \
      parameterDict \
 
 from yt.funcs import *
@@ -250,7 +250,7 @@
         seconds = 1 #self["Time"]
         for unit in sec_conversion.keys():
             self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2plutoFieldsDict:
+        for key in yt2chomboFieldsDict:
             self.conversion_factors[key] = 1.0
 
     def _setup_nounits_units(self):
@@ -270,29 +270,22 @@
 
     def _parse_parameter_file(self):
         """
-        Check to see whether a 'pluto.ini' or 'orion2.ini' file
+        Check to see whether an 'orion2.ini' file
         exists in the plot file directory. If one does, attempt to parse it.
-        Otherwise, assume the left edge starts at 0 and get the right edge
-        from the hdf5 file.
+        Otherwise grab the dimensions from the hdf5 file.
         """
-        if os.path.isfile('pluto.ini'):
-            self._parse_pluto_file('pluto.ini')
-        else:
-            if os.path.isfile('orion2.ini'): self._parse_pluto_file('orion2.ini')
-            self.unique_identifier = \
-                int(os.stat(self.parameter_filename)[ST_CTIME])
-            self.domain_left_edge = self.__calc_left_edge()
-            self.domain_right_edge = self.__calc_right_edge()
-            self.domain_dimensions = self.__calc_domain_dimensions()
-            self.dimensionality = 3
-            self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        
+        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+        self.dimensionality = 3
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
 
-    def _parse_pluto_file(self, ini_filename):
-        """
-        Reads in an inputs file in the 'pluto.ini' format. Probably not
-        especially robust at the moment.
-        """
+    def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
         self.ini_filename = self._localize( \
             self.ini_filename, ini_filename)
@@ -305,8 +298,8 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if pluto2enzoDict.has_key(param):
-                paramName = pluto2enzoDict[param]
+            if chombo2enzoDict.has_key(param):
+                paramName = chombo2enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
                 if len(t) == 1:
                     self.parameters[paramName] = t[0]
@@ -336,13 +329,14 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            valid = "Chombo_global" in fileh["/"]
-            fileh.close()
-            return valid
-        except:
-            pass
+        if not os.path.isfile('pluto.ini'):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
         return False
 
     @parallel_root_only
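
The net effect of this _is_valid change is that the Chombo and Pluto
frontends disambiguate on the presence of 'pluto.ini': Chombo claims a
Chombo_global-style HDF5 file only when no 'pluto.ini' is found, while the
new Pluto frontend below claims it exactly when one is. Note that both
checks look in the current working directory, not the plotfile's
directory. A one-line sketch of the resulting dispatch:

    import os
    # True -> PlutoStaticOutput._is_valid fires; False -> Chombo may claim it
    is_pluto = os.path.isfile('pluto.ini')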

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -56,10 +56,10 @@
                  "NumberOfParticleAttributes": int,
                                  }
 
-pluto2enzoDict = {"GAMMA": "Gamma",
+chombo2enzoDict = {"GAMMA": "Gamma",
                   "Ref_ratio": "RefineBy"
                                     }
 
-yt2plutoFieldsDict = {}
-pluto2ytFieldsDict = {}
+yt2chomboFieldsDict = {}
+chombo2ytFieldsDict = {}
 

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/api.py
--- /dev/null
+++ b/yt/frontends/pluto/api.py
@@ -0,0 +1,41 @@
+"""
+API for yt.frontends.pluto
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+Homepage: http://yt.Chombotools.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      PlutoGrid, \
+      PlutoHierarchy, \
+      PlutoStaticOutput
+
+from .fields import \
+      PlutoFieldInfo, \
+      add_pluto_field
+
+from .io import \
+      IOHandlerChomboHDF5

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/data_structures.py
--- /dev/null
+++ b/yt/frontends/pluto/data_structures.py
@@ -0,0 +1,307 @@
+"""
+Data structures for Pluto.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 Matthew Turk, J. S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     pluto2enzoDict, \
+     yt2plutoFieldsDict, \
+     parameterDict \
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import PlutoFieldInfo, KnownPlutoFields
+
+class PlutoGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index is not None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class PlutoHierarchy(AMRHierarchy):
+
+    grid = PlutoGrid
+
+    def __init__(self,pf,data_style='chombo_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['/level_0']['data:datatype=0'].dtype.name
+        self._levels = self._handle.keys()[2:]
+        AMRHierarchy.__init__(self,pf,data_style)
+
+    def _detect_fields(self):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        self.field_list = [c[1] for c in self._handle['/'].attrs.items()[-ncomp:]]
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        # this relies on the first Group in the H5 file being
+        # 'Chombo_global' and the second 'Expressions'
+        levels = f.keys()[2:]
+        grids = []
+        self.dds_list = []
+        i = 0
+        for lev in levels:
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            boxes = f[lev]['boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'])
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+#        self.grids = np.array(self.grids, dtype='object')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class PlutoStaticOutput(StaticOutput):
+    _hierarchy_class = PlutoHierarchy
+    _fieldinfo_fallback = PlutoFieldInfo
+    _fieldinfo_known = KnownPlutoFields
+
+    def __init__(self, filename, data_style='chombo_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle.attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'chombo' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2plutoFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        """
+        Reads in an inputs file in the 'pluto.ini' format. Probably not
+        especially robust at the moment.
+        """
+
+        ini_filename = 'pluto.ini'
+        self.fullplotdir = os.path.abspath(self.parameter_filename)
+        self.ini_filename = self._localize( \
+            self.ini_filename, ini_filename)
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        lines = open(self.ini_filename).readlines()
+        # read the file line by line, storing important parameters
+        for lineI, line in enumerate(lines):
+            try:
+                param, sep, vals = map(rstrip,line.partition(' '))
+            except ValueError:
+                mylog.error("ValueError: '%s'", line)
+            if pluto2enzoDict.has_key(param):
+                paramName = pluto2enzoDict[param]
+                t = map(parameterDict[paramName], vals.split())
+                if len(t) == 1:
+                    self.parameters[paramName] = t[0]
+                else:
+                    if paramName == "RefineBy":
+                        self.parameters[paramName] = t[0]
+                    else:
+                        self.parameters[paramName] = t
+
+            # assumes 3D for now
+            elif param.startswith("X1-grid"):
+                t = vals.split()
+                low1 = float(t[1])
+                high1 = float(t[4])
+                N1 = int(t[2])
+            elif param.startswith("X2-grid"):
+                t = vals.split()
+                low2 = float(t[1])
+                high2 = float(t[4])
+                N2 = int(t[2])
+            elif param.startswith("X3-grid"):
+                t = vals.split()
+                low3 = float(t[1])
+                high3 = float(t[4])
+                N3 = int(t[2])
+            
+        self.dimensionality = 3
+        self.domain_left_edge = np.array([low1,low2,low3])
+        self.domain_right_edge = np.array([high1,high2,high3])
+        self.domain_dimensions = np.array([N1,N2,N3])
+        self.refine_by = self.parameters["RefineBy"]
+            
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        return os.path.isfile('pluto.ini')
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
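
For reference, a hedged example of the grid lines _parse_parameter_file
expects to find in 'pluto.ini', inferred from the parsing above (t[1] is
the left edge, t[2] the cell count, t[4] the right edge; a real Pluto
input file may carry more columns or multiple patches per axis):

    X1-grid    1    0.0    128    u    1.0
    X2-grid    1    0.0    128    u    1.0
    X3-grid    1    0.0    128    u    1.0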

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/definitions.py
--- /dev/null
+++ b/yt/frontends/pluto/definitions.py
@@ -0,0 +1,65 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+pluto2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2plutoFieldsDict = {}
+pluto2ytFieldsDict = {}
+

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/fields.py
--- /dev/null
+++ b/yt/frontends/pluto/fields.py
@@ -0,0 +1,97 @@
+"""
+Pluto-specific fields
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2009-2011 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+KnownPlutoFields = FieldInfoContainer()
+add_pluto_field = KnownPlutoFields.add_field
+
+PlutoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = PlutoFieldInfo.add_field
+
+add_pluto_field("rho", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("density")],
+                 units=r"\rm{g}/\rm{cm}^3")
+
+KnownPlutoFields["rho"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_pluto_field("vx1", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Momentum")],
+                 units=r"",display_name=r"M_x")
+KnownPlutoFields["vx1"]._projected_units=r""
+
+add_pluto_field("vx2", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Momentum")],
+                 units=r"",display_name=r"M_y")
+KnownPlutoFields["vx2"]._projected_units=r""
+
+add_pluto_field("vx3", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Z-Momentum")],
+                 units=r"",display_name=r"M_z")
+KnownPlutoFields["vx3"]._projected_units=r""
+
+add_pluto_field("prs", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+
+def _Density(field,data):
+    """A duplicate of the density field. This is needed because when you try 
+    to instantiate a PlotCollection without passing in a center, the code
+    will try to generate one for you using the "Density" field, which gives an error 
+    if it isn't defined.
+
+    """
+    return data["rho"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
+
+def _Xmomentum(field, data):
+    """ Generate x-momentum. """
+    return data["vx1"]*data["density"]
+add_field("X-momentum",function=_Xmomentum, take_log=False,
+          units=r'\rm{g}/\rm{cm^2 s}')
+
+def _Ymomentum(field, data):
+    """ Generate y-momentum  """
+    return data["vx2"]*data["density"]
+add_field("Y-momentum",function=_Ymomentum, take_log=False,
+          units=r'\rm{g}/\rm{cm^2 s}')
+
+def _Zmomentum(field,data):
+    """ Generate z-momentum"""
+    return data["vx3"]*data["density"]
+add_field("Z-Momentum",function=_Zmomentum, take_log=False,
+          units=r'\rm{g}/\rm{cm^2 s}')
+
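
A usage sketch for the fields defined above: once the frontend is
registered, the native "rho" field and its "Density" alias behave like any
other yt field. The plotfile name is an assumption, and per _is_valid the
load only succeeds with a 'pluto.ini' in the working directory:

    from yt.mods import load

    pf = load("data.0010.hdf5")   # assumed Pluto plotfile name
    dd = pf.h.all_data()
    print dd["Density"].max()     # alias for the native "rho" field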

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/io.py
--- /dev/null
+++ b/yt/frontends/pluto/io.py
@@ -0,0 +1,73 @@
+"""
+The data-file handling functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerPlutoHDF5(BaseIOHandler):
+    _data_style = "pluto_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        temp =  self._handle['/'].attrs.items()[-ncomp:]
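+        # attrs stores pairs like ("component_0", "density"); the component
+        # names are unpacked into val and the field names into keys, giving a
+        # map of field name -> component index.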
+        val, keys = zip(*temp)
+        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+        self._field_dict = dict(zip(keys,val))
+        return self._field_dict
+        
+    def _read_field_names(self, grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        # Field names are stored as the trailing attribute values on the
+        # root group of the file opened in __init__.
+        fns = [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+        return fns
+
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
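The layout assumed by _read_data packs every component of every grid on a
level into a single flat "data:datatype=0" array, so a field is recovered by
pure offset arithmetic.  A sketch with illustrative values only:

    boxsize = 16 * 16 * 16          # cells in one (hypothetical) grid
    grid_offset = 40960             # where this grid's block starts
    field_index = 2                 # e.g. field_dict["vx2"]
    start = grid_offset + field_index * boxsize
    stop = start + boxsize          # lev["data:datatype=0"][start:stop]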

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/pluto/setup.py
--- /dev/null
+++ b/yt/frontends/pluto/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('pluto', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("maestro")
     config.add_subpackage("castro")
     config.add_subpackage("stream")
+    config.add_subpackage("pluto")
     return config

diff -r 052135b61bdc98c611bb8371af4e2722f3b55f74 -r a1ff99289b028229a1570481842295a9fb28b31b yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -102,6 +102,9 @@
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 
+from yt.frontends.pluto.api import \
+     PlutoStaticOutput, PlutoFieldInfo, add_pluto_field
+
 #from yt.frontends.maestro.api import \
 #    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/b59c9b89ad06/
Changeset:   b59c9b89ad06
Branch:      yt
User:        atmyers
Date:        2013-03-07 22:55:00
Summary:     merging
Affected #:  63 files

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -837,16 +837,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>&1 && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,61 @@
 import sys
 import time
 import subprocess
+import shutil
+import glob
 import distribute_setup
 distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
 from distutils import version
 
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+
+class BuildForthon(Command):
+
+    """Command for building Forthon modules"""
+
+    description = "Build Forthon modules"
+    user_options = []
+
+    def initialize_options(self):
+
+        """init options"""
+
+        pass
+
+    def finalize_options(self):
+
+        """finalize options"""
+
+        pass
+
+    def run(self):
+
+        """runner"""
+        Forthon_exe = find_executable("Forthon")
+        gfortran_exe = find_executable("gfortran")
+
+        if None in (Forthon_exe, gfortran_exe):
+            sys.stderr.write(
+                "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+            )
+            return
+
+        cwd = os.getcwd()
+        os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+        cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+               "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+               "fKD_source.f90"]
+        subprocess.check_call(cmd, shell=False)
+        shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+        os.chdir(cwd)
+
 REASON_FILES = []
 REASON_DIRS = [
     "",
@@ -36,7 +83,7 @@
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append( (dir_name, files) )
+    REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 try:
@@ -93,10 +140,10 @@
             language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
-                                                   options=options)
+                                                     options=options)
         if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython" \
-                  % (cython_result.num_errors, source))
+            raise DistutilsError("%d errors while compiling %r with Cython"
+                                 % (cython_result.num_errors, source))
     return target_file
 
 
@@ -109,7 +156,9 @@
 
 VERSION = "2.5dev"
 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+    os.remove('MANIFEST')
+
 
 def get_mercurial_changeset_id(target_dir):
     """adapted from a script by Jason F. Harris, published at
@@ -123,11 +172,11 @@
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      shell=True)
-        
+
     if (get_changeset.stderr.read() != ""):
         print "Error in obtaining current changeset of the Mercurial repository"
         changeset = None
-        
+
     changeset = get_changeset.stdout.read().strip()
     if (not re.search("^[0-9a-f]{12}", changeset)):
         print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +184,26 @@
 
     return changeset
 
+
+class my_build_src(build_src.build_src):
+    def run(self):
+        self.run_command("build_forthon")
+        build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+    def run(self):
+        self.distribution.data_files.append(
+            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+        )
+        np_install_data.install_data.run(self)
+
 class my_build_py(build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
-            target_dir = os.path.join(self.build_lib,'yt')
-            src_dir =  os.getcwd() 
+            target_dir = os.path.join(self.build_lib, 'yt')
+            src_dir = os.getcwd()
             changeset = get_mercurial_changeset_id(src_dir)
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +211,7 @@
 
             build_py.run(self)
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -158,7 +222,7 @@
                        quiet=True)
 
     config.make_config_py()
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
@@ -176,25 +240,25 @@
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
         classifiers=["Development Status :: 5 - Production/Stable",
-            "Environment :: Console",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: GNU General Public License (GPL)",
-            "Operating System :: MacOS :: MacOS X",
-            "Operating System :: POSIX :: AIX",
-            "Operating System :: POSIX :: Linux",
-            "Programming Language :: C",
-            "Programming Language :: Python",
-            "Topic :: Scientific/Engineering :: Astronomy",
-            "Topic :: Scientific/Engineering :: Physics",
-            "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' + \
-            'amr adaptivemeshrefinement',
+                     "Environment :: Console",
+                     "Intended Audience :: Science/Research",
+                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "Operating System :: MacOS :: MacOS X",
+                     "Operating System :: POSIX :: AIX",
+                     "Operating System :: POSIX :: Linux",
+                     "Programming Language :: C",
+                     "Programming Language :: Python",
+                     "Topic :: Scientific/Engineering :: Astronomy",
+                     "Topic :: Scientific/Engineering :: Physics",
+                     "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' +
+        'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
-                            'yt = yt.utilities.command_line:run_main',
-                      ],
-                      'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-                      ]
+        'yt = yt.utilities.command_line:run_main',
+        ],
+            'nose.plugins.0.10': [
+                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+            ]
         },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
@@ -203,8 +267,9 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
-        cmdclass = {'build_py': my_build_py},
-        )
+        cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+                  'build_src': my_build_src, 'install_data': my_install_data},
+    )
     return
 
 if __name__ == '__main__':
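
With the cmdclass registration above, the Forthon step can be invoked on its
own with "python setup.py build_forthon", and because my_build_src runs it
ahead of build_src, a regular build (e.g. the install script's
"python setup.py develop") should pick it up automatically, provided Forthon
and gfortran are on the PATH; otherwise the step is skipped with a warning.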

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
             If True, use dynamic load balancing to create the projections.
             Default: False.
 
-        Getting the Nearest Galaxies
-        ----------------------------
+        Notes
+        -----
+
         The light ray tool will use the HaloProfiler to calculate the
         distance and mass of the nearest halo to that pixel.  In order
         to do this, a dictionary called halo_profiler_parameters is used

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -164,6 +164,13 @@
         If set to ``True``, it will be assumed that there are only dark
         matter particles present in the simulation. This can save analysis
         time if this is indeed the case. Default: ``False``.
+    hires_dm_mass : float
+        If supplied, use only the highest resolution dark matter
+        particles, with a mass less than (1.1*hires_dm_mass), in units
+        of ParticleMassMsun. This is useful for multi-dm-mass
+        simulations. Note that this will only give sensible results for
+        halos that are not "polluted" by lower resolution
+        particles. Default: ``None``.
         
     Returns
     -------
@@ -187,7 +194,8 @@
     """
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
-            force_res=None, total_particles=None, dm_only=False):
+            force_res=None, total_particles=None, dm_only=False,
+            hires_dm_mass=None):
         mylog.warning("The citation for the Rockstar halo finder can be found at")
         mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
@@ -217,6 +225,7 @@
             self.force_res = force_res
         self.total_particles = total_particles
         self.dm_only = dm_only
+        self.hires_dm_mass = hires_dm_mass
         # Setup pool and workgroups.
         self.pool, self.workgroup = self.runner.setup_pool()
         p = self._setup_parameters(ts)
@@ -227,28 +236,51 @@
     def _setup_parameters(self, ts):
         if self.workgroup.name != "readers": return None
         tpf = ts[0]
+
         def _particle_count(field, data):
-            if self.dm_only:
-                return np.prod(data["particle_position_x"].shape)
             try:
-                return (data["particle_type"]==self.dm_type).sum()
+                data["particle_type"]
+                has_particle_type=True
             except KeyError:
-                return np.prod(data["particle_position_x"].shape)
+                has_particle_type=False
+                
+            if (self.dm_only or (not has_particle_type)):
+                if self.hires_dm_mass is None:
+                    return np.prod(data["particle_position_x"].shape)
+                else:
+                    return (data['ParticleMassMsun'] < self.hires_dm_mass*1.1).sum()
+            elif has_particle_type:
+                if self.hires_dm_mass is None:
+                    return (data["particle_type"]==self.dm_type).sum()
+                else:
+                    return ( (data["particle_type"]==self.dm_type) & 
+                             (data['ParticleMassMsun'] < self.hires_dm_mass*1.1) ).sum()
+            else:                
+                raise RuntimeError() # should never get here
+
         add_field("particle_count", function=_particle_count,
                   not_in_all=True, particle_type=True)
         dd = tpf.h.all_data()
         # Get DM particle mass.
         all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
-        for g in tpf.h._get_objs("grids"):
-            if g.NumberOfParticles == 0: continue
-            if self.dm_only:
-                iddm = Ellipsis
-            elif "particle_type" in all_fields:
-                iddm = g["particle_type"] == self.dm_type
-            else:
-                iddm = Ellipsis
-            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
-            break
+        has_particle_type = ("particle_type" in all_fields)
+
+        if self.hires_dm_mass is None:
+            for g in tpf.h._get_objs("grids"):
+                if g.NumberOfParticles == 0: continue
+
+                if (self.dm_only or (not has_particle_type)):
+                    iddm = Ellipsis
+                elif has_particle_type:
+                    iddm = g["particle_type"] == self.dm_type
+                else:                    
+                    iddm = Ellipsis # should never get here
+
+                particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+                break
+        else:
+            particle_mass = self.hires_dm_mass / tpf.hubble_constant
+
         p = {}
         if self.total_particles is None:
             # Get total_particles in parallel.
@@ -302,6 +334,7 @@
                     force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
                     dm_only = int(self.dm_only),
+                    hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
         if self.comm.rank == 0:
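
A minimal sketch of driving the new keyword (the dataset names and mass
threshold here are hypothetical; the mass is in ParticleMassMsun):

    from yt.mods import TimeSeriesData
    from yt.analysis_modules.halo_finding.rockstar.api import \
        RockstarHaloFinder

    ts = TimeSeriesData.from_filenames("DD????/DD????")
    # Only particles with mass < 1.1 * hires_dm_mass are fed to Rockstar.
    rh = RockstarHaloFinder(ts, num_readers=1, num_writers=1,
                            hires_dm_mass=1.0e7)
    rh.run()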

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -163,6 +163,7 @@
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
+    has_particle_type = ("particle_type" in all_fields)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
@@ -170,12 +171,19 @@
         local_parts = 0
         for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if rh.dm_only:
-                iddm = Ellipsis
-            elif "particle_type" in all_fields:
-                iddm = g["particle_type"] == rh.dm_type
+            if (rh.dm_only or (not has_particle_type)):
+                if rh.hires_only:
+                    iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
+                else:
+                    iddm = Ellipsis
+            elif has_particle_type:
+                if rh.hires_only:
+                    iddm = ( (g["particle_type"]==rh.dm_type) &
+                             (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )                    
+                else:
+                    iddm = g["particle_type"] == rh.dm_type
             else:
-                iddm = Ellipsis
+                iddm = Ellipsis # should never get here
             arri = g["particle_index"].astype("int64")
             arri = arri[iddm] #pick only DM
             local_parts += arri.size
@@ -195,12 +203,19 @@
     pi = 0
     for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
-        if rh.dm_only:
-            iddm = Ellipsis
-        elif "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
+        if (rh.dm_only or (not has_particle_type)):
+            if rh.hires_only:
+                iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
+            else:
+                iddm = Ellipsis
+        elif has_particle_type:
+            if rh.hires_only:
+                iddm = ( (g["particle_type"]==rh.dm_type) &
+                         (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )                    
+            else:
+                iddm = g["particle_type"] == rh.dm_type
+        else:            
+            iddm = Ellipsis # should never get here
         arri = g["particle_index"].astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
@@ -230,6 +245,7 @@
     cdef public int dm_type
     cdef public int total_particles
     cdef public int dm_only
+    cdef public int hires_only
 
     def __cinit__(self, ts):
         self.ts = ts
@@ -244,7 +260,7 @@
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None",
-                       int dm_only = 0):
+                       int dm_only = 0, int hires_only = False):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -276,6 +292,7 @@
         TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
         self.dm_only = dm_only
+        self.hires_only = hires_only
         
         tpf = self.ts[0]
         h0 = tpf.hubble_constant

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
         halonum : int
             Halo number at the last output to trace.
 
-        Output
-        ------
+        Returns
+        -------
         output : dict
             Dictionary of redshifts, cycle numbers, and halo numbers
             of the most massive progenitor.  keys = {redshift, cycle,
@@ -810,6 +810,6 @@
         ax.set_xscale("log")
     if y_log:
         ax.set_yscale("log")
-    ofn = "%s_%s_%s.png" % (basename, fields[0], fields[1])
+    ofn = "%s/%s_%s_%s.png" % (FOF_directory, basename, fields[0], fields[1])
     plt.savefig(ofn)
     plt.clf()

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -758,17 +758,19 @@
     
     def query(self, string):
         r"""Performs a query of the database and returns the results as a list
-        of tuple(s), even if the result is singular.
+        of tuples, even if the result is singular.
         
         Parameters
         ----------
-        string : String
+        
+        string : str
             The SQL query of the database.
         
         Examples
-        -------
+        --------
+
         >>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
-        ... SnapZ = 0;")
+        ...    SnapZ = 0;")
         """
         # Query the database and return a list of tuples.
         if string is None:

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
         After all the calls to `add_profile`, this will trigger the actual
         calculations and output the profiles to disk.
 
-        Paramters
-        ---------
+        Parameters
+        ----------
 
         filename : str
             If set, a file will be written with all of the filtered halos

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
     
     Initialize an EmissivityIntegrator object.
 
-    Keyword Parameters
-    ------------------
-    filename: string
+    Parameters
+    ----------
+    filename: string, default None
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
         emissivity tables for primordial elements and for metals at 
@@ -146,8 +146,8 @@
     e_min: float
         the minimum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -220,8 +220,8 @@
     e_min: float
         the minimum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -277,8 +277,8 @@
     e_min: float
         the minimum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -178,7 +178,7 @@
         self.child_mask = 1
         self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-        
+
     def __getitem__(self, field):
         if field not in self.field_data.keys():
             if field == "RadiusCode":
@@ -424,7 +424,7 @@
         return grids
 
     def select_grid_indices(self, level):
-        return np.where(self.grid_levels == level)
+        return np.where(self.grid_levels[:,0] == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
@@ -461,6 +461,7 @@
     def __get_grid_levels(self):
         if self.__grid_levels == None:
             self.__grid_levels = np.array([g.Level for g in self._grids])
+            self.__grid_levels.shape = (self.__grid_levels.size, 1)
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +475,6 @@
     grid_levels = property(__get_grid_levels, __set_grid_levels,
                              __del_grid_levels)
 
-
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
             self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
@@ -491,6 +491,19 @@
     grid_dimensions = property(__get_grid_dimensions, __set_grid_dimensions,
                              __del_grid_dimensions)
 
+    @property
+    def grid_corners(self):
+        return np.array([
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+        ], dtype='float64')
+
 
 class AMR1DData(AMRData, GridPropertiesMixin):
     _spatial = False
@@ -530,7 +543,7 @@
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
             self[field] = self[field][self._sortkey]
-       
+
 class AMROrthoRayBase(AMR1DData):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
@@ -673,9 +686,9 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+        p = p | ( np.all( LE <= self.start_point, axis=1 )
                 & np.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+        p = p | ( np.all( LE <= self.end_point,   axis=1 )
                 & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
@@ -695,7 +708,7 @@
         if not iterable(gf):
             gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -738,11 +751,11 @@
     --------
 
     >>> from yt.visualization.api import Streamlines
-    >>> streamlines = Streamlines(pf, [0.5]*3) 
+    >>> streamlines = Streamlines(pf, [0.5]*3)
     >>> streamlines.integrate_through_volume()
     >>> stream = streamlines.path(0)
     >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-    
+
     """
     _type_name = "streamline"
     _con_args = ('positions',)
@@ -775,16 +788,16 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # No child masking here; it happens inside the mask cut
-        mask = self._get_cut_mask(grid) 
+        mask = self._get_cut_mask(grid)
         if field == 'dts': return self._dts[grid.id]
         if field == 't': return self._ts[grid.id]
         return grid[field].flat[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1) 
+                         np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
         mask = np.zeros(points_in_grid.sum(), dtype='int')
         dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -819,7 +832,7 @@
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
         self.set_field_parameter("axis",axis)
-        
+
     def _convert_field_name(self, field):
         return field
 
@@ -838,7 +851,6 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(fields)
-        temp_data = {}
         for field in fields_to_get:
             if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
@@ -848,18 +860,13 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = np.array([])
-            else: data = np.concatenate(data)
-            temp_data[field] = data
+            if len(data) == 0:
+                data = np.array([])
+            else:
+                data = np.concatenate(data)
             # Now the next field can use this field
-            self[field] = temp_data[field] 
-        # We finalize
-        if temp_data != {}:
-            temp_data = self.comm.par_combine_object(temp_data,
-                    datatype='dict', op='cat')
-        # And set, for the next group
-        for field in temp_data.keys():
-            self[field] = temp_data[field]
+            self[field] = self.comm.par_combine_object(data, op='cat',
+                                                       datatype='array')
 
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
@@ -874,7 +881,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw
@@ -980,7 +987,7 @@
         for field in fields:
             #mylog.debug("Trying to obtain %s from node %s",
                 #self._convert_field_name(field), node_name)
-            fdata=self.hierarchy.get_data(node_name, 
+            fdata=self.hierarchy.get_data(node_name,
                 self._convert_field_name(field))
             if fdata is not None:
                 #mylog.debug("Got %s from node %s", field, node_name)
@@ -1138,7 +1145,7 @@
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
-        del cmI   # no longer needed 
+        del cmI   # no longer needed
         t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
@@ -1197,7 +1204,7 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1477,7 +1484,7 @@
         self.dims = dims
         self.dds = self.width / self.dims
         self.bounds = np.array([0.0,1.0,0.0,1.0])
-        
+
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1563,7 +1570,7 @@
 
             # Mark these pixels to speed things up
             self._pixelmask[pointI] = 0
-            
+
             return
         else:
             raise SyntaxError("Making a fixed resolution slice with "
@@ -1651,7 +1658,7 @@
         L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
-        
+
 class AMRQuadTreeProjBase(AMR2DData):
     """
     This is a data object corresponding to a line integral through the
@@ -1809,7 +1816,7 @@
             convs[:] = 1.0
         return dls, convs
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1850,7 +1857,7 @@
                                  if g.Level == level],
                               self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -1942,7 +1949,7 @@
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
+        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
@@ -2068,6 +2075,7 @@
                  source=None, node_name = None, field_cuts = None,
                  preload_style='level', serialize=True,**kwargs):
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
+        self.proj_style = "integrate"
         self.weight_field = weight_field
         self._field_cuts = field_cuts
         self.serialize = serialize
@@ -2282,7 +2290,7 @@
                 del self.__retval_coords[grid.id]
                 del self.__retval_fields[grid.id]
                 del self.__overlap_masks[grid.id]
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         coord_data = np.concatenate(coord_data, axis=1)
         field_data = np.concatenate(field_data, axis=1)
@@ -2313,7 +2321,7 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -2521,7 +2529,7 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
@@ -2682,9 +2690,9 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
+        fly with a set of field_cuts.  It is very useful for applying
         conditions to the fields in your data object.
-        
+
         Examples
         --------
         To find the total mass of gas above 10^6 K in your volume:
@@ -2725,7 +2733,7 @@
         useful for calculating, for instance, total isocontour area, or
         visualizing in an external program (such as `MeshLab
         <http://meshlab.sf.net>`_.)
-        
+
         Parameters
         ----------
         field : string
@@ -2839,7 +2847,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field : string
@@ -2896,7 +2904,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -2989,7 +2997,7 @@
     ----------------
     force_refresh : bool
        Force a refresh of the data. Defaults to True.
-    
+
     Examples
     --------
     """
@@ -3229,7 +3237,7 @@
         if self._grids is not None: return
         GLE = self.pf.h.grid_left_edge
         GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center, 
+        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
                                            GLE, GRE)
         cgrids = self.pf.h.grids[goodI.astype('bool')]
        # find_grids_in_inclined_box seems to be broken.
@@ -3237,13 +3245,13 @@
         grids = []
         for i,grid in enumerate(cgrids):
             v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge, 
+                                      self._rot_mat, grid.LeftEdge,
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
         self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
-            
+
 
     def _is_fully_enclosed(self, grid):
         # This should be written at some point.
@@ -3256,10 +3264,10 @@
             return True
         pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge, 
+                              self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
         return pm
-        
+
 
 class AMRRegionBase(AMR3DData):
     """A 3D region of data with an arbitrary center.
@@ -3395,9 +3403,9 @@
     _dx_pad = 0.0
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
                                        fields = None, pf = None, **kwargs)
-    
+
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3564,7 +3572,7 @@
         self._C = C
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -3574,7 +3582,7 @@
         t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
-        given the tilt about the x axis when e0 was aligned 
+        given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
         RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
@@ -3598,7 +3606,7 @@
         self._refresh_data()
 
         """
-        Having another function find_ellipsoid_grids is too much work, 
+        Having another function find_ellipsoid_grids is too much work,
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
@@ -3686,7 +3694,7 @@
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
-    
+
     Parameters
     ----------
     level : int
@@ -3784,7 +3792,7 @@
             n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
-            
+
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator; this might even raise!
@@ -3812,13 +3820,13 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 0)
         return count
@@ -3834,7 +3842,7 @@
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 1)
 
@@ -3855,7 +3863,7 @@
     fill the region to level 1, replacing any cells actually
     covered by level 1 data, and then recursively repeating this
     process until it reaches the specified `level`.
-    
+
     Parameters
     ----------
     level : int
@@ -3867,10 +3875,11 @@
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
 
-    Example
-    -------
-    cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                              dims=[128, 128, 128])
+    Examples
+    --------
+
+    >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+    ...                          dims=[128, 128, 128])
     """
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -3975,7 +3984,7 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
+        input_left = (self._old_global_startindex + 0.5) * rf
         dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
         output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
@@ -3989,13 +3998,13 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
@@ -4007,14 +4016,14 @@
     """
     This will build a hybrid region based on the boolean logic
     of the regions.
-    
+
     Parameters
     ----------
     regions : list
         A list of region objects and strings describing the boolean logic
         to use when building the hybrid region. The boolean logic can be
         nested using parentheses.
-    
+
     Examples
     --------
     >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4027,7 +4036,7 @@
         sp1, ")"])
     """
     _type_name = "boolean"
-    _con_args = ("regions")
+    _con_args = ("regions",)
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         # Center is meaningless, but we'll define it all the same.
         AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4039,7 +4048,7 @@
         self._get_all_regions()
         self._make_overlaps()
         self._get_list_of_grids()
-    
+
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
@@ -4049,7 +4058,7 @@
             # So cut_masks don't get messed up.
             item._boolean_touched = True
         self._all_regions = np.unique(self._all_regions)
-    
+
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
@@ -4083,7 +4092,7 @@
                     continue
             pbar.update(i)
         pbar.finish()
-    
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4096,7 +4105,7 @@
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
-    
+
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 
@@ -4183,7 +4192,7 @@
     <http://meshlab.sf.net>`_.)  The object has the properties .vertices
     and will sample values if a field is requested.  The values are
     interpolated to the center of a given face.
-    
+
     Parameters
     ----------
     data_source : AMR3DDataObject
@@ -4258,7 +4267,7 @@
                 self[fields] = samples
             elif sample_type == "vertex":
                 self.vertex_samples[fields] = samples
-        
+
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4295,7 +4304,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field_x : string
@@ -4342,7 +4351,7 @@
         return flux
 
     @restore_grid_state
-    def _calculate_flux_in_grid(self, grid, 
+    def _calculate_flux_in_grid(self, grid,
                     field_x, field_y, field_z, fluxing_field = None):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4350,7 +4359,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -4468,7 +4477,7 @@
             w = bounds[i][1] - bounds[i][0]
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
-            v[ax][:] = tmp 
+            v[ax][:] = tmp
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -209,7 +209,7 @@
         pf = self.parameter_file
         if find_max: c = self.find_max("Density")[1]
         else: c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        return self.region(c, 
+        return self.region(c,
             pf.domain_left_edge, pf.domain_right_edge)
 
     def clear_all_data(self):
@@ -308,7 +308,7 @@
             self.save_data = self._save_data
         else:
             self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-    
+
     save_data = parallel_splitter(_save_data, _reload_data_file)
 
     def save_object(self, obj, name):
@@ -367,7 +367,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return self.select_grids(self.grid_levels.max())[0].dds[0]
+        return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)

diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -71,12 +71,12 @@
 
     >>> im = np.zeros([64,128,3])
     >>> for i in xrange(im.shape[0]):
-    >>>     for k in xrange(im.shape[2]):
-    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+    ...     for k in xrange(im.shape[2]):
+    ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
     >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
     >>> im_arr = ImageArray(im, info=myinfo)
     >>> im_arr.save('test_ImageArray')
@@ -112,12 +112,12 @@
         -------- 
         >>> im = np.zeros([64,128,3])
         >>> for i in xrange(im.shape[0]):
-        >>>     for k in xrange(im.shape[2]):
-        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
         >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_hdf5('test_ImageArray.h5')
@@ -133,38 +133,191 @@
             d.attrs.create(k, v)
         f.close()
 
-    def write_png(self, filename, clip_ratio=None):
+    def add_background_color(self, background='black', inline=True):
+        r"""Adds a background color to a 4-channel ImageArray
+
+        This adds a background color to a 4-channel ImageArray, by default
+        doing so inline.  The ImageArray must already be normalized to the
+        [0,1] range.
+
+        Parameters
+        ----------
+        background: string, 4-element array [r,g,b,a], or None, optional
+            This can be used to set a background color for the image, and can
+            take several types of values:
+
+               * ``white``: white background, opaque
+               * ``black``: black background, opaque
+               * ``None``: transparent background
+               * 4-element array [r,g,b,a]: arbitrary rgba setting.
+
+            Default: 'black'
+        inline: boolean, optional
+            If True, original ImageArray is modified. If False, a copy is first
+            created, then modified. Default: True
+
+        Returns
+        -------
+        out: ImageArray
+            The modified ImageArray with a background color added.
+       
+        Examples
+        --------
+        >>> im = np.zeros([64,128,4])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.rescale()
+        >>> new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
+        >>> new_im.write_png('red_bg.png')
+        >>> im_arr.add_background_color('black')
+        >>> im_arr.write_png('black_bg.png')
+        """
+        assert(self.shape[-1] == 4)
+        
+        if background is None:
+            background = (0., 0., 0., 0.)
+        elif background == 'white':
+            background = (1., 1., 1., 1.)
+        elif background == 'black':
+            background = (0., 0., 0., 1.)
+
+        # Alpha blending to background
+        if inline:
+            out = self
+        else:
+            out = self.copy()
+
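+        # Standard "over" compositing, per color channel:
+        #   out = src * a_src + bg * a_bg * (1 - a_src)
+        # with the output alpha given by a_src + a_bg * (1 - a_src).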
+        for i in range(3):
+            out[:,:,i] = self[:,:,i]*self[:,:,3] + \
+                    background[i]*background[3]*(1.0-self[:,:,3])
+        out[:,:,3] = self[:,:,3] + background[3]*(1.0-self[:,:,3]) 
+        return out 
+
+
+    def rescale(self, cmax=None, amax=None, inline=True):
+        r"""Rescales the image to be in [0,1] range.
+
+        Parameters
+        ----------
+        cmax: float, optional
+            Normalization value to use for rgb channels. Defaults to None,
+            corresponding to using the maximum value in the rgb channels.
+        amax: float, optional
+            Normalization value to use for alpha channel. Defaults to None,
+            corresponding to using the maximum value in the alpha channel.
+        inline: boolean, optional
+            Specifies whether or not the rescaling is done inline. If False,
+            a new copy of the ImageArray is created and returned.
+            Default: True.
+
+        Returns
+        -------
+        out: ImageArray
+            The rescaled ImageArray, clipped to the [0,1] range.
+
+        Notes
+        -----
+        This requires that the shape of the ImageArray to have a length of 3,
+        and for the third dimension to be >= 3.  If the third dimension has
+        a shape of 4, the alpha channel will also be rescaled.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,4])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.write_png('original.png')
+        >>> im_arr.rescale()
+        >>> im_arr.write_png('normalized.png')
+
+        """
+        assert(len(self.shape) == 3)
+        assert(self.shape[2] >= 3)
+        if inline:
+            out = self
+        else:
+            out = self.copy()
+        if cmax is None: 
+            cmax = self[:,:,:3].sum(axis=2).max()
+
+        np.multiply(self[:,:,:3], 1./cmax, out[:,:,:3])
+
+        if self.shape[2] == 4:
+            if amax is None:
+                amax = self[:,:,3].max()
+            if amax > 0.0:
+                np.multiply(self[:,:,3], 1./amax, out[:,:,3])
+        
+        np.clip(out, 0.0, 1.0, out)
+        return out
+
+    def write_png(self, filename, clip_ratio=None, background='black',
+                 rescale=True):
         r"""Writes ImageArray to png file.
 
         Parameters
         ----------
         filename: string
             Note that the filename will not be modified.
+        clip_ratio: float, optional
+            If set, the image is clipped before saving at the mean of its
+            nonzero rgb values plus this multiple of their standard
+            deviation.  Useful for enhancing images. Default: None
+        background: string or 4-element array, optional
+            This can be used to set a background color for the image, and can
+            take several types of values:
+
+               * ``white``: white background, opaque
+               * ``black``: black background, opaque
+               * ``None``: transparent background
+               * 4-element array [r,g,b,a]: arbitrary rgba setting.
+
+            Default: 'black'
+        rescale: boolean, optional
+            If True, will write out a rescaled image (without modifying the
+            original image). Default: True
        
         Examples
         --------
-        
-        >>> im = np.zeros([64,128,3])
+        >>> im = np.zeros([64,128,4])
         >>> for i in xrange(im.shape[0]):
-        >>>     for k in xrange(im.shape[2]):
-        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
-
-        >>> im_arr = ImageArray(im, info=myinfo)
-        >>> im_arr.write_png('test_ImageArray.png')
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.write_png('standard.png')
+        >>> im_arr.write_png('non-scaled.png', rescale=False)
+        >>> im_arr.write_png('black_bg.png', background='black')
+        >>> im_arr.write_png('white_bg.png', background='white')
+        >>> im_arr.write_png('green_bg.png', background=[0,1,0,1])
+        >>> im_arr.write_png('transparent_bg.png', background=None)
 
         """
+        if rescale:
+            scaled = self.rescale(inline=False)
+        else:
+            scaled = self
+
+        if self.shape[-1] == 4:
+            out = scaled.add_background_color(background, inline=False)
+        else:
+            out = scaled
+
         if filename[-4:] != '.png': 
             filename += '.png'
 
         if clip_ratio is not None:
-            return write_bitmap(self.swapaxes(0, 1), filename,
-                                clip_ratio * self.std())
+            nz = out[:,:,:3][out[:,:,:3].nonzero()]
+            return write_bitmap(out.swapaxes(0, 1), filename,
+                                nz.mean() + \
+                                clip_ratio * nz.std())
         else:
-            return write_bitmap(self.swapaxes(0, 1), filename)
+            return write_bitmap(out.swapaxes(0, 1), filename)
 
     def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
@@ -197,11 +350,11 @@
         
         >>> im = np.zeros([64,128])
         >>> for i in xrange(im.shape[0]):
-        >>>     im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     im[i,:] = np.linspace(0., 0.3, im.shape[1])
 
         >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_image('test_ImageArray.png')
@@ -245,27 +398,3 @@
 
     __doc__ += np.ndarray.__doc__
 
-if __name__ == "__main__":
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
-
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
-
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
-
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
-
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
-
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
-

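The compositing in add_background_color is the standard "source over" alpha
blend. A minimal standalone sketch of the same arithmetic (plain NumPy; the
array and color names here are illustrative only):

    import numpy as np

    def blend_over_background(im, bg):
        # im: rgba image, shape (M, N, 4), values in [0, 1]
        # bg: background color as a 4-element (r, g, b, a) sequence
        out = im.copy()
        for i in range(3):
            out[:, :, i] = im[:, :, i] * im[:, :, 3] + \
                bg[i] * bg[3] * (1.0 - im[:, :, 3])
        out[:, :, 3] = im[:, :, 3] + bg[3] * (1.0 - im[:, :, 3])
        return out

    im = np.zeros([64, 128, 4])
    im[:, :, 3] = 0.5  # a half-transparent black image
    blended = blend_over_background(im, (1.0, 1.0, 1.0, 1.0))  # opaque white
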
diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -198,8 +198,10 @@
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
-                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1)
+                         & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):

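The get_box_grids change above rewrites the overlap test in difference form
with an explicit machine epsilon, so a grid that merely touches the selection
box to within round-off is not selected. A standalone sketch with made-up
edge arrays:

    import numpy as np

    eps = np.finfo(np.float64).eps
    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
    grid_right_edge = np.array([[0.5, 1.0, 1.0], [1.0, 1.0, 1.0]])
    left_edge = np.array([0.0, 0.0, 0.0])
    right_edge = np.array([0.5, 1.0, 1.0])

    # a grid counts as overlapping only if it extends past each face by > eps;
    # the second grid only touches the box at x = 0.5, so it is excluded
    overlap = (np.all(grid_right_edge - left_edge > eps, axis=1)
               & np.all(right_edge - grid_left_edge > eps, axis=1))
    grid_i = np.where(overlap)
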
diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/tests/test_image_array.py
--- /dev/null
+++ b/yt/data_objects/tests/test_image_array.py
@@ -0,0 +1,130 @@
+from yt.testing import *
+from yt.data_objects.image_array import ImageArray
+import numpy as np
+import os
+import tempfile
+import shutil
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+def test_rgba_rescale():
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    im_arr = ImageArray(im)
+
+    new_im = im_arr.rescale(inline=False)
+    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
+    yield assert_equal, im_arr[:,:,3].max(), 3*10.
+    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
+    yield assert_equal, new_im[:,:,3].max(), 1.0
+
+    im_arr.rescale()
+    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:,:,3].max(), 1.0
+
+def test_image_array_hdf5():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+
+def test_image_array_rgb_png():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.write_png('standard.png')
+
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+
+def test_image_array_rgba_png():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.write_png('standard.png')
+    im_arr.write_png('non-scaled.png', rescale=False)
+    im_arr.write_png('black_bg.png', background='black')
+    im_arr.write_png('white_bg.png', background='white')
+    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
+    im_arr.write_png('transparent_bg.png', background=None)
+
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+
+
+def test_image_array_background():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.rescale()
+    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
+    new_im.write_png('red_bg.png')
+    im_arr.add_background_color('black')
+    im_arr.write_png('black_bg2.png')
+ 
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+

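Each of the I/O tests above repeats the same mkdtemp/chdir/rmtree dance. A
hypothetical context-manager helper that would factor it out (the name
in_tmpdir is made up here, not part of yt):

    import os
    import shutil
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def in_tmpdir():
        # hypothetical helper: run a block inside a throwaway directory and
        # always restore the previous working directory, even on failure
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        os.chdir(tmpdir)
        try:
            yield tmpdir
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
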
diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,24 +1,60 @@
-from yt.testing import *
+"""
+Tests for AMRSlice
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+  Copyright (C) 2013 Kacper Kowalik.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import os
+import numpy as np
+from nose.tools import raises
+from yt.testing import \
+    fake_random_pf, assert_equal, assert_array_equal
+from yt.utilities.definitions import \
+    x_dict, y_dict
+from yt.utilities.exceptions import \
+    YTNoDataInObjectError
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
+
 
 def teardown_func(fns):
     for fn in fns:
         os.remove(fn)
 
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = nprocs)
+        pf = fake_random_pf(64, nprocs=nprocs)
         dims = pf.domain_dimensions
         xn, yn, zn = pf.domain_dimensions
-        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
-        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
-        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        xi, yi, zi = pf.domain_left_edge + 1.0 / (pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0 / (pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn * 1j, yi:yf:yn * 1j, zi:zf:zn * 1j]
         uc = [np.unique(c) for c in coords]
         slc_pos = 0.5
         # Some simple slice tests with single grids
@@ -33,31 +69,45 @@
                 yield assert_equal, slc["Ones"].max(), 1.0
                 yield assert_equal, np.unique(slc["px"]), uc[xax]
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
-                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
-                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
+                yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
                 fns += pw.save()
-                frb = slc.to_frb((1.0,'unitary'), 64)
+                frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
-                            slc.__str__()
+                        slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
-                            ax
+                        ax
                     yield assert_equal, frb[slc_field].info['field'], \
-                            slc_field
+                        slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                            pf.field_info[slc_field].get_units()
+                        pf.field_info[slc_field].get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
-                            frb.bounds[:2]
+                        frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \
-                            frb.bounds[2:]
+                        frb.bounds[2:]
                     yield assert_equal, frb[slc_field].info['length_to_cm'], \
-                            pf['cm']
+                        pf['cm']
                     yield assert_equal, frb[slc_field].info['center'], \
-                            slc.center
+                        slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
-                            slc_pos
+                        slc_pos
                 teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
+
+def test_slice_over_edges():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+
+    slc = pf.h.slice(0, 0.0, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 0], np.zeros((4))
+    slc = pf.h.slice(1, 0.5, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 1], np.ones((4)) * 0.5
+
+
+@raises(YTNoDataInObjectError)
+def test_slice_over_outer_boundary():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+    slc = pf.h.slice(2, 1.0, "Density")

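The raises decorator used in the last test comes from nose.tools: the test
passes only if the wrapped function raises the named exception. Minimal usage:

    from nose.tools import raises

    @raises(ZeroDivisionError)
    def test_divide_by_zero():
        # passes, because the body raises ZeroDivisionError
        1 / 0
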
diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -55,7 +55,7 @@
      G, \
      rho_crit_now, \
      speed_of_light_cgs, \
-     km_per_cm
+     km_per_cm, keV_per_K
 
 from yt.utilities.math_utils import \
     get_sph_r_component, \
@@ -216,18 +216,25 @@
            data["Density"] * data["ThermalEnergy"]
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
+def _TempkeV(field, data):
+    return data["Temperature"] * keV_per_K
+add_field("TempkeV", function=_TempkeV, units=r"\rm{keV}",
+          display_name="Temperature")
+
 def _Entropy(field, data):
     if data.has_field_parameter("mu"):
         mw = mh*data.get_field_parameter("mu")
     else :
         mw = mh
+    try:
+        gammam1 = data.pf["Gamma"] - 1.0
+    except:
+        gammam1 = 5./3. - 1.0
     return kboltz * data["Temperature"] / \
-           ((data["Density"]/mw)**(data.pf["Gamma"] - 1.0))
+           ((data["Density"]/mw)**gammam1)
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
-
-
 ### spherical coordinates: r (radius)
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
@@ -784,22 +791,28 @@
          units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
          validators=[ValidateParameter('center')])
 
-def get_radius(positions, data):
-    c = data.get_field_parameter("center")
-    n_tup = tuple([1 for i in range(positions.ndim-1)])
-    center = np.tile(np.reshape(c, (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
-    periodicity = data.pf.periodicity
-    if any(periodicity):
-        period = data.pf.domain_right_edge - data.pf.domain_left_edge
-        return periodic_dist(positions, center, period, periodicity)
-    else:
-        return euclidean_dist(positions, center)
+def get_radius(data, field_prefix):
+    center = data.get_field_parameter("center")
+    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
+    radius = np.zeros(data[field_prefix+"x"].shape, dtype='float64')
+    r = radius.copy()
+    if any(data.pf.periodicity):
+        rdw = radius.copy()
+    for i, ax in enumerate('xyz'):
+        np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+        if data.pf.periodicity[i] == True:
+            np.subtract(DW[i], r, rdw)
+            np.abs(r, r)
+            np.minimum(r, rdw, r)
+        np.power(r, 2.0, r)
+        np.add(radius, r, radius)
+    np.sqrt(radius, radius)
+    return radius
+
 def _ParticleRadius(field, data):
-    positions = np.array([data["particle_position_%s" % ax] for ax in 'xyz'])
-    return get_radius(positions, data)
+    return get_radius(data, "particle_position_")
 def _Radius(field, data):
-    positions = np.array([data['x'], data['y'], data['z']])
-    return get_radius(positions, data)
+    return get_radius(data, "")
 
 def _ConvertRadiusCGS(data):
     return data.convert("cm")

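The rewritten get_radius accumulates the distance axis by axis with in-place
ufuncs, applying the minimum-image convention on periodic axes. A standalone
sketch of the same idea (here the absolute value is taken before forming the
wrapped distance, so negative displacements wrap symmetrically):

    import numpy as np

    def periodic_radius(pos, center, domain_width, periodicity):
        # pos: (N, 3) positions; center, domain_width: length-3 sequences
        radius = np.zeros(pos.shape[0], dtype='float64')
        r = np.empty_like(radius)
        rdw = np.empty_like(radius)
        for i in range(3):
            np.subtract(pos[:, i], center[i], r)
            np.abs(r, r)
            if periodicity[i]:
                # minimum image: the shorter of direct and wrapped distances
                np.subtract(domain_width[i], r, rdw)
                np.minimum(r, rdw, r)
            np.power(r, 2.0, r)
            np.add(radius, r, radius)
        np.sqrt(radius, radius)
        return radius

    pos = np.array([[0.9, 0.5, 0.5]])
    print periodic_radius(pos, [0.1, 0.5, 0.5], [1.0, 1.0, 1.0], (True,) * 3)
    # -> [ 0.2], not 0.8, because the x axis wraps
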
diff -r a1ff99289b028229a1570481842295a9fb28b31b -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -289,6 +289,11 @@
                      self.parameter_file.domain_right_edge)
         self.parameter_file.domain_dimensions = \
                 np.round(self.parameter_file.domain_width/gdds[0]).astype('int')
+
+        # Need to reset the units in the parameter file based on the correct
+        # domain left/right/dimensions.
+        self.parameter_file._set_units()
+
         if self.parameter_file.dimensionality <= 2 :
             self.parameter_file.domain_dimensions[2] = np.int(1)
         if self.parameter_file.dimensionality == 1 :

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/34b95297062b/
Changeset:   34b95297062b
Branch:      yt
User:        atmyers
Date:        2013-03-07 23:02:12
Summary:     undoing a couple of changes I made by accident
Affected #:  2 files

diff -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -17,116 +17,213 @@
 #####
 
 
-"""This is an initial implementation of the finder/loader discussed at:
-http://mail.scipy.org/pipermail/numpy-discussion/2012-March/061160.html
+# This code is derived from knee.py, which was included in the Python
+# 2.6 distribution.
+#
+# The modifications to this code are copyright (c) 2011, Lawrence
+# Livermore National Security, LLC. Produced at the Lawrence Livermore
+# National Laboratory. Written by Tim Kadich and Asher Langton
+# <langton2 at llnl.gov>. Released as LLNL-CODE-522751 under the name
+# SmartImport.py, version 1.0. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# - Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the disclaimer below.
+#
+# - Redistributions in binary form must reproduce the above copyright
+#   notice, this list of conditions and the disclaimer (as noted below)
+#   in the documentation and/or other materials provided with the
+#   distribution.
+#
+# - Neither the name of the LLNS/LLNL nor the names of its contributors
+#   may be used to endorse or promote products derived from this
+#   software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
+# LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Additional BSD Notice
+#
+# 1. This notice is required to be provided under our contract with the
+# U.S. Department of Energy (DOE). This work was produced at Lawrence
+# Livermore National Laboratory under Contract No. DE-AC52-07NA27344
+# with the DOE.
+#
+# 2. Neither the United States Government nor Lawrence Livermore
+# National Security, LLC nor any of their employees, makes any warranty,
+# express or implied, or assumes any liability or responsibility for the
+# accuracy, completeness, or usefulness of any information, apparatus,
+# product, or process disclosed, or represents that its use would not
+# infringe privately-owned rights.
+#
+# 3. Also, reference herein to any specific commercial products,
+# process, or services by trade name, trademark, manufacturer or
+# otherwise does not necessarily constitute or imply its endorsement,
+# recommendation, or favoring by the United States Government or
+# Lawrence Livermore National Security, LLC. The views and opinions of
+# authors expressed herein do not necessarily state or reflect those of
+# the United States Government or Lawrence Livermore National Security,
+# LLC, and shall not be used for advertising or product endorsement
+# purposes.
 
-This is intended to take the place of MPI_Import.py. This version has
-only been tested minimally, and is being made available primarily for
-testing and preliminary benchmarking.
+"""MPI_Import defines an mpi-aware import hook. The standard use of
+this module is as follows:
 
-Known issues:
-- Modules loaded via the Windows registry may be incorrectly hidden by
-  a module of the same name in sys.path.
-- If a file is added to a directory on sys.path, it won't be cached, so
-  there may be precedence issues. If a file disappears or its permissions
-  change, the import will fail.
+   from MPI_Import import mpi_import
+   with mpi_import():
+      import foo
+      import bar
 
-Update (3/16/12): I've merged in a new version, simple_finder, described
-below.
+Within the with block, the standard import statement is replaced by an
+MPI-aware import statement. The rank 0 process finds the location of
+each module to import, broadcasts the location, then all of the
+processes load that module.
 
-To use the finder, start a script off with the following:
+One CRITICAL detail: any code inside the mpi_import block must be
+executed exactly the same on all of the MPI ranks. For example,
+consider this:
 
-import sys
-from cached_import import finder
-sys.meta_path.append(finder())
+def foo():
+   import mpi
+   if mpi.rank == 0:
+      bar = someFunction()
+   bar = mpi.bcast(bar,root=0)
 
-There are also variants of the finder that use MPI. The rank 0 process
-builds the cache and then broadcasts it. For these, replace finder
-with either pympi_finder or mpi4py_finder.
+def someFunction():
+   import os
+   return os.name
 
-This finder works by building a cache mapping module names to
-locations. The expensive parts of this process are the calls that
-result in a stat. For that reason, we don't, by default, check whether
-a module file is readable.
+If foo() is called during the import process, then things may go very
+wrong. If the os module hasn't been loaded, then the rank 0 process
+will find os and broadcast its location. Since there's no
+corresponding bcast for rank > 0, the other processes will receive
+that broadcast instead of the broadcast for bar, resulting in
+undefined behavior. Similarly, if rank >0 process encounters an import
+that rank 0 does not encounter, that process will either hang waiting
+for the bcast, or it will receive an out-of-order bcast.
 
-Since calls like os.isfile are expensive, I've added an alternate
-version called simple_finder. Instead of figuring out where all of the
-modules in sys.path are located, we just cache the contents of
-directories on sys.path and use the standard probing algorithm for the
-imports. This is much cheaper at startup and easier to maintain. It
-appears to be a bit faster than the MPI-enabled finders, though that
-will depend on the number of modules in sys.path as well as the number
-of modules actually imported.
+The import hook provides a way to test whether we're using this
+importer, which can be used to disable rank-asymmetric behavior in a
+module import:
+
+import __builtin__
+hasattr(__builtin__.__import__,"mpi_import")
+
+This evaluates to True only when we're in an mpi_import() context
+manager.
+
+There are some situations where rank-dependent code may be necessary.
+One such example is pyMPI's synchronizeQueuedOutput function, which
+tends to cause deadlocks when it is executed inside an mpi_imported
+module. In that case, we provide a hook to execute a function after
+the mpi_import hook has been replaced by the standard import hook.
+Here is an example showing the use of this feature:
+
+# encapsulate the rank-asymmetric code in a function
+def f():
+    if mpi.rank == 0:
+        doOneThing()
+    else:
+        doSomethingElse()
+
+# Either importer is None (standard import) or it's a reference to
+# the mpi_import object that owns the current importer.
+import __builtin__
+importer = getattr(__builtin__.__import__,"mpi_import",None)
+if importer:
+    importer.callAfterImport(f)
+else:
+    # If we're using the standard import, then we'll execute the
+    # code in f immediately
+    f()
+
+WARNING: the callAfterImport feature is not intended for casual use.
+Usually it will be sufficient (and preferable) to either remove the
+rank-asymmetric code or explicitly move it outside of the 'with
+mpi_import' block. callAfterImport is provided for the (hopefully
+rare!) cases where this does not suffice.
+
+
+Some implementation details:
+
+-This code is based on knee.py, which is an example of a pure Python
+ hierarchical import that was included with Python 2.6 distributions.
+
+-Python PEP 302 defines another way to override import by using finder
+ and loader objects, which behave similarly to the imp.find_module and
+ imp.load_module functions in __import_module__ below. Unfortunately,
+ the implementation of PEP 302 is such that the path for the module
+ has already been found by the time that the "finder" object is
+ constructed, so it's not suitable for our purposes.
+
+-This module uses pyMPI. It was originally designed with mpi4py, and
+ switching back to mpi4py requires only minor modifications. To
+ quickly substitute mpi4py for pyMPI, the 'import mpi' line below can
+ be replaced with the following wrapper:
+
+from mpi4py import MPI
+class mpi(object):
+    rank = MPI.COMM_WORLD.Get_rank()
+    @staticmethod
+    def bcast(obj=None,root=0):
+        return MPI.COMM_WORLD.bcast(obj,root)
+
+-An alternate version of this module had rank 0 perform all of the
+ lookups, and then broadcast the locations all-at-once when that
+ process reached the end of the context manager. This was somewhat
+ faster than the current implementation, but was prone to deadlock
+ when loading modules containing MPI synchronization points.
+
+-The 'level' parameter to the import hook is not handled correctly; we
+ treat it as if it were -1 (try relative and absolute imports). For
+ more information about the level parameter, run 'help(__import__)'.
 """
 
-import sys,os,imp
+import sys, imp, __builtin__, types
+from mpi4py import MPI
+class mpi(object):
+    rank = MPI.COMM_WORLD.Get_rank()
+    @staticmethod
+    def bcast(obj=None,root=0):
+        return MPI.COMM_WORLD.bcast(obj,root)
 
-class finder(object):
-    def __init__(self,skip_checks=True,build=True):
-        """Build a finder object.
+class mpi_import(object):
+    def __enter__(self):
+        imp.acquire_lock()
+        __import_hook__.mpi_import = self
+        self.__funcs = []
+        self.original_import = __builtin__.__import__
+        __builtin__.__import__ = __import_hook__
 
-        Arguments:
-        - skip_checks: Don't test whether modules are readable while building
-                       the cache. This improves performace, but can cause an
-                       unreadable file that looks like a Python module to
-                       shadow a readable module with the same name later
-                       in sys.path.
-        -build: if set, build the cache now. This is used in the mpi4py_finder
-                and pympi_finder extensions
-        """
-        # Store some suffix and module description information
-        t = imp.get_suffixes()
-        self.skip_checks = skip_checks
-        self._suffixes = [x[0] for x in t] # in order of precedence
-        self._rsuffixes = self._suffixes[::-1] # and in reverse order
-        self._suffix_tuples = dict((x[0],tuple(x)) for x in t)
+    def __exit__(self,type,value,traceback):
+        __builtin__.__import__ = self.original_import
+        __import_hook__.mpi_import = None
+        imp.release_lock()
+        for f in self.__funcs:
+            f()
 
-        # We store the value of sys.path in _syspath so we can keep track
-        # of changes. _cache is a dictionary mapping module names to tuples
-        # containing the information needed to load the module (path and
-        # module description).
-        if build:
-            self._syspath = list(sys.path)
-            self._build_cache()
-        else: # For some subclasses
-            self._syspath = []
-            self._cache = {}
+    def callAfterImport(self,f):
+        "Add f to the list of functions to call on exit"
+        if type(f) != types.FunctionType:
+            raise TypeError("Argument must be a function!")
+        self.__funcs.append(f)
 
-    def _build_cache(self):
-        """Traverse sys.path, building (or re-building) the cache."""
-        import os
-        self._cache = {}
-        for d in self._syspath:
-            self._process_dir(os.path.realpath(d))
 
-    def find_module(self,fullname,path=None):
-        """Return self if 'fullname' is in sys.path (and isn't a builtin or
-        frozen module)."""
-        # First, make sure our cache is up-to-date. (We could combine
-        # the append/prepend cases and more generally handle the case where
-        # self._syspath is a sublist of the new sys.path, but is that worth
-        # the effort? It's only beneficial if we encounter code where sys.path
-        # is both prepended to and appended to, and there isn't an import
-        # statement in between.
-        if sys.path != self._syspath:
-            stored_length = len(self._syspath)
-            real_length = len(sys.path)
-            rebuild = False
-            # If sys.path isn't bigger, we need to rebuild the cache
-            # but not before we update self._syspath.
-            if real_length <= stored_length:
-                rebuild = True
-            # Some directories were prepended to the path, so add them.
-            elif self._syspath == sys.path[-stored_length:]:
-                for d in sys.path[real_length-stored_length-1::-1]:
-                    self._process_dir(os.path.realpath(d),prepend=True)
-            # Directories appended to the path.
-            elif self._syspath == sys.path[:len(self._syspath)]:
-                for d in sys.path[stored_length-real_length:]:
-                    self._process_dir(os.path.realpath(d))
-            # Path otherwise modified, so we need to rebuild the cache.
-            else:
-                rebuild = True
+# The remaining code is for internal use only. Do not explicitly call
+# any of the following functions.
 
 # Replacement for __import__(). Taken from knee.py; unmodified except for the
 # (unused) level parameter.
@@ -142,107 +239,56 @@
         __ensure_fromlist__(m, fromlist)
     return m
 
-    def load_module(self,fullname):
-        """Load the module fullname using cached path."""
-        if fullname in self._cache:
-            if fullname in sys.modules:
-                return sys.modules[fullname]
-            pathname,desc = self._cache[fullname]
-            #print "__LOADING ",fullname,pathname
-            if os.path.isfile(pathname):
-                # (If we're loading a PY_SOURCE file, the interpreter will
-                # automatically check for a compiled (.py[c|o]) file.)
-                with open(pathname,desc[1]) as f:
-                    mod = imp.load_module(fullname,f,pathname,desc)
-            # Not a file, so it's a package directory
-            else:
-                mod = imp.load_module(fullname,None,pathname,desc)
-            mod.__loader__ = self
-            return mod
-        raise ImportError("This shouldn't happen!")
+# __import_module__ is the only part of knee.py with non-trivial changes.
+# The MPI rank 0 process handles the lookup and broadcasts the location to
+# the others. This must be called synchronously, at least in the case that
+# 'fqname' is not already in sys.modules.
+def __import_module__(partname, fqname, parent):
+    fqname = fqname.rstrip(".")
+    try:
+        return sys.modules[fqname]
+    except KeyError:
+        pass
+    fp = None         # module's file
+    pathname = None   # module's location
+    stuff = None      # tuple of (suffix,mode,type) for the module
+    ierror = False    # are we propagating an import error from rank 0?
 
+    # Start with the lookup on rank 0. The other processes will be waiting
+    # on a broadcast, so we need to send one even if we're bailing out due
+    # to an import error.
+    if mpi.rank == 0:
+        try:
+            fp, pathname, stuff = imp.find_module(partname,
+                                                  parent and parent.__path__)
+        except ImportError:
+            ierror = True
+            return None
+        finally:
+            pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
+    else:
+        pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
+        if ierror:
+            return None
+        # If imp.find_module returned an open file to rank 0, then we should
+        # open the corresponding file for this process too.
+        if stuff and stuff[1]:
+            fp = open(pathname,stuff[1])
 
-    # Build up a dict of modules (including package directories) found in a
-    # directory. If this directory has been prepended to the path, we need to
-    # overwrite any conflicting entries in the cache. To make sure precedence
-    # is correct, we'll reverse the list of suffixes when we're prepending.
-    #
-    # Rather than add a lot of checks here to make sure we don't stomp on a
-    # builtin module, we'll just reject these in find_module
-    def _process_dir(self,dir,parent=None,prepend=False,visited=None):
-        """Process a directory dir, looking for valid modules.
+    try:
+        m = imp.load_module(fqname, fp, pathname, stuff)
+    finally:
+        if fp: fp.close()
+    if parent:
+        setattr(parent, partname, m)
+    return m
 
-        Arguments:
-        dir -- (an absolute, real path to a directory)
-        parent -- parent module, in the case where dir is a package directory
-        prepend -- True if dir has just been prepended to sys.path. In that
-                   case, we'll replace existing cached entries with the same
-                   module name.
-        visited -- list of the real paths of visited directories. Used to
-                   prevent infinite recursion in the case of symlink cycles
-                   in package subdirectories.
-        """
-        import stat
-        
-        # Avoid symlink cycles in a package.
-        if not visited:
-            visited = [dir]
-        elif dir not in visited:
-            visited.append(dir)
-        else:
-            return
-
-        # All files and subdirs. Store the name and the path.
-        try:
-            contents = dict((x,os.path.join(dir,x))
-                            for x in os.listdir(dir))
-        # Unreadable directory, so skip
-        except OSError:
-            return
-
-        # If this is a possible package directory with no __init__.py, bail
-        # out. If __init__.py is there, we need to see if there's an exising
-        # module by that name. 
-        if parent:
-            if "__init__.py" not in contents:
-                return
-            if not (self.skip_checks or
-                    os.access(os.path.join(dir,"__init__.py"),os.R_OK)):
-                return
-            if parent in self._cache and not prepend:
-                return
-            # Okay, this is a valid, non-duplicate module.
-            self._cache[parent] = (dir,('','',imp.PKG_DIRECTORY))
-            
-        # Split contents into files & subdirs (only stat each one once)
-        files = {}
-        subdirs = {}
-        for entry in contents:
-            try:
-                mode = os.stat(contents[entry]).st_mode
-            except OSError:
-                continue # couldn't read!
-            if stat.S_ISDIR(mode) and (self.skip_checks or
-                                       os.access(contents[entry],os.R_OK)):
-                subdirs[entry] = contents[entry]
-            elif stat.S_ISREG(mode) and (self.skip_checks or
-                                         os.access(contents[entry],os.R_OK)):
-                files[entry] = contents[entry]
-
-        # Package directories have the highest precedence. But when prepend is
-        # True, we need to reverse the order here. We'll do this with these
-        # nested functions.
-        def process_subdirs():
-            for d in subdirs:
-                fqname = parent+"."+d if parent else d # fully qualified name
-                self._process_dir(os.path.join(dir,d),fqname,prepend,visited)
 
 # The remaining functions are taken unmodified (except for the names)
 # from knee.py.
 def __determine_parent__(globals, level):
     if not globals or  not globals.has_key("__name__"):
         return None
-
     pname = globals['__name__']
     if globals.has_key("__path__"):
         parent = sys.modules[pname]
@@ -262,22 +308,58 @@
         return parent
     return None
 
-    def _process_dir(self,dir):
-        """
-        Arguments:
-        dir -- 
-        """
-        # All files and subdirs. Store the name and the path.
-        try:
-            contents = dict((x,os.path.join(dir,x))
-                            for x in os.listdir(dir))
-        # Unreadable directory, so skip
-        except OSError:
-            contents = {}
+def __find_head_package__(parent, name):
+    if '.' in name:
+        i = name.find('.')
+        head = name[:i]
+        tail = name[i+1:]
+    else:
+        head = name
+        tail = ""
+    if parent:
+        qname = "%s.%s" % (parent.__name__, head)
+    else:
+        qname = head
+    q = __import_module__(head, qname, parent)
+    if q: return q, tail
+    if parent:
+        qname = head
+        parent = None
+        q = __import_module__(head, qname, parent)
+        if q: return q, tail
+    raise ImportError, "No module named " + qname
 
-        self._contents[dir] = contents
+def __load_tail__(q, tail):
+    m = q
+    while tail:
+        i = tail.find('.')
+        if i < 0: i = len(tail)
+        head, tail = tail[:i], tail[i+1:]
+        mname = "%s.%s" % (m.__name__, head)
+        m = __import_module__(head, mname, m)
+        if not m:
+            raise ImportError, "No module named " + mname
+    return m
+
+def __ensure_fromlist__(m, fromlist, recursive=0):
+    for sub in fromlist:
+        if sub == "*":
+            if not recursive:
+                try:
+                    all = m.__all__
+                except AttributeError:
+                    pass
+                else:
+                    __ensure_fromlist__(m, all, 1)
+            continue
+        if sub != "*" and not hasattr(m, sub):
+            subname = "%s.%s" % (m.__name__, sub)
+            submod = __import_module__(sub, subname, m)
+            if not submod:
+                raise ImportError, "No module named " + subname
 
 # Now we import all the yt.mods items.
-import sys
-sys.meta_path.append(mpi4py_finder())
-from yt.mods import *
+with mpi_import():
+    if MPI.COMM_WORLD.rank == 0: print "Beginning parallel import block."
+    from yt.mods import *
+    if MPI.COMM_WORLD.rank == 0: print "Ending parallel import block."

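At its core, the restored mpi_import hook is the rank-0-decides,
everyone-receives pattern: rank 0 does the filesystem lookup, and the result
arrives on all ranks through a single collective. Stripped to its essentials
(expensive_lookup is a placeholder name, not part of yt):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    def expensive_lookup():
        # placeholder for work only rank 0 should do, e.g. probing sys.path
        return "answer"

    result = expensive_lookup() if comm.rank == 0 else None
    # every rank must reach this line, or the others block forever
    result = comm.bcast(result, root=0)
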
diff -r b59c9b89ad062e0bd2df49df6b5466b0e7f90865 -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -57,7 +57,7 @@
     # don't forget to close the file.
     f.close()
 
-def save_field(pf, field_name, data=None):
+def save_field(pf, field_name):
     """
     Write a single field associated with the parameter file pf to the
     backup file.
@@ -85,12 +85,12 @@
                        particle_type_name="dark_matter")
 
     # now save the field
-    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter", data)
+    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter")
 
     # don't forget to close the file.
     f.close()
         
-def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name, data=None):
+def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name):
 
     # add field info to field_types group
     g = fhandle["field_types"]
@@ -131,10 +131,7 @@
         if field_obj.particle_type:  # particle data
             pt_group[field_name] = grid.get_data(field_name)
         else:  # a field
-            if data != None:
-                grid_group[field_name] = data[str(grid.id)]
-            else:
-                grid_group[field_name] = grid.get_data(field_name)
+            grid_group[field_name] = grid.get_data(field_name)
 
 def _create_new_gdf(pf, gdf_path, data_author=None, data_comment=None,
                    particle_type_name="dark_matter"):

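Worth noting: the call removed in this revert, with a bare data following
particle_type_name="dark_matter", was never valid Python, since a positional
argument cannot follow a keyword argument. Passing it as data=data would have
been the working form:

    def f(a, b=None, data=None):
        return a, b, data

    f(1, b=2, data=3)  # fine: keyword arguments after the positional one
    # f(1, b=2, 3)     # SyntaxError: positional argument follows keyword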

https://bitbucket.org/yt_analysis/yt-3.0/commits/5117127ce2e9/
Changeset:   5117127ce2e9
Branch:      yt
User:        ngoldbaum
Date:        2013-03-13 00:46:49
Summary:     Fixing setup.py for the case when the fortran kd tree isn't built.
Affected #:  1 file

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 5117127ce2e958db759092a1af4a0a0c42007daf setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):

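find_executable returns the absolute path of a program found on PATH, or
None; factoring the two lookups into find_fortran_deps lets BuildForthon and
my_install_data apply the same guard. The shape of the check, standalone
(Python 2, as in the rest of the tree):

    from distutils.spawn import find_executable

    def find_fortran_deps():
        # each entry is an absolute path, or None if the tool is missing
        return (find_executable("Forthon"),
                find_executable("gfortran"))

    Forthon_exe, gfortran_exe = find_fortran_deps()
    if None in (Forthon_exe, gfortran_exe):
        print "skipping fKDpy.so: Forthon/gfortran not found"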

https://bitbucket.org/yt_analysis/yt-3.0/commits/f8367487aec0/
Changeset:   f8367487aec0
Branch:      yt
User:        jwise77
Date:        2013-02-28 16:01:39
Summary:     If there are no particles in a grid, don't add to the particle count.
This is only necessary in N-body runs.  In Enzo N-body runs, grids
without particles don't even have a Grid group in the data!
Affected #:  1 file

diff -r 28a296dcad83e2297c79015b019ea708ddcefc70 -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True

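The fix above short-circuits the derived field before any data access, since
an Enzo N-body grid without particles has no Grid group on disk to read from.
The guard pattern, shown standalone (data stands for the grid-data object yt
hands to field functions):

    def _particle_count(field, data):
        # bail out before touching the file: empty grids may have no data
        if data.NumberOfParticles == 0:
            return 0
        return data["particle_type"].size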

https://bitbucket.org/yt_analysis/yt-3.0/commits/f3d1098915c5/
Changeset:   f3d1098915c5
Branch:      yt
User:        jwise77
Date:        2013-02-28 16:05:12
Summary:     Merging
Affected #:  16 files

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -837,16 +837,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,52 @@
 import sys
 import time
 import subprocess
+import shutil
+import glob
 import distribute_setup
 distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
 from distutils import version
 
+from distutils.core import Command
+
+
+class BuildForthon(Command):
+
+    """Command for building Forthon modules"""
+
+    description = "Build Forthon modules"
+    user_options = []
+
+    def initialize_options(self):
+
+        """init options"""
+
+        pass
+
+    def finalize_options(self):
+
+        """finalize options"""
+
+        pass
+
+    def run(self):
+
+        """runner"""
+
+        cwd = os.getcwd()
+        os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+        cmd = ["Forthon", "-F", "gfortran", "--compile_first", "fKD_source",
+               "--no2underscores", "--fopt", "'-O3'", "fKD",
+               "fKD_source.f90"]
+        subprocess.check_call(cmd, shell=False)
+        shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+        os.chdir(cwd)
+
 REASON_FILES = []
 REASON_DIRS = [
     "",
@@ -36,7 +74,7 @@
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append( (dir_name, files) )
+    REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 try:
@@ -93,10 +131,10 @@
             language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
-                                                   options=options)
+                                                     options=options)
         if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython" \
-                  % (cython_result.num_errors, source))
+            raise DistutilsError("%d errors while compiling %r with Cython"
+                                 % (cython_result.num_errors, source))
     return target_file
 
 
@@ -109,7 +147,9 @@
 
 VERSION = "2.5dev"
 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+    os.remove('MANIFEST')
+
 
 def get_mercurial_changeset_id(target_dir):
     """adapted from a script by Jason F. Harris, published at
@@ -123,11 +163,11 @@
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      shell=True)
-        
+
     if (get_changeset.stderr.read() != ""):
         print "Error in obtaining current changeset of the Mercurial repository"
         changeset = None
-        
+
     changeset = get_changeset.stdout.read().strip()
     if (not re.search("^[0-9a-f]{12}", changeset)):
         print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +175,26 @@
 
     return changeset
 
+
+class my_build_src(build_src.build_src):
+    def run(self):
+        self.run_command("build_forthon")
+        build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+    def run(self):
+        self.distribution.data_files.append(
+            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+        )
+        np_install_data.install_data.run(self)
+
 class my_build_py(build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
-            target_dir = os.path.join(self.build_lib,'yt')
-            src_dir =  os.getcwd() 
+            target_dir = os.path.join(self.build_lib, 'yt')
+            src_dir = os.getcwd()
             changeset = get_mercurial_changeset_id(src_dir)
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +202,7 @@
 
             build_py.run(self)
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -158,7 +213,7 @@
                        quiet=True)
 
     config.make_config_py()
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
@@ -176,25 +231,25 @@
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
         classifiers=["Development Status :: 5 - Production/Stable",
-            "Environment :: Console",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: GNU General Public License (GPL)",
-            "Operating System :: MacOS :: MacOS X",
-            "Operating System :: POSIX :: AIX",
-            "Operating System :: POSIX :: Linux",
-            "Programming Language :: C",
-            "Programming Language :: Python",
-            "Topic :: Scientific/Engineering :: Astronomy",
-            "Topic :: Scientific/Engineering :: Physics",
-            "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' + \
-            'amr adaptivemeshrefinement',
+                     "Environment :: Console",
+                     "Intended Audience :: Science/Research",
+                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "Operating System :: MacOS :: MacOS X",
+                     "Operating System :: POSIX :: AIX",
+                     "Operating System :: POSIX :: Linux",
+                     "Programming Language :: C",
+                     "Programming Language :: Python",
+                     "Topic :: Scientific/Engineering :: Astronomy",
+                     "Topic :: Scientific/Engineering :: Physics",
+                     "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' +
+        'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
-                            'yt = yt.utilities.command_line:run_main',
-                      ],
-                      'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-                      ]
+        'yt = yt.utilities.command_line:run_main',
+        ],
+            'nose.plugins.0.10': [
+                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+            ]
         },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
@@ -203,8 +258,9 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
-        cmdclass = {'build_py': my_build_py},
-        )
+        cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+                  'build_src': my_build_src, 'install_data': my_install_data},
+    )
     return
 
 if __name__ == '__main__':

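The setup.py machinery merged here follows the stock distutils extension
recipe: define a custom Command, subclass the standard command whose run()
should trigger it first via self.run_command, and register both under
cmdclass. A minimal sketch with hypothetical names (GenerateSources is
illustrative, not from yt):

    from distutils.core import Command, setup
    from distutils.command.build import build

    class GenerateSources(Command):
        description = "generate sources before the normal build"
        user_options = []

        def initialize_options(self):
            pass

        def finalize_options(self):
            pass

        def run(self):
            print "generating sources"

    class build_with_generation(build):
        def run(self):
            # run the custom step, then fall through to the stock build
            self.run_command("generate_sources")
            build.run(self)

    setup(name="example", version="0.1",
          cmdclass={"generate_sources": GenerateSources,
                    "build": build_with_generation})
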
diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -178,7 +178,7 @@
         self.child_mask = 1
         self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-        
+
     def __getitem__(self, field):
         if field not in self.field_data.keys():
             if field == "RadiusCode":
@@ -424,7 +424,7 @@
         return grids
 
     def select_grid_indices(self, level):
-        return np.where(self.grid_levels == level)
+        return np.where(self.grid_levels[:,0] == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
@@ -461,6 +461,7 @@
     def __get_grid_levels(self):
         if self.__grid_levels == None:
             self.__grid_levels = np.array([g.Level for g in self._grids])
+            self.__grid_levels.shape = (self.__grid_levels.size, 1)
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +475,6 @@
     grid_levels = property(__get_grid_levels, __set_grid_levels,
                              __del_grid_levels)
 
-
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
             self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
@@ -491,6 +491,19 @@
     grid_dimensions = property(__get_grid_dimensions, __set_grid_dimensions,
                              __del_grid_dimensions)
 
+    @property
+    def grid_corners(self):
+        return np.array([
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+        ], dtype='float64')
+
 
 class AMR1DData(AMRData, GridPropertiesMixin):
     _spatial = False
@@ -530,7 +543,7 @@
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
             self[field] = self[field][self._sortkey]
-       
+
 class AMROrthoRayBase(AMR1DData):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
@@ -673,9 +686,9 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+        p = p | ( np.all( LE <= self.start_point, axis=1 )
                 & np.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+        p = p | ( np.all( LE <= self.end_point,   axis=1 )
                 & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
@@ -695,7 +708,7 @@
         if not iterable(gf):
             gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -738,11 +751,11 @@
     --------
 
     >>> from yt.visualization.api import Streamlines
-    >>> streamlines = Streamlines(pf, [0.5]*3) 
+    >>> streamlines = Streamlines(pf, [0.5]*3)
     >>> streamlines.integrate_through_volume()
     >>> stream = streamlines.path(0)
     >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-    
+
     """
     _type_name = "streamline"
     _con_args = ('positions')
@@ -775,16 +788,16 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # No child masking here; it happens inside the mask cut
-        mask = self._get_cut_mask(grid) 
+        mask = self._get_cut_mask(grid)
         if field == 'dts': return self._dts[grid.id]
         if field == 't': return self._ts[grid.id]
         return grid[field].flat[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1) 
+                         np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
         mask = np.zeros(points_in_grid.sum(), dtype='int')
         dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -819,7 +832,7 @@
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
         self.set_field_parameter("axis",axis)
-        
+
     def _convert_field_name(self, field):
         return field
 
@@ -838,7 +851,6 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(fields)
-        temp_data = {}
         for field in fields_to_get:
             if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
@@ -848,18 +860,13 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = np.array([])
-            else: data = np.concatenate(data)
-            temp_data[field] = data
+            if len(data) == 0:
+                data = np.array([])
+            else:
+                data = np.concatenate(data)
             # Now the next field can use this field
-            self[field] = temp_data[field] 
-        # We finalize
-        if temp_data != {}:
-            temp_data = self.comm.par_combine_object(temp_data,
-                    datatype='dict', op='cat')
-        # And set, for the next group
-        for field in temp_data.keys():
-            self[field] = temp_data[field]
+            self[field] = self.comm.par_combine_object(data, op='cat',
+                                                       datatype='array')
 
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
@@ -874,7 +881,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw
@@ -980,7 +987,7 @@
         for field in fields:
             #mylog.debug("Trying to obtain %s from node %s",
                 #self._convert_field_name(field), node_name)
-            fdata=self.hierarchy.get_data(node_name, 
+            fdata=self.hierarchy.get_data(node_name,
                 self._convert_field_name(field))
             if fdata is not None:
                 #mylog.debug("Got %s from node %s", field, node_name)
@@ -1138,7 +1145,7 @@
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
-        del cmI   # no longer needed 
+        del cmI   # no longer needed
         t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
@@ -1197,7 +1204,7 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1477,7 +1484,7 @@
         self.dims = dims
         self.dds = self.width / self.dims
         self.bounds = np.array([0.0,1.0,0.0,1.0])
-        
+
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1563,7 +1570,7 @@
 
             # Mark these pixels to speed things up
             self._pixelmask[pointI] = 0
-            
+
             return
         else:
             raise SyntaxError("Making a fixed resolution slice with "
@@ -1651,7 +1658,7 @@
         L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
-        
+
 class AMRQuadTreeProjBase(AMR2DData):
     """
     This is a data object corresponding to a line integral through the
@@ -1809,7 +1816,7 @@
             convs[:] = 1.0
         return dls, convs
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1850,7 +1857,7 @@
                                  if g.Level == level],
                               self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -1942,7 +1949,7 @@
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
+        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
@@ -2283,7 +2290,7 @@
                 del self.__retval_coords[grid.id]
                 del self.__retval_fields[grid.id]
                 del self.__overlap_masks[grid.id]
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         coord_data = np.concatenate(coord_data, axis=1)
         field_data = np.concatenate(field_data, axis=1)
@@ -2314,7 +2321,7 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -2522,7 +2529,7 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
@@ -2683,9 +2690,9 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
+        fly with a set of field_cuts.  It is very useful for applying
         conditions to the fields in your data object.
-        
+
         Examples
         --------
         To find the total mass of gas above 10^6 K in your volume:
@@ -2726,7 +2733,7 @@
         useful for calculating, for instance, total isocontour area, or
         visualizing in an external program (such as `MeshLab
         <http://meshlab.sf.net>`_.)
-        
+
         Parameters
         ----------
         field : string
@@ -2840,7 +2847,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field : string
@@ -2897,7 +2904,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -2990,7 +2997,7 @@
     ----------------
     force_refresh : bool
        Force a refresh of the data. Defaults to True.
-    
+
     Examples
     --------
     """
@@ -3230,7 +3237,7 @@
         if self._grids is not None: return
         GLE = self.pf.h.grid_left_edge
         GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center, 
+        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
                                            GLE, GRE)
         cgrids = self.pf.h.grids[goodI.astype('bool')]
        # find_grids_in_inclined_box seems to be broken.
@@ -3238,13 +3245,13 @@
         grids = []
         for i,grid in enumerate(cgrids):
             v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge, 
+                                      self._rot_mat, grid.LeftEdge,
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
         self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
-            
+
 
     def _is_fully_enclosed(self, grid):
         # This should be written at some point.
@@ -3257,10 +3264,10 @@
             return True
         pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge, 
+                              self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
         return pm
-        
+
 
 class AMRRegionBase(AMR3DData):
     """A 3D region of data with an arbitrary center.
@@ -3396,9 +3403,9 @@
     _dx_pad = 0.0
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
                                        fields = None, pf = None, **kwargs)
-    
+
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3565,7 +3572,7 @@
         self._C = C
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -3575,7 +3582,7 @@
         t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
-        given the tilt about the x axis when e0 was aligned 
+        given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
         RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
@@ -3599,7 +3606,7 @@
         self._refresh_data()
 
         """
-        Having another function find_ellipsoid_grids is too much work, 
+        Having another function find_ellipsoid_grids is too much work,
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
@@ -3687,7 +3694,7 @@
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
-    
+
     Parameters
     ----------
     level : int
@@ -3785,7 +3792,7 @@
             n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
-            
+
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator; this might even raise!
@@ -3813,13 +3820,13 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 0)
         return count
@@ -3835,7 +3842,7 @@
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 1)
 
@@ -3856,7 +3863,7 @@
     fill the region to level 1, replacing any cells actually
     covered by level 1 data, and then recursively repeating this
     process until it reaches the specified `level`.
-    
+
     Parameters
     ----------
     level : int
@@ -3976,7 +3983,7 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
+        input_left = (self._old_global_startindex + 0.5) * rf
         dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
         output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
@@ -3990,13 +3997,13 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
@@ -4008,14 +4015,14 @@
     """
     This will build a hybrid region based on the boolean logic
     of the regions.
-    
+
     Parameters
     ----------
     regions : list
         A list of region objects and strings describing the boolean logic
         to use when building the hybrid region. The boolean logic can be
         nested using parentheses.
-    
+
     Examples
     --------
     >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4040,7 +4047,7 @@
         self._get_all_regions()
         self._make_overlaps()
         self._get_list_of_grids()
-    
+
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
@@ -4050,7 +4057,7 @@
             # So cut_masks don't get messed up.
             item._boolean_touched = True
         self._all_regions = np.unique(self._all_regions)
-    
+
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
@@ -4084,7 +4091,7 @@
                     continue
             pbar.update(i)
         pbar.finish()
-    
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4097,7 +4104,7 @@
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
-    
+
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 
@@ -4184,7 +4191,7 @@
     <http://meshlab.sf.net>`_.)  The object has the properties .vertices
     and will sample values if a field is requested.  The values are
     interpolated to the center of a given face.
-    
+
     Parameters
     ----------
     data_source : AMR3DDataObject
@@ -4259,7 +4266,7 @@
                 self[fields] = samples
             elif sample_type == "vertex":
                 self.vertex_samples[fields] = samples
-        
+
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4296,7 +4303,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field_x : string
@@ -4343,7 +4350,7 @@
         return flux
 
     @restore_grid_state
-    def _calculate_flux_in_grid(self, grid, 
+    def _calculate_flux_in_grid(self, grid,
                     field_x, field_y, field_z, fluxing_field = None):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4351,7 +4358,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -4469,7 +4476,7 @@
             w = bounds[i][1] - bounds[i][0]
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
-            v[ax][:] = tmp 
+            v[ax][:] = tmp
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -209,7 +209,7 @@
         pf = self.parameter_file
         if find_max: c = self.find_max("Density")[1]
         else: c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        return self.region(c, 
+        return self.region(c,
             pf.domain_left_edge, pf.domain_right_edge)
 
     def clear_all_data(self):
@@ -308,7 +308,7 @@
             self.save_data = self._save_data
         else:
             self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-    
+
     save_data = parallel_splitter(_save_data, _reload_data_file)
 
     def save_object(self, obj, name):
@@ -367,7 +367,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return self.select_grids(self.grid_levels.max())[0].dds[0]
+        return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)
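
The get_smallest_dx change above matters for anisotropic cells: dds[0] is
only the x-spacing, while dds[:].min() takes the minimum over all three
axes.  For example (spacings are made up):

    import numpy as np

    dds = np.array([2.0e-3, 1.0e-3, 4.0e-3])  # (dx, dy, dz), finest level
    print(dds[0])        # 0.002 -- misses the smaller y-spacing
    print(dds[:].min())  # 0.001 -- the actual smallest cell size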

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -198,8 +198,10 @@
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
-                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1)
+                         & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
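
The epsilon comparison above guards the box/grid overlap test against
floating-point roundoff, so grids that merely touch the selection box to
within machine precision are no longer picked up.  The same test as a
standalone sketch (array names are illustrative; the edge arrays are
(N, 3)):

    import numpy as np

    eps = np.finfo(np.float64).eps

    def grids_in_box(grid_left, grid_right, left_edge, right_edge):
        # overlap requires clearing the box edges by more than eps
        # on every axis
        return (np.all((grid_right - left_edge) > eps, axis=1) &
                np.all((right_edge - grid_left) > eps, axis=1))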

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,24 +1,60 @@
-from yt.testing import *
+"""
+Tests for AMRSlice
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+  Copyright (C) 2013 Kacper Kowalik.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import os
+import numpy as np
+from nose.tools import raises
+from yt.testing import \
+    fake_random_pf, assert_equal, assert_array_equal
+from yt.utilities.definitions import \
+    x_dict, y_dict
+from yt.utilities.exceptions import \
+    YTNoDataInObjectError
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
+
 
 def teardown_func(fns):
     for fn in fns:
         os.remove(fn)
 
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = nprocs)
+        pf = fake_random_pf(64, nprocs=nprocs)
         dims = pf.domain_dimensions
         xn, yn, zn = pf.domain_dimensions
-        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
-        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
-        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        xi, yi, zi = pf.domain_left_edge + 1.0 / (pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0 / (pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn * 1j, yi:yf:yn * 1j, zi:zf:zn * 1j]
         uc = [np.unique(c) for c in coords]
         slc_pos = 0.5
         # Some simple slice tests with single grids
@@ -33,31 +69,45 @@
                 yield assert_equal, slc["Ones"].max(), 1.0
                 yield assert_equal, np.unique(slc["px"]), uc[xax]
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
-                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
-                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
+                yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
                 fns += pw.save()
-                frb = slc.to_frb((1.0,'unitary'), 64)
+                frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
-                            slc.__str__()
+                        slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
-                            ax
+                        ax
                     yield assert_equal, frb[slc_field].info['field'], \
-                            slc_field
+                        slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                            pf.field_info[slc_field].get_units()
+                        pf.field_info[slc_field].get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
-                            frb.bounds[:2]
+                        frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \
-                            frb.bounds[2:]
+                        frb.bounds[2:]
                     yield assert_equal, frb[slc_field].info['length_to_cm'], \
-                            pf['cm']
+                        pf['cm']
                     yield assert_equal, frb[slc_field].info['center'], \
-                            slc.center
+                        slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
-                            slc_pos
+                        slc_pos
                 teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
+
+def test_slice_over_edges():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+
+    slc = pf.h.slice(0, 0.0, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 0], np.zeros((4))
+    slc = pf.h.slice(1, 0.5, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 1], np.ones((4)) * 0.5
+
+
+@raises(YTNoDataInObjectError)
+def test_slice_over_outer_boundary():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+    slc = pf.h.slice(2, 1.0, "Density")

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -55,7 +55,7 @@
      G, \
      rho_crit_now, \
      speed_of_light_cgs, \
-     km_per_cm
+     km_per_cm, keV_per_K
 
 from yt.utilities.math_utils import \
     get_sph_r_component, \
@@ -216,18 +216,25 @@
            data["Density"] * data["ThermalEnergy"]
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
+def _TempkeV(field, data):
+    return data["Temperature"] * keV_per_K
+add_field("TempkeV", function=_TempkeV, units=r"\rm{keV}",
+          display_name="Temperature")
+
 def _Entropy(field, data):
     if data.has_field_parameter("mu"):
         mw = mh*data.get_field_parameter("mu")
     else :
         mw = mh
+    try:
+        gammam1 = data.pf["Gamma"] - 1.0
+    except:
+        gammam1 = 5./3. - 1.0
     return kboltz * data["Temperature"] / \
-           ((data["Density"]/mw)**(data.pf["Gamma"] - 1.0))
+           ((data["Density"]/mw)**gammam1)
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
-
-
 ### spherical coordinates: r (radius)
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
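
The try/except added to _Entropy above covers parameter files that do not
define "Gamma"; when the lookup fails, a monatomic ideal-gas gamma of 5/3
is assumed.  The fallback in isolation (written with a narrower except
clause than the bare one in the diff; pf stands in for any mapping-like
parameter file):

    def gamma_minus_one(pf):
        try:
            return pf["Gamma"] - 1.0
        except (KeyError, TypeError):
            return 5.0 / 3.0 - 1.0   # assume a monatomic ideal gas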

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -289,6 +289,11 @@
                      self.parameter_file.domain_right_edge)
         self.parameter_file.domain_dimensions = \
                 np.round(self.parameter_file.domain_width/gdds[0]).astype('int')
+
+        # Need to reset the units in the parameter file based on the correct
+        # domain left/right/dimensions.
+        self.parameter_file._set_units()
+
         if self.parameter_file.dimensionality <= 2 :
             self.parameter_file.domain_dimensions[2] = np.int(1)
         if self.parameter_file.dimensionality == 1 :

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/pmods.py
--- a/yt/pmods.py
+++ b/yt/pmods.py
@@ -230,7 +230,7 @@
 def __import_hook__(name, globals=None, locals=None, fromlist=None, level=-1):
     # TODO: handle level parameter correctly. For now, we'll ignore
     # it and try both absolute and relative imports.
-    parent = __determine_parent__(globals)
+    parent = __determine_parent__(globals, level)
     q, tail = __find_head_package__(parent, name)
     m = __load_tail__(q, tail)
     if not fromlist:
@@ -286,7 +286,7 @@
 
 # The remaining functions are taken unmodified (except for the names)
 # from knee.py.
-def __determine_parent__(globals):
+def __determine_parent__(globals, level):
     if not globals or  not globals.has_key("__name__"):
         return None
     pname = globals['__name__']
@@ -295,7 +295,13 @@
         assert globals is parent.__dict__
         return parent
     if '.' in pname:
-        i = pname.rfind('.')
+        if level > 0:
+            end = len(pname)
+            for l in range(level):
+                i = pname.rfind('.', 0, end)
+                end = i
+        else:
+            i = pname.rfind('.')
         pname = pname[:i]
         parent = sys.modules[pname]
         assert parent.__name__ == pname
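
The level handling added to __determine_parent__ above strips one trailing
dotted component per level of relative import by repeatedly narrowing
rfind's search window.  The trimming logic on its own (a sketch, not the
full import hook):

    def trim_parent_name(pname, level):
        # strip `level` trailing components for a relative import;
        # level <= 0 falls back to stripping just the last component
        end = len(pname)
        if level > 0:
            for _ in range(level):
                end = pname.rfind('.', 0, end)
            return pname[:end]
        return pname[:pname.rfind('.')]

    print(trim_parent_name("yt.data_objects.tests", 2))  # -> "yt"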

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/utilities/lib/geometry_utils.pyx
--- a/yt/utilities/lib/geometry_utils.pyx
+++ b/yt/utilities/lib/geometry_utils.pyx
@@ -183,7 +183,7 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 def cutting_plane_cells(dobj, gobj):
-    cdef np.ndarray[np.uint8_t, ndim=3] mask 
+    cdef np.ndarray[np.uint8_t, ndim=3] mask
     cdef np.ndarray[np.float64_t, ndim=1] left_edge = gobj.LeftEdge
     cdef np.ndarray[np.float64_t, ndim=1] dds = gobj.dds
     cdef int i, j, k
@@ -205,58 +205,6 @@
             y += dds[1]
         x += dds[0]
     return mask
-                
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
-                        np.ndarray[np.float64_t, ndim=1] right_edge,
-                        int level,
-                        np.ndarray[np.float64_t, ndim=2] left_edges,
-                        np.ndarray[np.float64_t, ndim=2] right_edges,
-                        np.ndarray[np.int32_t, ndim=2] levels,
-                        np.ndarray[np.int32_t, ndim=1] mask,
-                        int min_index = 0):
-    cdef int i, n
-    cdef int nx = left_edges.shape[0]
-    cdef int inside 
-    for i in range(nx):
-        if i < min_index or levels[i,0] != level:
-            mask[i] = 0
-            continue
-        inside = 1
-        for n in range(3):
-            if left_edge[n] >= right_edges[i,n] or \
-               right_edge[n] <= left_edges[i,n]:
-                inside = 0
-                break
-        if inside == 1: mask[i] = 1
-        else: mask[i] = 0
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def get_box_grids_below_level(
-                        np.ndarray[np.float64_t, ndim=1] left_edge,
-                        np.ndarray[np.float64_t, ndim=1] right_edge,
-                        int level,
-                        np.ndarray[np.float64_t, ndim=2] left_edges,
-                        np.ndarray[np.float64_t, ndim=2] right_edges,
-                        np.ndarray[np.int32_t, ndim=2] levels,
-                        np.ndarray[np.int32_t, ndim=1] mask):
-    cdef int i, n
-    cdef int nx = left_edges.shape[0]
-    cdef int inside 
-    for i in range(nx):
-        mask[i] = 0
-        if levels[i,0] <= level:
-            inside = 1
-            for n in range(3):
-                if left_edge[n] >= right_edges[i,n] or \
-                   right_edge[n] <= left_edges[i,n]:
-                    inside = 0
-                    break
-            if inside == 1: mask[i] = 1
 
 # Finally, miscellaneous routines.
 

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -117,12 +117,12 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def lines(np.ndarray[np.float64_t, ndim=3] image, 
+def lines(np.ndarray[np.float64_t, ndim=3] image,
           np.ndarray[np.int64_t, ndim=1] xs,
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
           int points_per_color=1):
-    
+
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
     cdef int nl = xs.shape[0]
@@ -144,7 +144,7 @@
             for i in range(3):
                 alpha[i] = colors[j/points_per_color,3]*\
                         colors[j/points_per_color,i]
-        if x0 < x1: 
+        if x0 < x1:
             sx = 1
         else:
             sx = -1
@@ -152,7 +152,7 @@
             sy = 1
         else:
             sy = -1
-        while(1): 
+        while(1):
             if (x0 < 0 and sx == -1): break
             elif (x0 >= nx and sx == 1): break
             elif (y0 < 0 and sy == -1): break
@@ -174,13 +174,13 @@
             if e2 < dx :
                 err = err + dx
                 y0 += sy
-    return 
+    return
 
 def rotate_vectors(np.ndarray[np.float64_t, ndim=3] vecs,
         np.ndarray[np.float64_t, ndim=2] R):
     cdef int nx = vecs.shape[0]
     cdef int ny = vecs.shape[1]
-    rotated = np.empty((nx,ny,3),dtype='float64') 
+    rotated = np.empty((nx,ny,3),dtype='float64')
     for i in range(nx):
         for j in range(ny):
             for k in range(3):
@@ -229,15 +229,16 @@
                         int min_index = 0):
     cdef int i, n
     cdef int nx = left_edges.shape[0]
-    cdef int inside 
+    cdef int inside
+    cdef np.float64_t eps = np.finfo(np.float64).eps
     for i in range(nx):
         if i < min_index or levels[i,0] != level:
             mask[i] = 0
             continue
         inside = 1
         for n in range(3):
-            if left_edge[n] >= right_edges[i,n] or \
-               right_edge[n] <= left_edges[i,n]:
+            if (right_edges[i,n] - left_edge[n]) <= eps or \
+               (right_edge[n] - left_edges[i,n]) <= eps:
                 inside = 0
                 break
         if inside == 1: mask[i] = 1
@@ -257,14 +258,15 @@
                         int min_level = 0):
     cdef int i, n
     cdef int nx = left_edges.shape[0]
-    cdef int inside 
+    cdef int inside
+    cdef np.float64_t eps = np.finfo(np.float64).eps
     for i in range(nx):
         mask[i] = 0
         if levels[i,0] <= level and levels[i,0] >= min_level:
             inside = 1
             for n in range(3):
-                if left_edge[n] >= right_edges[i,n] or \
-                   right_edge[n] <= left_edges[i,n]:
+                if (right_edges[i,n] - left_edge[n]) <= eps or \
+                   (right_edge[n] - left_edges[i,n]) <= eps:
                     inside = 0
                     break
             if inside == 1: mask[i] = 1

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -41,6 +41,7 @@
 mpc_per_rsun  = 2.253962e-14
 mpc_per_miles = 5.21552871e-20
 mpc_per_cm    = 3.24077929e-25
+kpc_per_cm    = mpc_per_cm / mpc_per_kpc
 km_per_pc     = 1.3806504e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
@@ -54,9 +55,11 @@
 rsun_per_mpc  = 1.0 / mpc_per_rsun
 miles_per_mpc = 1.0 / mpc_per_miles
 cm_per_mpc    = 1.0 / mpc_per_cm
+cm_per_kpc    = 1.0 / kpc_per_cm
 cm_per_km     = 1.0 / km_per_cm
 pc_per_km     = 1.0 / km_per_pc
 cm_per_pc     = 1.0 / pc_per_cm
+
 # time
 sec_per_Gyr  = 31.5576e15
 sec_per_Myr  = 31.5576e12
@@ -65,6 +68,12 @@
 sec_per_hr   = 3600.0
 day_per_year = 365.25
 
+# temperature / energy
+erg_per_eV = 1.602176487e-12 # http://goldbook.iupac.org/E02014.html
+erg_per_keV = erg_per_eV * 1.0e3
+K_per_keV = erg_per_keV / boltzmann_constant_cgs
+keV_per_K = 1.0 / K_per_keV
+
 #Short cuts
 G = gravitational_constant_cgs
 me = mass_electron_cgs
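
The new keV constants follow directly from Boltzmann's constant: one keV
corresponds to erg_per_keV / k_B kelvin.  A quick consistency check with
standard CGS values (constants restated here rather than imported):

    boltzmann_constant_cgs = 1.3806504e-16        # erg / K
    erg_per_eV  = 1.602176487e-12
    erg_per_keV = erg_per_eV * 1.0e3

    K_per_keV = erg_per_keV / boltzmann_constant_cgs
    keV_per_K = 1.0 / K_per_keV

    print(K_per_keV)          # ~1.16e7 K per keV
    print(1.0e7 * keV_per_K)  # a 10^7 K plasma is roughly 0.86 keV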

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -50,7 +50,6 @@
     config.add_subpackage("answer_testing")
     config.add_subpackage("delaunay")  # From SciPy, written by Robert Kern
     config.add_subpackage("kdtree")
-    config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
     config.add_subpackage("grid_data_format")
     config.add_subpackage("parallel_tools")

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -32,6 +32,7 @@
 import __builtin__
 
 from matplotlib.mathtext import MathTextParser
+from matplotlib.font_manager import FontProperties
 from distutils import version
 from functools import wraps
 
@@ -222,8 +223,6 @@
 
 class PlotWindow(object):
     r"""
-    PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
-    
     A ploting mechanism based around the concept of a window into a
     data source. It can have arbitrary fields, each of which will be
     centered on the same viewpoint, but will have individual zlimits. 
@@ -248,6 +247,9 @@
     antialias : boolean
         This can be true or false.  It determines whether or not sub-pixel
         rendering is used during data deposition.
+    window_size : float
+        The size of the window on the longest axis (in units of inches),
+        including the margins but not the colorbar.
 
     """
     _plot_valid = False
@@ -256,8 +258,7 @@
     _vector_info = None
     _frb = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
-                 periodic=True, origin='center-window', oblique=False, fontsize=15,
-                 window_size=10.0):
+                 periodic=True, origin='center-window', oblique=False, window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
             ts = self._initialize_dataset(self.pf) 
@@ -273,9 +274,9 @@
         self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
-        self.fontsize = fontsize
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center)) if i != self.data_source.axis]
+            center = [self.data_source.center[i] for i in range(len(self.data_source.center)) 
+                      if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
 
@@ -527,6 +528,14 @@
 
     @invalidate_data
     def set_buff_size(self, size):
+        """Sets a new buffer size for the fixed resolution buffer
+
+        parameters
+        ----------
+        size : int or two element sequence of ints
+            The number of data elements in the buffer on the x and y axes.
+            If a scalar is provided,  then the buffer is assumed to be square.
+        """
         if iterable(size):
             self.buff_size = size
         else:
@@ -534,6 +543,14 @@
             
     @invalidate_plot
     def set_window_size(self, size):
+        """Sets a new window size for the plot
+
+        parameters
+        ----------
+        size : float
+            The size of the window on the longest axis (in units of inches),
+            including the margins but not the colorbar.
+        """
         self.window_size = float(size)
 
     @invalidate_data
@@ -782,6 +799,9 @@
             self._frb_generator = kwargs.pop("frb_generator")
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
+        font_size = kwargs.pop("fontsize", 18)
+        font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
+        self._font_properties = FontProperties(size=font_size, fname=font_path)
         PWViewer.__init__(self, *args, **kwargs)
         
     def _setup_origin(self):
@@ -884,9 +904,11 @@
             
             image = self._frb[f]
 
+            fp = self._font_properties
+
             self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name, 
                                           self._colormaps[f], extent, aspect, 
-                                          zlim, size, self.fontsize)
+                                          zlim, size, fp.get_size())
 
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
@@ -903,10 +925,12 @@
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
                           axes_unit_labels[i] + r'}$' for i in (0,1)]
 
-            self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
-            self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
+            self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
+            self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
-            self.plots[f].axes.tick_params(labelsize=self.fontsize)
+            for label in (self.plots[f].axes.get_xticklabels() + 
+                          self.plots[f].axes.get_yticklabels()):
+                label.set_fontproperties(fp)
 
             colorbar_label = image.info['label']
 
@@ -916,9 +940,10 @@
             except ParseFatalException, err:
                 raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err))
                 
-            self.plots[f].cb.set_label(colorbar_label, fontsize=self.fontsize)
+            self.plots[f].cb.set_label(colorbar_label, fontproperties=fp)
 
-            self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
+            for label in self.plots[f].cb.ax.get_yticklabels():
+                label.set_fontproperties(fp)
 
             self.run_callbacks(f)
 
@@ -936,6 +961,36 @@
                 del self._frb[key]
 
     @invalidate_plot
+    def set_font(self, font_dict=None):
+        """set the font and font properties
+
+        Parameters 
+        ---------- 
+        font_dict : dict 
+        A dict of keyword parameters to be passed to 
+        matplotlib.font_manager.FontProperties.  See the matplotlib font 
+        manager documentation for more details.
+        http://matplotlib.org/api/font_manager_api.html
+
+        Caveats
+        -------
+        Mathtext axis labels will only obey the `size` keyword. 
+
+        Examples
+        --------
+        This sets the font to be 24-pt, sans-serif, italic, and bold-face.
+
+        >>> slc = SlicePlot(pf, 'x', 'Density')
+        >>> slc.set_font({'family':'sans-serif', 'style':'italic',
+                          'weight':'bold', 'size':24})
+        
+        """
+        if font_dict is None:
+            font_dict = {}
+        self._font_properties = \
+            FontProperties(**font_dict)
+
+    @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields
 
@@ -1136,7 +1191,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window', fontsize=15, field_parameters=None):
+                 origin='center-window', fontsize=18, field_parameters=None):
         # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
@@ -1251,7 +1306,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=15, 
+                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
                  field_parameters=None):
         ts = self._initialize_dataset(pf) 
         self.ts = ts
@@ -1261,9 +1316,9 @@
         if axes_unit is None  and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
-        proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,
+        proj = pf.h.proj(axis, fields, weight_field=weight_field, max_level=max_level,
                          center=center, **field_parameters)
-        PWViewerMPL.__init__(self,proj,bounds,origin=origin,fontsize=fontsize)
+        PWViewerMPL.__init__(self, proj, bounds, origin=origin, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisSlicePlot(PWViewerMPL):
@@ -1313,7 +1368,7 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=None, 
-                 axes_unit=None, north_vector=None, fontsize=15,
+                 axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
@@ -1322,8 +1377,8 @@
         cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector, **field_parameters)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,
-                             oblique=True,fontsize=fontsize)
+        PWViewerMPL.__init__(self, cutting, bounds, origin='center-window', periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 class OffAxisProjectionDummyDataSource(object):
@@ -1410,6 +1465,8 @@
         A vector defining the 'up' direction in the plot.  This
         option sets the orientation of the slicing plane.  If not
         set, an arbitrary grid-aligned north-vector is chosen.
+    fontsize : integer
+         The size of the fonts for the axis, colorbar, and tick labels.
 
     """
     _plot_type = 'OffAxisProjection'
@@ -1418,7 +1475,7 @@
     def __init__(self, pf, normal, fields, center='c', width=None, 
                  depth=(1, '1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
-                 le=None, re=None, interpolated=False, fontsize=15):
+                 le=None, re=None, interpolated=False, fontsize=18):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
             axes_unit = units[:2]
@@ -1429,8 +1486,8 @@
                                                        le=le, re=re, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
-        PWViewerMPL.__init__(self,OffAxisProj,bounds,origin='center-window',periodic=False,
-                             oblique=True,fontsize=fontsize)
+        PWViewerMPL.__init__(self, OffAxisProj, bounds, origin='center-window', periodic=False,
+                             oblique=True, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 
 _metadata_template = """
@@ -1618,9 +1675,9 @@
         self._init_image(data, cbname, cmap, extent, aspect)
         self.image.axes.ticklabel_format(scilimits=(-2,3))
 
-    def _get_best_layout(self, size, fontsize=15):
+    def _get_best_layout(self, size, fontsize=18):
         aspect = 1.0*size[0]/size[1]
-        fontscale = fontsize / 15.0
+        fontscale = fontsize / 18.0
 
         # add room for a colorbar
         cbar_inches = fontscale*0.7
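
The plot_window changes above replace scattered fontsize integers with a
single FontProperties object applied uniformly to axis labels, tick labels,
and the colorbar.  The same pattern in plain matplotlib (the STIXGeneral
path matches the diff; whether that font ships with a given matplotlib
install may vary):

    import matplotlib
    from matplotlib.font_manager import FontProperties
    import matplotlib.pyplot as plt

    font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
    fp = FontProperties(size=18, fname=font_path)

    fig, ax = plt.subplots()
    ax.set_xlabel('x (kpc)', fontproperties=fp)
    ax.set_ylabel('Density', fontproperties=fp)
    for label in ax.get_xticklabels() + ax.get_yticklabels():
        label.set_fontproperties(fp)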

diff -r f8367487aec0add0c0b69f9e2d04cb77711e1b1f -r f3d1098915c54b05d32d3b5d13c4e0e748d480ef yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -232,16 +232,22 @@
         if self.no_ghost:
             mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
         self.tree_type = tree_type
+        if le is None: le = self.pf.domain_left_edge
+        self.le = np.array(le)
+        if re is None: re = self.pf.domain_right_edge
+        self.re = np.array(re)
         if volume is None:
             if self.use_kd:
                 volume = AMRKDTree(self.pf, l_max=l_max, fields=self.fields, no_ghost=no_ghost,
-                                   log_fields = log_fields, le=le, re=re)
+                                   log_fields = log_fields, le=self.le, re=self.re)
             else:
                 volume = HomogenizedVolume(fields, pf = self.pf,
                                            log_fields = log_fields)
         else:
             self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume        
+        self.center = (self.re + self.le) / 2.0
+        self.region = self.pf.h.region(self.center, self.le, self.re)
 
     def _setup_box_properties(self, width, center, unit_vectors):
         self.width = width
@@ -300,8 +306,8 @@
         >>> write_bitmap(im, 'render_with_grids.png')
 
         """
-        corners = self.pf.h.grid_corners
-        levels = self.pf.h.grid_levels[:,0]
+        corners = self.region.grid_corners
+        levels = self.region.grid_levels[:,0]
 
         if max_level is not None:
             subset = levels <= max_level


https://bitbucket.org/yt_analysis/yt-3.0/commits/3065e9d3036c/
Changeset:   3065e9d3036c
Branch:      yt
User:        jwise77
Date:        2013-03-06 20:29:28
Summary:     Merging
Affected #:  1 file

diff -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b -r 3065e9d3036cc321cc416cea30b132a86349b9aa yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True


https://bitbucket.org/yt_analysis/yt-3.0/commits/cc66bd1759b5/
Changeset:   cc66bd1759b5
Branch:      yt
User:        jwise77
Date:        2013-03-13 18:28:46
Summary:     Merging.
Affected #:  1 file

diff -r 5117127ce2e958db759092a1af4a0a0c42007daf -r cc66bd1759b58561263dccabeee262d9e0f08644 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True


https://bitbucket.org/yt_analysis/yt-3.0/commits/a0b8959fce5d/
Changeset:   a0b8959fce5d
Branch:      yt
User:        jwise77
Date:        2013-03-13 18:43:38
Summary:     In the DM density field, in simulations without attributes,
filter = None would give an array of shape (1,N) where CICDeposit_3
expects a 1-d array.  This fixes that error.
Affected #:  1 file

diff -r cc66bd1759b58561263dccabeee262d9e0f08644 -r a0b8959fce5d1a56f1903b02dd8b0e4edf961444 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -369,8 +369,8 @@
         if not filter.any(): return blank
         num = filter.sum()
     else:
-        filter = None
         num = data["particle_position_x"].size
+        filter = np.ones(num, dtype='bool')
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),


https://bitbucket.org/yt_analysis/yt-3.0/commits/bda5c9c8fe1b/
Changeset:   bda5c9c8fe1b
Branch:      yt
User:        jwise77
Date:        2013-03-13 19:04:50
Summary:     Changing the all-ones filter in enzo/dm_density (which was None
before my initial changes) to Ellipsis.
Affected #:  1 file

diff -r a0b8959fce5d1a56f1903b02dd8b0e4edf961444 -r bda5c9c8fe1bfdda64bdd5f476690ad43fff9b4b yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -369,8 +369,8 @@
         if not filter.any(): return blank
         num = filter.sum()
     else:
+        filter = Ellipsis
         num = data["particle_position_x"].size
-        filter = np.ones(num, dtype='bool')
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),


https://bitbucket.org/yt_analysis/yt-3.0/commits/f6364b765358/
Changeset:   f6364b765358
Branch:      yt
User:        devinsilvia
Date:        2013-03-15 03:39:12
Summary:     Removed unnecessary formatting from the Total Energy display
name to fix broken plots.
Affected #:  1 file

diff -r bda5c9c8fe1bfdda64bdd5f476690ad43fff9b4b -r f6364b76535868d6179e94d089e57df628ec92b2 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = "Total Energy",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = "Total Energy",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = "Total Energy",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = "Total Energy",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):


https://bitbucket.org/yt_analysis/yt-3.0/commits/6de7d51693b9/
Changeset:   6de7d51693b9
Branch:      yt
User:        devinsilvia
Date:        2013-03-15 20:20:28
Summary:     Changing the Total Energy string to match Jeff Oishi's suggestion
of making it a raw string.
Affected #:  1 file

diff -r f6364b76535868d6179e94d089e57df628ec92b2 -r 6de7d51693b94a2c1c796008769bd1ecf47fc391 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "Total Energy",
+          display_name = r"\rm{Total} \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "Total Energy",
+          display_name = r"\rm{Total} \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "Total Energy",
+          display_name = r"\rm{Total} \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "Total Energy",
+          display_name = r"\rm{Total} \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
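
The raw-string prefix matters here because "\r" in an ordinary Python
string is a carriage return, so the un-prefixed display names were
silently corrupting the LaTeX markup.  A quick sketch:

    print(len("\rm{Total}"))    # 9: "\r" collapsed to one control character
    print(len(r"\rm{Total}"))   # 10: the backslash survives for LaTeX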


https://bitbucket.org/yt_analysis/yt-3.0/commits/ed10fc84bafc/
Changeset:   ed10fc84bafc
Branch:      yt
User:        devinsilvia
Date:        2013-03-15 20:28:42
Summary:     Things weren't quite right; the display names needed some
\/'s in there.
Affected #:  1 file

diff -r 6de7d51693b94a2c1c796008769bd1ecf47fc391 -r ed10fc84bafcefebfdfd813ca59caa245f8a5ebd yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -171,22 +171,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = r"\rm{Total} \rm{Energy}",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = r"\rm{Total} \rm{Energy}",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = r"\rm{Total} \rm{Energy}",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = r"\rm{Total} \rm{Energy}",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):


https://bitbucket.org/yt_analysis/yt-3.0/commits/15d72caf0ea6/
Changeset:   15d72caf0ea6
Branch:      yt
User:        MatthewTurk
Date:        2013-03-15 17:57:43
Summary:     Attempting to create a backwards-compatible pickle fix for boolean objects.

The comments explain some of the thought process.
Affected #:  1 file

diff -r bda5c9c8fe1bfdda64bdd5f476690ad43fff9b4b -r 15d72caf0ea6927828285981b476fd91725432d9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4606,22 +4606,38 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pfs = ParameterFileStore()
+    pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))
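
A minimal sketch of the tagging pattern used here (names hypothetical):
subclassing tuple lets old unpicklers keep treating the return value as
a plain (pf, obj) pair, while new code can recognize already-reconstructed
objects with isinstance() and unwrap them recursively.

    class Tagged(tuple):
        """Behaves exactly like a 2-tuple for legacy callers."""
        pass

    def unwrap(arg, ref):
        # Replace any Tagged pair whose first element matches ref.
        if isinstance(arg, Tagged) and arg[0] == ref:
            return arg[1]
        if isinstance(arg, (tuple, list)):
            return [unwrap(a, ref) for a in arg]
        return arg

    pair = Tagged(("pf", "obj"))
    print(unwrap([1, pair, (2, pair)], "pf"))   # [1, 'obj', [2, 'obj']]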


https://bitbucket.org/yt_analysis/yt-3.0/commits/3c55a238d857/
Changeset:   3c55a238d857
Branch:      yt
User:        ejtasker
Date:        2013-03-16 01:00:17
Summary:     Added a test for pickling.
Affected #:  1 file

diff -r ed10fc84bafcefebfdfd813ca59caa245f8a5ebd -r 3c55a238d8577874b0e0f48199dfeb4ae217dce0 yt/data_objects/tests/test_pickle.py
--- /dev/null
+++ b/yt/data_objects/tests/test_pickle.py
@@ -0,0 +1,36 @@
+from yt.testing import fake_random_pf, assert_equal
+from yt.analysis_modules.level_sets.api import identify_contours
+import cPickle
+import os
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_save_load_pickle():
+
+    test_pf = fake_random_pf(64)
+
+    # create extracted region from boolean (fairly complex object)
+    center = (pf.domain_left_edge + pf.domain_right_edge) / 2
+    sp_outer = pf.h.sphere(center, pf.domain_width[0])
+    sp_inner = pf.h.sphere(center, pf.domain_width[0]/10.0)
+    sp_boolean = pf.h.boolean([sp_outer, "NOT", sp_inner])
+
+    minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
+    contour_threshold = min(minv*10, 0.9*maxv)
+    
+    contours = sp_boolean.extract_connected_sets("Density", 1, contour_threshold, maxv+1, log_space=True, cache=True)
+
+    # save object
+    cPickle.dump(contours[1][0], open("myobject.cpkl", "wb"))
+    
+    # load object
+    test_load = cPickle.load(open("myobject.cpkl", "rb"))
+
+    yield assert_equal, test_load != None, True
+    yield assert_equal, len(contours[1][0]), len(test_load)
+
+    os.remove("myobject.cpkl")


https://bitbucket.org/yt_analysis/yt-3.0/commits/e59b164d12e5/
Changeset:   e59b164d12e5
Branch:      yt
User:        ejtasker
Date:        2013-03-16 01:03:12
Summary:     merged yt
Affected #:  1 file

diff -r 3c55a238d8577874b0e0f48199dfeb4ae217dce0 -r e59b164d12e5fd6547e92a02f6333b4868bc4b87 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4606,22 +4606,38 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pfs = ParameterFileStore()
+    pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))


https://bitbucket.org/yt_analysis/yt-3.0/commits/5b79077a918d/
Changeset:   5b79077a918d
Branch:      yt
User:        MatthewTurk
Date:        2013-03-16 15:34:30
Summary:     Ensure that Stream data outputs get added to the _cached_pfs.

Also check for them during pickle reconstruction.  Fixes test_pickle.
Affected #:  3 files

diff -r e59b164d12e5fd6547e92a02f6333b4868bc4b87 -r 5b79077a918d2c91fd74b734946138d81dbd83d5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4628,11 +4628,19 @@
     narg = [_check_nested_args(a, ref_pf) for a in arg]
     return narg
 
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
+
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
     args = args[2:-1]

diff -r e59b164d12e5fd6547e92a02f6333b4868bc4b87 -r 5b79077a918d2c91fd74b734946138d81dbd83d5 yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -1,5 +1,6 @@
 from yt.testing import fake_random_pf, assert_equal
 from yt.analysis_modules.level_sets.api import identify_contours
+import yt.data_objects.api 
 import cPickle
 import os
 
@@ -11,7 +12,7 @@
 
 def test_save_load_pickle():
 
-    test_pf = fake_random_pf(64)
+    pf = fake_random_pf(64)
 
     # create extracted region from boolean (fairly complex object)
     center = (pf.domain_left_edge + pf.domain_right_edge) / 2

diff -r e59b164d12e5fd6547e92a02f6333b4868bc4b87 -r 5b79077a918d2c91fd74b734946138d81dbd83d5 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,7 @@
 
 import weakref
 import numpy as np
+import uuid
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -302,7 +303,10 @@
         #self._conversion_override = conversion_override
 
         self.stream_handler = stream_handler
-        StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
+        name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
+        from yt.data_objects.static_output import _cached_pfs
+        _cached_pfs[name] = self
+        StaticOutput.__init__(self, name, self._data_style)
 
         self.units = {}
         self.time_units = {}
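
A rough sketch of the registration pattern this commit adds (registry
and class names hypothetical): each in-memory parameter file gets a
unique name and registers itself at construction time, so pickle
reconstruction can find it by hash even though it never existed on disk.

    import uuid

    _cached = {}   # stands in for yt's _cached_pfs

    class InMemoryPF(object):
        def __init__(self):
            self.name = "InMemoryParameterFile_%s" % uuid.uuid4().hex
            _cached[self.name] = self   # register before any pickling

        def _hash(self):
            # yt derives a real hash; the name suffices for this sketch
            return self.name

    def get_by_hash(h):
        for pf in _cached.values():
            if pf._hash() == h:
                return pf
        return None

    pf = InMemoryPF()
    print(get_by_hash(pf.name) is pf)   # True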


https://bitbucket.org/yt_analysis/yt-3.0/commits/1b01901aeeba/
Changeset:   1b01901aeeba
Branch:      yt
User:        mqk
Date:        2013-03-14 23:17:14
Summary:     Added a field_parameters keyword to GDF writer.save_field().
Affected #:  1 file

diff -r bda5c9c8fe1bfdda64bdd5f476690ad43fff9b4b -r 1b01901aeebaab2eea6485bae9f27ca32380d11a yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -57,7 +57,7 @@
     # don't forget to close the file.
     f.close()
 
-def save_field(pf, field_name):
+def save_field(pf, field_name, field_parameters=None):
     """
     Write a single field associated with the parameter file pf to the
     backup file.
@@ -68,6 +68,8 @@
         The yt parameter file that the field is associated with.
     field_name : string
         The name of the field to save.
+    field_parameters : dictionary
+        A dictionary of field parameters to set.
     """
 
     field_obj = pf.field_info[field_name]
@@ -85,12 +87,12 @@
                        particle_type_name="dark_matter")
 
     # now save the field
-    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter")
+    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter", field_parameters=field_parameters)
 
     # don't forget to close the file.
     f.close()
         
-def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name):
+def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name, field_parameters=None):
 
     # add field info to field_types group
     g = fhandle["field_types"]
@@ -122,6 +124,12 @@
     # now add actual data, grid by grid
     g = fhandle["data"]     
     for grid in pf.h.grids:
+
+        # set field parameters, if specified
+        if field_parameters is not None:
+            for k,v in field_parameters.iteritems():
+                grid.set_field_parameter(k,v)
+
         grid_group = g["grid_%010i" % (grid.id - grid._id_offset)]
         particles_group = grid_group["particles"]
         pt_group = particles_group[particle_type_name]
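
A hedged usage sketch of the new keyword (the field and parameter
choices are illustrative, not from this commit, and pf is assumed to be
an already-loaded parameter file):

    from yt.utilities.grid_data_format.writer import save_field

    # Fields such as RadialVelocity need a 'center' field parameter;
    # it is now applied to every grid before the data is read.
    center = (pf.domain_left_edge + pf.domain_right_edge) / 2
    save_field(pf, "RadialVelocity", field_parameters={"center": center})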


https://bitbucket.org/yt_analysis/yt-3.0/commits/22228acb73d5/
Changeset:   22228acb73d5
Branch:      yt
User:        ngoldbaum
Date:        2013-03-16 18:37:16
Summary:     Merged in mqk/yt_clean (pull request #461)

Added a field_parameters keyword to GDF writer.save_field(). (Fixes #526)
Affected #:  1 file

diff -r 5b79077a918d2c91fd74b734946138d81dbd83d5 -r 22228acb73d5af081289f362ddfe82753c394ae8 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -57,7 +57,7 @@
     # don't forget to close the file.
     f.close()
 
-def save_field(pf, field_name):
+def save_field(pf, field_name, field_parameters=None):
     """
     Write a single field associated with the parameter file pf to the
     backup file.
@@ -68,6 +68,8 @@
         The yt parameter file that the field is associated with.
     field_name : string
         The name of the field to save.
+    field_parameters : dictionary
+        A dictionary of field parameters to set.
     """
 
     field_obj = pf.field_info[field_name]
@@ -85,12 +87,12 @@
                        particle_type_name="dark_matter")
 
     # now save the field
-    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter")
+    _write_field_to_gdf(pf, f, field_name, particle_type_name="dark_matter", field_parameters=field_parameters)
 
     # don't forget to close the file.
     f.close()
         
-def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name):
+def _write_field_to_gdf(pf, fhandle, field_name, particle_type_name, field_parameters=None):
 
     # add field info to field_types group
     g = fhandle["field_types"]
@@ -122,6 +124,12 @@
     # now add actual data, grid by grid
     g = fhandle["data"]     
     for grid in pf.h.grids:
+
+        # set field parameters, if specified
+        if field_parameters is not None:
+            for k,v in field_parameters.iteritems():
+                grid.set_field_parameter(k,v)
+
         grid_group = g["grid_%010i" % (grid.id - grid._id_offset)]
         particles_group = grid_group["particles"]
         pt_group = particles_group[particle_type_name]


https://bitbucket.org/yt_analysis/yt-3.0/commits/b7e6a9542770/
Changeset:   b7e6a9542770
Branch:      yt
User:        MatthewTurk
Date:        2013-03-17 01:21:33
Summary:     Updating version in development branch.
Affected #:  1 file

diff -r 22228acb73d5af081289f362ddfe82753c394ae8 -r b7e6a954277064a0cf50f17b93142e0dd72bd8e2 setup.py
--- a/setup.py
+++ b/setup.py
@@ -155,7 +155,7 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')


https://bitbucket.org/yt_analysis/yt-3.0/commits/d07cd0907966/
Changeset:   d07cd0907966
Branch:      yt
User:        xarthisius
Date:        2013-03-16 16:03:29
Summary:     [test_pickle] use tempfile, add license, pep8, add test description
Affected #:  1 file

diff -r 5b79077a918d2c91fd74b734946138d81dbd83d5 -r d07cd090796667dee3970968eda4938a7aa2183a yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -1,8 +1,33 @@
-from yt.testing import fake_random_pf, assert_equal
-from yt.analysis_modules.level_sets.api import identify_contours
-import yt.data_objects.api 
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import cPickle
 import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
 
 def setup():
     """Test specific setup."""
@@ -11,27 +36,34 @@
 
 
 def test_save_load_pickle():
-
-    pf = fake_random_pf(64)
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
 
     # create extracted region from boolean (fairly complex object)
-    center = (pf.domain_left_edge + pf.domain_right_edge) / 2
-    sp_outer = pf.h.sphere(center, pf.domain_width[0])
-    sp_inner = pf.h.sphere(center, pf.domain_width[0]/10.0)
-    sp_boolean = pf.h.boolean([sp_outer, "NOT", sp_inner])
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
 
     minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
-    contour_threshold = min(minv*10, 0.9*maxv)
-    
-    contours = sp_boolean.extract_connected_sets("Density", 1, contour_threshold, maxv+1, log_space=True, cache=True)
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
 
     # save object
-    cPickle.dump(contours[1][0], open("myobject.cpkl", "wb"))
-    
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
     # load object
-    test_load = cPickle.load(open("myobject.cpkl", "rb"))
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
 
-    yield assert_equal, test_load != None, True
+    assert_equal.description = \
+        "%s: File was pickle-loaded succesfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
     yield assert_equal, len(contours[1][0]), len(test_load)
 
-    os.remove("myobject.cpkl")
+    os.remove(cpklfile.name)
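
The tempfile pattern above relies on delete=False: the file survives
close(), so it can be reopened by name and removed explicitly, and the
test no longer litters the working directory.  A minimal sketch:

    import cPickle
    import os
    import tempfile

    tmp = tempfile.NamedTemporaryFile(delete=False)
    cPickle.dump({"a": 1}, tmp)
    tmp.close()
    loaded = cPickle.load(open(tmp.name, "rb"))
    os.remove(tmp.name)
    print(loaded)   # {'a': 1}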


https://bitbucket.org/yt_analysis/yt-3.0/commits/37e33227a70d/
Changeset:   37e33227a70d
Branch:      yt
User:        MatthewTurk
Date:        2013-03-18 12:37:14
Summary:     Merged in xarthisius/yt (pull request #464)

[test_pickle] use tempfile, add license, pep8, add test description
Affected #:  1 file

diff -r b7e6a954277064a0cf50f17b93142e0dd72bd8e2 -r 37e33227a70d4db04d9715aaa195d1b5e6859c9f yt/data_objects/tests/test_pickle.py
--- a/yt/data_objects/tests/test_pickle.py
+++ b/yt/data_objects/tests/test_pickle.py
@@ -1,8 +1,33 @@
-from yt.testing import fake_random_pf, assert_equal
-from yt.analysis_modules.level_sets.api import identify_contours
-import yt.data_objects.api 
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import cPickle
 import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
 
 def setup():
     """Test specific setup."""
@@ -11,27 +36,34 @@
 
 
 def test_save_load_pickle():
-
-    pf = fake_random_pf(64)
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
 
     # create extracted region from boolean (fairly complex object)
-    center = (pf.domain_left_edge + pf.domain_right_edge) / 2
-    sp_outer = pf.h.sphere(center, pf.domain_width[0])
-    sp_inner = pf.h.sphere(center, pf.domain_width[0]/10.0)
-    sp_boolean = pf.h.boolean([sp_outer, "NOT", sp_inner])
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
 
     minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
-    contour_threshold = min(minv*10, 0.9*maxv)
-    
-    contours = sp_boolean.extract_connected_sets("Density", 1, contour_threshold, maxv+1, log_space=True, cache=True)
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
 
     # save object
-    cPickle.dump(contours[1][0], open("myobject.cpkl", "wb"))
-    
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
     # load object
-    test_load = cPickle.load(open("myobject.cpkl", "rb"))
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
 
-    yield assert_equal, test_load != None, True
+    assert_equal.description = \
+        "%s: File was pickle-loaded succesfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
     yield assert_equal, len(contours[1][0]), len(test_load)
 
-    os.remove("myobject.cpkl")
+    os.remove(cpklfile.name)


https://bitbucket.org/yt_analysis/yt-3.0/commits/592dbaec8a44/
Changeset:   592dbaec8a44
Branch:      yt
User:        MatthewTurk
Date:        2013-03-19 15:39:05
Summary:     I believe this fixes periodicity in radius.

http://paste.yt-project.org/show/3266/

I have started a mailing list discussion about this issue.
Affected #:  1 file

diff -r 37e33227a70d4db04d9715aaa195d1b5e6859c9f -r 592dbaec8a44664b878b286a23a14095ce1b694e yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -801,8 +801,9 @@
     for i, ax in enumerate('xyz'):
         np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
         if data.pf.periodicity[i] == True:
-            np.subtract(DW[i], r, rdw)
             np.abs(r, r)
+            np.subtract(r, DW[i], rdw)
+            np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
         np.power(r, 2.0, r)
         np.add(radius, r, radius)
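
In a periodic domain of width DW, the separation along an axis is the
smaller of |r| and DW - |r|; the old code computed DW - r before taking
the absolute value, which gave the wrong answer for negative offsets.
A quick numeric check of the fixed expression:

    import numpy as np

    DW = 1.0
    r = np.array([-0.9, -0.2, 0.4, 0.9])
    d = np.minimum(np.abs(r), np.abs(np.abs(r) - DW))
    print(d)   # [0.1 0.2 0.4 0.1]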


https://bitbucket.org/yt_analysis/yt-3.0/commits/7a62187ba23c/
Changeset:   7a62187ba23c
Branch:      yt
User:        ngoldbaum
Date:        2013-03-22 22:41:20
Summary:     Fixing set_axes_unit.  Closes #534.
Affected #:  2 files

diff -r 592dbaec8a44664b878b286a23a14095ce1b694e -r 7a62187ba23c171db740b23d0f44169e264283d7 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -48,7 +48,8 @@
 y_names = ['z','z','y']
 
 # How many of each thing are in an Mpc
-mpc_conversion = {'mpc'   : mpc_per_mpc,
+mpc_conversion = {'Mpc'   : mpc_per_mpc,
+                  'mpc'   : mpc_per_mpc,
                   'kpc'   : kpc_per_mpc,
                   'pc'    : pc_per_mpc,
                   'au'    : au_per_mpc,
@@ -56,7 +57,7 @@
                   'miles' : miles_per_mpc,
                   'cm'    : cm_per_mpc}
 
-# How many seconds are in each thig
+# How many seconds are in each thing
 sec_conversion = {'Gyr'   : sec_per_Gyr,
                   'Myr'   : sec_per_Myr,
                   'years' : sec_per_year,

diff -r 592dbaec8a44664b878b286a23a14095ce1b694e -r 7a62187ba23c171db740b23d0f44169e264283d7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -65,6 +65,7 @@
     ortho_find
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     GroupOwnership
+from yt.utilities.exceptions import YTUnitNotRecognized
 from yt.data_objects.time_series import \
     TimeSeriesData
 
@@ -726,6 +727,8 @@
         """
         # blind except because it could be in conversion_factors or units
         if unit_name is not None:
+            if isinstance(unit_name, str):
+                unit_name = (unit_name, unit_name)
             for un in unit_name:
                 try:
                     self.pf[un]
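
A brief usage sketch of what the fix permits (plot setup assumed, with
SlicePlot imported via the usual "from yt.mods import *"):

    slc = SlicePlot(pf, "z", "Density")
    slc.set_axes_unit("kpc")            # single string, now promoted to a pair
    slc.set_axes_unit(("kpc", "Mpc"))   # per-axis tuple still works

Note that the capitalized 'Mpc' spelling also resolves now that it has
been added to mpc_conversion.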


https://bitbucket.org/yt_analysis/yt-3.0/commits/7be6e111993a/
Changeset:   7be6e111993a
Branch:      yt
User:        jsoishi
Date:        2013-03-25 17:56:01
Summary:     Added a check for MHD when computing ThermalEnergy with
DualEnergyFormalism off.
Affected #:  1 file

diff -r 7a62187ba23c171db740b23d0f44169e264283d7 -r 7be6e111993a46fcbd6adb204057e29c18e9780d yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -134,10 +134,16 @@
         if data.pf["DualEnergyFormalism"]:
             return data["GasEnergy"]
         else:
-            return data["TotalEnergy"] - 0.5*(
-                   data["x-velocity"]**2.0
-                 + data["y-velocity"]**2.0
-                 + data["z-velocity"]**2.0 )
+            if data.pf["HydroMethod"] == 4 or data.pf["HydroMethod"] == 6:
+                return data["TotalEnergy"] - 0.5*(
+                    data["x-velocity"]**2.0
+                    + data["y-velocity"]**2.0
+                    + data["z-velocity"]**2.0 ) - data["MagneticPressure"]
+            else:
+                return data["TotalEnergy"] - 0.5*(
+                    data["x-velocity"]**2.0
+                    + data["y-velocity"]**2.0
+                    + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/8c458bcc4bce/
Changeset:   8c458bcc4bce
Branch:      yt
User:        jsoishi
Date:        2013-03-25 18:10:26
Summary:     Satisfying mjturk's code review.
Affected #:  1 file

diff -r 7be6e111993a46fcbd6adb204057e29c18e9780d -r 8c458bcc4bce43d2ca04442ec05dcce651011e75 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -134,16 +134,16 @@
         if data.pf["DualEnergyFormalism"]:
             return data["GasEnergy"]
         else:
-            if data.pf["HydroMethod"] == 4 or data.pf["HydroMethod"] == 6:
+            if data.pf["HydroMethod"] in (4,6):
                 return data["TotalEnergy"] - 0.5*(
                     data["x-velocity"]**2.0
                     + data["y-velocity"]**2.0
                     + data["z-velocity"]**2.0 ) - data["MagneticPressure"]
-            else:
-                return data["TotalEnergy"] - 0.5*(
-                    data["x-velocity"]**2.0
-                    + data["y-velocity"]**2.0
-                    + data["z-velocity"]**2.0 )
+
+            return data["TotalEnergy"] - 0.5*(
+                data["x-velocity"]**2.0
+                + data["y-velocity"]**2.0
+                + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/c087b133da9c/
Changeset:   c087b133da9c
Branch:      yt
User:        jsoishi
Date:        2013-03-25 18:23:13
Summary:     More cleanup to enhance readability.  Also caught a no-op but
semantically important physics change: the MHD branch now subtracts
MagneticEnergy rather than MagneticPressure.
Affected #:  1 file

diff -r 8c458bcc4bce43d2ca04442ec05dcce651011e75 -r c087b133da9ccdcc5b08d92f3a7d6a94efb6fcaa yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -130,20 +130,20 @@
 def _ThermalEnergy(field, data):
     if data.pf["HydroMethod"] == 2:
         return data["TotalEnergy"]
-    else:
-        if data.pf["DualEnergyFormalism"]:
-            return data["GasEnergy"]
-        else:
-            if data.pf["HydroMethod"] in (4,6):
-                return data["TotalEnergy"] - 0.5*(
-                    data["x-velocity"]**2.0
-                    + data["y-velocity"]**2.0
-                    + data["z-velocity"]**2.0 ) - data["MagneticPressure"]
+    
+    if data.pf["DualEnergyFormalism"]:
+        return data["GasEnergy"]
 
-            return data["TotalEnergy"] - 0.5*(
-                data["x-velocity"]**2.0
-                + data["y-velocity"]**2.0
-                + data["z-velocity"]**2.0 )
+    if data.pf["HydroMethod"] in (4,6):
+        return data["TotalEnergy"] - 0.5*(
+            data["x-velocity"]**2.0
+            + data["y-velocity"]**2.0
+            + data["z-velocity"]**2.0 ) - data["MagneticEnergy"]
+
+    return data["TotalEnergy"] - 0.5*(
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/1266fa57e30e/
Changeset:   1266fa57e30e
Branch:      yt
User:        xarthisius
Date:        2013-03-26 12:59:25
Summary:     [decompose] initialize 'p_size', skip decomposition for one proc. Fixes #537
Affected #:  1 file

diff -r c087b133da9ccdcc5b08d92f3a7d6a94efb6fcaa -r 1266fa57e30ef96b31e4fd6cf21a3ab0f839b11e yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -109,6 +109,10 @@
     fac = factorize_number(pieces)
     nfactors = len(fac[:, 2])
     best = 0.0
+    p_size = np.ones(3, dtype=np.int)
+    if pieces == 1:
+        return p_size
+
     while np.all(fac[:, 2] > 0):
         ldom = np.ones(3, dtype=np.int)
         for nfac in range(nfactors):


https://bitbucket.org/yt_analysis/yt-3.0/commits/5dcd737e56c4/
Changeset:   5dcd737e56c4
Branch:      yt
User:        xarthisius
Date:        2013-03-26 13:40:20
Summary:     [decompose] Avoid integer overflow while calculating 'ideal_bsize'
Affected #:  1 file

diff -r 1266fa57e30ef96b31e4fd6cf21a3ab0f839b11e -r 5dcd737e56c43faf694bddd7830df42263341b5d yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -69,8 +69,8 @@
     """ Evaluate longest to shortest edge ratio
         BEWARE: lot's of magic here """
     eff_dim = (n_d > 1).sum()
-    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
-                             ) ** (1.0 / eff_dim)
+    exp = float(eff_dim - 1) / float(eff_dim)
+    ideal_bsize = eff_dim * pieces ** (1.0 / eff_dim) * np.product(n_d) ** exp
     mask = np.where(n_d > 1)
     nd_arr = np.array(n_d, dtype=np.float64)[mask]
     bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
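
A sketch of the overflow being avoided, using a 2048^3 grid on a
typical 64-bit build (the exact wrapped value is platform-dependent):

    import numpy as np

    n_d = np.array([2048, 2048, 2048])
    pieces, eff_dim = 64, 3
    # Old form: np.product(n_d) ** 2 wraps around in int64 before the
    # fractional power is applied, so the result is garbage.
    old = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)) ** (1.0 / eff_dim)
    # New form: move to floating point before the powers are taken.
    exp = float(eff_dim - 1) / float(eff_dim)
    new = eff_dim * pieces ** (1.0 / eff_dim) * np.product(n_d) ** exp
    print(old)   # collapses to 0.0 here after the int64 wrap-around
    print(new)   # ~5.03e7, as expected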


https://bitbucket.org/yt_analysis/yt-3.0/commits/39c3e1ab1c4d/
Changeset:   39c3e1ab1c4d
Branch:      yt
User:        jnaiman
Date:        2013-03-06 23:02:54
Summary:     Adding jnaiman's stuff
Affected #:  1 file

diff -r 592dbaec8a44664b878b286a23a14095ce1b694e -r 39c3e1ab1c4d7c95d229f30382f1d0786e5ced1c yt/visualization/volume_rendering/jnaiman.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/jnaiman.py
@@ -0,0 +1,1 @@
+  


https://bitbucket.org/yt_analysis/yt-3.0/commits/f553aa4f6794/
Changeset:   f553aa4f6794
Branch:      yt
User:        jnaiman
Date:        2013-03-06 23:46:32
Summary:     Adding OBJ exporter
Affected #:  1 file

diff -r 39c3e1ab1c4d7c95d229f30382f1d0786e5ced1c -r f553aa4f679473602e37866a7d0ce9b0c54b3f47 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4374,6 +4374,159 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = None, dist_fac = None,
+                   color_field = None, color_map = "algae", 
+                   color_log = True, plot_index = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extentions included - both obj & mtl files 
+            are created.
+        transparency : list floats
+            This gives the transparency of the output surface plot.  If multiple 
+            surface plots, this will be a list.  Values from 0.0 (invisible) to 
+            1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sample and colormapped?
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If none, then only 1 plot.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = [1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        >>>     surf = pf.h.surface(dd,'Density',r)
+        >>>     surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf, 
+        >>>                     plot_index = i)
+        """
+        if transparency is None:
+            transparency = 1.0
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        #only_on_root(self._export_obj, "%s %12.12e, %12.12e %s %s %r %d" 
+        #             %(filename, transparency, dist_fac, color_field, 
+        #               color_map, color_log, plot_index))
+        if MPI.COMM_WORLD.rank == 0:  # this works, 2 seperate calls, in for loops
+            self._export_obj(filename, transparency, dist_fac, color_field, 
+                             color_map, color_log, plot_index)
+
+    def _color_samples_obj(self, cs, color_log, color_map, arr): # this now holds for obj files
+            if color_log: cs = np.log10(cs)
+            mi, ma = cs.min(), cs.max()
+            cs = (cs - mi) / (ma - mi)
+            # to get color indicies for OBJ formatting
+            from yt.visualization._colormap_data import color_map_luts
+            lut = color_map_luts[color_map]
+            x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+            arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, color_map = "algae", color_log = True, 
+                    plot_index = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map]
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        self._color_samples_obj(cs, color_log, color_map, f) # map color values to color scheme
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization


https://bitbucket.org/yt_analysis/yt-3.0/commits/e1e95586fbed/
Changeset:   e1e95586fbed
Branch:      yt
User:        jnaiman
Date:        2013-03-07 02:29:49
Summary:     Adding OBJ exporter.
Affected #:  1 file

diff -r f553aa4f679473602e37866a7d0ce9b0c54b3f47 -r e1e95586fbed2ff19bec9741361e687ec9a3b916 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,9 @@
 import itertools
 import shelve
 import cStringIO
+from mpi4py import MPI
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg


https://bitbucket.org/yt_analysis/yt-3.0/commits/cd5da3f8dca8/
Changeset:   cd5da3f8dca8
Branch:      yt
User:        jnaiman
Date:        2013-03-07 02:34:55
Summary:     Added an OBJ exporter.  Note: this exports both a .obj file and
a "material file" (an .mtl extension).  The .obj file references the .mtl
file in its header for color/transparency information.
Affected #:  1 file

diff -r e1e95586fbed2ff19bec9741361e687ec9a3b916 -r cd5da3f8dca8b5e38684e76817c2b139d58869c3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4381,7 +4381,10 @@
                    color_field = None, color_map = "algae", 
                    color_log = True, plot_index = None):
         r"""This exports the surface to the OBJ format, suitable for visualization
-        in many different programs (e.g., Blender).
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this.
 
         Parameters
         ----------


https://bitbucket.org/yt_analysis/yt-3.0/commits/1366d3d7b0e6/
Changeset:   1366d3d7b0e6
Branch:      yt
User:        jnaiman
Date:        2013-03-08 02:03:19
Summary:     Updated export_obj.  No MPI calls, includes emissivity.
Affected #:  1 file

diff -r cd5da3f8dca8b5e38684e76817c2b139d58869c3 -r 1366d3d7b0e6660d7cf917a36b26a70911f3a8cf yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,7 +36,6 @@
 import itertools
 import shelve
 import cStringIO
-from mpi4py import MPI
 import fileinput
 from re import finditer
 
@@ -4378,13 +4377,17 @@
         return vv
 
     def export_obj(self, filename, transparency = None, dist_fac = None,
-                   color_field = None, color_map = "algae", 
-                   color_log = True, plot_index = None):
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
         r"""This exports the surface to the OBJ format, suitable for visualization
         in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
         and an .mtl file, both with the general 'filename' as a prefix.  
         The .obj file points to the .mtl file in its header, so if you move the 2 
-        files, make sure you change the .obj header to account for this.
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
 
         Parameters
         ----------
@@ -4400,12 +4403,25 @@
             Divide the axes distances by this amount.
         color_field : string
             Should a field be sample and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
         color_map : string
             Which color map should be applied?
         color_log : bool
             Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
         plot_index : integer
             Index of plot for multiple plots.  If none, then only 1 plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
 
         Examples
         --------
@@ -4417,13 +4433,30 @@
         >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
 
         >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
         >>> rhos = [1e-24, 1e-25]
         >>> trans = [0.5, 1.0]
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> for i, r in enumerate(rhos):
-        >>>     surf = pf.h.surface(dd,'Density',r)
-        >>>     surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf, 
-        >>>                     plot_index = i)
+        >>>     surf = pf.h.surface(sp,'Density',r)
+        >>>     surf.export_obj("my_galaxy", transparency=trans, 
+        >>>                      color_field='Temperature', dist_fac = distf, 
+        >>>                      plot_index = i, color_field_max = ma, 
+        >>>                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        >>>     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        >>>     surf = pf.h.surface(sp,'Density',r)
+        >>>     surf.export_obj("my_galaxy", transparency=trans, 
+        >>>                      color_field='Temperature', emit_field = 'Emissivity', 
+        >>>                      dist_fac = distf, plot_index = i)
+
         """
         if transparency is None:
             transparency = 1.0
@@ -4432,27 +4465,55 @@
         elif color_field is not None:
             if color_field not in self.field_data:
                 self[color_field]
-        #only_on_root(self._export_obj, "%s %12.12e, %12.12e %s %s %r %d" 
-        #             %(filename, transparency, dist_fac, color_field, 
-        #               color_map, color_log, plot_index))
-        if MPI.COMM_WORLD.rank == 0:  # this works, 2 seperate calls, in for loops
-            self._export_obj(filename, transparency, dist_fac, color_field, 
-                             color_map, color_log, plot_index)
-
-    def _color_samples_obj(self, cs, color_log, color_map, arr): # this now holds for obj files
+        if emit_field is not None:
+            if color_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
             if color_log: cs = np.log10(cs)
-            mi, ma = cs.min(), cs.max()
+            if emit_log: em = np.log10(em)
+            if color_field_min is None:
+                mi = cs.min()
+            else:
+                mi = color_field_min
+                if color_log: mi = np.log10(mi)
+            if color_field_max is None:
+                ma = cs.max()
+            else:
+                ma = color_field_max
+                if color_log: ma = np.log10(ma)
             cs = (cs - mi) / (ma - mi)
             # to get color indicies for OBJ formatting
             from yt.visualization._colormap_data import color_map_luts
             lut = color_map_luts[color_map]
             x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
             arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+            # now, get emission
+            if emit_field_min is None:
+                emi = em.min()
+            else:
+                emi = emit_field_min
+                if emit_log: emi = np.log10(emi)
+            if emit_field_max is None:
+                ema = em.max()
+            else:
+                ema = emit_field_max
+                if emit_log: ema = np.log10(ema)
+            em = (em - emi)/(ema - emi)
+            x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+            arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
 
     @parallel_root_only
     def _export_obj(self, filename, transparency, dist_fac = None, 
-                    color_field = None, color_map = "algae", color_log = True, 
-                    plot_index = None):
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
         if plot_index is None:
             plot_index = 0
         if isinstance(filename, file):
@@ -4473,7 +4534,7 @@
                 cc = int(linesave[p[len(p)-1]:])+1
                 fobj = open(filename + '.obj', "a")
                 fmtl = open(filename + '.mtl', 'a')
-        ftype = [("cind", "uint8")]
+        ftype = [("cind", "uint8"), ("emit", "float")]
         vtype = [("x","float"),("y","float"), ("z","float")]
         if plot_index == 0:
             fobj.write("# yt OBJ file\n")
@@ -4481,8 +4542,6 @@
             fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
             fmtl.write("# yt MLT file\n")
             fmtl.write("# www.yt-project.com\n\n")
-        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
-        lut = color_map_luts[color_map]
         #(0) formulate vertices
         nv = self.vertices.shape[1] # number of groups of vertices
         f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
@@ -4491,7 +4550,17 @@
             cs = self[color_field]
         else:
             cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
-        self._color_samples_obj(cs, color_log, color_map, f) # map color values to color scheme
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
         if dist_fac is None: # then normalize by bounds
             DLE = self.pf.domain_left_edge
             DRE = self.pf.domain_right_edge
@@ -4517,6 +4586,7 @@
             fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
             fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
             fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
             fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
             fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
         #(2) write vertices


https://bitbucket.org/yt_analysis/yt-3.0/commits/3790c2e60603/
Changeset:   3790c2e60603
Branch:      yt
User:        MatthewTurk
Date:        2013-03-22 11:33:58
Summary:     Removing empty file
Affected #:  1 file

diff -r 1366d3d7b0e6660d7cf917a36b26a70911f3a8cf -r 3790c2e60603745298e69ee46b4483f1152395dd yt/visualization/volume_rendering/jnaiman.py
--- a/yt/visualization/volume_rendering/jnaiman.py
+++ /dev/null
@@ -1,1 +0,0 @@
-  


https://bitbucket.org/yt_analysis/yt-3.0/commits/585d7faa337b/
Changeset:   585d7faa337b
Branch:      yt
User:        MatthewTurk
Date:        2013-03-28 19:33:22
Summary:     Changing transparency to be a float.  De-indenting a function.
Affected #:  1 file

diff -r 3790c2e60603745298e69ee46b4483f1152395dd -r 585d7faa337bc7139b4d24ad5fd7abc7dcb813f1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4376,7 +4376,7 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
-    def export_obj(self, filename, transparency = None, dist_fac = None,
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
                    color_field = None, emit_field = None, color_map = "algae", 
                    color_log = True, emit_log = True, plot_index = None, 
                    color_field_max = None, color_field_min = None, 
@@ -4395,10 +4395,9 @@
             The file this will be exported to.  This cannot be a file-like object.
             Note - there are no file extentions included - both obj & mtl files 
             are created.
-        transparency : list floats
-            This gives the transparency of the output surface plot.  If multiple 
-            surface plots, this will be a list.  Values from 0.0 (invisible) to 
-            1.0 (opaque).
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
         dist_fac : float
             Divide the axes distances by this amount.
         color_field : string
@@ -4458,8 +4457,6 @@
         >>>                      dist_fac = distf, plot_index = i)
 
         """
-        if transparency is None:
-            transparency = 1.0
         if self.vertices is None:
             self.get_data(color_field,"face")
         elif color_field is not None:
@@ -4475,38 +4472,38 @@
     def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
                            color_field_max, color_field_min, 
                            emit_field_max, emit_field_min): # this now holds for obj files
-            if color_log: cs = np.log10(cs)
-            if emit_log: em = np.log10(em)
-            if color_field_min is None:
-                mi = cs.min()
-            else:
-                mi = color_field_min
-                if color_log: mi = np.log10(mi)
-            if color_field_max is None:
-                ma = cs.max()
-            else:
-                ma = color_field_max
-                if color_log: ma = np.log10(ma)
-            cs = (cs - mi) / (ma - mi)
-            # to get color indices for OBJ formatting
-            from yt.visualization._colormap_data import color_map_luts
-            lut = color_map_luts[color_map]
-            x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
-            arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
-            # now, get emission
-            if emit_field_min is None:
-                emi = em.min()
-            else:
-                emi = emit_field_min
-                if emit_log: emi = np.log10(emi)
-            if emit_field_max is None:
-                ema = em.max()
-            else:
-                ema = emit_field_max
-                if emit_log: ema = np.log10(ema)
-            em = (em - emi)/(ema - emi)
-            x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
-            arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
 
     @parallel_root_only
     def _export_obj(self, filename, transparency, dist_fac = None, 
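
The de-indented _color_samples_obj boils down to a min-max scaling of the sampled field followed by a cast to a colormap index. A rough standalone sketch of that mapping, assuming a 256-entry LUT (illustrative names, not yt's API):

    import numpy as np

    cs = np.random.random(100) * 1e-25      # stand-in for the sampled color field
    mi, ma = cs.min(), cs.max()              # or the user-supplied min/max
    cs = (cs - mi) / (ma - mi)               # normalize to [0, 1]
    n_colors = 256                           # LUT length
    # the identity np.interp(cs, x, x) in the real code also clamps to [0, 1]
    cind = (np.clip(cs, 0.0, 1.0) * (n_colors - 1)).astype("uint8")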


https://bitbucket.org/yt_analysis/yt-3.0/commits/7b3c323b3346/
Changeset:   7b3c323b3346
Branch:      yt
User:        MatthewTurk
Date:        2013-03-28 21:56:55
Summary:     Updating docstrings for export_obj.
Affected #:  1 file

diff -r 585d7faa337bc7139b4d24ad5fd7abc7dcb813f1 -r 7b3c323b3346f471e58df7ad23d31791d1abe298 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -4426,7 +4426,7 @@
         --------
 
         >>> sp = pf.h.sphere("max", (10, "kpc"))
-        >>> trans = [1.0]
+        >>> trans = 1.0
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> surf = pf.h.surface(sp, "Density", 5e-27)
         >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
@@ -4437,24 +4437,24 @@
         >>> trans = [0.5, 1.0]
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> for i, r in enumerate(rhos):
-        >>>     surf = pf.h.surface(sp,'Density',r)
-        >>>     surf.export_obj("my_galaxy", transparency=trans, 
-        >>>                      color_field='Temperature', dist_fac = distf, 
-        >>>                      plot_index = i, color_field_max = ma, 
-        >>>                      color_field_min = mi)
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
 
         >>> sp = pf.h.sphere("max", (10, "kpc"))
         >>> rhos = [1e-24, 1e-25]
         >>> trans = [0.5, 1.0]
         >>> distf = 3.1e18*1e3 # distances into kpc
         >>> def _Emissivity(field, data):
-        >>>     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
         >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
         >>> for i, r in enumerate(rhos):
-        >>>     surf = pf.h.surface(sp,'Density',r)
-        >>>     surf.export_obj("my_galaxy", transparency=trans, 
-        >>>                      color_field='Temperature', emit_field = 'Emissivity', 
-        >>>                      dist_fac = distf, plot_index = i)
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
 
         """
         if self.vertices is None:
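
The change from >>> to ... on the continuation lines is not cosmetic: doctest parses each >>> as the start of a new statement, so a multi-line loop written entirely with >>> breaks when the docstring examples are executed. The convention, in short:

    >>> for i in range(2):   # '>>>' begins a statement
    ...     x = i            # '...' marks its continuation lines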


https://bitbucket.org/yt_analysis/yt-3.0/commits/ae7a263311d3/
Changeset:   ae7a263311d3
Branch:      yt
User:        MatthewTurk
Date:        2013-03-29 11:46:36
Summary:     Merged in MatthewTurk/yt (pull request #466)

Porting OBJ exporter
Affected #:  1 file

diff -r 5dcd737e56c43faf694bddd7830df42263341b5d -r ae7a263311d3d0cfd7809e7841501dc724384106 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,8 @@
 import itertools
 import shelve
 import cStringIO
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -4374,6 +4376,230 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extensions included - both obj & mtl files 
+            are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If none, then only 1 plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if color_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization


https://bitbucket.org/yt_analysis/yt-3.0/commits/4c9a9cd7eac8/
Changeset:   4c9a9cd7eac8
Branch:      yt
User:        drudd
Date:        2013-04-02 04:59:02
Summary:     Added detection of UChicago RCC Midway to install script.
Affected #:  1 file

diff -r ae7a263311d3d0cfd7809e7841501dc724384106 -r 4c9a9cd7eac8541bc10a747fb90ad2fdf200761a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -170,6 +170,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."


https://bitbucket.org/yt_analysis/yt-3.0/commits/27e5098852ae/
Changeset:   27e5098852ae
Branch:      yt
User:        samskillman
Date:        2013-04-02 17:08:41
Summary:     Swapping loop order in plotwindow tests. Speeds up nosetests for test_plotwindow
from 28 seconds to 12 seconds.
Affected #:  1 file

diff -r 4c9a9cd7eac8541bc10a747fb90ad2fdf200761a -r 27e5098852ae6bd3f6e4e4431a6e902ab7138b00 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -116,18 +116,27 @@
     test_pf = fake_random_pf(64)
     test_flnms = [None, 'test.png', 'test.eps',
                   'test.ps', 'test.pdf']
-    for fname in test_flnms:
-        for dim in [0, 1, 2]:
-            obj = SlicePlot(test_pf, dim, 'Density')
+
+    for dim in [0, 1, 2]:
+        obj = SlicePlot(test_pf, dim, 'Density')
+        obj.set_window_size(1)
+        for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
-            obj = ProjectionPlot(test_pf, dim, 'Density')
+    for dim in [0, 1, 2]:
+        obj = ProjectionPlot(test_pf, dim, 'Density')
+        obj.set_window_size(1)
+        for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
-        obj = OffAxisSlicePlot(test_pf, normal, 'Density')
+    obj = OffAxisSlicePlot(test_pf, normal, 'Density')
+    obj.set_window_size(1)
+    for fname in test_flnms:
         yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
-        obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
+    obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
+    obj.set_window_size(1)
+    for fname in test_flnms:
         yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
     os.chdir(curdir)
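
The speedup comes from hoisting plot construction, the expensive step, out of the filename loop: each plot is now built once per axis and saved five times, rather than rebuilt for every output format.

    # before: 3 dims x 5 filenames = 15 SlicePlot constructions
    # after:  3 constructions, 15 (cheap) saves
    for dim in [0, 1, 2]:
        obj = SlicePlot(test_pf, dim, 'Density')   # built once per axis
        for fname in test_flnms:
            obj.save(fname)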


https://bitbucket.org/yt_analysis/yt-3.0/commits/85145a05a5be/
Changeset:   85145a05a5be
Branch:      yt
User:        samskillman
Date:        2013-04-02 21:00:16
Summary:     Taking out the window size adjustments.
Affected #:  1 file

diff -r 27e5098852ae6bd3f6e4e4431a6e902ab7138b00 -r 85145a05a5bed50af23759d545e4f2aa46a69fdb yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -119,23 +119,19 @@
 
     for dim in [0, 1, 2]:
         obj = SlicePlot(test_pf, dim, 'Density')
-        obj.set_window_size(1)
         for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
     for dim in [0, 1, 2]:
         obj = ProjectionPlot(test_pf, dim, 'Density')
-        obj.set_window_size(1)
         for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
     obj = OffAxisSlicePlot(test_pf, normal, 'Density')
-    obj.set_window_size(1)
     for fname in test_flnms:
         yield assert_equal, assert_fname(obj.save(fname)[0]), True
 
     obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
-    obj.set_window_size(1)
     for fname in test_flnms:
         yield assert_equal, assert_fname(obj.save(fname)[0]), True
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/f6be5073ad1b/
Changeset:   f6be5073ad1b
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 01:34:26
Summary:     First draft of plot window answer test framework.
Affected #:  1 file

diff -r 5dcd737e56c43faf694bddd7830df42263341b5d -r f6be5073ad1befc78ba6c0985abf4ad753515780 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -36,6 +36,8 @@
 from yt.config import ytcfg
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
+from matplotlib.testing.compare import compare_images
+import yt.visualization.plot_window as pw
 import cPickle
 import shelve
 
@@ -303,6 +305,16 @@
         obj = cls(*obj_type[1])
         return obj
 
+    def create_plot(self, pf, plot_type, plot_args, plot_kwargs = None):
+        # plot_type should be a string
+        # plot_args should be a tuple
+        # plot_kwargs should be a dict
+        if obj_type is None:
+            raise RuntimeError('Must explicitly request a plot type')
+        cls = getattr(pw, plot_type)
+        plot = cls(*plot_args, **plot_kwargs)
+        return plot
+
     @property
     def sim_center(self):
         """
@@ -547,6 +559,31 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+class PlotWindowAttributeTest(AnswerTestingTest):
+    _type_name = "PlotWindowAttribute"
+    _attrs = ('plot_args', 'attr_name', 'attr_args')
+    def __init__(self, pf_fn, plot_args, attr_name, attr_args, tolerance=1e-3)
+        super(PlotWindowAttributeTest, self).__init__(pf_fn)
+        self.plot_args = plot_args
+        self.plot_kwargs = {} # hard-coding for now.
+        self.attr_name = attr_name
+        self.attr_args = attr_args
+        self.tolerance = tolerance
+
+    def run(self):
+        plot = self.create_plot(self.pf, self.plot_type,
+                                self.plot_args, self.plot_kwargs)
+        attr = getattr(plot, self.attr_name)
+        attr(*self.attr_args)
+        return plot
+
+    def compare(self, new_result, old_result):
+        fns = []
+        fns.append(old_result.save('old'))
+        fns.append(new_result.save('new'))
+        compare_images(fns[0], fns[1], self.tolerance)
+        #for fn in fns: os.remove(fn)
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
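
The comparison rests on matplotlib's testing helper. A minimal sketch of how compare_images behaves (file names illustrative): it returns None when the two PNGs agree within the RMS tolerance, and an error report otherwise.

    from matplotlib.testing.compare import compare_images

    result = compare_images('old.png', 'new.png', tol=1e-3)
    assert result is None, result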


https://bitbucket.org/yt_analysis/yt-3.0/commits/92bcf02fe2e1/
Changeset:   92bcf02fe2e1
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 01:35:11
Summary:     Preventing the command line plotting tools from plotting along dummy dimensions.
Affected #:  1 file

diff -r f6be5073ad1befc78ba6c0985abf4ad753515780 -r 92bcf02fe2e1445c894277248f16b3058ce628cc yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1292,7 +1292,9 @@
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = np.array(center)
-        if args.axis == 4:
+        if pf.dimensionality < 3:
+            axes = [2]
+        elif args.axis == 4:
             axes = range(3)
         else:
             axes = [args.axis]


https://bitbucket.org/yt_analysis/yt-3.0/commits/b73721c13839/
Changeset:   b73721c13839
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 02:39:07
Summary:     Looking for decimals rather than a floating point tolerance.
Affected #:  1 file

diff -r 92bcf02fe2e1445c894277248f16b3058ce628cc -r b73721c1383941ee283d65851fc092ccfd1238d1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -562,13 +562,13 @@
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_args', 'attr_name', 'attr_args')
-    def __init__(self, pf_fn, plot_args, attr_name, attr_args, tolerance=1e-3)
+    def __init__(self, pf_fn, plot_args, attr_name, attr_args, decimals=3):
         super(PlotWindowAttributeTest, self).__init__(pf_fn)
         self.plot_args = plot_args
         self.plot_kwargs = {} # hard-coding for now.
         self.attr_name = attr_name
         self.attr_args = attr_args
-        self.tolerance = tolerance
+        self.decimals = decimals
 
     def run(self):
         plot = self.create_plot(self.pf, self.plot_type,
@@ -581,7 +581,7 @@
         fns = []
         fns.append(old_result.save('old'))
         fns.append(new_result.save('new'))
-        compare_images(fns[0], fns[1], self.tolerance)
+        compare_images(fns[0], fns[1], 10**(-self.decimals))
         #for fn in fns: os.remove(fn)
 
 def requires_pf(pf_fn, big_data = False):
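
The switch to decimals just re-parameterizes the same quantity: the comparison now uses 10**(-decimals), so the default decimals=3 reproduces the old tolerance of 1e-3.

    decimals = 3
    tolerance = 10 ** (-decimals)   # == 0.001, the previous default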


https://bitbucket.org/yt_analysis/yt-3.0/commits/8492c5d017b2/
Changeset:   8492c5d017b2
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 03:21:15
Summary:     Adding empty __init__.py files for directories with unit tests.
This makes it easier to test only one set of unit tests.
Affected #:  5 files



https://bitbucket.org/yt_analysis/yt-3.0/commits/ea28411d2bec/
Changeset:   ea28411d2bec
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 05:20:42
Summary:     Moving some attributes from the base plot window class to PWViewerExtJS4
Affected #:  1 file

diff -r 8492c5d017b2aef59c45457cc322f2249b35b37a -r ea28411d2becca2266680e97cf5e432d2fbed5aa yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -515,18 +515,6 @@
     def set_antialias(self,aa):
         self.antialias = aa
 
-    @invalidate_plot
-    def set_contour_info(self, field_name, n_cont = 8, colors = None,
-                         logit = True):
-        if field_name == "None" or n_cont == 0:
-            self._contour_info = None
-            return
-        self._contour_info = (field_name, n_cont, colors, logit)
-
-    @invalidate_plot
-    def set_vector_info(self, skip, scale = 1):
-        self._vector_info = (skip, scale)
-
     @invalidate_data
     def set_buff_size(self, size):
         """Sets a new buffer size for the fixed resolution buffer
@@ -736,59 +724,6 @@
                     raise YTUnitNotRecognized(un)
         self._axes_unit_names = unit_name
 
-    def get_metadata(self, field, strip_mathml = True, return_string = True):
-        fval = self._frb[field]
-        mi = fval.min()
-        ma = fval.max()
-        x_width = self.xlim[1] - self.xlim[0]
-        y_width = self.ylim[1] - self.ylim[0]
-        if self._axes_unit_names is None:
-            unit = get_smallest_appropriate_unit(x_width, self.pf)
-            unit = (unit, unit)
-        else:
-            unit = self._axes_unit_names
-        units = self.get_field_units(field, strip_mathml)
-        center = getattr(self._frb.data_source, "center", None)
-        if center is None or self._frb.axis == 4:
-            xc, yc, zc = -999, -999, -999
-        else:
-            center[x_dict[self._frb.axis]] = 0.5 * (
-                self.xlim[0] + self.xlim[1])
-            center[y_dict[self._frb.axis]] = 0.5 * (
-                self.ylim[0] + self.ylim[1])
-            xc, yc, zc = center
-        if return_string:
-            md = _metadata_template % dict(
-                pf = self.pf,
-                x_width = x_width*self.pf[unit[0]],
-                y_width = y_width*self.pf[unit[1]],
-                axes_unit_names = unit[0], colorbar_unit = units, 
-                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
-        else:
-            md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit[0]],
-                      y_width = y_width*self.pf[unit[1]],
-                      axes_unit_names = unit, colorbar_unit = units, 
-                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
-        return md
-
-    def get_field_units(self, field, strip_mathml = True):
-        ds = self._frb.data_source
-        pf = self.pf
-        if ds._type_name in ("slice", "cutting"):
-            units = pf.field_info[field].get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
-                                        ds.proj_style == "mip"):
-            units = pf.field_info[field].get_units()
-        elif ds._type_name == "proj":
-            units = pf.field_info[field].get_projected_units()
-        else:
-            units = ""
-        if strip_mathml:
-            units = units.replace(r"\rm{", "").replace("}","")
-        return units
-
-
 class PWViewerMPL(PWViewer):
     """Viewer using matplotlib as a backend via the WindowPlotMPL. 
 
@@ -1655,6 +1590,70 @@
         print img_x, img_y, dx, dy, new_x, new_y
         self.set_center((new_x, new_y))
 
+    def get_field_units(self, field, strip_mathml = True):
+        ds = self._frb.data_source
+        pf = self.pf
+        if ds._type_name in ("slice", "cutting"):
+            units = pf.field_info[field].get_units()
+        elif ds._type_name == "proj" and (ds.weight_field is not None or 
+                                        ds.proj_style == "mip"):
+            units = pf.field_info[field].get_units()
+        elif ds._type_name == "proj":
+            units = pf.field_info[field].get_projected_units()
+        else:
+            units = ""
+        if strip_mathml:
+            units = units.replace(r"\rm{", "").replace("}","")
+        return units
+
+    def get_metadata(self, field, strip_mathml = True, return_string = True):
+        fval = self._frb[field]
+        mi = fval.min()
+        ma = fval.max()
+        x_width = self.xlim[1] - self.xlim[0]
+        y_width = self.ylim[1] - self.ylim[0]
+        if self._axes_unit_names is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
+        else:
+            unit = self._axes_unit_names
+        units = self.get_field_units(field, strip_mathml)
+        center = getattr(self._frb.data_source, "center", None)
+        if center is None or self._frb.axis == 4:
+            xc, yc, zc = -999, -999, -999
+        else:
+            center[x_dict[self._frb.axis]] = 0.5 * (
+                self.xlim[0] + self.xlim[1])
+            center[y_dict[self._frb.axis]] = 0.5 * (
+                self.ylim[0] + self.ylim[1])
+            xc, yc, zc = center
+        if return_string:
+            md = _metadata_template % dict(
+                pf = self.pf,
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
+        else:
+            md = dict(pf = self.pf,
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
+        return md
+
+    @invalidate_plot
+    def set_contour_info(self, field_name, n_cont = 8, colors = None,
+                         logit = True):
+        if field_name == "None" or n_cont == 0:
+            self._contour_info = None
+            return
+        self._contour_info = (field_name, n_cont, colors, logit)
+
+    @invalidate_plot
+    def set_vector_info(self, skip, scale = 1):
+        self._vector_info = (skip, scale)
+
     @invalidate_data
     def set_current_field(self, field):
         self._current_field = field


https://bitbucket.org/yt_analysis/yt-3.0/commits/b08d7640f4fb/
Changeset:   b08d7640f4fb
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 06:33:10
Summary:     Finishing up plot window answer tests.  Deleting some trailing whitespace.
Affected #:  2 files

diff -r ea28411d2becca2266680e97cf5e432d2fbed5aa -r b08d7640f4fb8ebfa4870ab7c9fe9d1f9bc15fe6 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -98,7 +98,7 @@
                 self.store_name = options.answer_name
             self.compare_name = None
         # if we're not storing, then we're comparing, and we want default
-        # comparison name to be the latest gold standard 
+        # comparison name to be the latest gold standard
         # either on network or local
         else:
             if options.answer_name is None:
@@ -119,18 +119,18 @@
             self.compare_name = None
         elif self.compare_name == "latest":
             self.compare_name = _latest
-            
-        # Local/Cloud storage 
+
+        # Local/Cloud storage
         if options.local_results:
             storage_class = AnswerTestLocalStorage
-            # Fix up filename for local storage 
+            # Fix up filename for local storage
             if self.compare_name is not None:
                 self.compare_name = "%s/%s/%s" % \
-                    (os.path.realpath(options.output_dir), self.compare_name, 
+                    (os.path.realpath(options.output_dir), self.compare_name,
                      self.compare_name)
             if self.store_name is not None:
                 name_dir_path = "%s/%s" % \
-                    (os.path.realpath(options.output_dir), 
+                    (os.path.realpath(options.output_dir),
                     self.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.makedirs(name_dir_path)
@@ -149,7 +149,7 @@
 
     def finalize(self, result=None):
         if self.store_results is False: return
-        self.storage.dump(self.result_storage)        
+        self.storage.dump(self.result_storage)
 
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
@@ -157,9 +157,9 @@
         self.answer_name = answer_name
         self.cache = {}
     def dump(self, result_storage, result):
-        raise NotImplementedError 
+        raise NotImplementedError
     def get(self, pf_name, default=None):
-        raise NotImplementedError 
+        raise NotImplementedError
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
@@ -197,7 +197,7 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name))
             if tk is not None: tk.delete()
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
@@ -279,7 +279,7 @@
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None or self.description not in dd: 
+            if dd is None or self.description not in dd:
                 raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
@@ -347,7 +347,7 @@
         args = [self._type_name, str(self.pf), oname]
         args += [str(getattr(self, an)) for an in self._attrs]
         return "_".join(args)
-        
+
 class FieldValuesTest(AnswerTestingTest):
     _type_name = "FieldValues"
     _attrs = ("field", )
@@ -369,7 +369,7 @@
     def compare(self, new_result, old_result):
         err_msg = "Field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_allclose(new_result, old_result, 10.**(-self.decimals),
@@ -393,12 +393,12 @@
     def compare(self, new_result, old_result):
         err_msg = "All field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
                              err_msg=err_msg, verbose=True)
-            
+
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
@@ -438,7 +438,7 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_allclose(new_result[k], old_result[k], 
+                assert_allclose(new_result[k], old_result[k],
                                  10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
@@ -517,7 +517,7 @@
             assert_equal(new_result[i], old_result[i],
                          err_msg="Output times not equal.",
                          verbose=True)
-        
+
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -562,10 +562,10 @@
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
     _attrs = ('plot_args', 'attr_name', 'attr_args')
-    def __init__(self, pf_fn, plot_args, attr_name, attr_args, decimals=3):
+    def __init__(self, pf_fn, plot_field, plot_axis, attr_name, attr_args,
+                 decimals):
         super(PlotWindowAttributeTest, self).__init__(pf_fn)
         self.plot_args = plot_args
-        self.plot_kwargs = {} # hard-coding for now.
         self.attr_name = attr_name
         self.attr_args = attr_args
         self.decimals = decimals
@@ -639,4 +639,3 @@
 
     def __call__(self):
         self.args[0](*self.args[1:])
-

diff -r ea28411d2becca2266680e97cf5e432d2fbed5aa -r b08d7640f4fb8ebfa4870ab7c9fe9d1f9bc15fe6 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -27,7 +27,10 @@
 import shutil
 from yt.testing import \
     fake_random_pf, assert_equal, assert_rel_equal
-from yt.mods import \
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    data_dir_load
+from yt.visualization.api import \
     SlicePlot, ProjectionPlot, OffAxisSlicePlot, OffAxisProjectionPlot
 
 
@@ -65,24 +68,24 @@
 
 def test_setwidth():
     pf = fake_random_pf(64)
-    
+
     slc = SlicePlot(pf, 0, 'Density')
 
     yield assert_equal, [slc.xlim, slc.ylim, slc.width], \
         [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)]
-    
+
     slc.set_width((0.5,0.8))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15
 
     slc.set_width(15,'kpc')
-    
+
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(-7.5/pf['kpc'], 7.5/pf['kpc']),
          (-7.5/pf['kpc'], 7.5/pf['kpc']),
          (15/pf['kpc'], 15/pf['kpc'])], 15
-    
+
     slc.set_width((15,'kpc'))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
@@ -90,7 +93,7 @@
          (-7.5/pf['kpc'], 7.5/pf['kpc']),
          (15/pf['kpc'], 15/pf['kpc'])], 15
 
-    slc.set_width(((15,'kpc'),(10,'kpc'))) 
+    slc.set_width(((15,'kpc'),(10,'kpc')))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(-7.5/pf['kpc'], 7.5/pf['kpc']),
@@ -105,7 +108,7 @@
          (15/pf['kpc'], 10/pf['kpc'])], 15
 
 def test_save():
-    """Main test suite for PlotWindow."""
+    """Test plot window creation and saving to disk."""
     # Perform I/O in safe place instead of yt main dir
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
@@ -133,3 +136,51 @@
     os.chdir(curdir)
     # clean up
     shutil.rmtree(tmpdir)
+
+attr_args ={ "pan"             : [( (0.1, 0.1), {} )],
+             "pan_rel"         : [( (0.1, 0.1), {} )],
+             "set_axes_unit"   : [( "kpc", {} ),
+                                  ( "Mpc", {} ),
+                                  ( ("kpc", "kpc"), {} ),
+                                  ( ("kpc", "Mpc"), {} )],
+             "set_buff_size"   : [( 1600, {} ),
+                                  ( (600, 800), {} )],
+             "set_center"      : [( (0.4, 0.3), {} ),
+                                  ( (12, 15), {'unit' : 'kpc'} )],
+             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
+                                  ( ('Density', 'kamae'), {} )],
+             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
+                                      'weight':'bold', 'size':24}), {} )],
+             "set_log"         : [( ('Density', False), {} )],
+             "set_window_size" : [( (7.0), {} )],
+             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
+                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
+             "zoom" : [( (10), {} )] }
+
+m7 = "DD0010/moving7_0010"
+g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+@requires_pf(m7)
+@requires_pf(g30)
+@requires_pf(wt)
+def test_attributes():
+    """Test plot member functions that aren't callbacks"""
+    plot_field = 'Density'
+    decimals = 3
+
+    pf = data_dir_load(m7)
+    for ax in 'xyz':
+        for attr_name in arrr_args.keys():
+            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                          attr_args[attr_name], decimals)
+    pf = data_dir_load(g30)
+    for ax in 'xyz':
+        for attr_name in arrr_args.keys():
+            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                          attr_args[attr_name], decimals)
+
+    pf = data_dir_load(wt)
+    ax = 'z'
+    for attr_name in attr_args.keys():
+        yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                      attr_args[attr_name], decimals)


https://bitbucket.org/yt_analysis/yt-3.0/commits/bbabd1d3530e/
Changeset:   bbabd1d3530e
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 07:15:31
Summary:     Fixes to get this working.  This closes #505.
Affected #:  2 files

diff -r b08d7640f4fb8ebfa4870ab7c9fe9d1f9bc15fe6 -r bbabd1d3530e6a6df7ea64c23427e1700fa6c6aa yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -305,14 +305,14 @@
         obj = cls(*obj_type[1])
         return obj
 
-    def create_plot(self, pf, plot_type, plot_args, plot_kwargs = None):
+    def create_plot(self, pf, plot_type, plot_field, plot_axis, plot_kwargs = None):
         # plot_type should be a string
         # plot_args should be a tuple
         # plot_kwargs should be a dict
-        if obj_type is None:
+        if plot_type is None:
             raise RuntimeError('Must explicitly request a plot type')
         cls = getattr(pw, plot_type)
-        plot = cls(*plot_args, **plot_kwargs)
+        plot = cls(*(pf, plot_axis, plot_field), **plot_kwargs)
         return plot
 
     @property
@@ -561,20 +561,23 @@
 
 class PlotWindowAttributeTest(AnswerTestingTest):
     _type_name = "PlotWindowAttribute"
-    _attrs = ('plot_args', 'attr_name', 'attr_args')
+    _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
     def __init__(self, pf_fn, plot_field, plot_axis, attr_name, attr_args,
-                 decimals):
+                 decimals, plot_type = 'SlicePlot'):
         super(PlotWindowAttributeTest, self).__init__(pf_fn)
-        self.plot_args = plot_args
+        self.plot_type = plot_type
+        self.plot_field = plot_field
+        self.plot_axis = plot_axis
+        self.plot_kwargs = {}
         self.attr_name = attr_name
         self.attr_args = attr_args
         self.decimals = decimals
 
     def run(self):
-        plot = self.create_plot(self.pf, self.plot_type,
-                                self.plot_args, self.plot_kwargs)
+        plot = self.create_plot(self.pf, self.plot_type, self.plot_field,
+                                self.plot_axis, self.plot_kwargs)
         attr = getattr(plot, self.attr_name)
-        attr(*self.attr_args)
+        attr(*self.attr_args[0], **self.attr_args[1])
         return plot
 
     def compare(self, new_result, old_result):

diff -r b08d7640f4fb8ebfa4870ab7c9fe9d1f9bc15fe6 -r bbabd1d3530e6a6df7ea64c23427e1700fa6c6aa yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -28,8 +28,7 @@
 from yt.testing import \
     fake_random_pf, assert_equal, assert_rel_equal
 from yt.utilities.answer_testing.framework import \
-    requires_pf, \
-    data_dir_load
+    requires_pf, data_dir_load, PlotWindowAttributeTest
 from yt.visualization.api import \
     SlicePlot, ProjectionPlot, OffAxisSlicePlot, OffAxisProjectionPlot
 
@@ -66,6 +65,47 @@
 
     return image_type == os.path.splitext(fname)[1]
 
+attr_args ={ "pan"             : [( ((0.1, 0.1),), {} )],
+             "pan_rel"         : [( ((0.1, 0.1),), {} )],
+             "set_axes_unit"   : [( ("kpc",), {} ),
+                                  ( ("Mpc",), {} ),
+                                  ( (("kpc", "kpc"),), {} ),
+                                  ( (("kpc", "Mpc"),), {} )],
+             "set_buff_size"   : [( (1600,), {} ),
+                                  ( ((600, 800),), {} )],
+             "set_center"      : [( ((0.4, 0.3),), {} )],
+             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
+                                  ( ('Density', 'kamae'), {} )],
+             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
+                                      'weight':'bold', 'size':24},), {} )],
+             "set_log"         : [( ('Density', False), {} )],
+             "set_window_size" : [( (7.0,), {} )],
+             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
+                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
+             "zoom" : [( (10,), {} )] }
+
+m7 = "DD0010/moving7_0010"
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+@requires_pf(m7)
+@requires_pf(wt)
+def test_attributes():
+    """Test plot member functions that aren't callbacks"""
+    plot_field = 'Density'
+    decimals = 3
+
+    pf = data_dir_load(m7)
+    for ax in 'xyz':
+        for attr_name in attr_args.keys():
+            for args in attr_args[attr_name]:
+                yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                              args, decimals)
+    pf = data_dir_load(wt)
+    ax = 'z'
+    for attr_name in attr_args.keys():
+        for args in attr_args[attr_name]:
+            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                          args, decimals)
+
 def test_setwidth():
     pf = fake_random_pf(64)
 
@@ -136,51 +176,3 @@
     os.chdir(curdir)
     # clean up
     shutil.rmtree(tmpdir)
-
-attr_args ={ "pan"             : [( (0.1, 0.1), {} )],
-             "pan_rel"         : [( (0.1, 0.1), {} )],
-             "set_axes_unit"   : [( "kpc", {} ),
-                                  ( "Mpc", {} ),
-                                  ( ("kpc", "kpc"), {} ),
-                                  ( ("kpc", "Mpc"), {} )],
-             "set_buff_size"   : [( 1600, {} ),
-                                  ( (600, 800), {} )],
-             "set_center"      : [( (0.4, 0.3), {} ),
-                                  ( (12, 15), {'unit' : 'kpc'} )],
-             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
-                                  ( ('Density', 'kamae'), {} )],
-             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
-                                      'weight':'bold', 'size':24}), {} )],
-             "set_log"         : [( ('Density', False), {} )],
-             "set_window_size" : [( (7.0), {} )],
-             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
-                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
-             "zoom" : [( (10), {} )] }
-
-m7 = "DD0010/moving7_0010"
-g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"
-wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
-@requires_pf(m7)
-@requires_pf(g30)
-@requires_pf(wt)
-def test_attributes():
-    """Test plot member functions that aren't callbacks"""
-    plot_field = 'Density'
-    decimals = 3
-
-    pf = data_dir_load(m7)
-    for ax in 'xyz':
-        for attr_name in arrr_args.keys():
-            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
-                                          attr_args[attr_name], decimals)
-    pf = data_dir_load(g30)
-    for ax in 'xyz':
-        for attr_name in arrr_args.keys():
-            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
-                                          attr_args[attr_name], decimals)
-
-    pf = data_dir_load(wt)
-    ax = 'z'
-    for attr_name in attr_args.keys():
-        yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
-                                      attr_args[attr_name], decimals)
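
The rewritten attr_args table wraps every positional-argument set in an extra tuple, because each entry is now an (args, kwargs) pair consumed as attr(*args, **kwargs). A sketch of why the extra comma matters, with a hypothetical stand-in for PlotWindow.pan:

    def pan(deltas):            # expects a single (dx, dy) tuple
        dx, dy = deltas

    args, kwargs = ( ((0.1, 0.1),), {} )   # new style: splats to pan((0.1, 0.1))
    pan(*args, **kwargs)
    # old style, ( (0.1, 0.1), {} ), would splat to pan(0.1, 0.1):
    # TypeError: pan() takes exactly 1 argument (2 given)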


https://bitbucket.org/yt_analysis/yt-3.0/commits/432887371718/
Changeset:   432887371718
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 07:21:35
Summary:     Forgot to uncomment the line that deletes the images.
Affected #:  1 file

diff -r bbabd1d3530e6a6df7ea64c23427e1700fa6c6aa -r 432887371718265ba76bb16684d9723ebaed907f yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -585,7 +585,7 @@
         fns.append(old_result.save('old'))
         fns.append(new_result.save('new'))
         compare_images(fns[0], fns[1], 10**(-self.decimals))
-        #for fn in fns: os.remove(fn)
+        for fn in fns: os.remove(fn)
 
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):


https://bitbucket.org/yt_analysis/yt-3.0/commits/5141c3634e1b/
Changeset:   5141c3634e1b
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 19:41:12
Summary:     No longer assuming that 2D sims are in the XY plane.
Affected #:  1 file

diff -r 432887371718265ba76bb16684d9723ebaed907f -r 5141c3634e1b0be3905cba873129af4d6a61292b yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -309,10 +309,10 @@
         return
     print "".join(file(date_file, 'r').readlines())
     print "To update all dependencies, run \"yt update --all\"."
-    
+
 def _update_yt_stack(path):
     "Rerun the install script to updated all dependencies."
-    
+
     install_script = os.path.join(path, "doc/install_script.sh")
     if not os.path.exists(install_script):
         print
@@ -1293,7 +1293,8 @@
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = np.array(center)
         if pf.dimensionality < 3:
-            axes = [2]
+            dummy_dimensions = np.nonzero(pf.h.grids[0].ActiveDimensions)
+            axes = ensure_list(dummy_dimensions[0][0])
         elif args.axis == 4:
             axes = range(3)
         else:


https://bitbucket.org/yt_analysis/yt-3.0/commits/300e35be7b95/
Changeset:   300e35be7b95
Branch:      yt
User:        ngoldbaum
Date:        2013-03-28 20:20:01
Summary:     No longer pickling the plot object since that is harder than expected.
We now read in the pngs saved by the plot object, pickle the numpy
array we get back, write them to disk again, and then use matplotlib
to compare.  A bit more roundabout but it seems to work.
Affected #:  2 files

diff -r 5141c3634e1b0be3905cba873129af4d6a61292b -r 300e35be7b95e8524685f453a1ce1fd0cbee4452 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,6 +37,7 @@
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from matplotlib.testing.compare import compare_images
+import matplotlib.image as mpimg
 import yt.visualization.plot_window as pw
 import cPickle
 import shelve
@@ -68,6 +69,9 @@
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
+        parser.add_option("--local-dir", dest="output_dir",
+                          default=None, metavar='str',
+                          help="The name of the directory to store local results")
 
     @property
     def my_version(self, version=None):
@@ -578,12 +582,15 @@
                                 self.plot_axis, self.plot_kwargs)
         attr = getattr(plot, self.attr_name)
         attr(*self.attr_args[0], **self.attr_args[1])
-        return plot
+        fn = plot.save()[0]
+        image = mpimg.imread(fn)
+        os.remove(fn)
+        return image
 
     def compare(self, new_result, old_result):
-        fns = []
-        fns.append(old_result.save('old'))
-        fns.append(new_result.save('new'))
+        fns = ['old.png', 'new.png']
+        mpimg.imsave(fns[0], old_result)
+        mpimg.imsave(fns[1], new_result)
         compare_images(fns[0], fns[1], 10**(-self.decimals))
         for fn in fns: os.remove(fn)
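
Rather than pickling a live plot object, the test now round-trips through PNG pixel data, which the answer store can hold as a plain array. A sketch of the flow (assuming plot is a PlotWindow instance and old_image came from the gold answer store):

    import os
    import matplotlib.image as mpimg
    from matplotlib.testing.compare import compare_images

    fn = plot.save()[0]             # write the plot; save() returns filenames
    new_image = mpimg.imread(fn)    # pixel array -- this is what gets stored
    os.remove(fn)

    mpimg.imsave('new.png', new_image)   # at compare time, write both back out
    mpimg.imsave('old.png', old_image)
    compare_images('old.png', 'new.png', 10 ** -3)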
 

diff -r 5141c3634e1b0be3905cba873129af4d6a61292b -r 300e35be7b95e8524685f453a1ce1fd0cbee4452 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -131,7 +131,7 @@
             self._type_name = "CuttingPlane"
         else:
             self._type_name = viewer._plot_type
- 
+
 class FieldTransform(object):
     def __init__(self, name, func, locator):
         self.name = name
@@ -163,7 +163,7 @@
             # for an off-axis data object.
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
-    elif iterable(width): 
+    elif iterable(width):
         if isinstance(width[1], str):
             width = (width, width)
         elif isinstance(width[1], (long, int, float)):
@@ -194,9 +194,9 @@
     width = StandardWidth(axis, width, None, pf)
     center = StandardCenter(center, pf)
     units = (width[0][1], width[1][1])
-    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
-              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
-              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2,
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2,
               center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
     return (bounds, center, units)
 
@@ -211,14 +211,14 @@
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-    
+
         units = (width[0][1], width[1][1])
-        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2,
                   -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
         units = (width[0][1], width[1][1], width[2][1])
-        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
-                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2,
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2,
                   -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
     return (bounds, center, units)
 
@@ -226,11 +226,11 @@
     r"""
     A plotting mechanism based around the concept of a window into a
     data source. It can have arbitrary fields, each of which will be
-    centered on the same viewpoint, but will have individual zlimits. 
-    
+    centered on the same viewpoint, but will have individual zlimits.
+
     The data and plot are updated separately, and each can be
     invalidated as the object is modified.
-    
+
     Data is handled by a FixedResolutionBuffer object.
 
     Parameters
@@ -258,11 +258,11 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False, window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
-            ts = self._initialize_dataset(self.pf) 
+            ts = self._initialize_dataset(self.pf)
             self.ts = ts
         self._initfinished = False
         self.center = None
@@ -276,7 +276,7 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center)) 
+            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -329,7 +329,7 @@
         else:
             for key in old_fields: self._frb[key]
         self._data_valid = True
-        
+
     def _setup_plots(self):
         pass
 
@@ -363,12 +363,12 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
-                    
+
 
     @invalidate_data
     def pan(self, deltas):
         r"""Pan the image by specifying absolute code unit coordinate deltas.
-        
+
         Parameters
         ----------
         deltas : sequence of floats
@@ -381,7 +381,7 @@
     @invalidate_data
     def pan_rel(self, deltas):
         r"""Pan the image by specifying relative deltas, to the FOV.
-        
+
         Parameters
         ----------
         deltas : sequence of floats
@@ -396,7 +396,7 @@
     def set_window(self, bounds):
         """Set the bounds of the plot window.
         This is normally only called internally, see set_width.
-        
+
 
         Parameters
         ----------
@@ -427,28 +427,28 @@
         Parameters
         ----------
         width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
-             Width can have four different formats to support windows with variable 
+             Width can have four different formats to support windows with variable
              x and y widths.  They are:
-             
+
              ==================================     =======================
-             format                                 example                
+             format                                 example
              ==================================     =======================
              (float, string)                        (10,'kpc')
              ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
              float                                  0.2
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
-             
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
-             in code units.  If units are provided the resulting plot axis labels will  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.  If units are provided the resulting plot axis labels will
              use the supplied units.
         unit : str
              the unit the width has been specified in.
-             defaults to code units.  If width is a tuple this 
+             defaults to code units.  If width is a tuple this
              argument is ignored
 
         """
@@ -463,8 +463,8 @@
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
-        centery = (self.ylim[1] + self.ylim[0])/2. 
-        
+        centery = (self.ylim[1] + self.ylim[0])/2.
+
         units = (width[0][1], width[1][1])
 
         if set_axes_unit:
@@ -476,7 +476,7 @@
                      centerx + width[0][0]/self.pf[units[0]]/2.)
         self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
                      centery + width[1][0]/self.pf[units[1]]/2.)
-        
+
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
             mw = max([width[0][0], width[1][0]])
@@ -491,7 +491,7 @@
         ----------
         new_center : two element sequence of floats
             The coordinates of the new center of the image.
-            If the unit keyword is not specified, the 
+            If the unit keyword is not specified, the
             coordinates are assumed to be in code units
 
         unit : string
@@ -529,7 +529,7 @@
             self.buff_size = size
         else:
             self.buff_size = (size, size)
-            
+
     @invalidate_plot
     def set_window_size(self, size):
         """Sets a new window size for the plot
@@ -570,7 +570,7 @@
     @invalidate_plot
     def set_log(self, field, log):
         """set a field to log or linear.
-        
+
         Parameters
         ----------
         field : string
@@ -591,7 +591,7 @@
 
     @invalidate_plot
     def set_transform(self, field, name):
-        if name not in field_transforms: 
+        if name not in field_transforms:
             raise KeyError(name)
         self._field_transform[field] = field_transforms[name]
 
@@ -675,7 +675,7 @@
                 ignored += ['VelocityCallback','MagFieldCallback',
                             'QuiverCallback','CuttingQuiverCallback',
                             'StreamlineCallback']
-            if key in ignored: 
+            if key in ignored:
                 continue
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
@@ -693,7 +693,7 @@
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
-            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            If unit_name is '1', 'u', or 'unitary', it will not display the
             units, and only show the axes name. If unit_name is a tuple, the first
             element is assumed to be the unit for the x axis and the second element
             the unit for the y axis.
@@ -720,12 +720,12 @@
             for un in unit_name:
                 try:
                     self.pf[un]
-                except KeyError: 
+                except KeyError:
                     raise YTUnitNotRecognized(un)
         self._axes_unit_names = unit_name
 
 class PWViewerMPL(PWViewer):
-    """Viewer using matplotlib as a backend via the WindowPlotMPL. 
+    """Viewer using matplotlib as a backend via the WindowPlotMPL.
 
     """
     _current_field = None
@@ -741,7 +741,7 @@
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
         PWViewer.__init__(self, *args, **kwargs)
-        
+
     def _setup_origin(self):
         origin = self.origin
         axis_index = self.data_source.axis
@@ -828,7 +828,7 @@
                 zlim = (None, None)
 
             plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
-            
+
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             cbar_frac = 0.0
@@ -839,13 +839,13 @@
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
-            
+
             image = self._frb[f]
 
             fp = self._font_properties
 
-            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name, 
-                                          self._colormaps[f], extent, aspect, 
+            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name,
+                                          self._colormaps[f], extent, aspect,
                                           zlim, size, fp.get_size())
 
             self.plots[f].cb = self.plots[f].figure.colorbar(
@@ -855,7 +855,7 @@
             for i, un in enumerate((unit_x, unit_y)):
                 if un not in ['1', 'u', 'unitary']:
                     axes_unit_labels[i] = '\/\/('+un+')'
-                    
+
             if self.oblique:
                 labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
                           r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
@@ -866,7 +866,7 @@
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
-            for label in (self.plots[f].axes.get_xticklabels() + 
+            for label in (self.plots[f].axes.get_xticklabels() +
                           self.plots[f].axes.get_yticklabels()):
                 label.set_fontproperties(fp)
 
@@ -877,7 +877,7 @@
                 parser.parse(colorbar_label)
             except ParseFatalException, err:
                 raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err))
-                
+
             self.plots[f].cb.set_label(colorbar_label, fontproperties=fp)
 
             for label in self.plots[f].cb.ax.get_yticklabels():
@@ -902,17 +902,17 @@
     def set_font(self, font_dict=None):
         """set the font and font properties
 
-        Parameters 
-        ---------- 
-        font_dict : dict 
-        A dict of keyword parameters to be passed to 
-        matplotlib.font_manager.FontProperties.  See the matplotlib font 
+        Parameters
+        ----------
+        font_dict : dict
+        A dict of keyword parameters to be passed to
+        matplotlib.font_manager.FontProperties.  See the matplotlib font
         manager documentation for more details.
         http://matplotlib.org/api/font_manager_api.html
 
         Notes
         -----
-        Mathtext axis labels will only obey the `size` keyword. 
+        Mathtext axis labels will only obey the `size` keyword.
 
         Examples
         --------
@@ -921,7 +921,7 @@
         >>> slc = SlicePlot(pf, 'x', 'Density')
         >>> slc.set_font({'family':'sans-serif', 'style':'italic',
                           'weight':'bold', 'size':24})
-        
+
         """
         if font_dict is None:
             font_dict = {}
@@ -964,11 +964,11 @@
         Parameters
         ----------
         name : string
-           the base of the filename.  If not set the filename of 
+           the base of the filename.  If not set the filename of
            the parameter file is used
         mpl_kwargs : dict
            A dict of keyword arguments to be passed to matplotlib.
-           
+
         >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
@@ -1001,10 +1001,10 @@
 
     def _send_zmq(self):
         try:
-            # pre-IPython v0.14        
+            # pre-IPython v0.14
             from IPython.zmq.pylab.backend_inline import send_figure as display
         except ImportError:
-            # IPython v0.14+ 
+            # IPython v0.14+
             from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
@@ -1035,14 +1035,14 @@
 
 class SlicePlot(PWViewerMPL):
     r"""Creates a slice plot from a parameter file
-    
+
     Given a pf object, an axis to slice along, and a field name
     string, this will return a PWViewerMPL object containing
     the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
-    
+
     Parameters
     ----------
     pf : `StaticOutput`
@@ -1060,35 +1060,35 @@
          the middle of the domain.  If set to 'max', will be at the point
          of highest density.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
          in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     axes_unit : A string
-         The name of the unit for the tick labels on the x and y axes.  
+         The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
-         If axes_unit is '1', 'u', or 'unitary', it will not display the 
+         If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
     origin : string or length 1, 2, or 3 sequence of strings
-         The location of the origin of the plot coordinate system.  This is 
+         The location of the origin of the plot coordinate system.  This is
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
-         The second index is the x-location, given as 'left', 'right', or 
+         The second index is the x-location, given as 'left', 'right', or
          'center'.  Finally, whether the origin is applied in 'domain' space,
          plot 'window' space or 'native' simulation coordinate system is given.
          For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
@@ -1099,7 +1099,7 @@
          or 'center-window' for the center of the plot window. Further examples:
 
          ==================================     ============================
-         format                                 example                
+         format                                 example
          ==================================     ============================
          '{space}'                              'domain'
          '{xloc}-{space}'                       'left-window'
@@ -1114,16 +1114,16 @@
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
          A dictionary of field parameters that can be accessed by derived fields.
-         
+
     Examples
     --------
-    
+
     This will save an image to the file 'sliceplot_Density'.
-    
+
     >>> pf = load('galaxy0030/galaxy0030')
     >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
     >>> p.save('sliceplot')
-    
+
     """
     _plot_type = 'Slice'
     _frb_generator = FixedResolutionBuffer
@@ -1131,7 +1131,7 @@
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window', fontsize=18, field_parameters=None):
         # this will handle time series data and controllers
-        ts = self._initialize_dataset(pf) 
+        ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
@@ -1145,14 +1145,14 @@
 
 class ProjectionPlot(PWViewerMPL):
     r"""Creates a projection plot from a parameter file
-    
+
     Given a pf object, an axis to project along, and a field name
     string, this will return a PWViewerMPL object containing
     the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
-    
+
     Parameters
     ----------
     pf : `StaticOutput`
@@ -1170,35 +1170,35 @@
          the middle of the domain.  If set to 'max', will be at the point
          of highest density.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
-         in code units.  If units are provided the resulting plot axis labels will 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+         in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     axes_unit : A string
-         The name of the unit for the tick labels on the x and y axes.  
+         The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
-         If axes_unit is '1', 'u', or 'unitary', it will not display the 
+         If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
     origin : string or length 1, 2, or 3 sequence of strings
-         The location of the origin of the plot coordinate system.  This is 
+         The location of the origin of the plot coordinate system.  This is
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
-         The second index is the x-location, given as 'left', 'right', or 
+         The second index is the x-location, given as 'left', 'right', or
          'center'.  Finally, whether the origin is applied in 'domain' space,
          plot 'window' space or 'native' simulation coordinate system is given.
          For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
@@ -1210,7 +1210,7 @@
 
          ==================================     ============================
          format                                 example
-         ==================================     ============================ 
+         ==================================     ============================
          '{space}'                              'domain'
          '{xloc}-{space}'                       'left-window'
          '{yloc}-{space}'                       'upper-domain'
@@ -1220,7 +1220,7 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
-         
+
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1232,21 +1232,21 @@
 
     Examples
     --------
-    
+
     This is a very simple way of creating a projection plot.
-    
+
     >>> pf = load('galaxy0030/galaxy0030')
     >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
     >>> p.save('sliceplot')
-    
+
     """
     _plot_type = 'Projection'
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
+                 weight_field=None, max_level=None, origin='center-window', fontsize=18,
                  field_parameters=None):
-        ts = self._initialize_dataset(pf) 
+        ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
@@ -1265,7 +1265,7 @@
     Given a pf object, a normal vector defining a slicing plane, and
     a field name string, this will return a PWViewerMPL object
     containing the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
 
@@ -1288,9 +1288,9 @@
         the unit: (width, 'unit').  If set to a float, code units
         are assumed
     axes_unit : A string
-        The name of the unit for the tick labels on the x and y axes.  
+        The name of the unit for the tick labels on the x and y axes.
         Defaults to None, which automatically picks an appropriate unit.
-        If axes_unit is '1', 'u', or 'unitary', it will not display the 
+        If axes_unit is '1', 'u', or 'unitary', it will not display the
         units, and only show the axes name.
     north-vector : a sequence of floats
         A vector defining the 'up' direction in the plot.  This
@@ -1305,7 +1305,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
@@ -1323,9 +1323,9 @@
     _type_name = 'proj'
     proj_style = 'integrate'
     _key_fields = []
-    def __init__(self, center, pf, normal_vector, width, fields, 
-                 interpolated, resolution = (800,800), weight=None,  
-                 volume=None, no_ghost=False, le=None, re=None, 
+    def __init__(self, center, pf, normal_vector, width, fields,
+                 interpolated, resolution = (800,800), weight=None,
+                 volume=None, no_ghost=False, le=None, re=None,
                  north_vector=None):
         self.center = center
         self.pf = pf
@@ -1348,7 +1348,7 @@
     Given a pf object, a normal vector to project along, and
     a field name string, this will return a PWViewerMPL object
     containing the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
 
@@ -1367,23 +1367,23 @@
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
          in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     depth : A tuple or a float
@@ -1395,9 +1395,9 @@
     max_level: int
         The maximum level to project to.
     axes_unit : A string
-        The name of the unit for the tick labels on the x and y axes.  
+        The name of the unit for the tick labels on the x and y axes.
         Defaults to None, which automatically picks an appropriate unit.
-        If axes_unit is '1', 'u', or 'unitary', it will not display the 
+        If axes_unit is '1', 'u', or 'unitary', it will not display the
         units, and only show the axes name.
    north_vector : a sequence of floats
         A vector defining the 'up' direction in the plot.  This
@@ -1410,9 +1410,9 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=None, 
-                 depth=(1, '1'), axes_unit=None, weight_field=None, 
-                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+    def __init__(self, pf, normal, fields, center='c', width=None,
+                 depth=(1, '1'), axes_unit=None, weight_field=None,
+                 max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
@@ -1509,7 +1509,7 @@
         return img
 
     def _apply_contours(self, ax, vi, vj):
-        if self._contour_info is None: return 
+        if self._contour_info is None: return
         plot_args = {}
         field, number, colors, logit = self._contour_info
         if colors is not None: plot_args['colors'] = colors
@@ -1525,9 +1525,9 @@
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
-        
+
     def _apply_vectors(self, ax, vi, vj):
-        if self._vector_info is None: return 
+        if self._vector_info is None: return
         skip, scale = self._vector_info
 
         nx = self._frb.buff_size[0]/skip
@@ -1548,7 +1548,7 @@
         py /= nn
         print scale, px.min(), px.max(), py.min(), py.max()
         ax.quiver(x, y, px, py, scale=float(vi)/skip)
-        
+
     def get_ticks(self, field, height = 400):
         # This will eventually change to work with non-logged fields
         ticks = []
@@ -1595,7 +1595,7 @@
         pf = self.pf
         if ds._type_name in ("slice", "cutting"):
             units = pf.field_info[field].get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
+        elif ds._type_name == "proj" and (ds.weight_field is not None or
                                         ds.proj_style == "mip"):
             units = pf.field_info[field].get_units()
         elif ds._type_name == "proj":
@@ -1632,13 +1632,13 @@
                 pf = self.pf,
                 x_width = x_width*self.pf[unit[0]],
                 y_width = y_width*self.pf[unit[1]],
-                axes_unit_names = unit[0], colorbar_unit = units, 
+                axes_unit_names = unit[0], colorbar_unit = units,
                 mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
                       x_width = x_width*self.pf[unit[0]],
                       y_width = y_width*self.pf[unit[1]],
-                      axes_unit_names = unit, colorbar_unit = units, 
+                      axes_unit_names = unit, colorbar_unit = units,
                       mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
@@ -1684,15 +1684,15 @@
         # add room for a colorbar
         cbar_inches = fontscale*0.7
         newsize = [size[0] + cbar_inches, size[1]]
-        
+
         # add buffers for text, and a bit of whitespace on top
         text_buffx = fontscale * 1.0/(newsize[0])
         text_bottomy = fontscale * 0.7/size[1]
         text_topy = fontscale * 0.3/size[1]
 
         # calculate how much room the colorbar takes
-        cbar_frac = cbar_inches/newsize[0] 
-        
+        cbar_frac = cbar_inches/newsize[0]
+
         # Calculate y fraction, then use to make x fraction.
         yfrac = 1.0-text_bottomy-text_topy
         ysize = yfrac*size[1]


https://bitbucket.org/yt_analysis/yt-3.0/commits/cd985cbf5a53/
Changeset:   cd985cbf5a53
Branch:      yt
User:        ngoldbaum
Date:        2013-03-29 19:20:19
Summary:     Bumping up to ytgold0007
Affected #:  1 file
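
The knob changed below lives in ytcfg, so it can also be inspected or
pinned at runtime; a sketch (the section and option names come straight
from yt/config.py):

    from yt.config import ytcfg

    # The answer tests compare against this name unless overridden.
    print ytcfg.get("yt", "gold_standard_filename")   # 'gold007' after this change

    # Pin an older standard for a single session, e.g. to bisect a regression.
    ytcfg.set("yt", "gold_standard_filename", "gold006")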

diff -r 300e35be7b95e8524685f453a1ce1fd0cbee4452 -r cd985cbf5a53070d241dbd2a730fd9ca06a9ba81 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )


https://bitbucket.org/yt_analysis/yt-3.0/commits/f31afd69216f/
Changeset:   f31afd69216f
Branch:      yt
User:        ngoldbaum
Date:        2013-03-30 11:44:24
Summary:     Making gas sloshing a big data test and removing the galaxy cluster test.
The filename in the test doesn't correspond to the dataset on yt-p.org/data,
so I don't think these tests were ever run by anyone.  The dataset on the
website is huge, so I don't think it should be part of the test suite.
Affected #:  1 file
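
The practical effect: requires_pf turns a test into a no-op unless the
suite is run with --answer-big-data.  A sketch of how a test opts in
(the imports are my best guess at the module layout; the dataset path
is the one from the diff):

    from yt.testing import assert_equal
    from yt.utilities.answer_testing.framework import \
        requires_pf, data_dir_load

    sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"

    @requires_pf(sloshing, big_data=True)  # skipped unless --answer-big-data
    def test_sloshing():
        pf = data_dir_load(sloshing)
        yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"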

diff -r cd985cbf5a53070d241dbd2a730fd9ca06a9ba81 -r f31afd69216f34604bb6749f1e126735411693e3 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -34,7 +34,7 @@
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-@requires_pf(sloshing)
+@requires_pf(sloshing, big_data=True)
 def test_sloshing():
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
@@ -50,11 +50,3 @@
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
         yield test
-
-gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
-@requires_pf(gcm, big_data=True)
-def test_galaxy_cluster_merger():
-    pf = data_dir_load(gcm)
-    for test in big_patch_amr(gcm, _fields):
-        yield test
-


https://bitbucket.org/yt_analysis/yt-3.0/commits/30d55ce051fc/
Changeset:   30d55ce051fc
Branch:      yt
User:        ngoldbaum
Date:        2013-03-30 11:45:16
Summary:     Fixing up the plot window answer tests.  Adding a progress bar to
the S3 cloud upload.
Affected #:  1 file
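
Condensed, the upload-with-progress pattern added below (bucket and key
names are illustrative; boto calls cb(bytes_sent, total) periodically
during the transfer):

    import sys
    import boto
    from boto.s3.key import Key
    import yt.utilities.progressbar as progressbar

    def upload_with_progress(bucket_name, key_name, payload):
        k = Key(boto.connect_s3().get_bucket(bucket_name))
        k.key = key_name
        pbar = progressbar.ProgressBar(
            widgets=[key_name, ' ', progressbar.Bar(), ' ',
                     progressbar.Percentage()],
            maxval=sys.getsizeof(payload))
        pbar.start()
        # num_cb is an upper bound on how many times the callback fires.
        k.set_contents_from_string(payload,
                                   cb=lambda cur, tot: pbar.update(cur),
                                   num_cb=100000)
        k.set_acl("public-read")
        pbar.finish()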

diff -r f31afd69216f34604bb6749f1e126735411693e3 -r 30d55ce051fca25b61a9542dbaf74283ffb74b2e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -30,20 +30,22 @@
 import urllib2
 import cPickle
 import sys
+import cPickle
+import shelve
+import zlib
 
+from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
-from matplotlib.testing.compare import compare_images
+from yt.utilities.logger import disable_stream_logging
+from yt.utilities.command_line import get_yt_version
+
 import matplotlib.image as mpimg
 import yt.visualization.plot_window as pw
-import cPickle
-import shelve
-
-from yt.utilities.logger import disable_stream_logging
-from yt.utilities.command_line import get_yt_version
+import yt.utilities.progressbar as progressbar
 
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
@@ -70,7 +72,7 @@
             default=False, help="Should we run against big data, too?",
             action="store_true")
         parser.add_option("--local-dir", dest="output_dir",
-                          default=None, metavar='str',
+                          default=ytcfg.get("yt", "test_data_dir"), metavar='str',
                           help="The name of the directory to store local results")
 
     @property
@@ -191,6 +193,9 @@
         self.cache[pf_name] = rv
         return rv
 
+    def progress_callback(self, current, total):
+        self.pbar.update(current)
+
     def dump(self, result_storage):
         if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
@@ -200,13 +205,26 @@
         c = boto.connect_s3()
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
-            rs = cPickle.dumps(result_storage[pf_name])
+            rs = cPickle.dumps(result_storage[pf_name],
+                               cPickle.HIGHEST_PROTOCOL)
             tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name))
             if tk is not None: tk.delete()
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
+
+            pb_widgets = [
+                unicode(k.key, errors='ignore').encode('utf-8'), ' ',
+                progressbar.FileTransferSpeed(),' <<<', progressbar.Bar(),
+                '>>> ', progressbar.Percentage(), ' ', progressbar.ETA()
+                ]
+            self.pbar = progressbar.ProgressBar(widgets=pb_widgets,
+                                                maxval=sys.getsizeof(rs))
+
+            self.pbar.start()
+            k.set_contents_from_string(rs, cb=self.progress_callback,
+                                       num_cb=100000)
             k.set_acl("public-read")
+            self.pbar.finish()
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
@@ -585,12 +603,12 @@
         fn = plot.save()[0]
         image = mpimg.imread(fn)
         os.remove(fn)
-        return image
+        return [zlib.compress(image.dumps())]
 
     def compare(self, new_result, old_result):
         fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], old_result)
-        mpimg.imsave(fns[1], new_result)
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result)))
+        mpimg.imsave(fns[1], np.loads(new_result))
         compare_images(fns[0], fns[1], 10**(-self.decimals))
         for fn in fns: os.remove(fn)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/3aee876d0aa4/
Changeset:   3aee876d0aa4
Branch:      yt
User:        ngoldbaum
Date:        2013-03-30 12:14:47
Summary:     Fixing deserialization of the plot window answer tests.
Affected #:  1 file
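
The asymmetry being fixed, in miniature: run() now returns answers as
one-element lists of compressed pickles, so compare() has to unwrap
*both* sides the same way.  A sketch:

    import zlib
    import numpy as np

    image = np.random.random((4, 4, 3))

    # What run() stores:
    stored = [zlib.compress(image.dumps())]

    # What compare() must do to either result before imsave:
    restored = np.loads(zlib.decompress(stored[0]))
    assert (restored == image).all()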

diff -r 30d55ce051fca25b61a9542dbaf74283ffb74b2e -r 3aee876d0aa4b26aa45dd6fce00a16ae7fba12ba yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -607,8 +607,8 @@
 
     def compare(self, new_result, old_result):
         fns = ['old.png', 'new.png']
-        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result)))
-        mpimg.imsave(fns[1], np.loads(new_result))
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
         compare_images(fns[0], fns[1], 10**(-self.decimals))
         for fn in fns: os.remove(fn)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/77877b7fe6b5/
Changeset:   77877b7fe6b5
Branch:      yt
User:        ngoldbaum
Date:        2013-03-30 12:57:20
Summary:     Falling back to ASCII pickle for portability.
Affected #:  1 file
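
For reference, cPickle with no protocol argument uses protocol 0, a
printable-ASCII encoding that every Python 2 unpickler understands and
that survives text-mode transports; HIGHEST_PROTOCOL (2) is a denser
binary stream.  A sketch of the trade:

    import cPickle

    payload = {'field': 'Density', 'values': [1.0, 2.0]}

    ascii_blob = cPickle.dumps(payload)            # protocol 0: portable ASCII
    binary_blob = cPickle.dumps(payload,
                                cPickle.HIGHEST_PROTOCOL)  # protocol 2: binary

    # Both round-trip locally; only the ASCII form is safe everywhere.
    assert cPickle.loads(ascii_blob) == payload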

diff -r 3aee876d0aa4b26aa45dd6fce00a16ae7fba12ba -r 77877b7fe6b50e12ecffbe3a522596f2d3bd3c77 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -205,8 +205,7 @@
         c = boto.connect_s3()
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
-            rs = cPickle.dumps(result_storage[pf_name],
-                               cPickle.HIGHEST_PROTOCOL)
+            rs = cPickle.dumps(result_storage[pf_name])
             tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name))
             if tk is not None: tk.delete()
             k = Key(bucket)


https://bitbucket.org/yt_analysis/yt-3.0/commits/7208dbbf115c/
Changeset:   7208dbbf115c
Branch:      yt
User:        ngoldbaum
Date:        2013-04-02 06:55:42
Summary:     Fixing a bug I introduced in the command line modules.
Affected #:  1 file
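
The bug in miniature: for a 2D dataset the root grid's ActiveDimensions
looks like [64, 64, 1], so np.nonzero(dims) picks an *active* axis,
while the intent was the degenerate one.  A sketch with a hypothetical
dimensions array:

    import numpy as np

    dims = np.array([64, 64, 1])       # ActiveDimensions of a 2D root grid

    print np.nonzero(dims)[0][0]       # 0 -- an active axis: the buggy pick
    print np.nonzero(dims <= 1)[0][0]  # 2 -- the flat axis: the intended fix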

diff -r 77877b7fe6b50e12ecffbe3a522596f2d3bd3c77 -r 7208dbbf115c7c9458ece1df987c70db9d7b5e57 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1293,7 +1293,7 @@
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = np.array(center)
         if pf.dimensionality < 3:
-            dummy_dimensions = np.nonzero(pf.h.grids[0].ActiveDimensions)
+            dummy_dimensions = np.nonzero(pf.h.grids[0].ActiveDimensions <= 1)
             axes = ensure_list(dummy_dimensions[0][0])
         elif args.axis == 4:
             axes = range(3)


https://bitbucket.org/yt_analysis/yt-3.0/commits/73fa10cba4d5/
Changeset:   73fa10cba4d5
Branch:      yt
User:        chummels
Date:        2013-04-03 00:52:21
Summary:     Merged in ngoldbaum/yt (pull request #470)

Plot window answer testing
Affected #:  11 files
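
With the merge in place, a frontend can exercise plot-window attributes
image-by-image.  A hypothetical usage sketch (the dataset path and
attribute choice are illustrative):

    from yt.utilities.answer_testing.framework import \
        PlotWindowAttributeTest, requires_pf

    g30 = "IsolatedGalaxy/galaxy0030/galaxy0030"  # illustrative path

    @requires_pf(g30)
    def test_plot_window_attributes():
        # attr_args is an (args, kwargs) pair applied to the named
        # attribute: here SlicePlot(pf, 0, 'Density').set_log('Density',
        # True), with images compared to 12 decimal places.
        yield PlotWindowAttributeTest(g30, 'Density', 0, 'set_log',
                                      (('Density', True), {}), 12,
                                      plot_type='SlicePlot')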

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -34,7 +34,7 @@
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-@requires_pf(sloshing)
+@requires_pf(sloshing, big_data=True)
 def test_sloshing():
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
@@ -50,11 +50,3 @@
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
         yield test
-
-gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
-@requires_pf(gcm, big_data=True)
-def test_galaxy_cluster_merger():
-    pf = data_dir_load(gcm)
-    for test in big_patch_amr(gcm, _fields):
-        yield test
-

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -30,18 +30,23 @@
 import urllib2
 import cPickle
 import sys
+import cPickle
+import shelve
+import zlib
 
+from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
 from yt.config import ytcfg
 from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
-import cPickle
-import shelve
-
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
 
+import matplotlib.image as mpimg
+import yt.visualization.plot_window as pw
+import yt.utilities.progressbar as progressbar
+
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
 
@@ -66,6 +71,9 @@
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
+        parser.add_option("--local-dir", dest="output_dir",
+                          default=ytcfg.get("yt", "test_data_dir"), metavar='str',
+                          help="The name of the directory to store local results")
 
     @property
     def my_version(self, version=None):
@@ -96,7 +104,7 @@
                 self.store_name = options.answer_name
             self.compare_name = None
         # if we're not storing, then we're comparing, and we want default
-        # comparison name to be the latest gold standard 
+        # comparison name to be the latest gold standard
         # either on network or local
         else:
             if options.answer_name is None:
@@ -117,18 +125,18 @@
             self.compare_name = None
         elif self.compare_name == "latest":
             self.compare_name = _latest
-            
-        # Local/Cloud storage 
+
+        # Local/Cloud storage
         if options.local_results:
             storage_class = AnswerTestLocalStorage
-            # Fix up filename for local storage 
+            # Fix up filename for local storage
             if self.compare_name is not None:
                 self.compare_name = "%s/%s/%s" % \
-                    (os.path.realpath(options.output_dir), self.compare_name, 
+                    (os.path.realpath(options.output_dir), self.compare_name,
                      self.compare_name)
             if self.store_name is not None:
                 name_dir_path = "%s/%s" % \
-                    (os.path.realpath(options.output_dir), 
+                    (os.path.realpath(options.output_dir),
                     self.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.makedirs(name_dir_path)
@@ -147,7 +155,7 @@
 
     def finalize(self, result=None):
         if self.store_results is False: return
-        self.storage.dump(self.result_storage)        
+        self.storage.dump(self.result_storage)
 
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
@@ -155,9 +163,9 @@
         self.answer_name = answer_name
         self.cache = {}
     def dump(self, result_storage, result):
-        raise NotImplementedError 
+        raise NotImplementedError
     def get(self, pf_name, default=None):
-        raise NotImplementedError 
+        raise NotImplementedError
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
@@ -185,6 +193,9 @@
         self.cache[pf_name] = rv
         return rv
 
+    def progress_callback(self, current, total):
+        self.pbar.update(current)
+
     def dump(self, result_storage):
         if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
@@ -195,12 +206,24 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name))
             if tk is not None: tk.delete()
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
+
+            pb_widgets = [
+                unicode(k.key, errors='ignore').encode('utf-8'), ' ',
+                progressbar.FileTransferSpeed(),' <<<', progressbar.Bar(),
+                '>>> ', progressbar.Percentage(), ' ', progressbar.ETA()
+                ]
+            self.pbar = progressbar.ProgressBar(widgets=pb_widgets,
+                                                maxval=sys.getsizeof(rs))
+
+            self.pbar.start()
+            k.set_contents_from_string(rs, cb=self.progress_callback,
+                                       num_cb=100000)
             k.set_acl("public-read")
+            self.pbar.finish()
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
@@ -277,7 +300,7 @@
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None or self.description not in dd: 
+            if dd is None or self.description not in dd:
                 raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
@@ -303,6 +326,16 @@
         obj = cls(*obj_type[1])
         return obj
 
+    def create_plot(self, pf, plot_type, plot_field, plot_axis, plot_kwargs = None):
+        # plot_type should be a string
+        # plot_args should be a tuple
+        # plot_kwargs should be a dict
+        if plot_type is None:
+            raise RuntimeError('Must explicitly request a plot type')
+        cls = getattr(pw, plot_type)
+        plot = cls(*(pf, plot_axis, plot_field), **plot_kwargs)
+        return plot
+
     @property
     def sim_center(self):
         """
@@ -335,7 +368,7 @@
         args = [self._type_name, str(self.pf), oname]
         args += [str(getattr(self, an)) for an in self._attrs]
         return "_".join(args)
-        
+
 class FieldValuesTest(AnswerTestingTest):
     _type_name = "FieldValues"
     _attrs = ("field", )
@@ -357,7 +390,7 @@
     def compare(self, new_result, old_result):
         err_msg = "Field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_allclose(new_result, old_result, 10.**(-self.decimals),
@@ -381,12 +414,12 @@
     def compare(self, new_result, old_result):
         err_msg = "All field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
                              err_msg=err_msg, verbose=True)
-            
+
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
@@ -426,7 +459,7 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_allclose(new_result[k], old_result[k], 
+                assert_allclose(new_result[k], old_result[k],
                                  10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
@@ -505,7 +538,7 @@
             assert_equal(new_result[i], old_result[i],
                          err_msg="Output times not equal.",
                          verbose=True)
-        
+
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -547,6 +580,37 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+class PlotWindowAttributeTest(AnswerTestingTest):
+    _type_name = "PlotWindowAttribute"
+    _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
+    def __init__(self, pf_fn, plot_field, plot_axis, attr_name, attr_args,
+                 decimals, plot_type = 'SlicePlot'):
+        super(PlotWindowAttributeTest, self).__init__(pf_fn)
+        self.plot_type = plot_type
+        self.plot_field = plot_field
+        self.plot_axis = plot_axis
+        self.plot_kwargs = {}
+        self.attr_name = attr_name
+        self.attr_args = attr_args
+        self.decimals = decimals
+
+    def run(self):
+        plot = self.create_plot(self.pf, self.plot_type, self.plot_field,
+                                self.plot_axis, self.plot_kwargs)
+        attr = getattr(plot, self.attr_name)
+        attr(*self.attr_args[0], **self.attr_args[1])
+        fn = plot.save()[0]
+        image = mpimg.imread(fn)
+        os.remove(fn)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        fns = ['old.png', 'new.png']
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
+        compare_images(fns[0], fns[1], 10**(-self.decimals))
+        for fn in fns: os.remove(fn)
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
@@ -602,4 +666,3 @@
 
     def __call__(self):
         self.args[0](*self.args[1:])
-

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -309,10 +309,10 @@
         return
     print "".join(file(date_file, 'r').readlines())
     print "To update all dependencies, run \"yt update --all\"."
-    
+
 def _update_yt_stack(path):
     "Rerun the install script to updated all dependencies."
-    
+
     install_script = os.path.join(path, "doc/install_script.sh")
     if not os.path.exists(install_script):
         print
@@ -1292,7 +1292,10 @@
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = np.array(center)
-        if args.axis == 4:
+        if pf.dimensionality < 3:
+            dummy_dimensions = np.nonzero(pf.h.grids[0].ActiveDimensions <= 1)
+            axes = ensure_list(dummy_dimensions[0][0])
+        elif args.axis == 4:
             axes = range(3)
         else:
             axes = [args.axis]

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -131,7 +131,7 @@
             self._type_name = "CuttingPlane"
         else:
             self._type_name = viewer._plot_type
- 
+
 class FieldTransform(object):
     def __init__(self, name, func, locator):
         self.name = name
@@ -163,7 +163,7 @@
             # for an off-axis data object.
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
-    elif iterable(width): 
+    elif iterable(width):
         if isinstance(width[1], str):
             width = (width, width)
         elif isinstance(width[1], (long, int, float)):
@@ -194,9 +194,9 @@
     width = StandardWidth(axis, width, None, pf)
     center = StandardCenter(center, pf)
     units = (width[0][1], width[1][1])
-    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
-              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
-              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2,
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2,
               center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
     return (bounds, center, units)
 
@@ -211,14 +211,14 @@
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-    
+
         units = (width[0][1], width[1][1])
-        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2,
                   -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
         units = (width[0][1], width[1][1], width[2][1])
-        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
-                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2,
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2,
                   -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
     return (bounds, center, units)
 
@@ -226,11 +226,11 @@
     r"""
     A plotting mechanism based around the concept of a window into a
     data source. It can have arbitrary fields, each of which will be
-    centered on the same viewpoint, but will have individual zlimits. 
-    
+    centered on the same viewpoint, but will have individual zlimits.
+
     The data and plot are updated separately, and each can be
     invalidated as the object is modified.
-    
+
     Data is handled by a FixedResolutionBuffer object.
 
     Parameters
@@ -258,11 +258,11 @@
     _contour_info = None
     _vector_info = None
     _frb = None
-    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True, 
+    def __init__(self, data_source, bounds, buff_size=(800,800), antialias=True,
                  periodic=True, origin='center-window', oblique=False, window_size=10.0):
         if not hasattr(self, "pf"):
             self.pf = data_source.pf
-            ts = self._initialize_dataset(self.pf) 
+            ts = self._initialize_dataset(self.pf)
             self.ts = ts
         self._initfinished = False
         self.center = None
@@ -276,7 +276,7 @@
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         if self.data_source.center is not None and oblique == False:
-            center = [self.data_source.center[i] for i in range(len(self.data_source.center)) 
+            center = [self.data_source.center[i] for i in range(len(self.data_source.center))
                       if i != self.data_source.axis]
             self.set_center(center)
         self._initfinished = True
@@ -329,7 +329,7 @@
         else:
             for key in old_fields: self._frb[key]
         self._data_valid = True
-        
+
     def _setup_plots(self):
         pass
 
@@ -363,12 +363,12 @@
         nWx, nWy = Wx/factor, Wy/factor
         self.xlim = (centerx - nWx*0.5, centerx + nWx*0.5)
         self.ylim = (centery - nWy*0.5, centery + nWy*0.5)
-                    
+
 
     @invalidate_data
     def pan(self, deltas):
         r"""Pan the image by specifying absolute code unit coordinate deltas.
-        
+
         Parameters
         ----------
         deltas : sequence of floats
@@ -381,7 +381,7 @@
     @invalidate_data
     def pan_rel(self, deltas):
         r"""Pan the image by specifying relative deltas, to the FOV.
-        
+
         Parameters
         ----------
         deltas : sequence of floats
@@ -396,7 +396,7 @@
     def set_window(self, bounds):
         """Set the bounds of the plot window.
         This is normally only called internally, see set_width.
-        
+
 
         Parameters
         ----------
@@ -427,28 +427,28 @@
         Parameters
         ----------
         width : float, array of floats, (float, unit) tuple, or array of (float, unit) tuples.
-             Width can have four different formats to support windows with variable 
+             Width can have four different formats to support windows with variable
              x and y widths.  They are:
-             
+
              ==================================     =======================
-             format                                 example                
+             format                                 example
              ==================================     =======================
              (float, string)                        (10,'kpc')
              ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
              float                                  0.2
              (float, float)                         (0.2, 0.3)
              ==================================     =======================
-             
-             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+             For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+             wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+             that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
-             in code units.  If units are provided the resulting plot axis labels will  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+             in code units.  If units are provided the resulting plot axis labels will
              use the supplied units.
         unit : str
              the unit the width has been specified in.
-             defaults to code units.  If width is a tuple this 
+             defaults to code units.  If width is a tuple this
              argument is ignored
 
         """
@@ -463,8 +463,8 @@
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
-        centery = (self.ylim[1] + self.ylim[0])/2. 
-        
+        centery = (self.ylim[1] + self.ylim[0])/2.
+
         units = (width[0][1], width[1][1])
 
         if set_axes_unit:
@@ -476,7 +476,7 @@
                      centerx + width[0][0]/self.pf[units[0]]/2.)
         self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
                      centery + width[1][0]/self.pf[units[1]]/2.)
-        
+
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
             mw = max([width[0][0], width[1][0]])
@@ -491,7 +491,7 @@
         ----------
         new_center : two element sequence of floats
             The coordinates of the new center of the image.
-            If the unit keyword is not specified, the 
+            If the unit keyword is not specified, the
             coordinates are assumed to be in code units
 
         unit : string
@@ -515,18 +515,6 @@
     def set_antialias(self,aa):
         self.antialias = aa
 
-    @invalidate_plot
-    def set_contour_info(self, field_name, n_cont = 8, colors = None,
-                         logit = True):
-        if field_name == "None" or n_cont == 0:
-            self._contour_info = None
-            return
-        self._contour_info = (field_name, n_cont, colors, logit)
-
-    @invalidate_plot
-    def set_vector_info(self, skip, scale = 1):
-        self._vector_info = (skip, scale)
-
     @invalidate_data
     def set_buff_size(self, size):
         """Sets a new buffer size for the fixed resolution buffer
@@ -541,7 +529,7 @@
             self.buff_size = size
         else:
             self.buff_size = (size, size)
-            
+
     @invalidate_plot
     def set_window_size(self, size):
         """Sets a new window size for the plot
@@ -582,7 +570,7 @@
     @invalidate_plot
     def set_log(self, field, log):
         """set a field to log or linear.
-        
+
         Parameters
         ----------
         field : string
@@ -603,7 +591,7 @@
 
     @invalidate_plot
     def set_transform(self, field, name):
-        if name not in field_transforms: 
+        if name not in field_transforms:
             raise KeyError(name)
         self._field_transform[field] = field_transforms[name]
 
@@ -687,7 +675,7 @@
                 ignored += ['VelocityCallback','MagFieldCallback',
                             'QuiverCallback','CuttingQuiverCallback',
                             'StreamlineCallback']
-            if key in ignored: 
+            if key in ignored:
                 continue
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
@@ -705,7 +693,7 @@
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
-            If unit_name is '1', 'u', or 'unitary', it will not display the 
+            If unit_name is '1', 'u', or 'unitary', it will not display the
             units, and only show the axes name. If unit_name is a tuple, the first
             element is assumed to be the unit for the x axis and the second element
             the unit for the y axis.
@@ -732,65 +720,12 @@
             for un in unit_name:
                 try:
                     self.pf[un]
-                except KeyError: 
+                except KeyError:
                     raise YTUnitNotRecognized(un)
         self._axes_unit_names = unit_name
 
-    def get_metadata(self, field, strip_mathml = True, return_string = True):
-        fval = self._frb[field]
-        mi = fval.min()
-        ma = fval.max()
-        x_width = self.xlim[1] - self.xlim[0]
-        y_width = self.ylim[1] - self.ylim[0]
-        if self._axes_unit_names is None:
-            unit = get_smallest_appropriate_unit(x_width, self.pf)
-            unit = (unit, unit)
-        else:
-            unit = self._axes_unit_names
-        units = self.get_field_units(field, strip_mathml)
-        center = getattr(self._frb.data_source, "center", None)
-        if center is None or self._frb.axis == 4:
-            xc, yc, zc = -999, -999, -999
-        else:
-            center[x_dict[self._frb.axis]] = 0.5 * (
-                self.xlim[0] + self.xlim[1])
-            center[y_dict[self._frb.axis]] = 0.5 * (
-                self.ylim[0] + self.ylim[1])
-            xc, yc, zc = center
-        if return_string:
-            md = _metadata_template % dict(
-                pf = self.pf,
-                x_width = x_width*self.pf[unit[0]],
-                y_width = y_width*self.pf[unit[1]],
-                axes_unit_names = unit[0], colorbar_unit = units, 
-                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
-        else:
-            md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit[0]],
-                      y_width = y_width*self.pf[unit[1]],
-                      axes_unit_names = unit, colorbar_unit = units, 
-                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
-        return md
-
-    def get_field_units(self, field, strip_mathml = True):
-        ds = self._frb.data_source
-        pf = self.pf
-        if ds._type_name in ("slice", "cutting"):
-            units = pf.field_info[field].get_units()
-        elif ds._type_name == "proj" and (ds.weight_field is not None or 
-                                        ds.proj_style == "mip"):
-            units = pf.field_info[field].get_units()
-        elif ds._type_name == "proj":
-            units = pf.field_info[field].get_projected_units()
-        else:
-            units = ""
-        if strip_mathml:
-            units = units.replace(r"\rm{", "").replace("}","")
-        return units
-
-
 class PWViewerMPL(PWViewer):
-    """Viewer using matplotlib as a backend via the WindowPlotMPL. 
+    """Viewer using matplotlib as a backend via the WindowPlotMPL.
 
     """
     _current_field = None
@@ -806,7 +741,7 @@
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
         PWViewer.__init__(self, *args, **kwargs)
-        
+
     def _setup_origin(self):
         origin = self.origin
         axis_index = self.data_source.axis
@@ -893,7 +828,7 @@
                 zlim = (None, None)
 
             plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
-            
+
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             cbar_frac = 0.0
@@ -904,13 +839,13 @@
 
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
-            
+
             image = self._frb[f]
 
             fp = self._font_properties
 
-            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name, 
-                                          self._colormaps[f], extent, aspect, 
+            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name,
+                                          self._colormaps[f], extent, aspect,
                                           zlim, size, fp.get_size())
 
             self.plots[f].cb = self.plots[f].figure.colorbar(
@@ -920,7 +855,7 @@
             for i, un in enumerate((unit_x, unit_y)):
                 if un not in ['1', 'u', 'unitary']:
                     axes_unit_labels[i] = '\/\/('+un+')'
-                    
+
             if self.oblique:
                 labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
                           r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
@@ -931,7 +866,7 @@
             self.plots[f].axes.set_xlabel(labels[0],fontproperties=fp)
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
-            for label in (self.plots[f].axes.get_xticklabels() + 
+            for label in (self.plots[f].axes.get_xticklabels() +
                           self.plots[f].axes.get_yticklabels()):
                 label.set_fontproperties(fp)
 
@@ -942,7 +877,7 @@
                 parser.parse(colorbar_label)
             except ParseFatalException, err:
                 raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err))
-                
+
             self.plots[f].cb.set_label(colorbar_label, fontproperties=fp)
 
             for label in self.plots[f].cb.ax.get_yticklabels():
@@ -967,17 +902,17 @@
     def set_font(self, font_dict=None):
         """set the font and font properties
 
-        Parameters 
-        ---------- 
-        font_dict : dict 
-        A dict of keyword parameters to be passed to 
-        matplotlib.font_manager.FontProperties.  See the matplotlib font 
+        Parameters
+        ----------
+        font_dict : dict
+        A dict of keyword parameters to be passed to
+        matplotlib.font_manager.FontProperties.  See the matplotlib font
         manager documentation for more details.
         http://matplotlib.org/api/font_manager_api.html
 
         Notes
         -----
-        Mathtext axis labels will only obey the `size` keyword. 
+        Mathtext axis labels will only obey the `size` keyword.
 
         Examples
         --------
@@ -986,7 +921,7 @@
         >>> slc = SlicePlot(pf, 'x', 'Density')
         >>> slc.set_font({'family':'sans-serif', 'style':'italic',
                           'weight':'bold', 'size':24})
-        
+
         """
         if font_dict is None:
             font_dict = {}
@@ -1029,11 +964,11 @@
         Parameters
         ----------
         name : string
-           the base of the filename.  If not set the filename of 
+           the base of the filename.  If not set the filename of
            the parameter file is used
         mpl_kwargs : dict
            A dict of keyword arguments to be passed to matplotlib.
-           
+
         >>> slc.save(mpl_kwargs={'bbox_inches':'tight'})
 
         """
@@ -1066,10 +1001,10 @@
 
     def _send_zmq(self):
         try:
-            # pre-IPython v0.14        
+            # pre-IPython v0.14
             from IPython.zmq.pylab.backend_inline import send_figure as display
         except ImportError:
-            # IPython v0.14+ 
+            # IPython v0.14+
             from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
             canvas = FigureCanvasAgg(v.figure)
@@ -1100,14 +1035,14 @@
 
 class SlicePlot(PWViewerMPL):
     r"""Creates a slice plot from a parameter file
-    
+
     Given a pf object, an axis to slice along, and a field name
     string, this will return a PWViewerMPL object containing
     the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
-    
+
     Parameters
     ----------
     pf : `StaticOutput`
@@ -1125,35 +1060,35 @@
          the middle of the domain.  If set to 'max', will be at the point
          of highest density.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
          in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     axes_unit : A string
-         The name of the unit for the tick labels on the x and y axes.  
+         The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
-         If axes_unit is '1', 'u', or 'unitary', it will not display the 
+         If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
     origin : string or length 1, 2, or 3 sequence of strings
-         The location of the origin of the plot coordinate system.  This is 
+         The location of the origin of the plot coordinate system.  This is
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
-         The second index is the x-location, given as 'left', 'right', or 
+         The second index is the x-location, given as 'left', 'right', or
          'center'.  Finally, whether the origin is applied in 'domain' space,
          plot 'window' space or 'native' simulation coordinate system is given.
          For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
@@ -1164,7 +1099,7 @@
          or 'center-window' for the center of the plot window. Further examples:
 
          ==================================     ============================
-         format                                 example                
+         format                                 example
          ==================================     ============================
          '{space}'                              'domain'
          '{xloc}-{space}'                       'left-window'
@@ -1179,16 +1114,16 @@
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
          A dictionary of field parameters than can be accessed by derived fields.
-         
+
     Examples
     --------
-    
+
    This will save an image to the file 'sliceplot_Density'.
-    
+
     >>> pf = load('galaxy0030/galaxy0030')
     >>> p = SlicePlot(pf,2,'Density','c',(20,'kpc'))
     >>> p.save('sliceplot')
-    
+
     """
     _plot_type = 'Slice'
     _frb_generator = FixedResolutionBuffer
@@ -1196,7 +1131,7 @@
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  origin='center-window', fontsize=18, field_parameters=None):
         # this will handle time series data and controllers
-        ts = self._initialize_dataset(pf) 
+        ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
@@ -1210,14 +1145,14 @@
 
 class ProjectionPlot(PWViewerMPL):
     r"""Creates a projection plot from a parameter file
-    
+
     Given a pf object, an axis to project along, and a field name
     string, this will return a PWViewerMPL object containing
     the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
-    
+
     Parameters
     ----------
     pf : `StaticOutput`
@@ -1235,35 +1170,35 @@
          the middle of the domain.  If set to 'max', will be at the point
          of highest density.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
-         in code units.  If units are provided the resulting plot axis labels will 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
+         in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     axes_unit : A string
-         The name of the unit for the tick labels on the x and y axes.  
+         The name of the unit for the tick labels on the x and y axes.
          Defaults to None, which automatically picks an appropriate unit.
-         If axes_unit is '1', 'u', or 'unitary', it will not display the 
+         If axes_unit is '1', 'u', or 'unitary', it will not display the
          units, and only show the axes name.
     origin : string or length 1, 2, or 3 sequence of strings
-         The location of the origin of the plot coordinate system.  This is 
+         The location of the origin of the plot coordinate system.  This is
          represented by '-' separated string or a tuple of strings.  In the
          first index the y-location is given by 'lower', 'upper', or 'center'.
-         The second index is the x-location, given as 'left', 'right', or 
+         The second index is the x-location, given as 'left', 'right', or
          'center'.  Finally, whether the origin is applied in 'domain' space,
          plot 'window' space or 'native' simulation coordinate system is given.
          For example, both 'upper-right-domain' and ['upper', 'right', 'domain']
@@ -1275,7 +1210,7 @@
 
          ==================================     ============================
          format                                 example
-         ==================================     ============================ 
+         ==================================     ============================
          '{space}'                              'domain'
          '{xloc}-{space}'                       'left-window'
          '{yloc}-{space}'                       'upper-domain'
@@ -1285,7 +1220,7 @@
          ('{yloc}', '{space}')                  ('lower', 'window')
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
-         
+
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1297,21 +1232,21 @@
 
     Examples
     --------
-    
+
     This is a very simple way of creating a projection plot.
-    
+
     >>> pf = load('galaxy0030/galaxy0030')
     >>> p = ProjectionPlot(pf,2,'Density','c',(20,'kpc'))
     >>> p.save('sliceplot')
-    
+
     """
     _plot_type = 'Projection'
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18, 
+                 weight_field=None, max_level=None, origin='center-window', fontsize=18,
                  field_parameters=None):
-        ts = self._initialize_dataset(pf) 
+        ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
@@ -1330,7 +1265,7 @@
     Given a pf object, a normal vector defining a slicing plane, and
     a field name string, this will return a PWViewerMPL object
     containing the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
 
@@ -1353,9 +1288,9 @@
         the unit: (width, 'unit').  If set to a float, code units
         are assumed
     axes_unit : A string
-        The name of the unit for the tick labels on the x and y axes.  
+        The name of the unit for the tick labels on the x and y axes.
         Defaults to None, which automatically picks an appropriate unit.
-        If axes_unit is '1', 'u', or 'unitary', it will not display the 
+        If axes_unit is '1', 'u', or 'unitary', it will not display the
         units, and only show the axes name.
     north-vector : a sequence of floats
         A vector defining the 'up' direction in the plot.  This
@@ -1370,7 +1305,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None,
                  axes_unit=None, north_vector=None, fontsize=18,
                  field_parameters=None):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
@@ -1388,9 +1323,9 @@
     _type_name = 'proj'
     proj_style = 'integrate'
     _key_fields = []
-    def __init__(self, center, pf, normal_vector, width, fields, 
-                 interpolated, resolution = (800,800), weight=None,  
-                 volume=None, no_ghost=False, le=None, re=None, 
+    def __init__(self, center, pf, normal_vector, width, fields,
+                 interpolated, resolution = (800,800), weight=None,
+                 volume=None, no_ghost=False, le=None, re=None,
                  north_vector=None):
         self.center = center
         self.pf = pf
@@ -1413,7 +1348,7 @@
     Given a pf object, a normal vector to project along, and
     a field name string, this will return a PWViewerMPL object
     containing the plot.
-    
+
     The plot can be updated using one of the many helper functions
     defined in PlotWindow.
 
@@ -1432,23 +1367,23 @@
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.
     width : tuple or a float.
-         Width can have four different formats to support windows with variable 
+         Width can have four different formats to support windows with variable
          x and y widths.  They are:
-         
+
          ==================================     =======================
-         format                                 example                
+         format                                 example
          ==================================     =======================
          (float, string)                        (10,'kpc')
          ((float, string), (float, string))     ((10,'kpc'),(15,'kpc'))
          float                                  0.2
          (float, float)                         (0.2, 0.3)
          ==================================     =======================
-         
-         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs 
-         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
-         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
+
+         For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
+         wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window
+         that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along
          the y axis.  In the other two examples, code units are assumed, for example
-         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+         (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3
          in code units.  If units are provided the resulting plot axis labels will
          use the supplied units.
     depth : A tuple or a float
@@ -1460,9 +1395,9 @@
     max_level: int
         The maximum level to project to.
     axes_unit : A string
-        The name of the unit for the tick labels on the x and y axes.  
+        The name of the unit for the tick labels on the x and y axes.
         Defaults to None, which automatically picks an appropriate unit.
-        If axes_unit is '1', 'u', or 'unitary', it will not display the 
+        If axes_unit is '1', 'u', or 'unitary', it will not display the
         units, and only show the axes name.
     north-vector : a sequence of floats
         A vector defining the 'up' direction in the plot.  This
@@ -1475,9 +1410,9 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=None, 
-                 depth=(1, '1'), axes_unit=None, weight_field=None, 
-                 max_level=None, north_vector=None, volume=None, no_ghost=False, 
+    def __init__(self, pf, normal, fields, center='c', width=None,
+                 depth=(1, '1'), axes_unit=None, weight_field=None,
+                 max_level=None, north_vector=None, volume=None, no_ghost=False,
                  le=None, re=None, interpolated=False, fontsize=18):
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
         if axes_unit is None and units != ('1', '1', '1'):
@@ -1574,7 +1509,7 @@
         return img
 
     def _apply_contours(self, ax, vi, vj):
-        if self._contour_info is None: return 
+        if self._contour_info is None: return
         plot_args = {}
         field, number, colors, logit = self._contour_info
         if colors is not None: plot_args['colors'] = colors
@@ -1590,9 +1525,9 @@
         fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         ax.contour(fvals, number, colors='w')
-        
+
     def _apply_vectors(self, ax, vi, vj):
-        if self._vector_info is None: return 
+        if self._vector_info is None: return
         skip, scale = self._vector_info
 
         nx = self._frb.buff_size[0]/skip
@@ -1613,7 +1548,7 @@
         py /= nn
         print scale, px.min(), px.max(), py.min(), py.max()
         ax.quiver(x, y, px, py, scale=float(vi)/skip)
-        
+
     def get_ticks(self, field, height = 400):
         # This will eventually change to work with non-logged fields
         ticks = []
@@ -1655,6 +1590,70 @@
         print img_x, img_y, dx, dy, new_x, new_y
         self.set_center((new_x, new_y))
 
+    def get_field_units(self, field, strip_mathml = True):
+        ds = self._frb.data_source
+        pf = self.pf
+        if ds._type_name in ("slice", "cutting"):
+            units = pf.field_info[field].get_units()
+        elif ds._type_name == "proj" and (ds.weight_field is not None or
+                                        ds.proj_style == "mip"):
+            units = pf.field_info[field].get_units()
+        elif ds._type_name == "proj":
+            units = pf.field_info[field].get_projected_units()
+        else:
+            units = ""
+        if strip_mathml:
+            units = units.replace(r"\rm{", "").replace("}","")
+        return units
+
+    def get_metadata(self, field, strip_mathml = True, return_string = True):
+        fval = self._frb[field]
+        mi = fval.min()
+        ma = fval.max()
+        x_width = self.xlim[1] - self.xlim[0]
+        y_width = self.ylim[1] - self.ylim[0]
+        if self._axes_unit_names is None:
+            unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
+        else:
+            unit = self._axes_unit_names
+        units = self.get_field_units(field, strip_mathml)
+        center = getattr(self._frb.data_source, "center", None)
+        if center is None or self._frb.axis == 4:
+            xc, yc, zc = -999, -999, -999
+        else:
+            center[x_dict[self._frb.axis]] = 0.5 * (
+                self.xlim[0] + self.xlim[1])
+            center[y_dict[self._frb.axis]] = 0.5 * (
+                self.ylim[0] + self.ylim[1])
+            xc, yc, zc = center
+        if return_string:
+            md = _metadata_template % dict(
+                pf = self.pf,
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units,
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
+        else:
+            md = dict(pf = self.pf,
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units,
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
+        return md
+
+    @invalidate_plot
+    def set_contour_info(self, field_name, n_cont = 8, colors = None,
+                         logit = True):
+        if field_name == "None" or n_cont == 0:
+            self._contour_info = None
+            return
+        self._contour_info = (field_name, n_cont, colors, logit)
+
+    @invalidate_plot
+    def set_vector_info(self, skip, scale = 1):
+        self._vector_info = (skip, scale)
+
     @invalidate_data
     def set_current_field(self, field):
         self._current_field = field
@@ -1685,15 +1684,15 @@
         # add room for a colorbar
         cbar_inches = fontscale*0.7
         newsize = [size[0] + cbar_inches, size[1]]
-        
+
         # add buffers for text, and a bit of whitespace on top
         text_buffx = fontscale * 1.0/(newsize[0])
         text_bottomy = fontscale * 0.7/size[1]
         text_topy = fontscale * 0.3/size[1]
 
         # calculate how much room the colorbar takes
-        cbar_frac = cbar_inches/newsize[0] 
-        
+        cbar_frac = cbar_inches/newsize[0]
+
         # Calculate y fraction, then use to make x fraction.
         yfrac = 1.0-text_bottomy-text_topy
         ysize = yfrac*size[1]
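
For reference, the four width formats documented in the set_width and
SlicePlot/ProjectionPlot docstrings above can be exercised as follows; a
minimal sketch, assuming the 'galaxy0030' sample dataset and 'Density'
field already used in the docstring examples:

    from yt.mods import load, SlicePlot

    pf = load('galaxy0030/galaxy0030')
    slc = SlicePlot(pf, 2, 'Density')
    slc.set_width(0.2)                         # square window, code units
    slc.set_width((0.2, 0.3))                  # x width 0.2, y width 0.3, code units
    slc.set_width((10, 'kpc'))                 # square window, 10 kpc across
    slc.set_width(((10, 'kpc'), (15, 'kpc')))  # 10 kpc along x, 15 kpc along y
    slc.save('sliceplot')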

diff -r 85145a05a5bed50af23759d545e4f2aa46a69fdb -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -27,7 +27,9 @@
 import shutil
 from yt.testing import \
     fake_random_pf, assert_equal, assert_rel_equal
-from yt.mods import \
+from yt.utilities.answer_testing.framework import \
+    requires_pf, data_dir_load, PlotWindowAttributeTest
+from yt.visualization.api import \
     SlicePlot, ProjectionPlot, OffAxisSlicePlot, OffAxisProjectionPlot
 
 
@@ -63,26 +65,67 @@
 
     return image_type == os.path.splitext(fname)[1]
 
+attr_args ={ "pan"             : [( ((0.1, 0.1),), {} )],
+             "pan_rel"         : [( ((0.1, 0.1),), {} )],
+             "set_axes_unit"   : [( ("kpc",), {} ),
+                                  ( ("Mpc",), {} ),
+                                  ( (("kpc", "kpc"),), {} ),
+                                  ( (("kpc", "Mpc"),), {} )],
+             "set_buff_size"   : [( (1600,), {} ),
+                                  ( ((600, 800),), {} )],
+             "set_center"      : [( ((0.4, 0.3),), {} )],
+             "set_cmap"        : [( ('Density', 'RdBu'), {} ),
+                                  ( ('Density', 'kamae'), {} )],
+             "set_font"        : [( ({'family':'sans-serif', 'style':'italic',
+                                      'weight':'bold', 'size':24},), {} )],
+             "set_log"         : [( ('Density', False), {} )],
+             "set_window_size" : [( (7.0,), {} )],
+             "set_zlim" : [( ('Density', 1e-25, 1e-23), {} ),
+                           ( ('Density', 1e-25, None), {'dynamic_range' : 4} )],
+             "zoom" : [( (10,), {} )] }
+
+m7 = "DD0010/moving7_0010"
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+ at requires_pf(m7)
+ at requires_pf(wt)
+def test_attributes():
+    """Test plot member functions that aren't callbacks"""
+    plot_field = 'Density'
+    decimals = 3
+
+    pf = data_dir_load(m7)
+    for ax in 'xyz':
+        for attr_name in attr_args.keys():
+            for args in attr_args[attr_name]:
+                yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                              args, decimals)
+    pf = data_dir_load(wt)
+    ax = 'z'
+    for attr_name in attr_args.keys():
+        for args in attr_args[attr_name]:
+            yield PlotWindowAttributeTest(pf, plot_field, ax, attr_name,
+                                          args, decimals)
+
 def test_setwidth():
     pf = fake_random_pf(64)
-    
+
     slc = SlicePlot(pf, 0, 'Density')
 
     yield assert_equal, [slc.xlim, slc.ylim, slc.width], \
         [(0.0, 1.0), (0.0, 1.0), (1.0, 1.0)]
-    
+
     slc.set_width((0.5,0.8))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(0.25, 0.75), (0.1, 0.9), (0.5, 0.8)], 15
 
     slc.set_width(15,'kpc')
-    
+
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(-7.5/pf['kpc'], 7.5/pf['kpc']),
          (-7.5/pf['kpc'], 7.5/pf['kpc']),
          (15/pf['kpc'], 15/pf['kpc'])], 15
-    
+
     slc.set_width((15,'kpc'))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
@@ -90,7 +133,7 @@
          (-7.5/pf['kpc'], 7.5/pf['kpc']),
          (15/pf['kpc'], 15/pf['kpc'])], 15
 
-    slc.set_width(((15,'kpc'),(10,'kpc'))) 
+    slc.set_width(((15,'kpc'),(10,'kpc')))
 
     yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
         [(-7.5/pf['kpc'], 7.5/pf['kpc']),
@@ -105,7 +148,7 @@
          (15/pf['kpc'], 10/pf['kpc'])], 15
 
 def test_save():
-    """Main test suite for PlotWindow."""
+    """Test plot window creation and saving to disk."""
     # Perform I/O in safe place instead of yt main dir
     tmpdir = tempfile.mkdtemp()
     curdir = os.getcwd()
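
test_attributes above, like test_setwidth, uses nose's test-generator
protocol: a test function yields callables, or (callable, args...) tuples,
and nose runs each yielded item as an independent test case, so one failing
parameter combination does not abort the rest. A minimal self-contained
sketch of the pattern:

    def test_squares():
        # nose runs each yielded (callable, arg) pair as its own test case.
        def check_non_negative(x):
            assert x * x >= 0
        for x in (-2, 0, 3):
            yield check_non_negative, x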


https://bitbucket.org/yt_analysis/yt-3.0/commits/8d14a55aefcd/
Changeset:   8d14a55aefcd
Branch:      yt
User:        ngoldbaum
Date:        2013-04-03 01:04:25
Summary:     This restores the previous behavior in the enzo answer testing where the tests
will not run unless an output directory is specified.  I've additionally added a
hook that exits the yt answer tests if an output directory is not specified.
Affected #:  1 file

diff -r 73fa10cba4d55f2a2fdb3c0c8141e4798dd8ba68 -r 8d14a55aefcdda5a65309c1102a95ad87ddb8515 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -71,8 +71,7 @@
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--local-dir", dest="output_dir",
-                          default=ytcfg.get("yt", "test_data_dir"), metavar='str',
+        parser.add_option("--local-dir", dest="output_dir", metavar='str',
                           help="The name of the directory to store local results")
 
     @property
@@ -128,6 +127,9 @@
 
         # Local/Cloud storage
         if options.local_results:
+            if options.output_dir is None:
+                print 'Please supply an output directory with the --local-dir option'
+                sys.exit(1)
             storage_class = AnswerTestLocalStorage
             # Fix up filename for local storage
             if self.compare_name is not None:
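
The change above is a fail-fast guard: --local-dir no longer defaults to the
config value, so an unset directory is detectable as None and the run exits
before any answers are written to an undefined location. A standalone
optparse sketch of the same pattern (not the yt plugin itself; the --local
flag here is a hypothetical stand-in for whatever sets local_results):

    import sys
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("--local", dest="local_results", default=False,
                      action="store_true", help="Store test results locally")
    parser.add_option("--local-dir", dest="output_dir", metavar='str',
                      help="The name of the directory to store local results")
    (options, args) = parser.parse_args()

    if options.local_results and options.output_dir is None:
        # Fail fast rather than writing results to an undefined location.
        print('Please supply an output directory with the --local-dir option')
        sys.exit(1)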


https://bitbucket.org/yt_analysis/yt-3.0/commits/56a2f92a6299/
Changeset:   56a2f92a6299
Branch:      yt
User:        jsoishi
Date:        2013-04-09 19:44:18
Summary:     corrected for the fact that enzo uses specific energy instead of energy density. Thanks to David Collins for the catch.
Affected #:  1 file

diff -r 8d14a55aefcdda5a65309c1102a95ad87ddb8515 -r 56a2f92a62997a0aa99bc43626830dc7f40b88bf yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -138,7 +138,8 @@
         return data["TotalEnergy"] - 0.5*(
             data["x-velocity"]**2.0
             + data["y-velocity"]**2.0
-            + data["z-velocity"]**2.0 ) - data["MagneticEnergy"]
+            + data["z-velocity"]**2.0 ) 
+            - data["MagneticEnergy"]/data["Density"]
 
     return data["TotalEnergy"] - 0.5*(
         data["x-velocity"]**2.0


https://bitbucket.org/yt_analysis/yt-3.0/commits/77f18fb5b757/
Changeset:   77f18fb5b757
Branch:      yt
User:        jsoishi
Date:        2013-04-09 19:47:49
Summary:     fixed typo.
Affected #:  1 file

diff -r 56a2f92a62997a0aa99bc43626830dc7f40b88bf -r 77f18fb5b757b6051dada9e2bad7774938a62064 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -138,7 +138,7 @@
         return data["TotalEnergy"] - 0.5*(
             data["x-velocity"]**2.0
             + data["y-velocity"]**2.0
-            + data["z-velocity"]**2.0 ) 
+            + data["z-velocity"]**2.0 ) \
             - data["MagneticEnergy"]/data["Density"]
 
     return data["TotalEnergy"] - 0.5*(


https://bitbucket.org/yt_analysis/yt-3.0/commits/2e35b24d19e4/
Changeset:   2e35b24d19e4
Branch:      yt
User:        ngoldbaum
Date:        2013-04-07 08:44:59
Summary:     Version bumping many of the dependencies.
Affected #:  1 file

diff -r 8d14a55aefcdda5a65309c1102a95ad87ddb8515 -r 2e35b24d19e475672382f7489a499c07b5687382 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -194,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -205,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -221,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -291,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -437,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -499,79 +499,78 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
+echo '283301bf6c3131e3079e7430dce758ccefbc7add42b14ebfc87ca30b1d9ecb20bfc00ecf6ec823fc1e3cc7af16f8ed186cd09364444f55acfcab70c6981c10ed  hdf5-1.8.10-patch1.tar.gz' > hdf5-1.8.10-patch1.tar.gz.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.10-patch1.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -582,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -600,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -618,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -636,11 +635,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e hdf5-1.8.10-patch1/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e hdf5-1.8.10-patch1 ] && tar xfz hdf5-1.8.10-patch1.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd hdf5-1.8.10-patch1
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -668,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -687,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -709,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -719,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -727,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -737,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -765,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -789,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -804,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]


https://bitbucket.org/yt_analysis/yt-3.0/commits/ed2264cda685/
Changeset:   ed2264cda685
Branch:      yt
User:        ngoldbaum
Date:        2013-04-07 08:27:02
Summary:     Adding some miscellaneous files to .hgignore.
Affected #:  1 file

diff -r 8d14a55aefcdda5a65309c1102a95ad87ddb8515 -r ed2264cda6856d6bac75e3399b5bc13cec8ddf95 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h


https://bitbucket.org/yt_analysis/yt-3.0/commits/cd5fb19b3f4d/
Changeset:   cd5fb19b3f4d
Branch:      yt
User:        ngoldbaum
Date:        2013-04-07 08:52:48
Summary:     Merging.
Affected #:  1 file

diff -r 2e35b24d19e475672382f7489a499c07b5687382 -r cd5fb19b3f4d71d675120bc2158a5d7010687713 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h


https://bitbucket.org/yt_analysis/yt-3.0/commits/59822b0adb7f/
Changeset:   59822b0adb7f
Branch:      yt
User:        ngoldbaum
Date:        2013-04-07 09:39:09
Summary:     Avoiding an import * from yt.mods
Affected #:  1 file

diff -r ed2264cda6856d6bac75e3399b5bc13cec8ddf95 -r 59822b0adb7fca5e0be35a2a1915070c4773e4a8 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,8 +37,8 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
+from yt.mods import load
 from yt.config import ytcfg
-from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
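
An aside on why the explicit import matters here: a wildcard import dumps
every public name from the source module into the importer, where a later
star import can silently rebind names such as load.  A minimal sketch of
the hazard (the math/numpy pairing is illustrative, not from this
changeset):

    from math import *    # brings math.sqrt into the namespace
    from numpy import *   # silently rebinds sqrt (and many other names)

    print(sqrt(4))        # numpy.sqrt wins, only because it was starred last

    # The explicit style adopted above leaves no such ambiguity:
    from numpy import sqrt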


https://bitbucket.org/yt_analysis/yt-3.0/commits/241e3bd1eee6/
Changeset:   241e3bd1eee6
Branch:      yt
User:        ngoldbaum
Date:        2013-04-07 09:40:17
Summary:     Merging.
Affected #:  1 file

diff -r cd5fb19b3f4d71d675120bc2158a5d7010687713 -r 241e3bd1eee6ff2f7d03a3a9c2748544d6486063 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,8 +37,8 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
+from yt.mods import load
 from yt.config import ytcfg
-from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version


https://bitbucket.org/yt_analysis/yt-3.0/commits/4a7525afa4ca/
Changeset:   4a7525afa4ca
Branch:      yt
User:        ngoldbaum
Date:        2013-04-08 03:05:44
Summary:     Fixing the command line parser.
Affected #:  3 files

diff -r 59822b0adb7fca5e0be35a2a1915070c4773e4a8 -r 4a7525afa4ca1cd9849a6efa278254ae58efcc00 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -41,7 +41,6 @@
 # operations.
 
 import yt.startup_tasks as __startup_tasks
-unparsed_args = __startup_tasks.unparsed_args
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
@@ -146,7 +145,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position 
+    ortho_find, quartiles, periodic_position
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 59822b0adb7fca5e0be35a2a1915070c4773e4a8 -r 4a7525afa4ca1cd9849a6efa278254ae58efcc00 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -115,22 +115,14 @@
     help = "Run in MPI-parallel mode (must be launched as an MPI task)")
 if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
 
-unparsed_args = []
-
 parallel_capable = False
+subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
 if not ytcfg.getboolean("yt","__command_line"):
-    opts, unparsed_args = parser.parse_known_args()
-    # THIS IS NOT SUCH A GOOD IDEA:
-    #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
-    subparsers = parser.add_subparsers(title="subcommands",
-                        dest='subcommands',
-                        description="Valid subcommands",)
 else:
-    subparsers = parser.add_subparsers(title="subcommands",
-                        dest='subcommands',
-                        description="Valid subcommands",)
     def print_help(*args, **kwargs):
         parser.print_help()
     help_parser = subparsers.add_parser("help", help="Print help message")

diff -r 59822b0adb7fca5e0be35a2a1915070c4773e4a8 -r 4a7525afa4ca1cd9849a6efa278254ae58efcc00 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -54,6 +54,7 @@
 _latest = ytcfg.get("yt", "gold_standard_filename")
 _latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+ytcfg.set("yt", "__command_line", "True")
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
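
The parser logic being shuffled here (and partly restored in a later
commit) centers on argparse's parse_known_args(), which consumes only the
options it recognizes and hands everything else back.  A minimal sketch of
that behavior (the flag names are illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--parallel", action="store_true",
                        help="Run in MPI-parallel mode")

    # Recognized flags are parsed; the remainder is returned so a user's
    # script (or a test runner) can run its own parser over it.
    opts, unparsed_args = parser.parse_known_args(
        ["--parallel", "--user-flag", "42"])
    print(opts.parallel)    # True
    print(unparsed_args)    # ['--user-flag', '42']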


https://bitbucket.org/yt_analysis/yt-3.0/commits/2ecec544aa26/
Changeset:   2ecec544aa26
Branch:      yt
User:        ngoldbaum
Date:        2013-04-08 03:06:35
Summary:     Merging.
Affected #:  3 files

diff -r 241e3bd1eee6ff2f7d03a3a9c2748544d6486063 -r 2ecec544aa265be7ac4bffaa44636d83286c1cb6 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -41,7 +41,6 @@
 # operations.
 
 import yt.startup_tasks as __startup_tasks
-unparsed_args = __startup_tasks.unparsed_args
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
@@ -146,7 +145,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position 
+    ortho_find, quartiles, periodic_position
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 241e3bd1eee6ff2f7d03a3a9c2748544d6486063 -r 2ecec544aa265be7ac4bffaa44636d83286c1cb6 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -115,22 +115,14 @@
     help = "Run in MPI-parallel mode (must be launched as an MPI task)")
 if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
 
-unparsed_args = []
-
 parallel_capable = False
+subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
 if not ytcfg.getboolean("yt","__command_line"):
-    opts, unparsed_args = parser.parse_known_args()
-    # THIS IS NOT SUCH A GOOD IDEA:
-    #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
-    subparsers = parser.add_subparsers(title="subcommands",
-                        dest='subcommands',
-                        description="Valid subcommands",)
 else:
-    subparsers = parser.add_subparsers(title="subcommands",
-                        dest='subcommands',
-                        description="Valid subcommands",)
     def print_help(*args, **kwargs):
         parser.print_help()
     help_parser = subparsers.add_parser("help", help="Print help message")

diff -r 241e3bd1eee6ff2f7d03a3a9c2748544d6486063 -r 2ecec544aa265be7ac4bffaa44636d83286c1cb6 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -54,6 +54,7 @@
 _latest = ytcfg.get("yt", "gold_standard_filename")
 _latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
+ytcfg.set("yt", "__command_line", "True")
 
 class AnswerTesting(Plugin):
     name = "answer-testing"


https://bitbucket.org/yt_analysis/yt-3.0/commits/912e53b8ede3/
Changeset:   912e53b8ede3
Branch:      yt
User:        ngoldbaum
Date:        2013-04-08 07:40:17
Summary:     Need to import load from convenience rather than mods.
Affected #:  1 file

diff -r 2ecec544aa265be7ac4bffaa44636d83286c1cb6 -r 912e53b8ede3e5fcbafcd03539e952d48a1b90c8 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,7 +37,7 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
-from yt.mods import load
+from yt.convenience import load
 from yt.config import ytcfg
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
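
The distinction matters because importing yt.mods runs startup tasks,
including command-line handling, as an import side effect, whereas
yt.convenience is a plain function module.  A sketch of the general hazard
with import-time argument parsing -- illustrated with a strict parse_args();
yt's actual startup path uses parse_known_args, so the real interaction was
subtler:

    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument("--parallel", action="store_true")

    sys.argv = ["nosetests", "--with-answer-testing"]  # a test runner's argv
    try:
        parser.parse_args()   # strict parsing of foreign flags bails out
    except SystemExit:
        print("an import-time parse like this would kill the test process")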


https://bitbucket.org/yt_analysis/yt-3.0/commits/c859a80671c5/
Changeset:   c859a80671c5
Branch:      yt
User:        ngoldbaum
Date:        2013-04-08 07:52:39
Summary:     Reverting some unnecessary, broken changes.
Affected #:  2 files

diff -r 912e53b8ede3e5fcbafcd03539e952d48a1b90c8 -r c859a80671c520b2a49103d9a9e46ef9d113ca2a yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -115,14 +115,20 @@
     help = "Run in MPI-parallel mode (must be launched as an MPI task)")
 if not hasattr(sys, 'argv') or sys.argv is None: sys.argv = []
 
+unparsed_args = []
+
 parallel_capable = False
-subparsers = parser.add_subparsers(title="subcommands",
+if not ytcfg.getboolean("yt","__command_line"):
+    opts, unparsed_args = parser.parse_known_args()
+    if opts.parallel:
+        parallel_capable = turn_on_parallelism()
+    subparsers = parser.add_subparsers(title="subcommands",
                         dest='subcommands',
                         description="Valid subcommands",)
-if not ytcfg.getboolean("yt","__command_line"):
-    if opts.parallel:
-        parallel_capable = turn_on_parallelism()
 else:
+    subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
     def print_help(*args, **kwargs):
         parser.print_help()
     help_parser = subparsers.add_parser("help", help="Print help message")

diff -r 912e53b8ede3e5fcbafcd03539e952d48a1b90c8 -r c859a80671c520b2a49103d9a9e46ef9d113ca2a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -54,7 +54,6 @@
 _latest = ytcfg.get("yt", "gold_standard_filename")
 _latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
-ytcfg.set("yt", "__command_line", "True")
 
 class AnswerTesting(Plugin):
     name = "answer-testing"


https://bitbucket.org/yt_analysis/yt-3.0/commits/875b16c94eca/
Changeset:   875b16c94eca
Branch:      yt
User:        ngoldbaum
Date:        2013-04-08 07:57:26
Summary:     Removing an unnecessary import and giving the AnswerTesting plugin a help function.
Affected #:  2 files

diff -r c859a80671c520b2a49103d9a9e46ef9d113ca2a -r 875b16c94eca2b2be8c522f1bef12f05094b870e yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -40,8 +40,6 @@
 # also attempt to parse the command line and set up the global state of various
 # operations.
 
-import yt.startup_tasks as __startup_tasks
-
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function

diff -r c859a80671c520b2a49103d9a9e46ef9d113ca2a -r 875b16c94eca2b2be8c522f1bef12f05094b870e yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -159,6 +159,9 @@
         if self.store_results is False: return
         self.storage.dump(self.result_storage)
 
+    def help(self):
+        return "yt answer testing support"
+
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
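
For context, nose builds the description shown beside --with-answer-testing
in "nosetests --help" from the plugin's help() method, falling back to the
class docstring when it is absent.  A minimal sketch of the plugin skeleton
this changeset completes:

    from nose.plugins import Plugin

    class AnswerTesting(Plugin):
        name = "answer-testing"   # yields the --with-answer-testing switch

        # Called by nose when assembling its --help output.
        def help(self):
            return "yt answer testing support"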


https://bitbucket.org/yt_analysis/yt-3.0/commits/cfba28abecda/
Changeset:   cfba28abecda
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 08:08:35
Summary:     Reverting the install script to 8d14a55.
Affected #:  1 file

diff -r 875b16c94eca2b2be8c522f1bef12f05094b870e -r cfba28abecda77f52b0ec6aa6dba2ba91df189aa doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]]
+if [[ ${DEST_DIR%/} == /usr/local ]] 
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -194,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo
+	echo 
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -205,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+	echo 
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or"
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+	echo "Software Update to update to XCode 3.2.6 or" 
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -221,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:"
+	echo "command line tools, see:" 
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so,"
-	echo "please set the following environment variables, remove any"
+        echo "NOTE: It's possible that the installation will fail, if so," 
+	echo "please set the following environment variables, remove any" 
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc"
-        echo "$ export CXX=g++"
+        echo "$ export CC=gcc-4.2"
+        echo "$ export CXX=g++-4.2"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -291,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo
+        echo 
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -437,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null
+if type -P wget &>/dev/null 
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -499,78 +499,79 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
-echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
-echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
+
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
+echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
+echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo '283301bf6c3131e3079e7430dce758ccefbc7add42b14ebfc87ca30b1d9ecb20bfc00ecf6ec823fc1e3cc7af16f8ed186cd09364444f55acfcab70c6981c10ed  hdf5-1.8.10-patch1.tar.gz' > hdf5-1.8.10-patch1.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
+echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
+echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
+echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
+echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
+echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
+echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
+echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.10-patch1.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
+get_ytproject Python-2.7.3.tgz
+get_ytproject numpy-1.6.1.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
+get_ytproject Forthon-0.8.10.tar.gz
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e bzip2-1.0.5/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
-        if [ `uname` = "Darwin" ]
+        cd bzip2-1.0.5
+        if [ `uname` = "Darwin" ] 
         then
-            if [ -z "${CC}" ]
+            if [ -z "${CC}" ] 
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -581,11 +582,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e zlib-1.2.3/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -599,11 +600,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e libpng-1.5.12/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd libpng-1.5.12
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -617,11 +618,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e freetype-2.4.4/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -635,11 +636,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.10-patch1/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.10-patch1 ] && tar xfz hdf5-1.8.10-patch1.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.10-patch1
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -654,11 +655,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e sqlite-autoconf-3070500/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -667,11 +668,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -686,11 +687,12 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    echo "Installing Mercurial."
+    do_setup_py mercurial-2.5.1
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null
+    if type -P hg &>/dev/null 
     then
         export HG_EXEC=hg
     else
@@ -707,7 +709,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ]
+    elif [ ! -e yt-hg ] 
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -717,7 +719,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ]
+    elif [ -e yt-hg ] 
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -725,7 +727,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS
+unset LDFLAGS 
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -735,7 +737,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -763,8 +765,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -787,10 +789,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -802,29 +804,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e zeromq-2.2.0/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd zeromq-2.2.0
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
+    do_setup_py tornado-2.2
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
+do_setup_py Forthon-0.8.10
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
+do_setup_py python-hglib-0.2
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]


https://bitbucket.org/yt_analysis/yt-3.0/commits/8496120c1bd0/
Changeset:   8496120c1bd0
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 21:08:39
Summary:     Reverting some bad deletions.  Adding a bit of explanation about unparsed_args.
Affected #:  2 files

diff -r cfba28abecda77f52b0ec6aa6dba2ba91df189aa -r 8496120c1bd00b439f790af1f2ed22500db02c6d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,7 +38,12 @@
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
-# operations.
+# operations.  The variable unparsed_args is not used internally but is
+# provided as a convenience for users who wish to parse arguments in scripts.
+# See http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2011-December/
+#     001727.html
+import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
 
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog

diff -r cfba28abecda77f52b0ec6aa6dba2ba91df189aa -r 8496120c1bd00b439f790af1f2ed22500db02c6d yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -120,6 +120,8 @@
 parallel_capable = False
 if not ytcfg.getboolean("yt","__command_line"):
     opts, unparsed_args = parser.parse_known_args()
+    # THIS IS NOT SUCH A GOOD IDEA:
+    #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
     subparsers = parser.add_subparsers(title="subcommands",
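
A sketch of the convenience documented in the comment above, as a user
script might exploit it (the --nframes flag is hypothetical, not part of
yt):

    import argparse
    from yt.mods import unparsed_args   # flags yt's own parser left alone

    parser = argparse.ArgumentParser()
    parser.add_argument("--nframes", type=int, default=10)  # hypothetical
    args = parser.parse_args(unparsed_args)
    print(args.nframes)

Run as "python script.py --parallel --nframes 20", yt consumes --parallel
and the script's parser sees only --nframes.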


https://bitbucket.org/yt_analysis/yt-3.0/commits/7ab40c0ebf6f/
Changeset:   7ab40c0ebf6f
Branch:      yt
User:        MatthewTurk
Date:        2013-04-09 21:51:36
Summary:     Merged in ngoldbaum/yt (pull request #476)

Fixing nose argument parsing. Closes #545
Affected #:  5 files

diff -r 77f18fb5b757b6051dada9e2bad7774938a62064 -r 7ab40c0ebf6fe1e4606285788b5db148cf438a81 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 77f18fb5b757b6051dada9e2bad7774938a62064 -r 7ab40c0ebf6fe1e4606285788b5db148cf438a81 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,8 +38,10 @@
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
-# operations.
-
+# operations.  The variable unparsed_args is not used internally but is
+# provided as a convenience for users who wish to parse arguments in scripts.
+# See http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2011-December/
+#     001727.html
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
@@ -146,7 +148,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position 
+    ortho_find, quartiles, periodic_position
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 77f18fb5b757b6051dada9e2bad7774938a62064 -r 7ab40c0ebf6fe1e4606285788b5db148cf438a81 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,8 +37,8 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
+from yt.convenience import load
 from yt.config import ytcfg
-from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -159,6 +159,9 @@
         if self.store_results is False: return
         self.storage.dump(self.result_storage)
 
+    def help(self):
+        return "yt answer testing support"
+
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name


https://bitbucket.org/yt_analysis/yt-3.0/commits/ba4470df120b/
Changeset:   ba4470df120b
Branch:      yt
User:        ngoldbaum
Date:        2013-04-12 00:19:35
Summary:     simulation needs to be imported for the Enzo answer tests.
Affected #:  1 file

diff -r 7ab40c0ebf6fe1e4606285788b5db148cf438a81 -r ba4470df120bafee3ae0d0db4e1644f8b1bd1787 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,7 +37,7 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
-from yt.convenience import load
+from yt.convenience import load, simulation
 from yt.config import ytcfg
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging


https://bitbucket.org/yt_analysis/yt-3.0/commits/68a0eaaeb643/
Changeset:   68a0eaaeb643
Branch:      yt
User:        jsoishi
Date:        2013-04-12 18:06:43
Summary:     Corrected typos in the magnetic pressure display name and units.
Affected #:  1 file

diff -r 77f18fb5b757b6051dada9e2bad7774938a62064 -r 68a0eaaeb643df98e5fccb8f26c77fb967d9d58a yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1034,8 +1034,8 @@
     return data['MagneticEnergy']
 add_field("MagneticPressure",
           function=_MagneticPressure,
-          display_name=r"\rm{Magnetic}\/\rm{Energy}",
-          units="\rm{ergs}\/\rm{cm}^{-3}")
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _BPoloidal(field,data):
     normal = data.get_field_parameter("normal")
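
Note the second half of the fix: without the r prefix, "\rm" in the units
string begins with a carriage return rather than a LaTeX command.  A sketch
of a derived field in the same yt-2.x style (the field name and formula are
illustrative only):

    from yt.mods import add_field

    def _DoublePressure(field, data):
        # An arbitrary example formula over an existing field.
        return 2.0 * data["Pressure"]

    add_field("DoublePressure",
              function=_DoublePressure,
              display_name=r"\rm{Double}\/\rm{Pressure}",
              # raw string, so "\rm" is not read as carriage return + "m"
              units=r"\rm{ergs}\/\rm{cm}^{-3}")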


https://bitbucket.org/yt_analysis/yt-3.0/commits/527f460483f7/
Changeset:   527f460483f7
Branch:      yt
User:        MatthewTurk
Date:        2013-04-12 18:08:45
Summary:     Merged in jsoishi/yt-fixes (pull request #478)

Corrected typos in the magnetic pressure display name and units.
Affected #:  1 file

diff -r ba4470df120bafee3ae0d0db4e1644f8b1bd1787 -r 527f460483f7c5666cb2de4c16ac35cb7199fa50 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1034,8 +1034,8 @@
     return data['MagneticEnergy']
 add_field("MagneticPressure",
           function=_MagneticPressure,
-          display_name=r"\rm{Magnetic}\/\rm{Energy}",
-          units="\rm{ergs}\/\rm{cm}^{-3}")
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _BPoloidal(field,data):
     normal = data.get_field_parameter("normal")


https://bitbucket.org/yt_analysis/yt-3.0/commits/940c7f15440c/
Changeset:   940c7f15440c
Branch:      yt
User:        samskillman
Date:        2013-04-02 20:00:51
Summary:     Implementing data storage for Athena datasets, and fixing a bug where h5py
refuses to save a plain Python string:

https://github.com/h5py/h5py/issues/63
Affected #:  2 files

diff -r 4c9a9cd7eac8541bc10a747fb90ad2fdf200761a -r 940c7f15440c12c15042c43fb2667702e019672d yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -317,7 +317,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

diff -r 4c9a9cd7eac8541bc10a747fb90ad2fdf200761a -r 940c7f15440c12c15042c43fb2667702e019672d yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -53,7 +53,7 @@
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
-        df = hierarchy.storage_filename
+        df = hierarchy.parameter_file.filename[4:-4]
         if 'id0' not in hierarchy.parameter_file.filename:
             gname = hierarchy.parameter_file.filename
         else:
@@ -119,12 +119,13 @@
 
     grid = AthenaGrid
     _data_style='athena'
+    _data_file = None
     
     def __init__(self, pf, data_style='athena'):
         self.parameter_file = weakref.proxy(pf)
+        self.directory = os.path.dirname(self.parameter_file.filename)
         self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
-        self.storage_filename = self.parameter_file.storage_filename
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
@@ -132,9 +133,6 @@
 
         self._fhandle.close()
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         field_map = {}
         f = open(self.hierarchy_filename,'rb')
@@ -337,11 +335,11 @@
     _data_style = "athena"
 
     def __init__(self, filename, data_style='athena',
-                 storage_filename = None, parameters = {}):
+                 storage_filename=None, parameters={}):
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
-        self.storage_filename = filename[4:-4]
+        self.storage_filename = storage_filename 
         
         # Unfortunately we now have to mandate that the hierarchy gets 
         # instantiated so that we can make sure we have the correct left 


https://bitbucket.org/yt_analysis/yt-3.0/commits/bb1c7ac50b87/
Changeset:   bb1c7ac50b87
Branch:      yt
User:        samskillman
Date:        2013-04-02 21:13:21
Summary:     Explicitly making the order of the np array 'c'.
Affected #:  1 file

diff -r 940c7f15440c12c15042c43fb2667702e019672d -r bb1c7ac50b875e82b79396abde25ef3a199d06c8 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -317,7 +317,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(np.array(s), "/Objects", name, force = True)
+        self.save_data(np.array(s, order='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

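A note on the distinction this changeset stumbles into: order= controls the
memory layout of the result, not its dtype, so this cast still produces a
string scalar; dtype='c' (the form a later commit in this series settles on)
is what actually yields a character array:

    import numpy as np

    s = "hello"
    a = np.array(s, order='c')   # zero-dimensional |S5 string scalar;
                                 # order only picks C vs Fortran layout
    b = np.array(s, dtype='c')   # a (5,) array of |S1 single characters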

https://bitbucket.org/yt_analysis/yt-3.0/commits/8a02b74fec3d/
Changeset:   8a02b74fec3d
Branch:      yt
User:        samskillman
Date:        2013-04-02 23:09:41
Summary:     Need a different hash to uniquely identify Athena data dumps.  This is one way.
Affected #:  1 file

diff -r bb1c7ac50b875e82b79396abde25ef3a199d06c8 -r 8a02b74fec3d86a0056f84393b769deb63b05c6b yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -400,7 +400,7 @@
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
-        self.unique_identifier = self._handle.__hash__()
+        self.unique_identifier = self.parameter_filename.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'
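
One plausible reading of this fix: a file handle hashes by object identity
and so changes on every open, while hashing the filename string is
reproducible (and, under Python 2 as used here, deterministic across runs).
A sketch, using an arbitrary existing file:

    path = "/etc/hosts"              # any file that exists
    f1, f2 = open(path), open(path)
    print(hash(f1) == hash(f2))      # False: identity-based, new each open
    print(hash(path) == hash(path))  # True: stable for a given filename
    f1.close(); f2.close()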


https://bitbucket.org/yt_analysis/yt-3.0/commits/6ab1db6b056e/
Changeset:   6ab1db6b056e
Branch:      yt
User:        samskillman
Date:        2013-04-12 19:07:37
Summary:     Fixing up Athena data storage so that it does not land in the id0/ directory
*unless* you only load up a single cpu file.  This requires defaulting the
directory checked for storage writability to '.', since if
initialize_data_storage can't find the storage_filename file it would end up
looking in '', and os.access('', os.W_OK) returns False.  It should be safe
to always fall back to '.' when dirname returns ''.  This also forces
save_object to cast the pickle to dtype 'c', which is a 'char' dtype.
Affected #:  2 files

diff -r 8a02b74fec3d86a0056f84393b769deb63b05c6b -r 6ab1db6b056ed117fc4864a6453b7402a5154fc1 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(np.array(s, order='c'), "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

diff -r 8a02b74fec3d86a0056f84393b769deb63b05c6b -r 6ab1db6b056ed117fc4864a6453b7402a5154fc1 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -339,8 +339,10 @@
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
-        self.storage_filename = storage_filename 
-        
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+
         # Unfortunately we now have to mandate that the hierarchy gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.


https://bitbucket.org/yt_analysis/yt-3.0/commits/3708abcb5084/
Changeset:   3708abcb5084
Branch:      yt
User:        MatthewTurk
Date:        2013-04-12 19:19:45
Summary:     Merged in samskillman/yt (pull request #474)

Implementing data storage for athena datasets, and fixing a bug from h5py not
Affected #:  2 files

diff -r 527f460483f7c5666cb2de4c16ac35cb7199fa50 -r 3708abcb5084d03b4520eb5a1214561a99063aa7 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

diff -r 527f460483f7c5666cb2de4c16ac35cb7199fa50 -r 3708abcb5084d03b4520eb5a1214561a99063aa7 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -53,7 +53,7 @@
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
-        df = hierarchy.storage_filename
+        df = hierarchy.parameter_file.filename[4:-4]
         if 'id0' not in hierarchy.parameter_file.filename:
             gname = hierarchy.parameter_file.filename
         else:
@@ -119,12 +119,13 @@
 
     grid = AthenaGrid
     _data_style='athena'
+    _data_file = None
     
     def __init__(self, pf, data_style='athena'):
         self.parameter_file = weakref.proxy(pf)
+        self.directory = os.path.dirname(self.parameter_file.filename)
         self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
-        self.storage_filename = self.parameter_file.storage_filename
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
@@ -132,9 +133,6 @@
 
         self._fhandle.close()
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         field_map = {}
         f = open(self.hierarchy_filename,'rb')
@@ -337,12 +335,14 @@
     _data_style = "athena"
 
     def __init__(self, filename, data_style='athena',
-                 storage_filename = None, parameters = {}):
+                 storage_filename=None, parameters={}):
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
-        self.storage_filename = filename[4:-4]
-        
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+
         # Unfortunately we now have to mandate that the hierarchy gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
@@ -402,7 +402,7 @@
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
-        self.unique_identifier = self._handle.__hash__()
+        self.unique_identifier = self.parameter_filename.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'


https://bitbucket.org/yt_analysis/yt-3.0/commits/96d51cf76c6d/
Changeset:   96d51cf76c6d
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 08:10:22
Summary:     Reverting a few files to 8d14a55
Affected #:  3 files

diff -r 875b16c94eca2b2be8c522f1bef12f05094b870e -r 96d51cf76c6d883f3688ad68e65c8cf623c5025c yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -40,6 +40,9 @@
 # also attempt to parse the command line and set up the global state of various
 # operations.
 
+import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
+
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -143,7 +146,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position
+    ortho_find, quartiles, periodic_position 
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 875b16c94eca2b2be8c522f1bef12f05094b870e -r 96d51cf76c6d883f3688ad68e65c8cf623c5025c yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -120,6 +120,8 @@
 parallel_capable = False
 if not ytcfg.getboolean("yt","__command_line"):
     opts, unparsed_args = parser.parse_known_args()
+    # THIS IS NOT SUCH A GOOD IDEA:
+    #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
     subparsers = parser.add_subparsers(title="subcommands",

diff -r 875b16c94eca2b2be8c522f1bef12f05094b870e -r 96d51cf76c6d883f3688ad68e65c8cf623c5025c yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,8 +37,8 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
-from yt.convenience import load
 from yt.config import ytcfg
+from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -159,9 +159,6 @@
         if self.store_results is False: return
         self.storage.dump(self.result_storage)
 
-    def help(self):
-        return "yt answer testing support"
-
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name


https://bitbucket.org/yt_analysis/yt-3.0/commits/7a00a8fea1f7/
Changeset:   7a00a8fea1f7
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 08:12:20
Summary:     Forgot to revert hgignore.
Affected #:  1 file

diff -r 96d51cf76c6d883f3688ad68e65c8cf623c5025c -r 7a00a8fea1f7b982a58791d5887ae0ce8e07a7ba .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,9 +4,7 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
-rockstar.cfg
 yt_updater.log
-yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h


https://bitbucket.org/yt_analysis/yt-3.0/commits/2f0b4a6dc941/
Changeset:   2f0b4a6dc941
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 22:12:51
Summary:     Merging with tip.
Affected #:  3 files

diff -r 7a00a8fea1f7b982a58791d5887ae0ce8e07a7ba -r 2f0b4a6dc941a717535d630e31e11c56a3734184 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -138,7 +138,8 @@
         return data["TotalEnergy"] - 0.5*(
             data["x-velocity"]**2.0
             + data["y-velocity"]**2.0
-            + data["z-velocity"]**2.0 ) - data["MagneticEnergy"]
+            + data["z-velocity"]**2.0 ) \
+            - data["MagneticEnergy"]/data["Density"]
 
     return data["TotalEnergy"] - 0.5*(
         data["x-velocity"]**2.0

diff -r 7a00a8fea1f7b982a58791d5887ae0ce8e07a7ba -r 2f0b4a6dc941a717535d630e31e11c56a3734184 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,7 +38,12 @@
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
-# operations.
+# operations.  The variable unparsed_args is not used internally but is
+# provided as a convenience for users who wish to parse arguments in scripts.
+# See http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2011-December/
+#     001727.html
+import yt.startup_tasks as __startup_tasks
+unparsed_args = __startup_tasks.unparsed_args
 
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
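
A hedged usage sketch of the convenience described in that comment: yt's
startup parser consumes its own options and leaves the remainder in
unparsed_args for the script to handle (the --output flag below is purely
illustrative):

    import argparse
    from yt.mods import unparsed_args

    parser = argparse.ArgumentParser()
    parser.add_argument('--output', default='out.png')  # hypothetical flag
    opts = parser.parse_args(unparsed_args)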


https://bitbucket.org/yt_analysis/yt-3.0/commits/aad13f055e6e/
Changeset:   aad13f055e6e
Branch:      yt
User:        ngoldbaum
Date:        2013-04-09 22:14:42
Summary:     Fixing some merge errors.
Affected #:  3 files

diff -r 2f0b4a6dc941a717535d630e31e11c56a3734184 -r aad13f055e6e0fcb6ad45c2698240b3712a652f7 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 2f0b4a6dc941a717535d630e31e11c56a3734184 -r aad13f055e6e0fcb6ad45c2698240b3712a652f7 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -45,9 +45,6 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-import yt.startup_tasks as __startup_tasks
-unparsed_args = __startup_tasks.unparsed_args
-
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -151,7 +148,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position 
+    ortho_find, quartiles, periodic_position
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 2f0b4a6dc941a717535d630e31e11c56a3734184 -r aad13f055e6e0fcb6ad45c2698240b3712a652f7 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -37,8 +37,8 @@
 from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
+from yt.convenience import load
 from yt.config import ytcfg
-from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
@@ -159,6 +159,9 @@
         if self.store_results is False: return
         self.storage.dump(self.result_storage)
 
+    def help(self):
+        return "yt answer testing support"
+
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name


https://bitbucket.org/yt_analysis/yt-3.0/commits/eb7dc8269fdb/
Changeset:   eb7dc8269fdb
Branch:      yt
User:        ngoldbaum
Date:        2013-04-12 20:30:43
Summary:     Falling back to hdf5-1.8.9 in the install script.
Affected #:  1 file

diff -r aad13f055e6e0fcb6ad45c2698240b3712a652f7 -r eb7dc8269fdb32c6e633d8423d1e38e0daaa0c86 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -506,7 +506,7 @@
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
 echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo '283301bf6c3131e3079e7430dce758ccefbc7add42b14ebfc87ca30b1d9ecb20bfc00ecf6ec823fc1e3cc7af16f8ed186cd09364444f55acfcab70c6981c10ed  hdf5-1.8.10-patch1.tar.gz' > hdf5-1.8.10-patch1.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
 echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
 echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
@@ -526,7 +526,7 @@
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.10-patch1.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
@@ -635,11 +635,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.10-patch1/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.10-patch1 ] && tar xfz hdf5-1.8.10-patch1.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.10-patch1
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
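
Each pinned digest line pairs a known sha512 with its tarball; a minimal
Python sketch of the check those pins enable (verify() is a hypothetical
helper, not the script's own get_ytproject machinery):

    import hashlib

    def verify(tarball, expected_hex):
        h = hashlib.sha512()
        with open(tarball, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), ''):
                h.update(chunk)
        return h.hexdigest() == expected_hex

    # e.g. verify('hdf5-1.8.9.tar.gz', 'c68a425b...')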


https://bitbucket.org/yt_analysis/yt-3.0/commits/7f171073bb43/
Changeset:   7f171073bb43
Branch:      yt
User:        MatthewTurk
Date:        2013-04-12 20:32:30
Summary:     Merged in ngoldbaum/yt (pull request #475)

Updating the dependencies in the install script
Affected #:  4 files

diff -r 3708abcb5084d03b4520eb5a1214561a99063aa7 -r 7f171073bb43bebf2c1af25596cb9e7fb0bc2b0a doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -194,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -205,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -221,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -291,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -437,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -499,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -528,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -582,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -600,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -618,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -668,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -687,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -709,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -719,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -727,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -737,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -765,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -789,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -804,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]


https://bitbucket.org/yt_analysis/yt-3.0/commits/64fabb67c5eb/
Changeset:   64fabb67c5eb
Branch:      yt
User:        samskillman
Date:        2013-04-02 05:31:44
Summary:     Adding particle io for ellipsoids, speeding up the ellipsoid data object by ~2x, and some PEP8 style fixes.
Affected #:  2 files

diff -r ae7a263311d3d0cfd7809e7841501dc724384106 -r 64fabb67c5eb9eadb2e51ee891217f3d1e98708a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3587,12 +3587,12 @@
         given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3612,87 +3612,64 @@
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
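
The rewritten mask keeps the same minimum-image logic but accumulates one
scalar field per eigenvector instead of a stacked (3, ...) array, folding
the 1/A, 1/B, 1/C normalization into the accumulation.  A minimal numpy
sketch of the periodic minimum-image step along one axis (ar and DW are
illustrative):

    import numpy as np

    ar = np.array([0.1, 0.9, -0.8])          # signed offsets from the center
    DW = 1.0                                 # domain width along this axis
    cand = np.array([ar, ar + DW, ar - DW])  # the three periodic images
    ind = np.argmin(np.abs(cand), axis=0)    # nearest image per cell
    vec = np.choose(ind, cand)
    print vec                                # [ 0.1 -0.1  0.2]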

diff -r ae7a263311d3d0cfd7809e7841501dc724384106 -r 64fabb67c5eb9eadb2e51ee891217f3d1e98708a yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -145,6 +145,23 @@
         return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
+class ParticleIOHandlerEllipsoid(ParticleIOHandlerImplemented):
+    _source_type = "ellipsoid"
+
+    def __init__(self, pf, source):
+        self.center = source.center
+        self._A = source._A
+        self._B = source._B
+        self._C = source._C
+        self._e0 = source._e0
+        self._tilt = source._tilt
+        ParticleIOHandler.__init__(self, pf, source)
+
+    def _get_args(self):
+        return (1, (np.array(self.center, dtype='float64'), self._A, self._B,
+                    self._C, self._e0, self._tilt))
+
+
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
     _source_type = "disk"
     


https://bitbucket.org/yt_analysis/yt-3.0/commits/8dd9d9819b06/
Changeset:   8dd9d9819b06
Branch:      yt
User:        samskillman
Date:        2013-04-02 06:11:44
Summary:     Getting rid of bad attempt at particle io for ellipsoids.
Affected #:  1 file

diff -r 64fabb67c5eb9eadb2e51ee891217f3d1e98708a -r 8dd9d9819b061bcfa68d2ef962e44f44810228f3 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -145,23 +145,6 @@
         return (1, (np.array(self.center, dtype='float64'), self.radius,
             1, DLE, DRE))
 
-class ParticleIOHandlerEllipsoid(ParticleIOHandlerImplemented):
-    _source_type = "ellipsoid"
-
-    def __init__(self, pf, source):
-        self.center = source.center
-        self._A = source._A
-        self._B = source._B
-        self._C = source._C
-        self._e0 = source._e0
-        self._tilt = source._tilt
-        ParticleIOHandler.__init__(self, pf, source)
-
-    def _get_args(self):
-        return (1, (np.array(self.center, dtype='float64'), self._A, self._B,
-                    self._C, self._e0, self._tilt))
-
-
 class ParticleIOHandlerDisk(ParticleIOHandlerImplemented):
     _source_type = "disk"
     


https://bitbucket.org/yt_analysis/yt-3.0/commits/af7c2b1e3697/
Changeset:   af7c2b1e3697
Branch:      yt
User:        MatthewTurk
Date:        2013-04-12 23:47:35
Summary:     Merged in samskillman/yt (pull request #472)

Ellipsoid Data Object Speedup
Affected #:  2 files

diff -r 7f171073bb43bebf2c1af25596cb9e7fb0bc2b0a -r af7c2b1e3697906d0a4faa0f244903d079490c2e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3587,12 +3587,12 @@
         given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3612,87 +3612,64 @@
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.


https://bitbucket.org/yt_analysis/yt-3.0/commits/134989bc7f64/
Changeset:   134989bc7f64
Branch:      yt
User:        MatthewTurk
Date:        2013-04-15 17:01:07
Summary:     Touch-ups to FisheyeCamera to work with new VR system.
Affected #:  1 file

diff -r 4c9a9cd7eac8541bc10a747fb90ad2fdf200761a -r 134989bc7f64b3cc4732c29668a4e6fa3a873ebc yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1390,7 +1390,7 @@
 
 
     def finalize_image(self, image):
-        image.shape = self.resolution, self.resolution, 3
+        image.shape = self.resolution, self.resolution, 4
 
     def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
@@ -1402,7 +1402,7 @@
                         raise RuntimeError
         
         view_pos = self.center
-        for brick in self.volume.traverse(view_pos, None, image):
+        for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
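
The shape change reflects the new VR sampler emitting RGBA rather than RGB;
a hedged reshaping sketch (the resolution is illustrative):

    import numpy as np

    res = 512                        # illustrative resolution
    image = np.zeros(res * res * 4)  # flat float64 buffer from the sampler
    image.shape = (res, res, 4)      # rows, cols, RGBA channels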


https://bitbucket.org/yt_analysis/yt-3.0/commits/335a441ef6db/
Changeset:   335a441ef6db
Branch:      yt
User:        samskillman
Date:        2013-04-15 17:05:53
Summary:     Merged in MatthewTurk/yt (pull request #479)

Touch-ups to FisheyeCamera to work with new VR system.
Affected #:  1 file

diff -r af7c2b1e3697906d0a4faa0f244903d079490c2e -r 335a441ef6db882f8934808f616fd677b2d527b2 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1390,7 +1390,7 @@
 
 
     def finalize_image(self, image):
-        image.shape = self.resolution, self.resolution, 3
+        image.shape = self.resolution, self.resolution, 4
 
     def _render(self, double_check, num_threads, image, sampler):
         pbar = get_pbar("Ray casting", (self.volume.brick_dimensions + 1).prod(axis=-1).sum())
@@ -1402,7 +1402,7 @@
                         raise RuntimeError
         
         view_pos = self.center
-        for brick in self.volume.traverse(view_pos, None, image):
+        for brick in self.volume.traverse(view_pos):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)


https://bitbucket.org/yt_analysis/yt-3.0/commits/716ffad527a4/
Changeset:   716ffad527a4
Branch:      yt
User:        ngoldbaum
Date:        2013-04-12 20:32:15
Summary:     Merging.
Affected #:  4 files

diff -r ba4470df120bafee3ae0d0db4e1644f8b1bd1787 -r 716ffad527a492fdc207faa65afdf6d91788dc04 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -194,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -205,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -221,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -291,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -437,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -499,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -528,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -582,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -600,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -618,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -668,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -687,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -709,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -719,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -727,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -737,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -765,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -789,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -804,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

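For context: each "echo '<digest>  <name>' > <name>.sha512" line above
writes a standard SHA-512 checksum record for a download. A
self-contained sketch of verifying such a record (file names
hypothetical; this is not code from the install script):

    import hashlib

    def verify_sha512(tarball, record):
        """Compare a file's SHA-512 digest to a '<digest>  <name>' record."""
        expected = open(record).read().split()[0]
        h = hashlib.sha512()
        with open(tarball, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected

    # e.g. verify_sha512('Cython-0.18.tar.gz', 'Cython-0.18.tar.gz.sha512')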

https://bitbucket.org/yt_analysis/yt-3.0/commits/1436dc682943/
Changeset:   1436dc682943
Branch:      yt
User:        ngoldbaum
Date:        2013-04-15 22:13:29
Summary:     Fixing a name error in the local answer test storage.
Affected #:  1 file

diff -r 716ffad527a492fdc207faa65afdf6d91788dc04 -r 1436dc68294375a27855276cedee619701af2f68 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -237,7 +237,7 @@
         ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
-            if name in ds:
+            if answer_name in ds:
                 mylog.info("Overwriting %s", answer_name)
             ds[answer_name] = result_storage[pf_name]
         ds.close()


https://bitbucket.org/yt_analysis/yt-3.0/commits/51292a52b8cb/
Changeset:   51292a52b8cb
Branch:      yt
User:        samskillman
Date:        2013-04-15 22:28:30
Summary:     Merged in ngoldbaum/yt (pull request #480)

Fixing a name error in the local answer test storage.
Affected #:  1 file

diff -r 335a441ef6db882f8934808f616fd677b2d527b2 -r 51292a52b8cb411e6551a20709277e3c6ca22189 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -237,7 +237,7 @@
         ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
-            if name in ds:
+            if answer_name in ds:
                 mylog.info("Overwriting %s", answer_name)
             ds[answer_name] = result_storage[pf_name]
         ds.close()


https://bitbucket.org/yt_analysis/yt-3.0/commits/cd2751781bed/
Changeset:   cd2751781bed
Branch:      yt
User:        xarthisius
Date:        2013-04-16 11:07:27
Summary:     [gdf] field_units attribute is a string, not an array. Fixes #550
Affected #:  1 file

diff -r 51292a52b8cb411e6551a20709277e3c6ca22189 -r cd2751781beda239978abacceb02db371ed7ba2a yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -202,7 +202,7 @@
             except:
                 self.units[field_name] = 1.0
             try:
-                current_fields_unit = current_field.attrs['field_units'][0]
+                current_fields_unit = current_field.attrs['field_units']
             except:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,


https://bitbucket.org/yt_analysis/yt-3.0/commits/d0bc82a75da0/
Changeset:   d0bc82a75da0
Branch:      yt
User:        xarthisius
Date:        2013-04-16 17:40:12
Summary:     [gdf] convert bare try/except clauses into if statements; 'field_units' can now be either a string or an array
Affected #:  1 file

diff -r cd2751781beda239978abacceb02db371ed7ba2a -r d0bc82a75da022a0185165fb8b72c444dc4da104 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -197,13 +197,13 @@
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
             current_field = self._handle["/field_types/%s" % field_name]
-            try:
+            if 'field_to_cgs' in current_field.attrs:
                 self.units[field_name] = current_field.attrs['field_to_cgs']
-            except:
+            else:
                 self.units[field_name] = 1.0
-            try:
-                current_fields_unit = current_field.attrs['field_units']
-            except:
+            if 'field_units' in current_field.attrs:
+                current_fields_unit = just_one(current_field.attrs['field_units'])
+            else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
                    units=current_fields_unit, projected_units="",

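For context: the fix probes optional HDF5 attributes with explicit
membership tests instead of bare try/except, and just_one() normalizes
'field_units', which may arrive as a string or a one-element array. A
hedged h5py sketch of the pattern (file and group names hypothetical):

    import h5py

    with h5py.File('data.gdf', 'r') as f:
        grp = f['/field_types/density']           # hypothetical field group
        # Explicit membership tests instead of bare try/except:
        if 'field_to_cgs' in grp.attrs:
            to_cgs = grp.attrs['field_to_cgs']
        else:
            to_cgs = 1.0
        units = grp.attrs.get('field_units', '')  # str or 1-element array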

https://bitbucket.org/yt_analysis/yt-3.0/commits/a1af53aa97b8/
Changeset:   a1af53aa97b8
Branch:      yt
User:        MatthewTurk
Date:        2013-04-17 00:27:41
Summary:     get_data is not guaranteed to return a value.  This adjusts particle IO to work
in some cases, specifically those where a ParticleIO handler is defined for a
frontend but not a 3D data object; I believe this is restricted exclusively to
Enzo and the Ellipsoid.
Affected #:  1 file

diff -r d0bc82a75da022a0185165fb8b72c444dc4da104 -r a1af53aa97b89abd94dc65933cad0423d48ccba5 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

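For context: the pattern adopted here -- call get_data() purely for its
side effect, then read the values back through indexing -- in a
self-contained toy (class and values hypothetical, not the yt
implementation):

    class Source(object):
        """Toy stand-in for a data object whose get_data() fills an
        internal cache and is not guaranteed to return anything."""
        def __init__(self):
            self._cache = {}
        def get_data(self, fields):
            for f in fields:
                self._cache[f] = [1.0, 2.0]   # pretend I/O happens here
        def __getitem__(self, field):
            return self._cache[field]

    src = Source()
    fields = ['particle_mass']
    src.get_data(fields)                      # side effect only
    rvs = [src[f] for f in fields]            # read back via indexing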

https://bitbucket.org/yt_analysis/yt-3.0/commits/bac915c08be4/
Changeset:   bac915c08be4
Branch:      yt
User:        MatthewTurk
Date:        2013-04-18 20:30:14
Summary:     Fixing field detection for in situ runs.
Affected #:  1 file

diff -r a1af53aa97b89abd94dc65933cad0423d48ccba5 -r bac915c08be4a85baea2425edad3bb3a62e91800 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -634,6 +634,24 @@
         else:
             self.derived_field_list = self.__class__._cached_derived_field_list
 
+    def _detect_fields(self):
+        self.field_list = []
+        # Do this only on the root processor to save disk work.
+        mylog.info("Gathering a field list (this may take a moment.)")
+        field_list = set()
+        random_sample = self._generate_random_grids()
+        for grid in random_sample:
+            try:
+                gf = self.io._read_field_names(grid)
+            except self.io._read_exception:
+                mylog.debug("Grid %s is a bit funky?", grid.id)
+                continue
+            mylog.debug("Grid %s has: %s", grid.id, gf)
+            field_list = field_list.union(gf)
+        field_list = self.comm.par_combine_object(list(field_list),
+                        datatype="list", op = "cat")
+        self.field_list = list(set(field_list))
+
     def _generate_random_grids(self):
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]

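For context: _detect_fields samples a few grids, unions whatever field
names each one yields, and skips grids that fail to read; the real code
then combines the lists across MPI ranks. A minimal standalone form
(toy reader and hypothetical field names):

    import random

    def detect_fields(grids, read_field_names, samples=5):
        """Union field names over a random sample of grids, skipping
        any grid whose read fails."""
        field_list = set()
        for grid in random.sample(grids, min(samples, len(grids))):
            try:
                names = read_field_names(grid)
            except IOError:
                continue              # a "funky" grid; skip it
            field_list |= set(names)
        return sorted(field_list)

    grids = [['Density', 'Temperature'], ['Density'], []]
    assert 'Density' in detect_fields(grids, lambda g: g)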

https://bitbucket.org/yt_analysis/yt-3.0/commits/54de3b5fda5d/
Changeset:   54de3b5fda5d
Branch:      yt
User:        MatthewTurk
Date:        2013-04-18 20:31:13
Summary:     Fixing particle_density's display name.
Affected #:  1 file

diff -r bac915c08be4a85baea2425edad3bb3a62e91800 -r 54de3b5fda5d8a9f074cd672ca2a6e066b9f77fc yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -998,7 +998,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
+          display_name=r"\mathrm{Particle}\/\mathrm{Density}")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in


https://bitbucket.org/yt_analysis/yt-3.0/commits/fccec7a78458/
Changeset:   fccec7a78458
Branch:      yt
User:        scopatz
Date:        2013-04-22 22:11:35
Summary:     re-added improved PWViewerMPL.show_or_save()
Affected #:  1 file

diff -r 54de3b5fda5d8a9f074cd672ca2a6e066b9f77fc -r fccec7a78458ee2d4b65808bc20f6d86fd298fd4 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1033,6 +1033,14 @@
         else:
             raise YTNotInsideNotebook
 
+    def show_or_save(self, name=None, mpl_kwargs=None):
+        """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
+        plot will be saved to disk."""
+        try:
+            return self.show()
+        except YTNotInsideNotebook:
+            return self.save(name=name, mpl_kwargs=mpl_kwargs)
+
 class SlicePlot(PWViewerMPL):
     r"""Creates a slice plot from a parameter file
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/dd62805d9db9/
Changeset:   dd62805d9db9
Branch:      yt
User:        scopatz
Date:        2013-04-22 22:33:23
Summary:     show_or_save() -> display()
Affected #:  1 file

diff -r fccec7a78458ee2d4b65808bc20f6d86fd298fd4 -r dd62805d9db99dfc664e47ec2c1bee2d5365c8fc yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1033,7 +1033,7 @@
         else:
             raise YTNotInsideNotebook
 
-    def show_or_save(self, name=None, mpl_kwargs=None):
+    def display(self, name=None, mpl_kwargs=None):
         """Will attempt to show the plot in in an IPython notebook.  Failing that, the 
         plot will be saved to disk."""
         try:

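A hypothetical usage sketch of the renamed convenience method (dataset
path illustrative, not taken from the changeset):

    from yt.mods import load, SlicePlot

    pf = load('galaxy0030/galaxy0030')   # hypothetical dataset
    p = SlicePlot(pf, 'z', 'Density')
    p.display()   # shows inline in an IPython notebook; outside one,
                  # falls back to save() and writes the plot to disk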

https://bitbucket.org/yt_analysis/yt-3.0/commits/ba9d67fad65a/
Changeset:   ba9d67fad65a
Branch:      yt
User:        ngoldbaum
Date:        2013-04-26 21:57:53
Summary:     Making it possible to parse enzo parameters that contain '='
Affected #:  1 file

diff -r 1436dc68294375a27855276cedee619701af2f68 -r ba9d67fad65a642c0a0c6c621fee5fdf8139d765 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -770,7 +770,7 @@
         data_label_factors = {}
         for line in (l.strip() for l in lines):
             if len(line) < 2: continue
-            param, vals = (i.strip() for i in line.split("="))
+            param, vals = (i.strip() for i in line.split("=",1))
             # First we try to decipher what type of value it is.
             vals = vals.split()
             # Special case approaching.

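For context: split("=") on a line whose value itself contains '=' yields
three or more pieces, so the two-name unpack raises ValueError; the
maxsplit argument keeps everything after the first '='. A minimal
illustration (parameter line hypothetical):

    line = "SomeEnzoParameter = a = b"   # hypothetical parameter line
    # param, vals = (i.strip() for i in line.split("="))   # ValueError
    param, vals = (i.strip() for i in line.split("=", 1))
    assert param == "SomeEnzoParameter"
    assert vals == "a = b"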

https://bitbucket.org/yt_analysis/yt-3.0/commits/5b6b482fd29e/
Changeset:   5b6b482fd29e
Branch:      yt
User:        MatthewTurk
Date:        2013-04-26 22:15:48
Summary:     Merged in ngoldbaum/yt (pull request #484)

Making it possible to parse enzo parameters that contain '='
Affected #:  1 file

diff -r dd62805d9db99dfc664e47ec2c1bee2d5365c8fc -r 5b6b482fd29eac3ed397588d25ddb4ab0d8b7385 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -788,7 +788,7 @@
         data_label_factors = {}
         for line in (l.strip() for l in lines):
             if len(line) < 2: continue
-            param, vals = (i.strip() for i in line.split("="))
+            param, vals = (i.strip() for i in line.split("=",1))
             # First we try to decipher what type of value it is.
             vals = vals.split()
             # Special case approaching.


https://bitbucket.org/yt_analysis/yt-3.0/commits/0639ecad15ba/
Changeset:   0639ecad15ba
Branch:      yt
User:        scopatz
Date:        2013-04-29 21:39:38
Summary:     back-ported FLASH fields from yt-3.0
Affected #:  1 file

diff -r 5b6b482fd29eac3ed397588d25ddb4ab0d8b7385 -r 0639ecad15baece1e4588d45acafd62856ff3f46 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -5,7 +5,7 @@
 Affiliation: UCSD
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2010-2011 Matthew Turk, John ZuHone.  All Rights Reserved.
+  Copyright (C) 2010-2012 Matthew Turk, John ZuHone, Anthony Scopatz.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,6 +24,7 @@
 """
 
 import numpy as np
+from yt.utilities.exceptions import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -154,15 +155,41 @@
 add_flash_field("temp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("temp"),
                 units=r"\rm{K}")
+add_flash_field("tion", function=NullFunc, take_log=True,
+                units=r"\rm{K}")
 add_flash_field("tele", function=NullFunc, take_log=True,
                 convert_function=_get_convert("tele"),
                 units = r"\rm{K}")
+add_flash_field("trad", function=NullFunc, take_log=True,
+                units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
                 units=r"\rm{erg}/\rm{cm}^{3}")
+add_flash_field("pion", function=NullFunc, take_log=True,
+                display_name="Ion Pressure",
+                units=r"\rm{J}/\rm{cm}^3")
+add_flash_field("pele", function=NullFunc, take_log=True,
+                display_name="Electron Pressure, P_e",
+                units=r"\rm{J}/\rm{cm}^3")
+add_flash_field("prad", function=NullFunc, take_log=True,
+                display_name="Radiation Pressure",
+                units = r"\rm{J}/\rm{cm}^3")
+add_flash_field("eion", function=NullFunc, take_log=True,
+                display_name="Ion Internal Energy",
+                units=r"\rm{J}")
+add_flash_field("eele", function=NullFunc, take_log=True,
+                display_name="Electron Internal Energy",
+                units=r"\rm{J}")
+add_flash_field("erad", function=NullFunc, take_log=True,
+                display_name="Radiation Internal Energy",
+                units=r"\rm{J}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
                 units=r"\rm{g}/\rm{cm}^{3}")
+add_flash_field("depo", function=NullFunc, take_log=True,
+                units = r"\rm{ergs}/\rm{g}")
+add_flash_field("ye", function=NullFunc, take_log=True,
+                units = r"\rm{ergs}/\rm{g}")
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -192,6 +219,34 @@
                 units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
+add_flash_field("absr", function=NullFunc, take_log=False,
+                display_name="Absorption Coefficient")
+add_flash_field("emis", function=NullFunc, take_log=False,
+                display_name="Emissivity")
+add_flash_field("cond", function=NullFunc, take_log=False,
+                display_name="Conductivity")
+add_flash_field("dfcf", function=NullFunc, take_log=False,
+                display_name="Diffusion Equation Scalar")
+add_flash_field("fllm", function=NullFunc, take_log=False,
+                display_name="Flux Limit")
+add_flash_field("pipe", function=NullFunc, take_log=False,
+                display_name="P_i/P_e")
+add_flash_field("tite", function=NullFunc, take_log=False,
+                display_name="T_i/T_e")
+add_flash_field("dbgs", function=NullFunc, take_log=False,
+                display_name="Debug for Shocks")
+add_flash_field("cham", function=NullFunc, take_log=False,
+                display_name="Chamber Material Fraction")
+add_flash_field("targ", function=NullFunc, take_log=False,
+                display_name="Target Material Fraction")
+add_flash_field("sumy", function=NullFunc, take_log=False)
+add_flash_field("mgdc", function=NullFunc, take_log=False,
+                display_name="Emission Minus Absorption Diffusion Terms")
+
+for i in range(1, 1000):
+    add_flash_field("r{0:03}".format(i), function=NullFunc, take_log=False,
+        display_name="Energy Group {0}".format(i))
+
 
 for f,v in translation_dict.items():
     if v not in KnownFLASHFields:
@@ -300,3 +355,23 @@
           units=r"\rm{Gauss}\/\rm{cm}^{-1}")
 
 
+
+## Derived FLASH Fields
+def _nele(field, data):
+    return data['ye'] * data['dens'] * data['sumy'] * 6.022E23
+add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+
+def _nion(field, data):
+    return data['dens'] * data['sumy'] * 6.022E23
+add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
+
+
+def _abar(field, data):
+    return 1.0 / data['sumy']
+add_field('abar', function=_abar, take_log=False)
+
+
+def _velo(field, data):
+    return (data['velx']**2 + data['vely']**2 + data['velz']**2)**0.5
+add_field ('velo', function=_velo, take_log=True, units=r"\rm{cm}/\rm{s}")


https://bitbucket.org/yt_analysis/yt-3.0/commits/7605932bcd7a/
Changeset:   7605932bcd7a
Branch:      yt
User:        scopatz
Date:        2013-04-29 22:39:44
Summary:     fixed ye units
Affected #:  1 file

diff -r 0639ecad15baece1e4588d45acafd62856ff3f46 -r 7605932bcd7af866e8a64622bc2870926c5190f4 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -188,8 +188,7 @@
                 units=r"\rm{g}/\rm{cm}^{3}")
 add_flash_field("depo", function=NullFunc, take_log=True,
                 units = r"\rm{ergs}/\rm{g}")
-add_flash_field("ye", function=NullFunc, take_log=True,
-                units = r"\rm{ergs}/\rm{g}")
+add_flash_field("ye", function=NullFunc, take_log=True,)
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")


https://bitbucket.org/yt_analysis/yt-3.0/commits/15403b956871/
Changeset:   15403b956871
Branch:      yt
User:        scopatz
Date:        2013-04-29 23:08:53
Summary:     updated nele computation.
Affected #:  1 file

diff -r 7605932bcd7af866e8a64622bc2870926c5190f4 -r 15403b956871ebcf5c596548abb4a7491bf77305 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -357,7 +357,7 @@
 
 ## Derived FLASH Fields
 def _nele(field, data):
-    return data['ye'] * data['dens'] * data['sumy'] * 6.022E23
+    return data['dens'] * data['ye'] * 6.022E23
 add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
 add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/3879cc8a93df/
Changeset:   3879cc8a93df
Branch:      yt
User:        scopatz
Date:        2013-04-30 17:01:32
Summary:     updated pressure units to ergs
Affected #:  1 file

diff -r 15403b956871ebcf5c596548abb4a7491bf77305 -r 3879cc8a93df108d41af7267fe5fbff5c81c7967 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -167,22 +167,22 @@
                 units=r"\rm{erg}/\rm{cm}^{3}")
 add_flash_field("pion", function=NullFunc, take_log=True,
                 display_name="Ion Pressure",
-                units=r"\rm{J}/\rm{cm}^3")
+                units=r"\rm{erg}/\rm{cm}^3")
 add_flash_field("pele", function=NullFunc, take_log=True,
                 display_name="Electron Pressure, P_e",
-                units=r"\rm{J}/\rm{cm}^3")
+                units=r"\rm{erg}/\rm{cm}^3")
 add_flash_field("prad", function=NullFunc, take_log=True,
                 display_name="Radiation Pressure",
-                units = r"\rm{J}/\rm{cm}^3")
+                units = r"\rm{erg}/\rm{cm}^3")
 add_flash_field("eion", function=NullFunc, take_log=True,
                 display_name="Ion Internal Energy",
-                units=r"\rm{J}")
+                units=r"\rm{erg}")
 add_flash_field("eele", function=NullFunc, take_log=True,
                 display_name="Electron Internal Energy",
-                units=r"\rm{J}")
+                units=r"\rm{erg}")
 add_flash_field("erad", function=NullFunc, take_log=True,
                 display_name="Radiation Internal Energy",
-                units=r"\rm{J}")
+                units=r"\rm{erg}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
                 units=r"\rm{g}/\rm{cm}^{3}")


https://bitbucket.org/yt_analysis/yt-3.0/commits/532858bcf68d/
Changeset:   532858bcf68d
Branch:      yt
User:        scopatz
Date:        2013-04-30 19:53:07
Summary:     removed velo
Affected #:  1 file

diff -r 3879cc8a93df108d41af7267fe5fbff5c81c7967 -r 532858bcf68d9cba1d77bafc80ff472cc5a6bf09 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,6 +64,7 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
+                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 
@@ -365,12 +366,6 @@
     return data['dens'] * data['sumy'] * 6.022E23
 add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
 
-
 def _abar(field, data):
     return 1.0 / data['sumy']
 add_field('abar', function=_abar, take_log=False)
-
-
-def _velo(field, data):
-    return (data['velx']**2 + data['vely']**2 + data['velz']**2)**0.5
-add_field ('velo', function=_velo, take_log=True, units=r"\rm{cm}/\rm{s}")


https://bitbucket.org/yt_analysis/yt-3.0/commits/aa91cc1e668b/
Changeset:   aa91cc1e668b
Branch:      yt
User:        scopatz
Date:        2013-04-30 20:54:00
Summary:     added Na
Affected #:  2 files

diff -r 532858bcf68d9cba1d77bafc80ff472cc5a6bf09 -r aa91cc1e668b20685bab877e42e5f8ed61292d99 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -37,7 +37,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz, mh
+    kboltz, mh, Na
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -358,12 +358,12 @@
 
 ## Derived FLASH Fields
 def _nele(field, data):
-    return data['dens'] * data['ye'] * 6.022E23
+    return data['dens'] * data['ye'] * Na
 add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
 add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
 
 def _nion(field, data):
-    return data['dens'] * data['sumy'] * 6.022E23
+    return data['dens'] * data['sumy'] * Na
 add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
 
 def _abar(field, data):

diff -r 532858bcf68d9cba1d77bafc80ff472cc5a6bf09 -r aa91cc1e668b20685bab877e42e5f8ed61292d99 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -85,3 +85,4 @@
 kboltz = boltzmann_constant_cgs
 hcgs = planck_constant_cgs
 sigma_thompson = cross_section_thompson_cgs
+Na = 1 / amu_cgs

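For context: with 'ye' stored as free electrons per atomic mass unit,
the electron number density is n_e = rho * Ye / m_u, so the literal
6.022E23 is exactly 1/amu in cgs. A worked sketch (density and Ye
values hypothetical):

    amu_cgs = 1.660538921e-24   # g, atomic mass unit
    Na = 1.0 / amu_cgs          # ~6.022e23 per gram
    rho = 1.0                   # g/cm^3, hypothetical
    ye = 0.5                    # free electrons per amu, hypothetical
    nele = rho * ye * Na        # electrons per cm^3, ~3.0e23 here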

https://bitbucket.org/yt_analysis/yt-3.0/commits/be0a96c06b28/
Changeset:   be0a96c06b28
Branch:      yt
User:        ngoldbaum
Date:        2013-04-29 03:15:30
Summary:     Adding a data_source keyword for ProjectionPlot
Affected #:  2 files

diff -r ba9d67fad65a642c0a0c6c621fee5fdf8139d765 -r be0a96c06b2809151aaf2ebede1873b46dc98670 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1221,6 +1221,9 @@
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
 
+    data_source : AMR3DData Object
+         Object to be used for data selection.  Defaults to a region covering the
+         entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1245,7 +1248,7 @@
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window', fontsize=18,
-                 field_parameters=None):
+                 field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1255,7 +1258,7 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(axis, fields, weight_field=weight_field, max_level=max_level,
-                         center=center, **field_parameters)
+                         center=center, source=data_source, **field_parameters)
         PWViewerMPL.__init__(self, proj, bounds, origin=origin, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 

diff -r ba9d67fad65a642c0a0c6c621fee5fdf8139d765 -r be0a96c06b2809151aaf2ebede1873b46dc98670 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -160,6 +160,8 @@
     test_flnms = [None, 'test.png', 'test.eps',
                   'test.ps', 'test.pdf']
 
+    ds_region = test_pf.h.region([0.5]*3,[0.4]*3,[0.6]*3)
+
     for dim in [0, 1, 2]:
         obj = SlicePlot(test_pf, dim, 'Density')
         for fname in test_flnms:
@@ -169,6 +171,10 @@
         obj = ProjectionPlot(test_pf, dim, 'Density')
         for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
+        # Test ProjectionPlot's data_source keyword
+        obj = ProjectionPlot(test_pf, dim, 'Density',
+                             data_source=ds_region)
+        obj.save()
 
     obj = OffAxisSlicePlot(test_pf, normal, 'Density')
     for fname in test_flnms:

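A hypothetical usage sketch of the new keyword, mirroring the region
used in the test above (dataset path illustrative):

    from yt.mods import load, ProjectionPlot

    pf = load('galaxy0030/galaxy0030')   # hypothetical dataset
    # Project only a sub-volume instead of the whole domain:
    region = pf.h.region([0.5]*3, [0.4]*3, [0.6]*3)  # center, left, right
    p = ProjectionPlot(pf, 'x', 'Density', data_source=region)
    p.save()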

https://bitbucket.org/yt_analysis/yt-3.0/commits/8da80188ed68/
Changeset:   8da80188ed68
Branch:      yt
User:        MatthewTurk
Date:        2013-05-01 05:41:56
Summary:     Merged in ngoldbaum/yt (pull request #485)

Adding a data_source keyword for ProjectionPlot
Affected #:  2 files

diff -r aa91cc1e668b20685bab877e42e5f8ed61292d99 -r 8da80188ed6862e0bf9e04dab41b726bb7df8b97 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1229,6 +1229,9 @@
          ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
          ==================================     ============================
 
+    data_source : AMR3DData Object
+         Object to be used for data selection.  Defaults to a region covering the
+         entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
     max_level: int
@@ -1253,7 +1256,7 @@
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
                  weight_field=None, max_level=None, origin='center-window', fontsize=18,
-                 field_parameters=None):
+                 field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
         pf = self.pf = ts[0]
@@ -1263,7 +1266,7 @@
             axes_unit = units
         if field_parameters is None: field_parameters = {}
         proj = pf.h.proj(axis, fields, weight_field=weight_field, max_level=max_level,
-                         center=center, **field_parameters)
+                         center=center, source=data_source, **field_parameters)
         PWViewerMPL.__init__(self, proj, bounds, origin=origin, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 

diff -r aa91cc1e668b20685bab877e42e5f8ed61292d99 -r 8da80188ed6862e0bf9e04dab41b726bb7df8b97 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -160,6 +160,8 @@
     test_flnms = [None, 'test.png', 'test.eps',
                   'test.ps', 'test.pdf']
 
+    ds_region = test_pf.h.region([0.5]*3,[0.4]*3,[0.6]*3)
+
     for dim in [0, 1, 2]:
         obj = SlicePlot(test_pf, dim, 'Density')
         for fname in test_flnms:
@@ -169,6 +171,10 @@
         obj = ProjectionPlot(test_pf, dim, 'Density')
         for fname in test_flnms:
             yield assert_equal, assert_fname(obj.save(fname)[0]), True
+        # Test ProjectionPlot's data_source keyword
+        obj = ProjectionPlot(test_pf, dim, 'Density',
+                             data_source=ds_region)
+        obj.save()
 
     obj = OffAxisSlicePlot(test_pf, normal, 'Density')
     for fname in test_flnms:


https://bitbucket.org/yt_analysis/yt-3.0/commits/ab3828c6bcf3/
Changeset:   ab3828c6bcf3
Branch:      yt
User:        MatthewTurk
Date:        2013-04-23 17:30:16
Summary:     Castro frontend brought slightly up to date.
Affected #:  2 files

diff -r 54de3b5fda5d8a9f074cd672ca2a6e066b9f77fc -r ab3828c6bcf3f9d07e6fe4eef1a17c9b2b613cdf yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -60,7 +60,7 @@
 
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
                  dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(self, index, **kwargs)
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia  # TODO: Factor this behavior out in tests
@@ -72,7 +72,7 @@
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
-        self.level = level
+        self.Level = level
 
     def get_global_startindex(self):
         return self.start_index
@@ -115,8 +115,6 @@
     grid = CastroGrid
 
     def __init__(self, pf, data_style='castro_native'):
-        super(CastroHierarchy, self).__init__(self, pf, self.data_style)
-
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
@@ -128,6 +126,8 @@
                                 self.parameter_file.paranoid_read) 
         self.read_particle_header()
         self._cache_endianness(self.levels[-1].grids[-1])
+
+        super(CastroHierarchy, self).__init__(pf, data_style)
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
@@ -181,7 +181,7 @@
         counter += 1
 
         self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
@@ -424,21 +424,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-
-            available = np.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -521,15 +506,15 @@
          * ASCII (not implemented in yt)
 
         """
-        super(CastroStaticOutput, self).__init__(self, plotname.rstrip("/"),
-                                                 data_style='castro_native')
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
         self.fparameter_filename = fparamFilename
         self.__ipfn = paramFilename
+        self.fparameters = {}
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style='castro_native')
 
-        self.fparameters = {}
 
         # These should maybe not be hardcoded?
         ### TODO: this.
@@ -618,6 +603,7 @@
                 self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
+            self.fparameters[param] = vals
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
@@ -655,8 +641,11 @@
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0:
-                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                if vals.count("'") == 0 and vals.count("\"") == 0:
+                    try:
+                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    except ValueError:
+                        print "Failed on line", line
                 else:
                     t = vals.split()
                 if len(t) == 1:

diff -r 54de3b5fda5d8a9f074cd672ca2a6e066b9f77fc -r ab3828c6bcf3f9d07e6fe4eef1a17c9b2b613cdf yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -100,10 +100,10 @@
             if (gridSize != grid.ActiveDimensions).any():
                 print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
                 error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
+            if bytesPerReal != grid.hierarchy._bytes_per_real:
                 print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
                 error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
+            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
                 print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
                 error_count += 1
 
@@ -114,7 +114,7 @@
             start = grid.start_index
             stop = grid.stop_index
             dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
+            bytesPerReal = grid.hierarchy._bytes_per_real
 
         nElements = grid.ActiveDimensions.prod()
 

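For context: the recurring fix in this changeset is the doubled-self
bug -- super(Cls, self).__init__ is already a bound call, so passing
self again shifts every argument one slot to the right, which either
raises TypeError or silently binds the instance to the first parameter.
A self-contained illustration:

    class Base(object):
        def __init__(self, index):
            self.index = index

    class Grid(Base):
        def __init__(self, index):
            # Wrong: super(Grid, self).__init__(self, index)
            #        -> TypeError: __init__() takes 2 arguments (3 given)
            super(Grid, self).__init__(index)

    g = Grid(7)
    assert g.index == 7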

https://bitbucket.org/yt_analysis/yt-3.0/commits/c5ca570658c6/
Changeset:   c5ca570658c6
Branch:      yt
User:        MatthewTurk
Date:        2013-05-01 05:42:32
Summary:     Merged in MatthewTurk/yt (pull request #483)

Castro frontend brought slightly up to date.
Affected #:  2 files

diff -r 8da80188ed6862e0bf9e04dab41b726bb7df8b97 -r c5ca570658c61817278dcd5047e1f532c1043ac2 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -60,7 +60,7 @@
 
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
                  dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(self, index, **kwargs)
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia  # TODO: Factor this behavior out in tests
@@ -72,7 +72,7 @@
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
-        self.level = level
+        self.Level = level
 
     def get_global_startindex(self):
         return self.start_index
@@ -115,8 +115,6 @@
     grid = CastroGrid
 
     def __init__(self, pf, data_style='castro_native'):
-        super(CastroHierarchy, self).__init__(self, pf, self.data_style)
-
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
@@ -128,6 +126,8 @@
                                 self.parameter_file.paranoid_read) 
         self.read_particle_header()
         self._cache_endianness(self.levels[-1].grids[-1])
+
+        super(CastroHierarchy, self).__init__(pf, data_style)
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
@@ -181,7 +181,7 @@
         counter += 1
 
         self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
@@ -424,21 +424,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-
-            available = np.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -521,15 +506,15 @@
          * ASCII (not implemented in yt)
 
         """
-        super(CastroStaticOutput, self).__init__(self, plotname.rstrip("/"),
-                                                 data_style='castro_native')
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
         self.fparameter_filename = fparamFilename
         self.__ipfn = paramFilename
+        self.fparameters = {}
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style='castro_native')
 
-        self.fparameters = {}
 
         # These should maybe not be hardcoded?
         ### TODO: this.
@@ -618,6 +603,7 @@
                 self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
+            self.fparameters[param] = vals
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
@@ -655,8 +641,11 @@
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0:
-                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                if vals.count("'") == 0 and vals.count("\"") == 0:
+                    try:
+                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    except ValueError:
+                        print "Failed on line", line
                 else:
                     t = vals.split()
                 if len(t) == 1:
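
The first data_structures.py hunk in this changeset fixes a classic super() slip: `super(CastroGrid, self).__init__` is already bound to the instance, so passing `self` again shifts every positional argument by one (the base class received the grid object where it expected an index). A toy reproduction, not yt code:

    class Base(object):
        def __init__(self, index):
            self.index = index

    class Grid(Base):
        def __init__(self, index):
            # Wrong: super(Grid, self).__init__(self, index)  -> TypeError here,
            # or a silent one-slot shift if the base takes more parameters.
            super(Grid, self).__init__(index)

    g = Grid(7)
    assert g.index == 7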

diff -r 8da80188ed6862e0bf9e04dab41b726bb7df8b97 -r c5ca570658c61817278dcd5047e1f532c1043ac2 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -100,10 +100,10 @@
             if (gridSize != grid.ActiveDimensions).any():
                 print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
                 error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
+            if bytesPerReal != grid.hierarchy._bytes_per_real:
                 print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
                 error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
+            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
                 print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
                 error_count += 1
 
@@ -114,7 +114,7 @@
             start = grid.start_index
             stop = grid.stop_index
             dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
+            bytesPerReal = grid.hierarchy._bytes_per_real
 
         nElements = grid.ActiveDimensions.prod()
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/8e9bfe3b8eb3/
Changeset:   8e9bfe3b8eb3
Branch:      yt
User:        sskory
Date:        2013-05-02 22:24:27
Summary:     This fixes the redshift output for Rockstar Halos.
Affected #:  1 file

diff -r c5ca570658c61817278dcd5047e1f532c1043ac2 -r 8e9bfe3b8eb3fa20c3bc0d2ac6f6274e82b4ea99 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
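
The one-line change caches the dataset's redshift on the halo list at construction time, so output code can report it without reaching back into the parameter file. A toy stand-in (not yt code) showing the pattern:

    class RockstarHaloListSketch(object):
        def __init__(self, pf):
            self.pf = pf
            self.redshift = pf.current_redshift  # cached once, used at write time

    class FakePF(object):
        current_redshift = 0.5

    assert RockstarHaloListSketch(FakePF()).redshift == 0.5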


https://bitbucket.org/yt_analysis/yt-3.0/commits/9586a3112cd4/
Changeset:   9586a3112cd4
Branch:      yt
User:        atmyers
Date:        2013-05-07 02:04:01
Summary:     Make the Pluto IOHandler distinct from the Chombo one
Affected #:  2 files

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 9586a3112cd440027b1f353c59e95dd638ea5676 yt/frontends/pluto/api.py
--- a/yt/frontends/pluto/api.py
+++ b/yt/frontends/pluto/api.py
@@ -38,4 +38,4 @@
       add_pluto_field
 
 from .io import \
-      IOHandlerChomboHDF5
+      IOHandlerPlutoHDF5

diff -r 34b95297062b9f6dedf50d8a127e94ba1ec8e278 -r 9586a3112cd440027b1f353c59e95dd638ea5676 yt/frontends/pluto/io.py
--- a/yt/frontends/pluto/io.py
+++ b/yt/frontends/pluto/io.py
@@ -31,8 +31,8 @@
 from yt.utilities.io_handler import \
            BaseIOHandler
 
-class IOHandlerChomboHDF5(BaseIOHandler):
-    _data_style = "chombo_hdf5"
+class IOHandlerPlutoHDF5(BaseIOHandler):
+    _data_style = "pluto_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
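
Why the rename matters: IO handler subclasses are dispatched on their `_data_style` string, so Pluto reusing the literal "chombo_hdf5" key would collide with the Chombo frontend's handler. A toy registry (an assumption-level sketch of the mechanism, not yt internals), in Python 2 metaclass style to match the codebase:

    io_registry = {}

    class BaseIO(object):
        class __metaclass__(type):
            def __init__(cls, name, bases, d):
                type.__init__(cls, name, bases, d)
                if d.get("_data_style"):
                    io_registry[d["_data_style"]] = cls

    class IOHandlerChomboHDF5(BaseIO):
        _data_style = "chombo_hdf5"

    class IOHandlerPlutoHDF5(BaseIO):
        _data_style = "pluto_hdf5"   # its own key; sharing one would clobber

    assert len(io_registry) == 2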
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/a5a04b9207b1/
Changeset:   a5a04b9207b1
Branch:      yt
User:        atmyers
Date:        2013-05-07 02:07:17
Summary:     Merged yt_analysis/yt into yt
Affected #:  36 files

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -170,6 +170,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -424,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -706,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -714,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
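
For reference, the echo'd lines earlier in this diff write GNU-coreutils-style "<hexdigest>  <filename>" checksum files used to validate each downloaded tarball. The same check in Python, as a sketch:

    import hashlib

    def verify_sha512(tarball, sha_file):
        # sha_file holds one "<hexdigest>  <filename>" line, as written above.
        expected = open(sha_file).read().split()[0]
        h = hashlib.sha512()
        with open(tarball, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected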

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,9 @@
 from distutils.core import Command
 from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -41,9 +44,7 @@
     def run(self):
 
         """runner"""
-        Forthon_exe = find_executable("Forthon")
-        gfortran_exe = find_executable("gfortran")
-
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
         if None in (Forthon_exe, gfortran_exe):
             sys.stderr.write(
                 "fKDpy.so won't be built due to missing Forthon/gfortran\n"
@@ -154,7 +155,7 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
@@ -193,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):
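
The setup.py change factors the PATH probing into find_fortran_deps() so both BuildForthon and my_install_data make the same decision: if either executable is missing, fKDpy.so is neither built nor registered as a data file (previously install_data appended it unconditionally). The factored pattern in isolation:

    from distutils.spawn import find_executable

    def find_fortran_deps():
        return (find_executable("Forthon"), find_executable("gfortran"))

    Forthon_exe, gfortran_exe = find_fortran_deps()
    if None in (Forthon_exe, gfortran_exe):
        # Both commands now skip the Fortran artifact cleanly.
        print("fKDpy.so won't be built/installed: Forthon or gfortran missing")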

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True
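
The added guard matters because derived fields are evaluated on every grid, including grids that hold no particles; returning early avoids touching particle arrays that are absent or zero-length there. A toy version (field name illustrative, not the yt definition):

    def _particle_count(field, data):
        # Empty grids must short-circuit before any particle array is read.
        if data.NumberOfParticles == 0:
            return 0
        return data["particle_position_x"].size

    class EmptyGrid(object):
        NumberOfParticles = 0

    assert _particle_count(None, EmptyGrid()) == 0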

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,8 @@
 import itertools
 import shelve
 import cStringIO
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -3585,12 +3587,12 @@
         given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3610,87 +3612,64 @@
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
@@ -4374,6 +4353,230 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extensions included - both obj & mtl files 
+            are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sample and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If none, then only 1 plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if color_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indicies for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -4606,22 +4809,46 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))
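
The ReconstructedObject tuple subclass is the load-bearing trick in this pickling rework: old pickles still see an ordinary 2-tuple of (pf, obj), while _check_nested_args can recognize an already-reconstructed pair by type and unwrap it when its parameter file matches. The trick in isolation:

    class ReconstructedObject(tuple):
        pass

    pair = ReconstructedObject(("pf", "reconstructed data object"))
    assert isinstance(pair, tuple)                # old consumers still unpack it
    assert isinstance(pair, ReconstructedObject)  # new consumers branch on type
    pf, obj = pair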

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
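
The particle_io.py change decouples the return value from get_data(): the source is asked to populate its fields as a side effect, then the values are read back by indexing, which works whether or not get_data returns anything. The pattern with a toy container (not yt code):

    class SourceSketch(object):
        def __init__(self):
            self.field_data = {}
        def get_data(self, fields, **kwargs):
            for f in fields:
                self.field_data[f] = [1.0, 2.0, 3.0]  # populate, return nothing
        def __getitem__(self, f):
            return self.field_data[f]

    src = SourceSketch()
    fields = ["particle_mass"]
    src.get_data(fields, force_particle_read=True)
    rvs = [src[f] for f in fields]   # read the values back by indexing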
 

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/data_objects/tests/test_pickle.py
--- /dev/null
+++ b/yt/data_objects/tests/test_pickle.py
@@ -0,0 +1,69 @@
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import cPickle
+import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_save_load_pickle():
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
+
+    # create extracted region from boolean (fairly complex object)
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
+
+    minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
+
+    # save object
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
+    # load object
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
+
+    assert_equal.description = \
+        "%s: File was pickle-loaded succesfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
+    yield assert_equal, len(contours[1][0]), len(test_load)
+
+    os.remove(cpklfile.name)

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -801,8 +801,9 @@
     for i, ax in enumerate('xyz'):
         np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
         if data.pf.periodicity[i] == True:
-            np.subtract(DW[i], r, rdw)
             np.abs(r, r)
+            np.subtract(r, DW[i], rdw)
+            np.abs(rdw, rdw)
             np.minimum(r, rdw, r)
         np.power(r, 2.0, r)
         np.add(radius, r, radius)
@@ -997,7 +998,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
+          display_name=r"\mathrm{Particle}\/\mathrm{Density}")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
@@ -1033,8 +1034,8 @@
     return data['MagneticEnergy']
 add_field("MagneticPressure",
           function=_MagneticPressure,
-          display_name=r"\rm{Magnetic}\/\rm{Energy}",
-          units="\rm{ergs}\/\rm{cm}^{-3}")
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _BPoloidal(field,data):
     normal = data.get_field_parameter("normal")

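The reordering in the radius computation matters for the minimum-image (periodic) distance: the wrap-around candidate has to be computed from |r|, not from the signed offset, or separations near -DW come out unwrapped. A small NumPy check (illustrative only):

    import numpy as np

    def periodic_sep(x, center, width):
        # Fixed ordering, as in the new code above.
        r = np.abs(x - center)
        rdw = np.abs(r - width)
        return np.minimum(r, rdw)

    # Domain width 1.0; a signed offset of -0.9 should wrap to 0.1.
    # The old ordering (width minus the *signed* offset) returned 0.9 here.
    print periodic_sep(np.array([0.05]), 0.95, 1.0)   # [ 0.1]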
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -53,7 +53,7 @@
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
-        df = hierarchy.storage_filename
+        df = hierarchy.parameter_file.filename[4:-4]
         if 'id0' not in hierarchy.parameter_file.filename:
             gname = hierarchy.parameter_file.filename
         else:
@@ -119,12 +119,13 @@
 
     grid = AthenaGrid
     _data_style='athena'
+    _data_file = None
     
     def __init__(self, pf, data_style='athena'):
         self.parameter_file = weakref.proxy(pf)
+        self.directory = os.path.dirname(self.parameter_file.filename)
         self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
-        self.storage_filename = self.parameter_file.storage_filename
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
@@ -132,9 +133,6 @@
 
         self._fhandle.close()
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         field_map = {}
         f = open(self.hierarchy_filename,'rb')
@@ -337,12 +335,14 @@
     _data_style = "athena"
 
     def __init__(self, filename, data_style='athena',
-                 storage_filename = None, parameters = {}):
+                 storage_filename=None, parameters={}):
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
-        self.storage_filename = filename[4:-4]
-        
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+
         # Unfortunately we now have to mandate that the hierarchy gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
@@ -402,7 +402,7 @@
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
-        self.unique_identifier = self._handle.__hash__()
+        self.unique_identifier = self.parameter_filename.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -60,7 +60,7 @@
 
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
                  dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(self, index, **kwargs)
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia  # TODO: Factor this behavior out in tests
@@ -72,7 +72,7 @@
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
-        self.level = level
+        self.Level = level
 
     def get_global_startindex(self):
         return self.start_index
@@ -115,8 +115,6 @@
     grid = CastroGrid
 
     def __init__(self, pf, data_style='castro_native'):
-        super(CastroHierarchy, self).__init__(self, pf, self.data_style)
-
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
@@ -128,6 +126,8 @@
                                 self.parameter_file.paranoid_read) 
         self.read_particle_header()
         self._cache_endianness(self.levels[-1].grids[-1])
+
+        super(CastroHierarchy, self).__init__(pf, data_style)
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
@@ -181,7 +181,7 @@
         counter += 1
 
         self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
@@ -424,21 +424,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-
-            available = np.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -521,15 +506,15 @@
          * ASCII (not implemented in yt)
 
         """
-        super(CastroStaticOutput, self).__init__(self, plotname.rstrip("/"),
-                                                 data_style='castro_native')
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
         self.fparameter_filename = fparamFilename
         self.__ipfn = paramFilename
+        self.fparameters = {}
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style='castro_native')
 
-        self.fparameters = {}
 
         # These should maybe not be hardcoded?
         ### TODO: this.
@@ -618,6 +603,7 @@
                 self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
+            self.fparameters[param] = vals
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
@@ -655,8 +641,11 @@
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0:
-                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                if vals.count("'") == 0 and vals.count("\"") == 0:
+                    try:
+                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    except ValueError:
+                        print "Failed on line", line
                 else:
                     t = vals.split()
                 if len(t) == 1:

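Several of the Castro fixes remove a spurious extra self passed through super(): super(Cls, self) already binds self, so passing it again shifts every positional argument by one. A minimal reproduction with illustrative classes (not yt's):

    class Base(object):
        def __init__(self, index):
            self.index = index

    class Grid(Base):
        def __init__(self, index):
            # Old, buggy form -- self is passed twice:
            #   super(Grid, self).__init__(self, index)   # TypeError
            super(Grid, self).__init__(index)             # fixed form

    g = Grid(7)
    print g.index   # 7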
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -100,10 +100,10 @@
             if (gridSize != grid.ActiveDimensions).any():
                 print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
                 error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
+            if bytesPerReal != grid.hierarchy._bytes_per_real:
                 print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
                 error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
+            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
                 print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
                 error_count += 1
 
@@ -114,7 +114,7 @@
             start = grid.start_index
             stop = grid.stop_index
             dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
+            bytesPerReal = grid.hierarchy._bytes_per_real
 
         nElements = grid.ActiveDimensions.prod()
 

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -634,6 +634,24 @@
         else:
             self.derived_field_list = self.__class__._cached_derived_field_list
 
+    def _detect_fields(self):
+        self.field_list = []
+        # Do this only on the root processor to save disk work.
+        mylog.info("Gathering a field list (this may take a moment.)")
+        field_list = set()
+        random_sample = self._generate_random_grids()
+        for grid in random_sample:
+            try:
+                gf = self.io._read_field_names(grid)
+            except self.io._read_exception:
+                mylog.debug("Grid %s is a bit funky?", grid.id)
+                continue
+            mylog.debug("Grid %s has: %s", grid.id, gf)
+            field_list = field_list.union(gf)
+        field_list = self.comm.par_combine_object(list(field_list),
+                        datatype="list", op = "cat")
+        self.field_list = list(set(field_list))
+
     def _generate_random_grids(self):
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
@@ -770,7 +788,7 @@
         data_label_factors = {}
         for line in (l.strip() for l in lines):
             if len(line) < 2: continue
-            param, vals = (i.strip() for i in line.split("="))
+            param, vals = (i.strip() for i in line.split("=",1))
             # First we try to decipher what type of value it is.
             vals = vals.split()
             # Special case approaching.

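The split("=", 1) change keeps parameter values that themselves contain an '=' intact; an unlimited split would yield three pieces and break the two-way unpack. A contrived example:

    line = "SomeParameter = a=b"      # contrived value containing '='
    # param, vals = (i.strip() for i in line.split("="))    # ValueError
    param, vals = (i.strip() for i in line.split("=", 1))
    print param, "->", vals            # SomeParameter -> a=b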
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -130,14 +130,21 @@
 def _ThermalEnergy(field, data):
     if data.pf["HydroMethod"] == 2:
         return data["TotalEnergy"]
-    else:
-        if data.pf["DualEnergyFormalism"]:
-            return data["GasEnergy"]
-        else:
-            return data["TotalEnergy"] - 0.5*(
-                   data["x-velocity"]**2.0
-                 + data["y-velocity"]**2.0
-                 + data["z-velocity"]**2.0 )
+    
+    if data.pf["DualEnergyFormalism"]:
+        return data["GasEnergy"]
+
+    if data.pf["HydroMethod"] in (4,6):
+        return data["TotalEnergy"] - 0.5*(
+            data["x-velocity"]**2.0
+            + data["y-velocity"]**2.0
+            + data["z-velocity"]**2.0 ) \
+            - data["MagneticEnergy"]/data["Density"]
+
+    return data["TotalEnergy"] - 0.5*(
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
 
@@ -171,22 +178,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -369,7 +376,7 @@
         if not filter.any(): return blank
         num = filter.sum()
     else:
-        filter = None
+        filter = Ellipsis
         num = data["particle_position_x"].size
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),

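The switch from None to Ellipsis as the "no filter" sentinel matters because NumPy treats the two differently as indices: arr[None] inserts a new axis, while arr[Ellipsis] returns the array unchanged, which is the shape the CIC deposit call expects. Quick check:

    import numpy as np

    x = np.arange(4, dtype=np.float64)
    print x[None].shape       # (1, 4) -- None adds an axis
    print x[Ellipsis].shape   # (4,)   -- Ellipsis is a no-op selection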
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -5,7 +5,7 @@
 Affiliation: UCSD
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2010-2011 Matthew Turk, John ZuHone.  All Rights Reserved.
+  Copyright (C) 2010-2012 Matthew Turk, John ZuHone, Anthony Scopatz.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,6 +24,7 @@
 """
 
 import numpy as np
+from yt.utilities.exceptions import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -36,7 +37,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz, mh
+    kboltz, mh, Na
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -63,6 +64,7 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
+                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 
@@ -154,15 +156,40 @@
 add_flash_field("temp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("temp"),
                 units=r"\rm{K}")
+add_flash_field("tion", function=NullFunc, take_log=True,
+                units=r"\rm{K}")
 add_flash_field("tele", function=NullFunc, take_log=True,
                 convert_function=_get_convert("tele"),
                 units = r"\rm{K}")
+add_flash_field("trad", function=NullFunc, take_log=True,
+                units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
                 units=r"\rm{erg}/\rm{cm}^{3}")
+add_flash_field("pion", function=NullFunc, take_log=True,
+                display_name="Ion Pressure",
+                units=r"\rm{erg}/\rm{cm}^3")
+add_flash_field("pele", function=NullFunc, take_log=True,
+                display_name="Electron Pressure, P_e",
+                units=r"\rm{erg}/\rm{cm}^3")
+add_flash_field("prad", function=NullFunc, take_log=True,
+                display_name="Radiation Pressure",
+                units = r"\rm{erg}/\rm{cm}^3")
+add_flash_field("eion", function=NullFunc, take_log=True,
+                display_name="Ion Internal Energy",
+                units=r"\rm{erg}")
+add_flash_field("eele", function=NullFunc, take_log=True,
+                display_name="Electron Internal Energy",
+                units=r"\rm{erg}")
+add_flash_field("erad", function=NullFunc, take_log=True,
+                display_name="Radiation Internal Energy",
+                units=r"\rm{erg}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
                 units=r"\rm{g}/\rm{cm}^{3}")
+add_flash_field("depo", function=NullFunc, take_log=True,
+                units = r"\rm{ergs}/\rm{g}")
+add_flash_field("ye", function=NullFunc, take_log=True,)
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -192,6 +219,34 @@
                 units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
+add_flash_field("absr", function=NullFunc, take_log=False,
+                display_name="Absorption Coefficient")
+add_flash_field("emis", function=NullFunc, take_log=False,
+                display_name="Emissivity")
+add_flash_field("cond", function=NullFunc, take_log=False,
+                display_name="Conductivity")
+add_flash_field("dfcf", function=NullFunc, take_log=False,
+                display_name="Diffusion Equation Scalar")
+add_flash_field("fllm", function=NullFunc, take_log=False,
+                display_name="Flux Limit")
+add_flash_field("pipe", function=NullFunc, take_log=False,
+                display_name="P_i/P_e")
+add_flash_field("tite", function=NullFunc, take_log=False,
+                display_name="T_i/T_e")
+add_flash_field("dbgs", function=NullFunc, take_log=False,
+                display_name="Debug for Shocks")
+add_flash_field("cham", function=NullFunc, take_log=False,
+                display_name="Chamber Material Fraction")
+add_flash_field("targ", function=NullFunc, take_log=False,
+                display_name="Target Material Fraction")
+add_flash_field("sumy", function=NullFunc, take_log=False)
+add_flash_field("mgdc", function=NullFunc, take_log=False,
+                display_name="Emission Minus Absorption Diffusion Terms")
+
+for i in range(1, 1000):
+    add_flash_field("r{0:03}".format(i), function=NullFunc, take_log=False,
+        display_name="Energy Group {0}".format(i))
+
 
 for f,v in translation_dict.items():
     if v not in KnownFLASHFields:
@@ -300,3 +355,17 @@
           units=r"\rm{Gauss}\/\rm{cm}^{-1}")
 
 
+
+## Derived FLASH Fields
+def _nele(field, data):
+    return data['dens'] * data['ye'] * Na
+add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+
+def _nion(field, data):
+    return data['dens'] * data['sumy'] * Na
+add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
+
+def _abar(field, data):
+    return 1.0 / data['sumy']
+add_field('abar', function=_abar, take_log=False)

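Assuming the usual HEDP conventions for these FLASH fields, ye is free electrons per AMU and sumy is ions per AMU, so scaling the mass density by Avogadro's number gives number densities, and abar = 1/sumy is the mean atomic mass. A worked example with illustrative values:

    Na = 6.0221415e23      # Avogadro's number (imported as Na above)

    dens = 1.0e-24         # g/cm^3 (illustrative)
    ye   = 0.875           # free electrons per AMU
    sumy = 0.75            # ions per AMU

    nele = dens * ye * Na      # electron number density, ~0.53 cm^-3
    nion = dens * sumy * Na    # ion number density,      ~0.45 cm^-3
    abar = 1.0 / sumy          # mean atomic mass, ~1.33 AMU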
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -34,7 +34,7 @@
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-@requires_pf(sloshing)
+@requires_pf(sloshing, big_data=True)
 def test_sloshing():
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
@@ -50,11 +50,3 @@
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
         yield test
-
-gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
-@requires_pf(gcm, big_data=True)
-def test_galaxy_cluster_merger():
-    pf = data_dir_load(gcm)
-    for test in big_patch_amr(gcm, _fields):
-        yield test
-

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -197,13 +197,13 @@
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
             current_field = self._handle["/field_types/%s" % field_name]
-            try:
+            if 'field_to_cgs' in current_field.attrs:
                 self.units[field_name] = current_field.attrs['field_to_cgs']
-            except:
+            else:
                 self.units[field_name] = 1.0
-            try:
-                current_fields_unit = current_field.attrs['field_units'][0]
-            except:
+            if 'field_units' in current_field.attrs:
+                current_fields_unit = just_one(current_field.attrs['field_units'])
+            else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
                    units=current_fields_unit, projected_units="",

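Replacing the bare try/except blocks with explicit attribute-membership tests avoids masking unrelated errors. The same pattern against an in-memory HDF5 file (illustrative sketch using h5py's core driver):

    import h5py

    f = h5py.File("demo.h5", "w", driver="core", backing_store=False)
    g = f.create_group("field_types/density")
    g.attrs["field_to_cgs"] = 1.0e-3

    current_field = f["field_types/density"]
    if "field_to_cgs" in current_field.attrs:
        to_cgs = current_field.attrs["field_to_cgs"]
    else:
        to_cgs = 1.0
    f.close()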
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,7 @@
 
 import weakref
 import numpy as np
+import uuid
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -302,7 +303,10 @@
         #self._conversion_override = conversion_override
 
         self.stream_handler = stream_handler
-        StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
+        name = "InMemoryParameterFile_%s" % (uuid.uuid4().hex)
+        from yt.data_objects.static_output import _cached_pfs
+        _cached_pfs[name] = self
+        StaticOutput.__init__(self, name, self._data_style)
 
         self.units = {}
         self.time_units = {}

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,8 +38,10 @@
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
-# operations.
-
+# operations.  The variable unparsed_args is not used internally but is
+# provided as a convenience for users who wish to parse arguments in scripts.
+# See http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2011-December/
+#     001727.html
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
@@ -146,7 +148,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position 
+    ortho_find, quartiles, periodic_position
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -30,18 +30,23 @@
 import urllib2
 import cPickle
 import sys
+import cPickle
+import shelve
+import zlib
 
+from matplotlib.testing.compare import compare_images
 from nose.plugins import Plugin
 from yt.testing import *
+from yt.convenience import load, simulation
 from yt.config import ytcfg
-from yt.mods import *
 from yt.data_objects.static_output import StaticOutput
-import cPickle
-import shelve
-
 from yt.utilities.logger import disable_stream_logging
 from yt.utilities.command_line import get_yt_version
 
+import matplotlib.image as mpimg
+import yt.visualization.plot_window as pw
+import yt.utilities.progressbar as progressbar
+
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
 
@@ -66,6 +71,8 @@
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
+        parser.add_option("--local-dir", dest="output_dir", metavar='str',
+                          help="The name of the directory to store local results")
 
     @property
     def my_version(self, version=None):
@@ -96,7 +103,7 @@
                 self.store_name = options.answer_name
             self.compare_name = None
         # if we're not storing, then we're comparing, and we want default
-        # comparison name to be the latest gold standard 
+        # comparison name to be the latest gold standard
         # either on network or local
         else:
             if options.answer_name is None:
@@ -117,18 +124,21 @@
             self.compare_name = None
         elif self.compare_name == "latest":
             self.compare_name = _latest
-            
-        # Local/Cloud storage 
+
+        # Local/Cloud storage
         if options.local_results:
+            if options.output_dir is None:
+                print 'Please supply an output directory with the --local-dir option'
+                sys.exit(1)
             storage_class = AnswerTestLocalStorage
-            # Fix up filename for local storage 
+            # Fix up filename for local storage
             if self.compare_name is not None:
                 self.compare_name = "%s/%s/%s" % \
-                    (os.path.realpath(options.output_dir), self.compare_name, 
+                    (os.path.realpath(options.output_dir), self.compare_name,
                      self.compare_name)
             if self.store_name is not None:
                 name_dir_path = "%s/%s" % \
-                    (os.path.realpath(options.output_dir), 
+                    (os.path.realpath(options.output_dir),
                     self.store_name)
                 if not os.path.isdir(name_dir_path):
                     os.makedirs(name_dir_path)
@@ -147,7 +157,10 @@
 
     def finalize(self, result=None):
         if self.store_results is False: return
-        self.storage.dump(self.result_storage)        
+        self.storage.dump(self.result_storage)
+
+    def help(self):
+        return "yt answer testing support"
 
 class AnswerTestStorage(object):
     def __init__(self, reference_name=None, answer_name=None):
@@ -155,9 +168,9 @@
         self.answer_name = answer_name
         self.cache = {}
     def dump(self, result_storage, result):
-        raise NotImplementedError 
+        raise NotImplementedError
     def get(self, pf_name, default=None):
-        raise NotImplementedError 
+        raise NotImplementedError
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
@@ -185,6 +198,9 @@
         self.cache[pf_name] = rv
         return rv
 
+    def progress_callback(self, current, total):
+        self.pbar.update(current)
+
     def dump(self, result_storage):
         if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
@@ -195,12 +211,24 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name))
             if tk is not None: tk.delete()
             k = Key(bucket)
             k.key = "%s_%s" % (self.answer_name, pf_name)
-            k.set_contents_from_string(rs)
+
+            pb_widgets = [
+                unicode(k.key, errors='ignore').encode('utf-8'), ' ',
+                progressbar.FileTransferSpeed(),' <<<', progressbar.Bar(),
+                '>>> ', progressbar.Percentage(), ' ', progressbar.ETA()
+                ]
+            self.pbar = progressbar.ProgressBar(widgets=pb_widgets,
+                                                maxval=sys.getsizeof(rs))
+
+            self.pbar.start()
+            k.set_contents_from_string(rs, cb=self.progress_callback,
+                                       num_cb=100000)
             k.set_acl("public-read")
+            self.pbar.finish()
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
@@ -209,7 +237,7 @@
         ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
-            if name in ds:
+            if answer_name in ds:
                 mylog.info("Overwriting %s", answer_name)
             ds[answer_name] = result_storage[pf_name]
         ds.close()
@@ -277,7 +305,7 @@
         nv = self.run()
         if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None or self.description not in dd: 
+            if dd is None or self.description not in dd:
                 raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
@@ -303,6 +331,16 @@
         obj = cls(*obj_type[1])
         return obj
 
+    def create_plot(self, pf, plot_type, plot_field, plot_axis, plot_kwargs = None):
+        # plot_type should be a string
+        # plot_args should be a tuple
+        # plot_kwargs should be a dict
+        if plot_type is None:
+            raise RuntimeError('Must explicitly request a plot type')
+        cls = getattr(pw, plot_type)
+        plot = cls(*(pf, plot_axis, plot_field), **plot_kwargs)
+        return plot
+
     @property
     def sim_center(self):
         """
@@ -335,7 +373,7 @@
         args = [self._type_name, str(self.pf), oname]
         args += [str(getattr(self, an)) for an in self._attrs]
         return "_".join(args)
-        
+
 class FieldValuesTest(AnswerTestingTest):
     _type_name = "FieldValues"
     _attrs = ("field", )
@@ -357,7 +395,7 @@
     def compare(self, new_result, old_result):
         err_msg = "Field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_allclose(new_result, old_result, 10.**(-self.decimals),
@@ -381,12 +419,12 @@
     def compare(self, new_result, old_result):
         err_msg = "All field values for %s not equal." % self.field
         if self.decimals is None:
-            assert_equal(new_result, old_result, 
+            assert_equal(new_result, old_result,
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
                              err_msg=err_msg, verbose=True)
-            
+
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
     _attrs = ("field", "axis", "weight_field")
@@ -426,7 +464,7 @@
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_allclose(new_result[k], old_result[k], 
+                assert_allclose(new_result[k], old_result[k],
                                  10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
@@ -505,7 +543,7 @@
             assert_equal(new_result[i], old_result[i],
                          err_msg="Output times not equal.",
                          verbose=True)
-        
+
 class GridHierarchyTest(AnswerTestingTest):
     _type_name = "GridHierarchy"
     _attrs = ()
@@ -547,6 +585,37 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+class PlotWindowAttributeTest(AnswerTestingTest):
+    _type_name = "PlotWindowAttribute"
+    _attrs = ('plot_type', 'plot_field', 'plot_axis', 'attr_name', 'attr_args')
+    def __init__(self, pf_fn, plot_field, plot_axis, attr_name, attr_args,
+                 decimals, plot_type = 'SlicePlot'):
+        super(PlotWindowAttributeTest, self).__init__(pf_fn)
+        self.plot_type = plot_type
+        self.plot_field = plot_field
+        self.plot_axis = plot_axis
+        self.plot_kwargs = {}
+        self.attr_name = attr_name
+        self.attr_args = attr_args
+        self.decimals = decimals
+
+    def run(self):
+        plot = self.create_plot(self.pf, self.plot_type, self.plot_field,
+                                self.plot_axis, self.plot_kwargs)
+        attr = getattr(plot, self.attr_name)
+        attr(*self.attr_args[0], **self.attr_args[1])
+        fn = plot.save()[0]
+        image = mpimg.imread(fn)
+        os.remove(fn)
+        return [zlib.compress(image.dumps())]
+
+    def compare(self, new_result, old_result):
+        fns = ['old.png', 'new.png']
+        mpimg.imsave(fns[0], np.loads(zlib.decompress(old_result[0])))
+        mpimg.imsave(fns[1], np.loads(zlib.decompress(new_result[0])))
+        compare_images(fns[0], fns[1], 10**(-self.decimals))
+        for fn in fns: os.remove(fn)
+
 def requires_pf(pf_fn, big_data = False):
     def ffalse(func):
         return lambda: None
@@ -602,4 +671,3 @@
 
     def __call__(self):
         self.args[0](*self.args[1:])
-

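PlotWindowAttributeTest stores each rendered image as a zlib-compressed pickle of the image array via ndarray.dumps(), and compare() reverses that before handing the PNGs to matplotlib's compare_images. The round trip in isolation (numpy's dumps/np.loads were current in this era; they are deprecated in modern NumPy):

    import zlib
    import numpy as np

    image = np.random.random((8, 8, 3))        # stand-in for mpimg.imread(fn)
    payload = zlib.compress(image.dumps())     # what run() returns
    restored = np.loads(zlib.decompress(payload))
    assert np.all(restored == image)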
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -309,10 +309,10 @@
         return
     print "".join(file(date_file, 'r').readlines())
     print "To update all dependencies, run \"yt update --all\"."
-    
+
 def _update_yt_stack(path):
     "Rerun the install script to updated all dependencies."
-    
+
     install_script = os.path.join(path, "doc/install_script.sh")
     if not os.path.exists(install_script):
         print
@@ -1292,7 +1292,10 @@
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = np.array(center)
-        if args.axis == 4:
+        if pf.dimensionality < 3:
+            dummy_dimensions = np.nonzero(pf.h.grids[0].ActiveDimensions <= 1)
+            axes = ensure_list(dummy_dimensions[0][0])
+        elif args.axis == 4:
             axes = range(3)
         else:
             axes = [args.axis]

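For 2D data the flat direction has at most one cell, so the command line now finds it directly from the grid dimensions instead of requiring --axis. The selection logic in isolation:

    import numpy as np

    ActiveDimensions = np.array([64, 64, 1])   # a 2D dataset
    dummy_dimensions = np.nonzero(ActiveDimensions <= 1)
    axes = [dummy_dimensions[0][0]]
    print axes   # [2]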
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/utilities/decompose.py
--- a/yt/utilities/decompose.py
+++ b/yt/utilities/decompose.py
@@ -69,8 +69,8 @@
     """ Evaluate longest to shortest edge ratio
        BEWARE: lots of magic here """
     eff_dim = (n_d > 1).sum()
-    ideal_bsize = eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)
-                             ) ** (1.0 / eff_dim)
+    exp = float(eff_dim - 1) / float(eff_dim)
+    ideal_bsize = eff_dim * pieces ** (1.0 / eff_dim) * np.product(n_d) ** exp
     mask = np.where(n_d > 1)
     nd_arr = np.array(n_d, dtype=np.float64)[mask]
     bsize = int(np.sum(ldom[mask] / nd_arr * np.product(nd_arr)))
@@ -109,6 +109,10 @@
     fac = factorize_number(pieces)
     nfactors = len(fac[:, 2])
     best = 0.0
+    p_size = np.ones(3, dtype=np.int)
+    if pieces == 1:
+        return p_size
+
     while np.all(fac[:, 2] > 0):
         ldom = np.ones(3, dtype=np.int)
         for nfac in range(nfactors):

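The two ideal_bsize expressions are algebraically identical, since d * (p * N**(d-1))**(1/d) == d * p**(1/d) * N**((d-1)/d) with N = product(n_d), but the rewritten form never raises the integer cell count to a power before taking the root, which can overflow 64-bit intermediates on large grids (an inference; the changeset does not state the motive):

    import numpy as np

    n_d = np.array([1024, 1024, 1024])
    pieces = 512
    eff_dim = (n_d > 1).sum()

    exp = float(eff_dim - 1) / float(eff_dim)
    ideal_bsize = eff_dim * pieces ** (1.0 / eff_dim) * np.product(n_d) ** exp
    # Old form: eff_dim * (pieces * np.product(n_d) ** (eff_dim - 1)) ** (1.0 / eff_dim)
    # -- here pieces * (1024**3)**2 == 2**69, past the int64 range.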
diff -r 9586a3112cd440027b1f353c59e95dd638ea5676 -r a5a04b9207b187eb09478969630d01275da603f1 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -48,7 +48,8 @@
 y_names = ['z','z','y']
 
 # How many of each thing are in an Mpc
-mpc_conversion = {'mpc'   : mpc_per_mpc,
+mpc_conversion = {'Mpc'   : mpc_per_mpc,
+                  'mpc'   : mpc_per_mpc,
                   'kpc'   : kpc_per_mpc,
                   'pc'    : pc_per_mpc,
                   'au'    : au_per_mpc,
@@ -56,7 +57,7 @@
                   'miles' : miles_per_mpc,
                   'cm'    : cm_per_mpc}
 
-# How many seconds are in each thig
+# How many seconds are in each thing
 sec_conversion = {'Gyr'   : sec_per_Gyr,
                   'Myr'   : sec_per_Myr,
                   'years' : sec_per_year,

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/9166fa060a7b/
Changeset:   9166fa060a7b
Branch:      yt
User:        atmyers
Date:        2013-05-07 02:13:02
Summary:     add particle mass fields for orion and chombo
Affected #:  2 files

diff -r a5a04b9207b187eb09478969630d01275da603f1 -r 9166fa060a7b4982adef36637fb83322935d7d24 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -174,3 +174,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

diff -r a5a04b9207b187eb09478969630d01275da603f1 -r 9166fa060a7b4982adef36637fb83322935d7d24 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -174,3 +174,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

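Both frontends gain the same pair of fields: ParticleMass (the raw particle_mass array as float64) and ParticleMassMsun, scaled by 1.989e33 g per solar mass. The conversion by itself, with illustrative values:

    import numpy as np

    msun_cgs = 1.989e33                            # g per Msun, as above
    particle_mass = np.array([1.0e30, 3.978e33])   # grams (illustrative)
    particle_mass_msun = particle_mass.astype("float64") / msun_cgs
    print particle_mass_msun                       # [ ~5.0e-4   2.0 ]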

https://bitbucket.org/yt_analysis/yt-3.0/commits/34979d998227/
Changeset:   34979d998227
Branch:      yt
User:        atmyers
Date:        2013-05-07 02:37:59
Summary:     wrap code that touched ParticleMass in a try/except block
Affected #:  1 file

diff -r 9166fa060a7b4982adef36637fb83322935d7d24 -r 34979d998227182ac38c2c51b876472755d650ff yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,8 +151,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

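The try/except lets TotalMass degrade gracefully on gas-only datasets, where asking for ParticleMassMsun raises KeyError. The same pattern with a plain dict standing in for the data object (illustrative only):

    def total_mass(data):
        baryon_mass = sum(data["CellMassMsun"])
        try:
            total = baryon_mass + sum(data["ParticleMassMsun"])
        except KeyError:
            total = baryon_mass
        return [total]

    print total_mass({"CellMassMsun": [1.0, 2.0]})                        # [3.0]
    print total_mass({"CellMassMsun": [1.0], "ParticleMassMsun": [4.0]})  # [5.0]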

https://bitbucket.org/yt_analysis/yt-3.0/commits/79b21e4927d9/
Changeset:   79b21e4927d9
Branch:      yt
User:        bcrosby
Date:        2013-05-07 18:23:02
Summary:     Change the default HOP and pHOP threshold used by the merger tree to 160, consistent with HOP and pHOP when used alone.
Affected #:  1 file

diff -r ccfe34e70803932ff921e6020a7972f8bcdcef49 -r 79b21e4927d9c0edb1ce7b83c5b1bb5dbe924e7e yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)


https://bitbucket.org/yt_analysis/yt-3.0/commits/86838dfc73cd/
Changeset:   86838dfc73cd
Branch:      yt
User:        bcrosby
Date:        2013-05-07 18:26:45
Summary:     Forgot to update the default value in the description.
Affected #:  1 file

diff -r 79b21e4927d9c0edb1ce7b83c5b1bb5dbe924e7e -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.


https://bitbucket.org/yt_analysis/yt-3.0/commits/9b5c690e791a/
Changeset:   9b5c690e791a
Branch:      yt
User:        bcrosby
Date:        2013-05-07 18:39:13
Summary:     Merged.
Affected #:  85 files

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -170,6 +170,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -424,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -706,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -714,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]

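Each tarball above is paired with a .sha512 file holding the expected digest in the usual "<digest>  <filename>" layout, so a download can be checked before it is unpacked. A minimal Python sketch of that check (the helper name is illustrative; the script itself presumably relies on the system sha512sum/shasum utilities):

import hashlib

def verify_sha512(tarball, checksum_file):
    # The .sha512 files above hold "<hex digest>  <filename>".
    expected = open(checksum_file).read().split()[0]
    digest = hashlib.sha512()
    with open(tarball, "rb") as handle:
        for chunk in iter(lambda: handle.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected

# Example: verify_sha512("tornado-3.0.tar.gz", "tornado-3.0.tar.gz.sha512")
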
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,11 @@
 from distutils import version
 
 from distutils.core import Command
+from distutils.spawn import find_executable
 
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
 
 class BuildForthon(Command):
 
@@ -40,11 +44,17 @@
     def run(self):
 
         """runner"""
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            sys.stderr.write(
+                "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+            )
+            return
 
         cwd = os.getcwd()
         os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
-        cmd = ["Forthon", "-F", "gfortran", "--compile_first", "fKD_source",
-               "--no2underscores", "--fopt", "'-O3'", "fKD",
+        cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+               "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
                "fKD_source.f90"]
         subprocess.check_call(cmd, shell=False)
         shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
@@ -145,7 +155,7 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
@@ -184,9 +194,13 @@
 
 class my_install_data(np_install_data.install_data):
     def run(self):
-        self.distribution.data_files.append(
-            ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
-        )
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
         np_install_data.install_data.run(self)
 
 class my_build_py(build_py):

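The setup.py change above probes the PATH up front so the optional fKDpy.so build is skipped cleanly when Forthon or gfortran is missing, rather than failing halfway through. The same guard pattern in isolation (a sketch; only find_executable comes from the stdlib, the rest is illustrative):

from distutils.spawn import find_executable

def find_fortran_deps():
    # find_executable returns the full path, or None if not on the PATH.
    return (find_executable("Forthon"), find_executable("gfortran"))

forthon_exe, gfortran_exe = find_fortran_deps()
if None in (forthon_exe, gfortran_exe):
    print("fKDpy.so will not be built: Forthon and/or gfortran missing")
else:
    print("building with %s and %s" % (forthon_exe, gfortran_exe))
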
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
             If True, use dynamic load balancing to create the projections.
             Default: False.
 
-        Getting the Nearest Galaxies
-        ----------------------------
+        Notes
+        -----
+
         The light ray tool will use the HaloProfiler to calculate the
         distance and mass of the nearest halo to that pixel.  In order
         to do this, a dictionary called halo_profiler_parameters is used

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
         halonum : int
             Halo number at the last output to trace.
 
-        Output
-        ------
+        Returns
+        -------
         output : dict
             Dictionary of redshifts, cycle numbers, and halo numbers
             of the most massive progenitor.  keys = {redshift, cycle,

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -758,17 +758,19 @@
     
     def query(self, string):
         r"""Performs a query of the database and returns the results as a list
-        of tuple(s), even if the result is singular.
+        of tuples, even if the result is singular.
         
         Parameters
         ----------
-        string : String
+        
+        string : str
             The SQL query of the database.
         
         Examples
-        -------
+        --------
+
         >>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
-        ... SnapZ = 0;")
+        ...    SnapZ = 0;")
         """
         # Query the database and return a list of tuples.
         if string is None:

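The corrected docstring reflects standard sqlite3 behavior: fetching query results always yields a list of tuples, even when a single column of a single row comes back. A self-contained sketch with a throwaway in-memory table (the schema here is invented for illustration):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE Halos (GlobalHaloID INTEGER, SnapHaloID INTEGER, SnapZ REAL)")
conn.execute("INSERT INTO Halos VALUES (42, 0, 0.0)")

rows = conn.execute(
    "SELECT GlobalHaloID FROM Halos WHERE SnapHaloID = 0 AND SnapZ = 0;").fetchall()
print(rows)  # [(42,)] -- a list of tuples, even for one scalar result
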
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
         After all the calls to `add_profile`, this will trigger the actual
         calculations and output the profiles to disk.
 
-        Paramters
-        ---------
+        Parameters
+        ----------
 
         filename : str
             If set, a file will be written with all of the filtered halos

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
     
     Initialize an EmissivityIntegrator object.
 
-    Keyword Parameters
-    ------------------
-    filename: string
+    Parameters
+    ----------
+    filename: string, default None
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
         emissivity tables for primordial elements and for metals at 
@@ -146,8 +146,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -220,8 +220,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -277,8 +277,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

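Bumping gold_standard_filename retargets answer testing at the gold007 result set. A sketch of reading the active standard back, assuming the ConfigParser-style ytcfg API of this yt generation:

from yt.config import ytcfg

# Falls back to the defaults table above unless overridden in the user config.
print(ytcfg.get("yt", "gold_standard_filename"))   # e.g. "gold007"
print(ytcfg.get("yt", "local_standard_filename"))  # e.g. "local001"
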
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,8 @@
 import itertools
 import shelve
 import cStringIO
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -3585,12 +3587,12 @@
         given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3610,87 +3612,64 @@
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
@@ -3875,10 +3854,11 @@
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
 
-    Example
-    -------
-    cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                              dims=[128, 128, 128])
+    Examples
+    --------
+
+    >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+    ...                          dims=[128, 128, 128])
     """
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -4035,7 +4015,7 @@
         sp1, ")"])
     """
     _type_name = "boolean"
-    _con_args = ("regions")
+    _con_args = ("regions",)
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         # Center is meaningless, but we'll define it all the same.
         AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4373,6 +4353,230 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extensions included - both obj & mtl files
+            are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If None, then only 1 plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if emit_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MTL file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -4605,22 +4809,46 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))

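The comment block above spells out the constraint: _reconstruct_object must keep satisfying old pickle files whose call sites expect a bare (pf, obj) tuple. Subclassing tuple does exactly that, while giving _check_nested_args a distinct type to recognize already-reconstructed arguments. A stripped-down sketch of the trick:

class ReconstructedObject(tuple):
    pass

result = ReconstructedObject(("pf", "obj"))

# Old call sites keep working: it unpacks like the bare tuple it used to be.
pf, obj = result

# New code can still tell it apart from an ordinary tuple argument.
print(isinstance(result, ReconstructedObject))         # True
print(isinstance(("pf", "obj"), ReconstructedObject))  # False
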
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,8 +151,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """

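The save_object fix wraps the pickle string in a character array before it reaches HDF5. A sketch of the round trip through h5py, assuming its usual dataset semantics (the file and group names here are invented):

import cPickle
import numpy as np
import h5py

payload = {"name": "sphere", "radius": 0.1}
pickled = cPickle.dumps(payload, protocol=-1)

with h5py.File("objects.h5", "w") as handle:
    # Store the pickle as a char array, as in the save_object change above.
    grp = handle.create_group("Objects")
    grp["example"] = np.array(pickled, dtype='c')

with h5py.File("objects.h5", "r") as handle:
    raw = handle["Objects/example"][:].tostring()
print(cPickle.loads(raw) == payload)  # True
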
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/tests/test_pickle.py
--- /dev/null
+++ b/yt/data_objects/tests/test_pickle.py
@@ -0,0 +1,69 @@
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import cPickle
+import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_save_load_pickle():
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
+
+    # create extracted region from boolean (fairly complex object)
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
+
+    minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
+
+    # save object
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
+    # load object
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
+
+    assert_equal.description = \
+        "%s: File was pickle-loaded successfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
+    yield assert_equal, len(contours[1][0]), len(test_load)
+
+    os.remove(cpklfile.name)

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -791,22 +791,29 @@
          units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
          validators=[ValidateParameter('center')])
 
-def get_radius(positions, data):
-    c = data.get_field_parameter("center")
-    n_tup = tuple([1 for i in range(positions.ndim-1)])
-    center = np.tile(np.reshape(c, (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
-    periodicity = data.pf.periodicity
-    if any(periodicity):
-        period = data.pf.domain_right_edge - data.pf.domain_left_edge
-        return periodic_dist(positions, center, period, periodicity)
-    else:
-        return euclidean_dist(positions, center)
+def get_radius(data, field_prefix):
+    center = data.get_field_parameter("center")
+    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
+    radius = np.zeros(data[field_prefix+"x"].shape, dtype='float64')
+    r = radius.copy()
+    if any(data.pf.periodicity):
+        rdw = radius.copy()
+    for i, ax in enumerate('xyz'):
+        np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+        if data.pf.periodicity[i] == True:
+            np.abs(r, r)
+            np.subtract(r, DW[i], rdw)
+            np.abs(rdw, rdw)
+            np.minimum(r, rdw, r)
+        np.power(r, 2.0, r)
+        np.add(radius, r, radius)
+    np.sqrt(radius, radius)
+    return radius
+
 def _ParticleRadius(field, data):
-    positions = np.array([data["particle_position_%s" % ax] for ax in 'xyz'])
-    return get_radius(positions, data)
+    return get_radius(data, "particle_position_")
 def _Radius(field, data):
-    positions = np.array([data['x'], data['y'], data['z']])
-    return get_radius(positions, data)
+    return get_radius(data, "")
 
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
@@ -991,7 +998,7 @@
     return blank
 add_field("particle_density", function=_pdensity,
           validators=[ValidateGridType()], convert_function=_convertDensity,
-          display_name=r"$\mathrm{Particle}\/\mathrm{Density}$")
+          display_name=r"\mathrm{Particle}\/\mathrm{Density}")
 
 def _MagneticEnergy(field,data):
     """This assumes that your front end has provided Bx, By, Bz in
@@ -1027,8 +1034,8 @@
     return data['MagneticEnergy']
 add_field("MagneticPressure",
           function=_MagneticPressure,
-          display_name=r"\rm{Magnetic}\/\rm{Energy}",
-          units="\rm{ergs}\/\rm{cm}^{-3}")
+          display_name=r"\rm{Magnetic}\/\rm{Pressure}",
+          units=r"\rm{ergs}\/\rm{cm}^{-3}")
 
 def _BPoloidal(field,data):
     normal = data.get_field_parameter("normal")

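The rewritten get_radius accumulates the squared radius axis by axis with in-place NumPy calls, and on periodic axes takes the minimum-image distance: the smaller of |x - c| and DW - |x - c| (the diff expresses the latter as abs(|x - c| - DW)). The same arithmetic as a standalone sketch (the function name and array layout are illustrative):

import numpy as np

def periodic_radius(pos, center, DW, periodicity):
    # pos: (N, 3) positions; center, DW: length-3 sequences.
    radius2 = np.zeros(pos.shape[0], dtype='float64')
    for i in range(3):
        r = np.abs(pos[:, i] - center[i])
        if periodicity[i]:
            # Minimum-image convention: the wrapped separation can be shorter.
            r = np.minimum(r, np.abs(r - DW[i]))
        radius2 += r * r
    return np.sqrt(radius2)

pos = np.array([[0.95, 0.5, 0.5]])
print(periodic_radius(pos, [0.05, 0.5, 0.5], [1.0, 1.0, 1.0],
                      (True, True, True)))  # [ 0.1]
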
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -53,7 +53,7 @@
 class AthenaGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
-        df = hierarchy.storage_filename
+        df = hierarchy.parameter_file.filename[4:-4]
         if 'id0' not in hierarchy.parameter_file.filename:
             gname = hierarchy.parameter_file.filename
         else:
@@ -119,12 +119,13 @@
 
     grid = AthenaGrid
     _data_style='athena'
+    _data_file = None
     
     def __init__(self, pf, data_style='athena'):
         self.parameter_file = weakref.proxy(pf)
+        self.directory = os.path.dirname(self.parameter_file.filename)
         self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
-        self.storage_filename = self.parameter_file.storage_filename
         self.hierarchy_filename = self.parameter_file.filename
         #self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = file(self.hierarchy_filename,'rb')
@@ -132,9 +133,6 @@
 
         self._fhandle.close()
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         field_map = {}
         f = open(self.hierarchy_filename,'rb')
@@ -337,12 +335,14 @@
     _data_style = "athena"
 
     def __init__(self, filename, data_style='athena',
-                 storage_filename = None, parameters = {}):
+                 storage_filename=None, parameters={}):
         self.specified_parameters = parameters
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
-        self.storage_filename = filename[4:-4]
-        
+        if storage_filename is None:
+            storage_filename = '%s.yt' % filename.split('/')[-1]
+        self.storage_filename = storage_filename
+
         # Unfortunately we now have to mandate that the hierarchy gets 
         # instantiated so that we can make sure we have the correct left 
         # and right domain edges.
@@ -402,7 +402,7 @@
         if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
         self.dimensionality = dimensionality
         self.current_time = grid["time"]
-        self.unique_identifier = self._handle.__hash__()
+        self.unique_identifier = self.parameter_filename.__hash__()
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -60,7 +60,7 @@
 
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
                  dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(self, index, **kwargs)
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia  # TODO: Factor this behavior out in tests
@@ -72,7 +72,7 @@
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
-        self.level = level
+        self.Level = level
 
     def get_global_startindex(self):
         return self.start_index
@@ -115,8 +115,6 @@
     grid = CastroGrid
 
     def __init__(self, pf, data_style='castro_native'):
-        super(CastroHierarchy, self).__init__(self, pf, self.data_style)
-
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
@@ -128,6 +126,8 @@
                                 self.parameter_file.paranoid_read) 
         self.read_particle_header()
         self._cache_endianness(self.levels[-1].grids[-1])
+
+        super(CastroHierarchy, self).__init__(pf, data_style)
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
@@ -181,7 +181,7 @@
         counter += 1
 
         self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = np.array(map(float, line.split()))
         counter += self.n_levels
         self.geometry = int(self._global_header_lines[counter])
@@ -424,21 +424,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-
-            available = np.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -521,15 +506,15 @@
          * ASCII (not implemented in yt)
 
         """
-        super(CastroStaticOutput, self).__init__(self, plotname.rstrip("/"),
-                                                 data_style='castro_native')
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
         self.fparameter_filename = fparamFilename
         self.__ipfn = paramFilename
+        self.fparameters = {}
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style='castro_native')
 
-        self.fparameters = {}
 
         # These should maybe not be hardcoded?
         ### TODO: this.
@@ -618,6 +603,7 @@
                 self.domain_left_edge = np.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
+            self.fparameters[param] = vals
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
@@ -655,8 +641,11 @@
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0:
-                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                if vals.count("'") == 0 and vals.count("\"") == 0:
+                    try:
+                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    except ValueError:
+                        print "Failed on line", line
                 else:
                     t = vals.split()
                 if len(t) == 1:

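Several of the Castro fixes above repair the same Python 2 pitfall: passing self explicitly to a bound super() call, which shifts every positional argument by one or raises a TypeError outright. In miniature:

class Base(object):
    def __init__(self, index):
        self.index = index

class Grid(Base):
    def __init__(self, index):
        # Pre-patch form: super(Grid, self).__init__(self, index)
        # -> TypeError: __init__() takes exactly 2 arguments (3 given),
        # because the bound super call already supplies self.
        super(Grid, self).__init__(index)

print(Grid(7).index)  # 7
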
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -100,10 +100,10 @@
             if (gridSize != grid.ActiveDimensions).any():
                 print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
                 error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
+            if bytesPerReal != grid.hierarchy._bytes_per_real:
                 print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
                 error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
+            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
                 print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
                 error_count += 1
 
@@ -114,7 +114,7 @@
             start = grid.start_index
             stop = grid.stop_index
             dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
+            bytesPerReal = grid.hierarchy._bytes_per_real
 
         nElements = grid.ActiveDimensions.prod()
 

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -39,8 +39,8 @@
      ST_CTIME
 
 from .definitions import \
-     pluto2enzoDict, \
-     yt2plutoFieldsDict, \
+     chombo2enzoDict, \
+     yt2chomboFieldsDict, \
      parameterDict \
 
 from yt.funcs import *
@@ -250,7 +250,7 @@
         seconds = 1 #self["Time"]
         for unit in sec_conversion.keys():
             self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2plutoFieldsDict:
+        for key in yt2chomboFieldsDict:
             self.conversion_factors[key] = 1.0
 
     def _setup_nounits_units(self):
@@ -270,29 +270,22 @@
 
     def _parse_parameter_file(self):
         """
-        Check to see whether a 'pluto.ini' or 'orion2.ini' file
+        Check to see whether an 'orion2.ini' file
         exists in the plot file directory. If one does, attempt to parse it.
-        Otherwise, assume the left edge starts at 0 and get the right edge
-        from the hdf5 file.
+        Otherwise grab the dimensions from the hdf5 file.
         """
-        if os.path.isfile('pluto.ini'):
-            self._parse_pluto_file('pluto.ini')
-        else:
-            if os.path.isfile('orion2.ini'): self._parse_pluto_file('orion2.ini')
-            self.unique_identifier = \
-                int(os.stat(self.parameter_filename)[ST_CTIME])
-            self.domain_left_edge = self.__calc_left_edge()
-            self.domain_right_edge = self.__calc_right_edge()
-            self.domain_dimensions = self.__calc_domain_dimensions()
-            self.dimensionality = 3
-            self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        
+        if os.path.isfile('orion2.ini'): self._parse_inputs_file('orion2.ini')
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+        self.dimensionality = 3
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
         self.periodicity = (True, True, True)
 
-    def _parse_pluto_file(self, ini_filename):
-        """
-        Reads in an inputs file in the 'pluto.ini' format. Probably not
-        especially robust at the moment.
-        """
+    def _parse_inputs_file(self, ini_filename):
         self.fullplotdir = os.path.abspath(self.parameter_filename)
         self.ini_filename = self._localize( \
             self.ini_filename, ini_filename)
@@ -305,8 +298,8 @@
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
-            if pluto2enzoDict.has_key(param):
-                paramName = pluto2enzoDict[param]
+            if chombo2enzoDict.has_key(param):
+                paramName = chombo2enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
                 if len(t) == 1:
                     self.parameters[paramName] = t[0]
@@ -336,13 +329,14 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            valid = "Chombo_global" in fileh["/"]
-            fileh.close()
-            return valid
-        except:
-            pass
+        if not os.path.isfile('pluto.ini'):
+            try:
+                fileh = h5py.File(args[0],'r')
+                valid = "Chombo_global" in fileh["/"]
+                fileh.close()
+                return valid
+            except:
+                pass
         return False
 
     @parallel_root_only

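[Editor's note: the revised _is_valid above makes the Chombo frontend stand down
whenever a 'pluto.ini' file is present in the working directory, leaving such files
to the new Pluto frontend; both formats carry a "Chombo_global" group, so the ini
file is the only discriminator. A minimal sketch of the pattern, assuming the check
runs from the dataset's directory (file name hypothetical):

    import os
    import h5py

    def looks_like_chombo(filename):
        # Defer to the Pluto frontend whenever a pluto.ini is present.
        if os.path.isfile('pluto.ini'):
            return False
        try:
            fileh = h5py.File(filename, 'r')
            valid = "Chombo_global" in fileh["/"]
            fileh.close()
            return valid
        except IOError:
            return False

    print looks_like_chombo("plt0010.hdf5")  # hypothetical plot file]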
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/chombo/definitions.py
--- a/yt/frontends/chombo/definitions.py
+++ b/yt/frontends/chombo/definitions.py
@@ -56,10 +56,10 @@
                  "NumberOfParticleAttributes": int,
                                  }
 
-pluto2enzoDict = {"GAMMA": "Gamma",
+chombo2enzoDict = {"GAMMA": "Gamma",
                   "Ref_ratio": "RefineBy"
                                     }
 
-yt2plutoFieldsDict = {}
-pluto2ytFieldsDict = {}
+yt2chomboFieldsDict = {}
+chombo2ytFieldsDict = {}
 

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -174,3 +174,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

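[Editor's note: both new fields read the raw particle_mass field; ParticleMassMsun
additionally divides by 1.989e33, the solar mass in grams, so the conversion assumes
CGS particle masses. The arithmetic, stand-alone with hypothetical values:

    import numpy as np

    MSUN_G = 1.989e33                               # solar mass in grams
    particle_mass = np.array([3.978e33, 1.989e33])  # hypothetical masses, in g
    print particle_mass / MSUN_G                    # -> [ 2.  1.] Msun]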
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -634,6 +634,24 @@
         else:
             self.derived_field_list = self.__class__._cached_derived_field_list
 
+    def _detect_fields(self):
+        self.field_list = []
+        # Do this only on the root processor to save disk work.
+        mylog.info("Gathering a field list (this may take a moment.)")
+        field_list = set()
+        random_sample = self._generate_random_grids()
+        for grid in random_sample:
+            try:
+                gf = self.io._read_field_names(grid)
+            except self.io._read_exception:
+                mylog.debug("Grid %s is a bit funky?", grid.id)
+                continue
+            mylog.debug("Grid %s has: %s", grid.id, gf)
+            field_list = field_list.union(gf)
+        field_list = self.comm.par_combine_object(list(field_list),
+                        datatype="list", op = "cat")
+        self.field_list = list(set(field_list))
+
     def _generate_random_grids(self):
         my_rank = self.comm.rank
         my_grids = self.grids[self.grid_procs.ravel() == my_rank]
@@ -770,7 +788,7 @@
         data_label_factors = {}
         for line in (l.strip() for l in lines):
             if len(line) < 2: continue
-            param, vals = (i.strip() for i in line.split("="))
+            param, vals = (i.strip() for i in line.split("=",1))
             # First we try to decipher what type of value it is.
             vals = vals.split()
             # Special case approaching.

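[Editor's note: the new _detect_fields gathers field names from a random sample of
grids on each processor and merges them with par_combine_object, and the
one-character change to line.split("=", 1) fixes parsing of Enzo parameter lines
whose values themselves contain an '=': splitting on every '=' yields three pieces,
which then fails to unpack into two names. A quick illustration with a hypothetical
parameter line:

    line = "CosmologyComment = flat universe, Omega_m + Omega_L = 1"
    # line.split("=") gives 3 pieces; unpacking into (param, vals) raises ValueError
    param, vals = (i.strip() for i in line.split("=", 1))
    print param  # CosmologyComment
    print vals   # flat universe, Omega_m + Omega_L = 1]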
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -130,14 +130,21 @@
 def _ThermalEnergy(field, data):
     if data.pf["HydroMethod"] == 2:
         return data["TotalEnergy"]
-    else:
-        if data.pf["DualEnergyFormalism"]:
-            return data["GasEnergy"]
-        else:
-            return data["TotalEnergy"] - 0.5*(
-                   data["x-velocity"]**2.0
-                 + data["y-velocity"]**2.0
-                 + data["z-velocity"]**2.0 )
+    
+    if data.pf["DualEnergyFormalism"]:
+        return data["GasEnergy"]
+
+    if data.pf["HydroMethod"] in (4,6):
+        return data["TotalEnergy"] - 0.5*(
+            data["x-velocity"]**2.0
+            + data["y-velocity"]**2.0
+            + data["z-velocity"]**2.0 ) \
+            - data["MagneticEnergy"]/data["Density"]
+
+    return data["TotalEnergy"] - 0.5*(
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
 
@@ -171,22 +178,22 @@
 # We set up fields for both TotalEnergy and Total_Energy in the known fields
 # lists.  Note that this does not mean these will be the used definitions.
 add_enzo_field("TotalEnergy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 add_enzo_field("Total_Energy", function=NullFunc,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Total_Energy(field, data):
     return data["TotalEnergy"] / _convertEnergy(data)
 add_field("Total_Energy", function=_Total_Energy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _TotalEnergy(field, data):
     return data["Total_Energy"] / _convertEnergy(data)
 add_field("TotalEnergy", function=_TotalEnergy,
-          display_name = "$\rm{Total}\/\rm{Energy}$",
+          display_name = r"\rm{Total}\/ \rm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _NumberDensity(field, data):
@@ -369,7 +376,7 @@
         if not filter.any(): return blank
         num = filter.sum()
     else:
-        filter = None
+        filter = Ellipsis
         num = data["particle_position_x"].size
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),

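[Editor's note: the filter = Ellipsis change at the bottom exploits the fact that
arr[Ellipsis] returns the array unchanged, so the subsequent indexing works
identically whether or not a particle mask is in play; the old sentinel None would
instead have inserted a new axis. A minimal demonstration:

    import numpy as np

    x = np.arange(5, dtype=np.float64)
    mask = x > 2.5

    print x[mask]        # [ 3.  4.]             -- masked branch
    print x[Ellipsis]    # [ 0.  1.  2.  3.  4.] -- unmasked branch, untouched
    print x[None].shape  # (1, 5)                -- why None was the wrong sentinel]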
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -5,7 +5,7 @@
 Affiliation: UCSD
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2010-2011 Matthew Turk, John ZuHone.  All Rights Reserved.
+  Copyright (C) 2010-2012 Matthew Turk, John ZuHone, Anthony Scopatz.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,6 +24,7 @@
 """
 
 import numpy as np
+from yt.utilities.exceptions import *
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     NullFunc, \
@@ -36,7 +37,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz, mh
+    kboltz, mh, Na
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -63,6 +64,7 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
+                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 
@@ -154,15 +156,40 @@
 add_flash_field("temp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("temp"),
                 units=r"\rm{K}")
+add_flash_field("tion", function=NullFunc, take_log=True,
+                units=r"\rm{K}")
 add_flash_field("tele", function=NullFunc, take_log=True,
                 convert_function=_get_convert("tele"),
                 units = r"\rm{K}")
+add_flash_field("trad", function=NullFunc, take_log=True,
+                units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
                 units=r"\rm{erg}/\rm{cm}^{3}")
+add_flash_field("pion", function=NullFunc, take_log=True,
+                display_name="Ion Pressure",
+                units=r"\rm{erg}/\rm{cm}^3")
+add_flash_field("pele", function=NullFunc, take_log=True,
+                display_name="Electron Pressure, P_e",
+                units=r"\rm{erg}/\rm{cm}^3")
+add_flash_field("prad", function=NullFunc, take_log=True,
+                display_name="Radiation Pressure",
+                units = r"\rm{erg}/\rm{cm}^3")
+add_flash_field("eion", function=NullFunc, take_log=True,
+                display_name="Ion Internal Energy",
+                units=r"\rm{erg}")
+add_flash_field("eele", function=NullFunc, take_log=True,
+                display_name="Electron Internal Energy",
+                units=r"\rm{erg}")
+add_flash_field("erad", function=NullFunc, take_log=True,
+                display_name="Radiation Internal Energy",
+                units=r"\rm{erg}")
 add_flash_field("pden", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pden"),
                 units=r"\rm{g}/\rm{cm}^{3}")
+add_flash_field("depo", function=NullFunc, take_log=True,
+                units = r"\rm{ergs}/\rm{g}")
+add_flash_field("ye", function=NullFunc, take_log=True,)
 add_flash_field("magx", function=NullFunc, take_log=False,
                 convert_function=_get_convert("magx"),
                 units = r"\mathrm{Gau\ss}")
@@ -192,6 +219,34 @@
                 units = r"\rm{ergs}/\rm{g}")
 add_flash_field("flam", function=NullFunc, take_log=False,
                 convert_function=_get_convert("flam"))
+add_flash_field("absr", function=NullFunc, take_log=False,
+                display_name="Absorption Coefficient")
+add_flash_field("emis", function=NullFunc, take_log=False,
+                display_name="Emissivity")
+add_flash_field("cond", function=NullFunc, take_log=False,
+                display_name="Conductivity")
+add_flash_field("dfcf", function=NullFunc, take_log=False,
+                display_name="Diffusion Equation Scalar")
+add_flash_field("fllm", function=NullFunc, take_log=False,
+                display_name="Flux Limit")
+add_flash_field("pipe", function=NullFunc, take_log=False,
+                display_name="P_i/P_e")
+add_flash_field("tite", function=NullFunc, take_log=False,
+                display_name="T_i/T_e")
+add_flash_field("dbgs", function=NullFunc, take_log=False,
+                display_name="Debug for Shocks")
+add_flash_field("cham", function=NullFunc, take_log=False,
+                display_name="Chamber Material Fraction")
+add_flash_field("targ", function=NullFunc, take_log=False,
+                display_name="Target Material Fraction")
+add_flash_field("sumy", function=NullFunc, take_log=False)
+add_flash_field("mgdc", function=NullFunc, take_log=False,
+                display_name="Emission Minus Absorption Diffusion Terms")
+
+for i in range(1, 1000):
+    add_flash_field("r{0:03}".format(i), function=NullFunc, take_log=False,
+        display_name="Energy Group {0}".format(i))
+
 
 for f,v in translation_dict.items():
     if v not in KnownFLASHFields:
@@ -300,3 +355,17 @@
           units=r"\rm{Gauss}\/\rm{cm}^{-1}")
 
 
+
+## Derived FLASH Fields
+def _nele(field, data):
+    return data['dens'] * data['ye'] * Na
+add_field('nele', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+add_field('edens', function=_nele, take_log=True, units=r"\rm{cm}^{-3}")
+
+def _nion(field, data):
+    return data['dens'] * data['sumy'] * Na
+add_field('nion', function=_nion, take_log=True, units=r"\rm{cm}^{-3}")
+
+def _abar(field, data):
+    return 1.0 / data['sumy']
+add_field('abar', function=_abar, take_log=False)

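[Editor's note: the derived fields at the end encode the usual HEDP relations: with
'ye' the free electrons per amu and 'sumy' the ions per amu, the electron and ion
number densities follow as nele = dens * ye * Na and nion = dens * sumy * Na, and
the mean atomic mass is abar = 1 / sumy. A stand-alone numerical check with
hypothetical values:

    import numpy as np

    Na = 6.0221415e23           # Avogadro's number
    dens = np.array([1.0])      # g/cm^3
    ye   = np.array([0.5])      # free electrons per amu
    sumy = np.array([0.25])     # ions per amu

    print dens * ye * Na        # nele ~ 3.01e23 cm^-3
    print dens * sumy * Na      # nion ~ 1.51e23 cm^-3
    print 1.0 / sumy            # abar = 4.0 amu]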
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/flash/tests/test_outputs.py
--- a/yt/frontends/flash/tests/test_outputs.py
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -34,7 +34,7 @@
 _fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
 
 sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
-@requires_pf(sloshing)
+@requires_pf(sloshing, big_data=True)
 def test_sloshing():
     pf = data_dir_load(sloshing)
     yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
@@ -50,11 +50,3 @@
     yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
     for test in small_patch_amr(wt, _fields_2d):
         yield test
-
-gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
-@requires_pf(gcm, big_data=True)
-def test_galaxy_cluster_merger():
-    pf = data_dir_load(gcm)
-    for test in big_patch_amr(gcm, _fields):
-        yield test
-

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -197,13 +197,13 @@
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
             current_field = self._handle["/field_types/%s" % field_name]
-            try:
+            if 'field_to_cgs' in current_field.attrs:
                 self.units[field_name] = current_field.attrs['field_to_cgs']
-            except:
+            else:
                 self.units[field_name] = 1.0
-            try:
-                current_fields_unit = current_field.attrs['field_units'][0]
-            except:
+            if 'field_units' in current_field.attrs:
+                current_fields_unit = just_one(current_field.attrs['field_units'])
+            else:
                 current_fields_unit = ""
             self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
                    units=current_fields_unit, projected_units="",

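[Editor's note: replacing the bare try/except blocks with explicit membership tests
lets missing attributes fall back cleanly while genuine errors still surface, and
just_one accepts 'field_units' whether it was stored as a scalar or a length-one
array. The attribute handling, sketched with plain h5py against hypothetical file
and field names:

    import h5py

    f = h5py.File("data0001.gdf", "r")     # hypothetical GDF file
    grp = f["/field_types/density"]        # hypothetical field entry

    if 'field_to_cgs' in grp.attrs:
        to_cgs = grp.attrs['field_to_cgs']
    else:
        to_cgs = 1.0
    if 'field_units' in grp.attrs:
        units = grp.attrs['field_units']   # just_one() would unwrap a 1-array
    else:
        units = ""
    f.close()]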
diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -174,3 +174,18 @@
     add_field("particle_%s" % pf, function=pfunc,
               validators = [ValidateSpatial(0)],
               particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

diff -r 86838dfc73cdc83b39a3b82ab4a228357784ce45 -r 9b5c690e791ab9195415188482b532fbab758208 yt/frontends/pluto/api.py
--- /dev/null
+++ b/yt/frontends/pluto/api.py
@@ -0,0 +1,41 @@
+"""
+API for yt.frontends.pluto
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      PlutoGrid, \
+      PlutoHierarchy, \
+      PlutoStaticOutput
+
+from .fields import \
+      PlutoFieldInfo, \
+      add_pluto_field
+
+from .io import \
+      IOHandlerPlutoHDF5

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/b75588a6221f/
Changeset:   b75588a6221f
Branch:      yt
User:        samskillman
Date:        2013-05-08 18:57:49
Summary:     Fixing Reason plots, as reported by Bryan on IRC.
Affected #:  2 files

diff -r 5b6b482fd29eac3ed397588d25ddb4ab0d8b7385 -r b75588a6221f1f934e89d77a300271810f599dbe yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -76,7 +76,8 @@
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), setup = False)
+        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), 
+                           setup = False, plot_type='SlicePlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]
@@ -96,7 +97,7 @@
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         pw = PWViewerExtJS(proj, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]),
-                           setup = False)
+                           setup = False, plot_type='ProjectionPlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]

diff -r 5b6b482fd29eac3ed397588d25ddb4ab0d8b7385 -r b75588a6221f1f934e89d77a300271810f599dbe yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -551,8 +551,11 @@
     """A viewer for PlotWindows.
 
     """
+    _plot_type = None
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
+        if self._plot_type is None:
+            self._plot_type = kwargs.pop("plot_type")
         PlotWindow.__init__(self, *args,**kwargs)
         self._axes_unit_names = None
         self._callbacks = []

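[Editor's note: Reason constructs PWViewerExtJS directly rather than going through
SlicePlot/ProjectionPlot, so the fix passes the plot type in by keyword; PWViewer
keeps it in a _plot_type class attribute that concrete subclasses may pre-set, and
only consumes the keyword when the class left it as None. The pattern in isolation
(names simplified, not yt's actual classes):

    class Viewer(object):
        _plot_type = None                  # subclasses may hard-code this

        def __init__(self, **kwargs):
            if self._plot_type is None:
                # Only required when the class did not decide for us.
                self._plot_type = kwargs.pop("plot_type")

    class SliceViewer(Viewer):
        _plot_type = "SlicePlot"

    print Viewer(plot_type="ProjectionPlot")._plot_type  # ProjectionPlot
    print SliceViewer()._plot_type                       # SlicePlot]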

https://bitbucket.org/yt_analysis/yt-3.0/commits/ca608f4f08cc/
Changeset:   ca608f4f08cc
Branch:      yt
User:        MatthewTurk
Date:        2013-05-08 19:34:36
Summary:     Merged in samskillman/yt (pull request #493)

Fixing Reason plots, as reported by Bryan on IRC.
Affected #:  2 files

diff -r 9b5c690e791ab9195415188482b532fbab758208 -r ca608f4f08cc48dc1170295aacf20abbcd4b5541 yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -76,7 +76,8 @@
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), setup = False)
+        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), 
+                           setup = False, plot_type='SlicePlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]
@@ -96,7 +97,7 @@
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         pw = PWViewerExtJS(proj, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]),
-                           setup = False)
+                           setup = False, plot_type='ProjectionPlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]

diff -r 9b5c690e791ab9195415188482b532fbab758208 -r ca608f4f08cc48dc1170295aacf20abbcd4b5541 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -551,8 +551,11 @@
     """A viewer for PlotWindows.
 
     """
+    _plot_type = None
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
+        if self._plot_type is None:
+            self._plot_type = kwargs.pop("plot_type")
         PlotWindow.__init__(self, *args,**kwargs)
         self._axes_unit_names = None
         self._callbacks = []


https://bitbucket.org/yt_analysis/yt-3.0/commits/996841e8daf4/
Changeset:   996841e8daf4
Branch:      yt
User:        brittonsmith
Date:        2013-05-08 20:10:06
Summary:     Adding an option to the field interpolators to give the x, y, z bins
explicitly as arrays instead of just bounds.  Also added tests for this.
Affected #:  2 files

diff -r 41aa542420be1ac1e3c7248753471c7f4d4b0ad8 -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -31,12 +31,44 @@
 
 class UnilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 1D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple or array
+            If a tuple, this should specify the upper and lower bounds 
+            for the bins of the data table.  This assumes the bins are 
+            evenly spaced.  If an array, this specifies the bins 
+            explicitly.
+        field_names: str
+            Name of the field to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random(64)
+        interp = UnilinearFieldInterpolator(table_data, (0.0, 1.0), "x",
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-
+        if isinstance(boundaries, np.ndarray):
+            if boundaries.size != table.shape[0]:
+                mylog.error("Bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries
+        else:
+            x0, x1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
@@ -57,12 +89,51 @@
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 2D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple
+            Either a tuple of lower and upper bounds for the x and y bins 
+            given as (x0, x1, y0, y1) or a tuple of two arrays containing the 
+            x and y bins.
+        field_names: list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64))
+        interp = BilinearFieldInterpolator(table_data, (0.0, 1.0, 0.0, 1.0), 
+                                           ["x", "y"],
+                                           truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        if len(boundaries) == 4:
+            x0, x1, y0, y1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        elif len(boundaries) == 2:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1) or as (x_bins, y_bins)")
+            raise ValueError
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -90,14 +161,58 @@
 
 class TrilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate = False):
+        r"""Initialize a 3D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple
+            Either a tuple of lower and upper bounds for the x, y, and z bins 
+            given as (x0, x1, y0, y1, z0, z1) or a tuple of three arrays 
+            containing the x, y, and z bins.
+        field_names: list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64, 64))
+        interp = TrilinearFieldInterpolator(table_data, 
+                                            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), 
+                                            ["x", "y", "z"],
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
-
+        if len(boundaries) == 6:
+            x0, x1, y0, y1, z0, z1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+            self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
+        elif len(boundaries) == 3:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[2].size != table.shape[2]:
+                mylog.error("Z bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+            self.z_bins = boundaries[2]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1, z0, z1) or as (x_bins, y_bins, z_bins)")
+            raise ValueError
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')

diff -r 41aa542420be1ac1e3c7248753471c7f4d4b0ad8 -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -7,21 +7,58 @@
 def test_linear_interpolator_1d():
     random_data = np.random.random(64)
     fv = {'x': np.mgrid[0.0:1.0:64j]}
+    # evenly spaced bins
     ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
-    assert_array_equal(ufi(fv), random_data)
+    yield assert_array_equal, ufi(fv), random_data
+    
+    # randomly spaced bins
+    size = 64
+    shift = (1. / size) * np.random.random(size) - (0.5 / size)
+    fv["x"] += shift
+    ufi = lin.UnilinearFieldInterpolator(random_data, 
+                                         np.linspace(0.0, 1.0, size) + shift, 
+                                         "x", True)
+    yield assert_array_almost_equal, ufi(fv), random_data, 15
 
 def test_linear_interpolator_2d():
     random_data = np.random.random((64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
     bfi = lin.BilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0), "xy", True)
-    assert_array_equal(bfi(fv), random_data)
+    yield assert_array_equal, bfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xy")
+    fv["x"] += shifts["x"][:, np.newaxis]
+    fv["y"] += shifts["y"]
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"]), "xy", True)
+    yield assert_array_almost_equal, bfi(fv), random_data, 15
 
 def test_linear_interpolator_3d():
     random_data = np.random.random((64, 64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    assert_array_equal(tfi(fv), random_data)
+    yield assert_array_equal, tfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xyz")
+    fv["x"] += shifts["x"][:, np.newaxis, np.newaxis]
+    fv["y"] += shifts["y"][:, np.newaxis]
+    fv["z"] += shifts["z"]
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"], 
+             bins + shifts["z"]), "xyz", True)
+    yield assert_array_almost_equal, tfi(fv), random_data, 15
+    

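[Editor's note: with this change each interpolator accepts either evenly spaced
bounds or explicit bin arrays, which is what the shifted-bin tests above exercise.
Since the interpolators only index their argument by field name, a plain dict can
stand in for a yt data object in a quick check; a minimal sketch of the new array
form:

    import numpy as np
    import yt.utilities.linear_interpolators as lin

    table = np.random.random(64)
    bins = np.sort(np.random.random(64))   # uneven but monotonic bins
    fv = {"x": bins.copy()}                # sample exactly at the bins

    ufi = lin.UnilinearFieldInterpolator(table, bins, "x", truncate=True)
    print np.allclose(ufi(fv), table)      # True, up to roundoff]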

https://bitbucket.org/yt_analysis/yt-3.0/commits/cbdb1ea15403/
Changeset:   cbdb1ea15403
Branch:      yt
User:        brittonsmith
Date:        2013-05-08 20:12:52
Summary:     Merged.
Affected #:  90 files

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,7 +4,9 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+rockstar.cfg
 yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -34,7 +34,7 @@
 
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
-INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
+INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
 INST_BZLIB=1    # On some systems, libbzip2 is missing.  This can
@@ -76,7 +76,7 @@
    echo "the script to re-enable root-level installation.  Sorry!"
    exit 1
 fi
-if [[ ${DEST_DIR%/} == /usr/local ]] 
+if [[ ${DEST_DIR%/} == /usr/local ]]
 then
    echo "******************************************************"
    echo "*                                                    *"
@@ -170,6 +170,19 @@
         echo "   $ module load gcc"
         echo
     fi
+    if [ "${MYHOST##midway}" != "${MYHOST}" ]
+    then
+        echo "Looks like you're on Midway."
+        echo
+        echo " ******************************************"
+        echo " * It may be better to use the yt module! *"
+        echo " *                                        *"
+        echo " *   $ module load yt                     *"
+        echo " *                                        *"
+        echo " ******************************************"
+        echo
+        return
+    fi
     if [ "${MYOS##Darwin}" != "${MYOS}" ]
     then
         echo "Looks like you're running on Mac OSX."
@@ -181,7 +194,7 @@
 	echo "must register for an account on the apple developer tools"
 	echo "website: https://developer.apple.com/downloads to obtain the"
 	echo "download link."
-	echo 
+	echo
 	echo "We have gathered some additional instructions for each"
 	echo "version of OS X below. If you have trouble installing yt"
 	echo "after following these instructions, don't hesitate to contact"
@@ -192,15 +205,15 @@
 	echo "menu bar.  We're assuming that you've installed all operating"
 	echo "system updates; if you have an older version, we suggest"
 	echo "running software update and installing all available updates."
-	echo 
-        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
 	echo "Apple developer tools website."
         echo
         echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
 	echo "developer tools website.  You can either download the"
 	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
-	echo "Software Update to update to XCode 3.2.6 or" 
-	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "Software Update to update to XCode 3.2.6 or"
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
 	echo "bundle (4.1 GB)."
         echo
         echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
@@ -208,20 +221,20 @@
         echo "Alternatively, download the Xcode command line tools from"
         echo "the Apple developer tools website."
         echo
-	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
 	echo "(search for Xcode)."
 	echo "Additionally, you will have to manually install the Xcode"
-	echo "command line tools, see:" 
+	echo "command line tools, see:"
 	echo "http://stackoverflow.com/questions/9353444"
 	echo "Alternatively, download the Xcode command line tools from"
 	echo "the Apple developer tools website."
 	echo
-        echo "NOTE: It's possible that the installation will fail, if so," 
-	echo "please set the following environment variables, remove any" 
+        echo "NOTE: It's possible that the installation will fail, if so,"
+	echo "please set the following environment variables, remove any"
 	echo "broken installation tree, and re-run this script verbatim."
         echo
-        echo "$ export CC=gcc-4.2"
-        echo "$ export CXX=g++-4.2"
+        echo "$ export CC=gcc"
+        echo "$ export CXX=g++"
 	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
@@ -278,7 +291,7 @@
         echo
         echo " INST_ZLIB=0"
         echo " INST_FTYPE=0"
-        echo 
+        echo
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
@@ -424,7 +437,7 @@
     cd ..
 }
 
-if type -P wget &>/dev/null 
+if type -P wget &>/dev/null
 then
     echo "Using wget"
     export GETFILE="wget -nv"
@@ -486,28 +499,27 @@
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
-
-echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
+echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
+echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
+echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
+echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
+echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
+echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
 echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
+echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
+echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
@@ -515,50 +527,50 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.3.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.5.1.tar.gz
+get_ytproject Python-2.7.4.tgz
+get_ytproject numpy-1.7.0.tar.gz
+get_ytproject matplotlib-1.2.1.tar.gz
+get_ytproject mercurial-2.5.4.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.0.tar.gz
-get_ytproject Cython-0.17.1.tar.gz
+get_ytproject h5py-2.1.2.tar.gz
+get_ytproject Cython-0.18.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.10.tar.gz
-get_ytproject nose-1.2.1.tar.gz 
-get_ytproject python-hglib-0.2.tar.gz
+get_ytproject Forthon-0.8.11.tar.gz
+get_ytproject nose-1.2.1.tar.gz
+get_ytproject python-hglib-0.3.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
 get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.5/done ]
+    if [ ! -e bzip2-1.0.6/done ]
     then
-        [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.5
-        if [ `uname` = "Darwin" ] 
+        cd bzip2-1.0.6
+        if [ `uname` = "Darwin" ]
         then
-            if [ -z "${CC}" ] 
+            if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so 
+                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -569,11 +581,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.3/done ]
+    if [ ! -e zlib-1.2.7/done ]
     then
-        [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.3
+        cd zlib-1.2.7
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -587,11 +599,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.5.12/done ]
+    if [ ! -e libpng-1.6.1/done ]
     then
-        [ ! -e libpng-1.5.12 ] && tar xfz libpng-1.5.12.tar.gz
+        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
         echo "Installing PNG"
-        cd libpng-1.5.12
+        cd libpng-1.6.1
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -605,11 +617,11 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.4/done ]
+    if [ ! -e freetype-2.4.11/done ]
     then
-        [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.4
+        cd freetype-2.4.11
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -642,11 +654,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3070500/done ]
+    if [ ! -e sqlite-autoconf-3071601/done ]
     then
-        [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3070500
+        cd sqlite-autoconf-3071601
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -655,11 +667,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.3/done ]
+if [ ! -e Python-2.7.4/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
-    cd Python-2.7.3
+    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
+    cd Python-2.7.4
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -674,12 +686,11 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    echo "Installing Mercurial."
-    do_setup_py mercurial-2.5.1
+    do_setup_py mercurial-2.5.4
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
-    if type -P hg &>/dev/null 
+    if type -P hg &>/dev/null
     then
         export HG_EXEC=hg
     else
@@ -696,7 +707,7 @@
     elif [ -e $ORIG_PWD/../yt/mods.py ]
     then
         YT_DIR=`dirname $ORIG_PWD`
-    elif [ ! -e yt-hg ] 
+    elif [ ! -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
@@ -706,7 +717,7 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-hg ] 
+    elif [ -e yt-hg ]
     then
         YT_DIR="$PWD/yt-hg/"
     fi
@@ -714,7 +725,7 @@
 fi
 
 # This fixes problems with gfortran linking.
-unset LDFLAGS 
+unset LDFLAGS
 
 echo "Installing distribute"
 ( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -724,7 +735,7 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
 else
     if [ ! -e scipy-0.11.0/done ]
     then
@@ -752,8 +763,8 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
-    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
+    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
     do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
 fi
 
@@ -776,10 +787,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
-do_setup_py matplotlib-1.2.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
+do_setup_py matplotlib-1.2.1
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -791,29 +802,29 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-2.2.0/done ]
+    if [ ! -e zeromq-3.2.2/done ]
     then
-        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-2.2.0
+        cd zeromq-3.2.2
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
-    do_setup_py tornado-2.2
+    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
+    do_setup_py tornado-3.0
 fi
 
 do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.0
-do_setup_py Cython-0.17.1
-do_setup_py Forthon-0.8.10
+do_setup_py h5py-2.1.2
+do_setup_py Cython-0.18
+do_setup_py Forthon-0.8.11
 do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.2
+do_setup_py python-hglib-0.3
 do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
@@ -837,16 +848,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 

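[Editor's note: each tarball's expected digest is dumped to a sibling
<name>.sha512 file in the "<hexdigest>  <filename>" format shown above; the script
then checks downloads against these (the verification step itself falls outside
this hunk). In spirit the check amounts to this Python sketch, with hypothetical
file names:

    import hashlib

    def verify_sha512(tarball, sha512_file):
        # The .sha512 file holds "<hexdigest>  <filename>", as written above.
        expected = open(sha512_file).read().split()[0]
        h = hashlib.sha512()
        f = open(tarball, 'rb')
        for chunk in iter(lambda: f.read(1 << 20), ''):
            h.update(chunk)
        f.close()
        return h.hexdigest() == expected

    print verify_sha512("Python-2.7.4.tgz", "Python-2.7.4.tgz.sha512")]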
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,62 @@
 import sys
 import time
 import subprocess
+import shutil
+import glob
 import distribute_setup
 distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
 from distutils import version
 
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
+
+class BuildForthon(Command):
+
+    """Command for building Forthon modules"""
+
+    description = "Build Forthon modules"
+    user_options = []
+
+    def initialize_options(self):
+
+        """init options"""
+
+        pass
+
+    def finalize_options(self):
+
+        """finalize options"""
+
+        pass
+
+    def run(self):
+
+        """runner"""
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            sys.stderr.write(
+                "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+            )
+            return
+
+        cwd = os.getcwd()
+        os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+        cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+               "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+               "fKD_source.f90"]
+        subprocess.check_call(cmd, shell=False)
+        shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+        os.chdir(cwd)
+
 REASON_FILES = []
 REASON_DIRS = [
     "",
@@ -36,7 +84,7 @@
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append( (dir_name, files) )
+    REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 try:
@@ -93,10 +141,10 @@
             language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
-                                                   options=options)
+                                                     options=options)
         if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython" \
-                  % (cython_result.num_errors, source))
+            raise DistutilsError("%d errors while compiling %r with Cython"
+                                 % (cython_result.num_errors, source))
     return target_file
 
 
@@ -107,9 +155,11 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+    os.remove('MANIFEST')
+
 
 def get_mercurial_changeset_id(target_dir):
     """adapted from a script by Jason F. Harris, published at
@@ -123,11 +173,11 @@
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      shell=True)
-        
+
     if (get_changeset.stderr.read() != ""):
         print "Error in obtaining current changeset of the Mercurial repository"
         changeset = None
-        
+
     changeset = get_changeset.stdout.read().strip()
     if (not re.search("^[0-9a-f]{12}", changeset)):
         print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +185,30 @@
 
     return changeset
 
+
+class my_build_src(build_src.build_src):
+    def run(self):
+        self.run_command("build_forthon")
+        build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+    def run(self):
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
+        np_install_data.install_data.run(self)
+
 class my_build_py(build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
-            target_dir = os.path.join(self.build_lib,'yt')
-            src_dir =  os.getcwd() 
+            target_dir = os.path.join(self.build_lib, 'yt')
+            src_dir = os.getcwd()
             changeset = get_mercurial_changeset_id(src_dir)
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +216,7 @@
 
             build_py.run(self)
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -158,7 +227,7 @@
                        quiet=True)
 
     config.make_config_py()
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
@@ -176,25 +245,25 @@
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
         classifiers=["Development Status :: 5 - Production/Stable",
-            "Environment :: Console",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: GNU General Public License (GPL)",
-            "Operating System :: MacOS :: MacOS X",
-            "Operating System :: POSIX :: AIX",
-            "Operating System :: POSIX :: Linux",
-            "Programming Language :: C",
-            "Programming Language :: Python",
-            "Topic :: Scientific/Engineering :: Astronomy",
-            "Topic :: Scientific/Engineering :: Physics",
-            "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' + \
-            'amr adaptivemeshrefinement',
+                     "Environment :: Console",
+                     "Intended Audience :: Science/Research",
+                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "Operating System :: MacOS :: MacOS X",
+                     "Operating System :: POSIX :: AIX",
+                     "Operating System :: POSIX :: Linux",
+                     "Programming Language :: C",
+                     "Programming Language :: Python",
+                     "Topic :: Scientific/Engineering :: Astronomy",
+                     "Topic :: Scientific/Engineering :: Physics",
+                     "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' +
+        'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
-                            'yt = yt.utilities.command_line:run_main',
-                      ],
-                      'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-                      ]
+        'yt = yt.utilities.command_line:run_main',
+        ],
+            'nose.plugins.0.10': [
+                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+            ]
         },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
@@ -203,8 +272,9 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
-        cmdclass = {'build_py': my_build_py},
-        )
+        cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+                  'build_src': my_build_src, 'install_data': my_install_data},
+    )
     return
 
 if __name__ == '__main__':

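The setup.py changes above form one mechanism: BuildForthon is a standard
distutils Command, my_build_src chains it in front of the normal source
build via run_command, and my_install_data ships the resulting fKDpy.so
only when the Fortran toolchain was actually present. A minimal sketch of
that command protocol, with hypothetical names:

    import sys
    from distutils.core import Command
    from numpy.distutils.command import build_src

    class HelloCommand(Command):
        """Toy command following the same protocol as BuildForthon."""
        description = "print a message before the source build"
        user_options = []      # no command-line options

        def initialize_options(self):
            pass               # set option defaults here

        def finalize_options(self):
            pass               # validate or derive options here

        def run(self):
            sys.stderr.write("about to build sources\n")

    class chained_build_src(build_src.build_src):
        def run(self):
            # run_command executes a registered command at most once per build
            self.run_command("hello")
            build_src.build_src.run(self)

    # Registered the same way as in the diff above:
    # setup(..., cmdclass={'hello': HelloCommand,
    #                      'build_src': chained_build_src})
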
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
             If True, use dynamic load balancing to create the projections.
             Default: False.
 
-        Getting the Nearest Galaxies
-        ----------------------------
+        Notes
+        -----
+
         The light ray tool will use the HaloProfiler to calculate the
         distance and mass of the nearest halo to that pixel.  In order
         to do this, a dictionary called halo_profiler_parameters is used

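This rename, and the similar ones below (Output -> Returns, Paramters ->
Parameters, Keyword Parameters -> Other Parameters), brings the docstrings
in line with the numpydoc standard, whose Sphinx extension only parses a
fixed set of section titles. A sketch of the expected shape, using a
hypothetical function:

    def nearest_halo_distance(pixel):
        """Distance from a ray pixel to its nearest halo.

        Parameters
        ----------
        pixel : int
            Index of the pixel along the ray.

        Returns
        -------
        distance : float
            Distance to the nearest halo, in code units.

        Notes
        -----
        Only standard section titles (Parameters, Returns,
        Other Parameters, Notes, Examples, ...) are recognized.
        """
        return 0.0
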
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1367,6 +1367,7 @@
         self._groups = []
         self._max_dens = -1
         self.pf = pf
+        self.redshift = pf.current_redshift
         self.out_list = out_list
         self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -238,6 +238,7 @@
         tpf = ts[0]
 
         def _particle_count(field, data):
+            if data.NumberOfParticles == 0: return 0
             try:
                 data["particle_type"]
                 has_particle_type=True

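The zero-particle guard above short-circuits grids that carry no particles
at all, where even asking for "particle_type" would fail. The same pattern
in a yt 2.x-style derived-field sketch (the field logic and fallback here
are illustrative, not this changeset's; data is the grid object yt passes
in):

    def _particle_count_sketch(field, data):
        # Grids without particles have no particle arrays; bail out first.
        if data.NumberOfParticles == 0:
            return 0
        try:
            # Not every frontend provides a particle_type field.
            return (data["particle_type"] == 1).sum()
        except KeyError:
            return data.NumberOfParticles
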
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
         halonum : int
             Halo number at the last output to trace.
 
-        Output
-        ------
+        Returns
+        -------
         output : dict
             Dictionary of redshifts, cycle numbers, and halo numbers
             of the most massive progenitor.  keys = {redshift, cycle,

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)
@@ -758,17 +758,19 @@
     
     def query(self, string):
         r"""Performs a query of the database and returns the results as a list
-        of tuple(s), even if the result is singular.
+        of tuples, even if the result is singular.
         
         Parameters
         ----------
-        string : String
+        
+        string : str
             The SQL query of the database.
         
         Examples
-        -------
+        --------
+
         >>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
-        ... SnapZ = 0;")
+        ...    SnapZ = 0;")
         """
         # Query the database and return a list of tuples.
         if string is None:

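Since query() is documented to hand back a list of tuples no matter what,
its behavior can be sketched directly with sqlite3, reusing the table and
column names from the docstring example and the documented default
database file (assuming that database already exists on disk):

    import sqlite3

    conn = sqlite3.connect("halos.db")
    cursor = conn.cursor()
    cursor.execute("SELECT GlobalHaloID FROM Halos "
                   "WHERE SnapHaloID = 0 AND SnapZ = 0;")
    results = cursor.fetchall()  # a list of tuples, even for a single row
    conn.close()
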
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
         After all the calls to `add_profile`, this will trigger the actual
         calculations and output the profiles to disk.
 
-        Paramters
-        ---------
+        Parameters
+        ----------
 
         filename : str
             If set, a file will be written with all of the filtered halos

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
     
     Initialize an EmissivityIntegrator object.
 
-    Keyword Parameters
-    ------------------
-    filename: string
+    Parameters
+    ----------
+    filename: string, default None
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
         emissivity tables for primordial elements and for metals at 
@@ -146,8 +146,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -220,8 +220,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -277,8 +277,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold006',
+    gold_standard_filename = 'gold007',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -36,6 +36,8 @@
 import itertools
 import shelve
 import cStringIO
+import fileinput
+from re import finditer
 
 from yt.funcs import *
 from yt.config import ytcfg
@@ -178,7 +180,7 @@
         self.child_mask = 1
         self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-        
+
     def __getitem__(self, field):
         if field not in self.field_data.keys():
             if field == "RadiusCode":
@@ -424,7 +426,7 @@
         return grids
 
     def select_grid_indices(self, level):
-        return np.where(self.grid_levels == level)
+        return np.where(self.grid_levels[:,0] == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
@@ -461,6 +463,7 @@
     def __get_grid_levels(self):
         if self.__grid_levels == None:
             self.__grid_levels = np.array([g.Level for g in self._grids])
+            self.__grid_levels.shape = (self.__grid_levels.size, 1)
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +477,6 @@
     grid_levels = property(__get_grid_levels, __set_grid_levels,
                              __del_grid_levels)
 
-
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
             self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
@@ -491,6 +493,19 @@
     grid_dimensions = property(__get_grid_dimensions, __set_grid_dimensions,
                              __del_grid_dimensions)
 
+    @property
+    def grid_corners(self):
+        return np.array([
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+        ], dtype='float64')
+
 
 class AMR1DData(AMRData, GridPropertiesMixin):
     _spatial = False
@@ -530,7 +545,7 @@
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
             self[field] = self[field][self._sortkey]
-       
+
 class AMROrthoRayBase(AMR1DData):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
@@ -673,9 +688,9 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+        p = p | ( np.all( LE <= self.start_point, axis=1 )
                 & np.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+        p = p | ( np.all( LE <= self.end_point,   axis=1 )
                 & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
@@ -695,7 +710,7 @@
         if not iterable(gf):
             gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -738,11 +753,11 @@
     --------
 
     >>> from yt.visualization.api import Streamlines
-    >>> streamlines = Streamlines(pf, [0.5]*3) 
+    >>> streamlines = Streamlines(pf, [0.5]*3)
     >>> streamlines.integrate_through_volume()
     >>> stream = streamlines.path(0)
     >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-    
+
     """
     _type_name = "streamline"
     _con_args = ('positions')
@@ -775,16 +790,16 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # No child masking here; it happens inside the mask cut
-        mask = self._get_cut_mask(grid) 
+        mask = self._get_cut_mask(grid)
         if field == 'dts': return self._dts[grid.id]
         if field == 't': return self._ts[grid.id]
         return grid[field].flat[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1) 
+                         np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
         mask = np.zeros(points_in_grid.sum(), dtype='int')
         dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -819,7 +834,7 @@
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
         self.set_field_parameter("axis",axis)
-        
+
     def _convert_field_name(self, field):
         return field
 
@@ -838,7 +853,6 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(fields)
-        temp_data = {}
         for field in fields_to_get:
             if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
@@ -848,18 +862,13 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = np.array([])
-            else: data = np.concatenate(data)
-            temp_data[field] = data
+            if len(data) == 0:
+                data = np.array([])
+            else:
+                data = np.concatenate(data)
             # Now the next field can use this field
-            self[field] = temp_data[field] 
-        # We finalize
-        if temp_data != {}:
-            temp_data = self.comm.par_combine_object(temp_data,
-                    datatype='dict', op='cat')
-        # And set, for the next group
-        for field in temp_data.keys():
-            self[field] = temp_data[field]
+            self[field] = self.comm.par_combine_object(data, op='cat',
+                                                       datatype='array')
 
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
@@ -874,7 +883,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw
@@ -980,7 +989,7 @@
         for field in fields:
             #mylog.debug("Trying to obtain %s from node %s",
                 #self._convert_field_name(field), node_name)
-            fdata=self.hierarchy.get_data(node_name, 
+            fdata=self.hierarchy.get_data(node_name,
                 self._convert_field_name(field))
             if fdata is not None:
                 #mylog.debug("Got %s from node %s", field, node_name)
@@ -1138,7 +1147,7 @@
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
-        del cmI   # no longer needed 
+        del cmI   # no longer needed
         t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
@@ -1197,7 +1206,7 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1477,7 +1486,7 @@
         self.dims = dims
         self.dds = self.width / self.dims
         self.bounds = np.array([0.0,1.0,0.0,1.0])
-        
+
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1563,7 +1572,7 @@
 
             # Mark these pixels to speed things up
             self._pixelmask[pointI] = 0
-            
+
             return
         else:
             raise SyntaxError("Making a fixed resolution slice with "
@@ -1651,7 +1660,7 @@
         L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
-        
+
 class AMRQuadTreeProjBase(AMR2DData):
     """
     This is a data object corresponding to a line integral through the
@@ -1809,7 +1818,7 @@
             convs[:] = 1.0
         return dls, convs
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1850,7 +1859,7 @@
                                  if g.Level == level],
                               self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -1942,7 +1951,7 @@
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
+        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
@@ -2283,7 +2292,7 @@
                 del self.__retval_coords[grid.id]
                 del self.__retval_fields[grid.id]
                 del self.__overlap_masks[grid.id]
-            mylog.debug("End of projecting level level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         coord_data = np.concatenate(coord_data, axis=1)
         field_data = np.concatenate(field_data, axis=1)
@@ -2314,7 +2323,7 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -2522,7 +2531,7 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
@@ -2683,9 +2692,9 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
+        fly with a set of field_cuts.  It is very useful for applying
         conditions to the fields in your data object.
-        
+
         Examples
         --------
         To find the total mass of gas above 10^6 K in your volume:
@@ -2726,7 +2735,7 @@
         useful for calculating, for instance, total isocontour area, or
         visualizing in an external program (such as `MeshLab
         <http://meshlab.sf.net>`_.)
-        
+
         Parameters
         ----------
         field : string
@@ -2840,7 +2849,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field : string
@@ -2897,7 +2906,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -2990,7 +2999,7 @@
     ----------------
     force_refresh : bool
        Force a refresh of the data. Defaults to True.
-    
+
     Examples
     --------
     """
@@ -3230,7 +3239,7 @@
         if self._grids is not None: return
         GLE = self.pf.h.grid_left_edge
         GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center, 
+        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
                                            GLE, GRE)
         cgrids = self.pf.h.grids[goodI.astype('bool')]
        # find_grids_in_inclined_box seems to be broken.
@@ -3238,13 +3247,13 @@
         grids = []
         for i,grid in enumerate(cgrids):
             v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge, 
+                                      self._rot_mat, grid.LeftEdge,
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
         self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
-            
+
 
     def _is_fully_enclosed(self, grid):
         # This should be written at some point.
@@ -3257,10 +3266,10 @@
             return True
         pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge, 
+                              self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
         return pm
-        
+
 
 class AMRRegionBase(AMR3DData):
     """A 3D region of data with an arbitrary center.
@@ -3396,9 +3405,9 @@
     _dx_pad = 0.0
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
                                        fields = None, pf = None, **kwargs)
-    
+
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3565,7 +3574,7 @@
         self._C = C
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -3575,15 +3584,15 @@
         t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
-        given the tilt about the x axis when e0 was aligned 
+        given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
-        RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
-        RY = get_rotation_matrix(-t2,   (0,1,0)).transpose()
-        RZ = get_rotation_matrix(-t1,   (0,0,1)).transpose()
-        e1 = ((0, 1, 0) * RX).sum(axis = 1)
-        e1 = (e1 * RY).sum(axis = 1)
-        e1 = (e1 * RZ).sum(axis = 1)
+        RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
+        RY = get_rotation_matrix(-t2,   (0, 1, 0)).transpose()
+        RZ = get_rotation_matrix(-t1,   (0, 0, 1)).transpose()
+        e1 = ((0, 1, 0) * RX).sum(axis=1)
+        e1 = (e1 * RY).sum(axis=1)
+        e1 = (e1 * RZ).sum(axis=1)
         e2 = np.cross(e0, e1)
 
         self._e1 = e1
@@ -3599,95 +3608,72 @@
         self._refresh_data()
 
         """
-        Having another function find_ellipsoid_grids is too much work, 
+        Having another function find_ellipsoid_grids is too much work,
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
-    def _get_list_of_grids(self, field = None):
+    def _get_list_of_grids(self, field=None):
         """
         This returns the grids that are possibly within the ellipse
         """
-        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        grids, ind = self.hierarchy.find_sphere_grids(self.center, self._A)
         # Now we sort by level
         grids = grids.tolist()
-        grids.sort(key=lambda x: (x.Level, \
-                                  x.LeftEdge[0], \
-                                  x.LeftEdge[1], \
+        grids.sort(key=lambda x: (x.Level,
+                                  x.LeftEdge[0],
+                                  x.LeftEdge[1],
                                   x.LeftEdge[2]))
-        self._grids = np.array(grids, dtype = 'object')
+        self._grids = np.array(grids, dtype='object')
 
     def _is_fully_enclosed(self, grid):
         """
         check if all grid corners are inside the ellipsoid
         """
-        # vector from corner to center
-        vr = (grid._corners - self.center)
-        # 3 possible cases of locations taking periodic BC into account
-        # just listing the components, find smallest later
-        dotarr=np.array([vr, vr + self.DW, vr - self.DW])
-        # these vrdote# finds the product of vr components with e#
-        # square the results
-        # find the smallest
-        # sums it
-        vrdote0_2 = (np.multiply(dotarr, self._e0)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote1_2 = (np.multiply(dotarr, self._e1)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        vrdote2_2 = (np.multiply(dotarr, self._e2)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return np.all(vrdote0_2 / self._A**2 + \
-                      vrdote1_2 / self._B**2 + \
-                      vrdote2_2 / self._C**2 <=1.0)
-
-    @restore_grid_state # Pains me not to decorate with cache_mask here
-    def _get_cut_mask(self, grid, field = None):
+        return False
+
+    @restore_grid_state  # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field=None):
         """
         This checks if each cell is inside the ellipsoid
         """
         # We have the *property* center, which is not necessarily
         # the same as the field_parameter
         if self._is_fully_enclosed(grid):
-            return True # We do not want child masking here
+            return True  # We do not want child masking here
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
            and grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
-        Inside = np.zeros(grid["x"].shape, dtype = 'float64')
-        dim = grid["x"].shape
-        # need this to take into account non-cube root grid tiles
-        if (len(dim) == 1):
-            dot_evec = np.zeros([3, dim[0]])
-        elif (len(dim) == 2):
-            dot_evec = np.zeros([3, dim[0], dim[1]])
-        elif (len(dim) == 3):
-            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
+        dot_evecx = np.zeros(grid.ActiveDimensions)
+        dot_evecy = np.zeros(grid.ActiveDimensions)
+        dot_evecz = np.zeros(grid.ActiveDimensions)
 
         for i, ax in enumerate('xyz'):
             # distance to center
-            ar  = grid[ax]-self.center[i]
-            # cases to take into account periodic BC
-            case = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
-            # find which of the 3 cases is smallest in magnitude
-            index = np.abs(case).argmin(axis = 0)
-            # restrict distance to only the smallest cases
-            vec = np.choose(index, case)
+            ar = grid[ax]-self.center[i]
+            # correct for periodicity
+            vec = np.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            ind = np.argmin(np.abs(vec), axis=0)
+            vec = np.choose(ind, vec)
             # sum up to get the dot product with e_vectors
-            dot_evec += np.array([vec * self._e0[i], \
-                                  vec * self._e1[i], \
-                                  vec * self._e2[i]])
+            dot_evecx += vec * self._e0[i] / self._A
+            dot_evecy += vec * self._e1[i] / self._B
+            dot_evecz += vec * self._e2[i] / self._C
+
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
-        Inside = dot_evec[0]**2 / self._A**2 + \
-                 dot_evec[1]**2 / self._B**2 + \
-                 dot_evec[2]**2 / self._C**2
-        cm = ((Inside <= 1.0) & grid.child_mask)
+        cm = ((dot_evecx**2 +
+               dot_evecy**2 +
+               dot_evecz**2 <= 1.0) & grid.child_mask)
         if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
             self._cut_masks[grid.id] = cm
         return cm
 
+
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
-    
+
     Parameters
     ----------
     level : int
@@ -3785,7 +3771,7 @@
             n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
-            
+
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator; this might even raise!
@@ -3813,13 +3799,13 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 0)
         return count
@@ -3835,7 +3821,7 @@
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 1)
 
@@ -3856,7 +3842,7 @@
     fill the region to level 1, replacing any cells actually
     covered by level 1 data, and then recursively repeating this
     process until it reaches the specified `level`.
-    
+
     Parameters
     ----------
     level : int
@@ -3868,10 +3854,11 @@
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
 
-    Example
-    -------
-    cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                              dims=[128, 128, 128])
+    Examples
+    --------
+
+    >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+    ...                          dims=[128, 128, 128])
     """
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -3976,7 +3963,7 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
+        input_left = (self._old_global_startindex + 0.5) * rf
         dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
         output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
@@ -3990,13 +3977,13 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
@@ -4008,14 +3995,14 @@
     """
     This will build a hybrid region based on the boolean logic
     of the regions.
-    
+
     Parameters
     ----------
     regions : list
         A list of region objects and strings describing the boolean logic
         to use when building the hybrid region. The boolean logic can be
         nested using parentheses.
-    
+
     Examples
     --------
     >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4028,7 +4015,7 @@
         sp1, ")"])
     """
     _type_name = "boolean"
-    _con_args = ("regions")
+    _con_args = ("regions",)
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         # Center is meaningless, but we'll define it all the same.
         AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4040,7 +4027,7 @@
         self._get_all_regions()
         self._make_overlaps()
         self._get_list_of_grids()
-    
+
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
@@ -4050,7 +4037,7 @@
             # So cut_masks don't get messed up.
             item._boolean_touched = True
         self._all_regions = np.unique(self._all_regions)
-    
+
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
@@ -4084,7 +4071,7 @@
                     continue
             pbar.update(i)
         pbar.finish()
-    
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4097,7 +4084,7 @@
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
-    
+
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 
@@ -4184,7 +4171,7 @@
     <http://meshlab.sf.net>`_.)  The object has the properties .vertices
     and will sample values if a field is requested.  The values are
     interpolated to the center of a given face.
-    
+
     Parameters
     ----------
     data_source : AMR3DDataObject
@@ -4259,7 +4246,7 @@
                 self[fields] = samples
             elif sample_type == "vertex":
                 self.vertex_samples[fields] = samples
-        
+
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4296,7 +4283,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field_x : string
@@ -4343,7 +4330,7 @@
         return flux
 
     @restore_grid_state
-    def _calculate_flux_in_grid(self, grid, 
+    def _calculate_flux_in_grid(self, grid,
                     field_x, field_y, field_z, fluxing_field = None):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4351,7 +4338,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -4366,6 +4353,230 @@
                 vv[:,i,j] = self.vertices[j,i::3]
         return vv
 
+    def export_obj(self, filename, transparency = 1.0, dist_fac = None,
+                   color_field = None, emit_field = None, color_map = "algae", 
+                   color_log = True, emit_log = True, plot_index = None, 
+                   color_field_max = None, color_field_min = None, 
+                   emit_field_max = None, emit_field_min = None):
+        r"""This exports the surface to the OBJ format, suitable for visualization
+        in many different programs (e.g., Blender).  NOTE: this exports an .obj file 
+        and an .mtl file, both with the general 'filename' as a prefix.  
+        The .obj file points to the .mtl file in its header, so if you move the 2 
+        files, make sure you change the .obj header to account for this. ALSO NOTE: 
+        the emit_field needs to be a combination of the other 2 fields used to 
+        have the emissivity track with the color.
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+            Note - there are no file extensions included - both obj & mtl files 
+            are created.
+        transparency : float
+            This gives the transparency of the output surface plot.  Values
+            from 0.0 (invisible) to 1.0 (opaque).
+        dist_fac : float
+            Divide the axes distances by this amount.
+        color_field : string
+            Should a field be sampled and colormapped?
+        emit_field : string
+            Should we track the emissivity of a field?
+              NOTE: this should be a combination of the other 2 fields being used.
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+        emit_log : bool
+            Should the emitting field be logged before being mapped?
+        plot_index : integer
+            Index of plot for multiple plots.  If none, then only 1 plot.
+        color_field_max : float
+            Maximum value of the color field across all surfaces.
+        color_field_min : float
+            Minimum value of the color field across all surfaces.
+        emit_field_max : float
+            Maximum value of the emitting field across all surfaces.
+        emit_field_min : float
+            Minimum value of the emitting field across all surfaces.
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> trans = 1.0
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> surf.export_obj("my_galaxy", transparency=trans, dist_fac = distf)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> mi, ma = sp.quantities['Extrema']('Temperature')[0]
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', dist_fac = distf, 
+        ...                      plot_index = i, color_field_max = ma, 
+        ...                      color_field_min = mi)
+
+        >>> sp = pf.h.sphere("max", (10, "kpc"))
+        >>> rhos = [1e-24, 1e-25]
+        >>> trans = [0.5, 1.0]
+        >>> distf = 3.1e18*1e3 # distances into kpc
+        >>> def _Emissivity(field, data):
+        ...     return (data['Density']*data['Density']*np.sqrt(data['Temperature']))
+        >>> add_field("Emissivity", function=_Emissivity, units=r"\rm{g K}/\rm{cm}^{6}")
+        >>> for i, r in enumerate(rhos):
+        ...     surf = pf.h.surface(sp,'Density',r)
+        ...     surf.export_obj("my_galaxy", transparency=trans[i], 
+        ...                      color_field='Temperature', emit_field = 'Emissivity', 
+        ...                      dist_fac = distf, plot_index = i)
+
+        """
+        if self.vertices is None:
+            self.get_data(color_field,"face")
+        elif color_field is not None:
+            if color_field not in self.field_data:
+                self[color_field]
+        if emit_field is not None:
+            if emit_field not in self.field_data:
+                self[emit_field]
+        only_on_root(self._export_obj, filename, transparency, dist_fac, color_field, emit_field, 
+                             color_map, color_log, emit_log, plot_index, color_field_max, 
+                             color_field_min, emit_field_max, emit_field_min)
+
+    def _color_samples_obj(self, cs, em, color_log, emit_log, color_map, arr, 
+                           color_field_max, color_field_min, 
+                           emit_field_max, emit_field_min): # this now holds for obj files
+        if color_log: cs = np.log10(cs)
+        if emit_log: em = np.log10(em)
+        if color_field_min is None:
+            mi = cs.min()
+        else:
+            mi = color_field_min
+            if color_log: mi = np.log10(mi)
+        if color_field_max is None:
+            ma = cs.max()
+        else:
+            ma = color_field_max
+            if color_log: ma = np.log10(ma)
+        cs = (cs - mi) / (ma - mi)
+        # to get color indices for OBJ formatting
+        from yt.visualization._colormap_data import color_map_luts
+        lut = color_map_luts[color_map]
+        x = np.mgrid[0.0:1.0:lut[0].shape[0]*1j]
+        arr["cind"][:] = (np.interp(cs,x,x)*(lut[0].shape[0]-1)).astype("uint8")
+        # now, get emission
+        if emit_field_min is None:
+            emi = em.min()
+        else:
+            emi = emit_field_min
+            if emit_log: emi = np.log10(emi)
+        if emit_field_max is None:
+            ema = em.max()
+        else:
+            ema = emit_field_max
+            if emit_log: ema = np.log10(ema)
+        em = (em - emi)/(ema - emi)
+        x = np.mgrid[0.0:255.0:2j] # assume 1 emissivity per color
+        arr["emit"][:] = (np.interp(em,x,x))*2.0 # for some reason, max emiss = 2
+
+    @parallel_root_only
+    def _export_obj(self, filename, transparency, dist_fac = None, 
+                    color_field = None, emit_field = None, color_map = "algae", 
+                    color_log = True, emit_log = True, plot_index = None, 
+                    color_field_max = None, color_field_min = None, 
+                    emit_field_max = None, emit_field_min = None):
+        if plot_index is None:
+            plot_index = 0
+        if isinstance(filename, file):
+            fobj = filename + '.obj'
+            fmtl = filename + '.mtl'
+        else:
+            if plot_index == 0:
+                fobj = open(filename + '.obj', "w")
+                fmtl = open(filename + '.mtl', 'w')
+                cc = 1
+            else:
+                # read in last vertex
+                linesave = ''
+                for line in fileinput.input(filename + '.obj'):
+                    if line[0] == 'f':
+                        linesave = line
+                p = [m.start() for m in finditer(' ', linesave)]
+                cc = int(linesave[p[len(p)-1]:])+1
+                fobj = open(filename + '.obj', "a")
+                fmtl = open(filename + '.mtl', 'a')
+        ftype = [("cind", "uint8"), ("emit", "float")]
+        vtype = [("x","float"),("y","float"), ("z","float")]
+        if plot_index == 0:
+            fobj.write("# yt OBJ file\n")
+            fobj.write("# www.yt-project.com\n")
+            fobj.write("mtllib " + filename + '.mtl\n\n')  # use this material file for the faces
+            fmtl.write("# yt MLT file\n")
+            fmtl.write("# www.yt-project.com\n\n")
+        #(0) formulate vertices
+        nv = self.vertices.shape[1] # number of groups of vertices
+        f = np.empty(nv/self.vertices.shape[0], dtype=ftype) # store sets of face colors
+        v = np.empty(nv, dtype=vtype) # stores vertices
+        if color_field is not None:
+            cs = self[color_field]
+        else:
+            cs = np.empty(self.vertices.shape[1]/self.vertices.shape[0])
+        if emit_field is not None:
+            em = self[emit_field]
+        else:
+            em = np.empty(self.vertices.shape[1]/self.vertices.shape[0])            
+        self._color_samples_obj(cs, em, color_log, emit_log, color_map, f, 
+                                color_field_max, color_field_min, 
+                                emit_field_max, emit_field_min) # map color values to color scheme
+        from yt.visualization._colormap_data import color_map_luts # import colors for mtl file
+        lut = color_map_luts[color_map] # enumerate colors
+        # interpolate emissivity to enumerated colors
+        emiss = np.interp(np.mgrid[0:lut[0].shape[0]],np.mgrid[0:len(cs)],f["emit"][:])
+        if dist_fac is None: # then normalize by bounds
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+            for i, ax in enumerate("xyz"):
+                # Do the bounds first since we cast to f32
+                tmp = self.vertices[i,:]
+                np.subtract(tmp, bounds[i][0], tmp)
+                w = bounds[i][1] - bounds[i][0]
+                np.divide(tmp, w, tmp)
+                np.subtract(tmp, 0.5, tmp) # Center at origin.
+                v[ax][:] = tmp   
+        else:
+            for i, ax in enumerate("xyz"):
+                tmp = self.vertices[i,:]
+                np.divide(tmp, dist_fac, tmp)
+                v[ax][:] = tmp
+        #(1) write all colors per surface to mtl file
+        for i in range(0,lut[0].shape[0]): 
+            omname = "material_" + str(i) + '_' + str(plot_index)  # name of the material
+            fmtl.write("newmtl " + omname +'\n') # the specific material (color) for this face
+            fmtl.write("Ka %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # ambient color, keep off
+            fmtl.write("Kd %.6f %.6f %.6f\n" %(lut[0][i], lut[1][i], lut[2][i])) # color of face
+            fmtl.write("Ks %.6f %.6f %.6f\n" %(0.0, 0.0, 0.0)) # specular color, keep off
+            fmtl.write("d %.6f\n" %(transparency))  # transparency
+            fmtl.write("em %.6f\n" %(emiss[i])) # emissivity per color
+            fmtl.write("illum 2\n") # not relevant, 2 means highlights on?
+            fmtl.write("Ns %.6f\n\n" %(0.0)) #keep off, some other specular thing
+        #(2) write vertices
+        for i in range(0,self.vertices.shape[1]):
+            fobj.write("v %.6f %.6f %.6f\n" %(v["x"][i], v["y"][i], v["z"][i]))    
+        fobj.write("#done defining vertices\n\n")
+        #(3) define faces and materials for each face
+        for i in range(0,self.triangles.shape[0]):
+            omname = 'material_' + str(f["cind"][i]) + '_' + str(plot_index) # which color to use
+            fobj.write("usemtl " + omname + '\n') # which material to use for this face (color)
+            fobj.write("f " + str(cc) + ' ' + str(cc+1) + ' ' + str(cc+2) + '\n\n') # vertices to color
+            cc = cc+3
+        fmtl.close()
+        fobj.close()
+
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization
@@ -4469,7 +4680,7 @@
             w = bounds[i][1] - bounds[i][0]
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
-            v[ax][:] = tmp 
+            v[ax][:] = tmp
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3
@@ -4598,22 +4809,46 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))
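
A standalone sketch of the unwrapping step above: any ReconstructedObject
whose stored parameter-file entry matches the reference pf is replaced by its
payload, recursing through plain tuples and lists. The hash string and payload
below are made-up stand-ins for a real pf and data object.

    # mirrors _check_nested_args/ReconstructedObject from the diff above
    class ReconstructedObject(tuple):
        pass

    def _check_nested_args(arg, ref_pf):
        # anything that is not a container passes through untouched
        if not isinstance(arg, (tuple, list, ReconstructedObject)):
            return arg
        # a (pf, obj) pair for the same pf collapses to just obj
        elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
            return arg[1]
        # otherwise recurse into the container
        return [_check_nested_args(a, ref_pf) for a in arg]

    sphere = ReconstructedObject(("pf-hash-1234", "<sphere object>"))
    print(_check_nested_args((1.0, ["x", sphere]), "pf-hash-1234"))
    # -> [1.0, ['x', '<sphere object>']]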

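The export_obj hunk at the top of this diff pairs a Wavefront .obj file with a
.mtl material library: one "newmtl" entry per colormap bin, and a "usemtl"
line before each face selects that face's color. A minimal standalone sketch
of the same pairing (file names here are hypothetical):

    def write_single_triangle(obj_name="tri.obj", mtl_name="tri.mtl"):
        with open(mtl_name, "w") as fmtl:
            fmtl.write("newmtl material_0\n")
            fmtl.write("Kd 1.000000 0.000000 0.000000\n")  # diffuse color
            fmtl.write("d 1.000000\n")                     # opaque
        with open(obj_name, "w") as fobj:
            fobj.write("mtllib %s\n" % mtl_name)
            # three vertices, then one face referencing them (1-indexed)
            fobj.write("v 0.0 0.0 0.0\nv 1.0 0.0 0.0\nv 0.0 1.0 0.0\n")
            fobj.write("usemtl material_0\n")
            fobj.write("f 1 2 3\n")

    write_single_triangle()
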
diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,8 +151,12 @@
     particle masses in the object.
     """
     baryon_mass = data["CellMassMsun"].sum()
-    particle_mass = data["ParticleMassMsun"].sum()
-    return [baryon_mass + particle_mass]
+    try:
+        particle_mass = data["ParticleMassMsun"].sum()
+        total_mass = baryon_mass + particle_mass
+    except KeyError:
+        total_mass = baryon_mass
+    return [total_mass]
 def _combTotalMass(data, total_mass):
     return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -209,7 +209,7 @@
         pf = self.parameter_file
         if find_max: c = self.find_max("Density")[1]
         else: c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        return self.region(c, 
+        return self.region(c,
             pf.domain_left_edge, pf.domain_right_edge)
 
     def clear_all_data(self):
@@ -236,6 +236,8 @@
                 fn = os.path.join(self.directory,
                         "%s.yt" % self.parameter_file.basename)
         dir_to_check = os.path.dirname(fn)
+        if dir_to_check == '':
+            dir_to_check = '.'
         # We have four options:
         #    Writeable, does not exist      : create, open as append
         #    Writeable, does exist          : open as append
@@ -308,7 +310,7 @@
             self.save_data = self._save_data
         else:
             self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-    
+
     save_data = parallel_splitter(_save_data, _reload_data_file)
 
     def save_object(self, obj, name):
@@ -317,7 +319,7 @@
         under the name *name* on the node /Objects.
         """
         s = cPickle.dumps(obj, protocol=-1)
-        self.save_data(s, "/Objects", name, force = True)
+        self.save_data(np.array(s, dtype='c'), "/Objects", name, force = True)
 
     def load_object(self, name):
         """
@@ -367,7 +369,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return self.select_grids(self.grid_levels.max())[0].dds[0]
+        return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)
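
The save_object hunk above stores the pickled object as a character array so
it can be written into the HDF5 backup file. A pure-Python sketch of the same
round trip, with no HDF5 involved; the 2013 code uses cPickle and
np.array(s, dtype='c'), and the modern equivalent is shown here:

    import pickle
    import numpy as np

    obj = {"center": [0.5, 0.5, 0.5]}
    s = pickle.dumps(obj, protocol=-1)   # cPickle.dumps in the diff
    arr = np.frombuffer(s, dtype='c')    # one-byte character array
    print(pickle.loads(arr.tobytes()) == obj)   # True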

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -198,8 +198,10 @@
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
-                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1)
+                         & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
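
The epsilon guard above demands an overlap larger than machine precision, so
grids whose edges merely coincide with the selection box up to float64
rounding are no longer picked up:

    import numpy as np

    eps = np.finfo(np.float64).eps
    grid_right_edge = np.array([0.1 + 0.2, 1.0, 1.0])  # 0.30000000000000004
    left_edge = np.array([0.3, 0.0, 0.0])
    # naive comparison: True purely because of rounding
    print(np.all(grid_right_edge > left_edge))
    # tolerant comparison: False, there is no real overlap
    print(np.all((grid_right_edge - left_edge) > eps))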

diff -r 996841e8daf4a2327ab4c47b5f53ab51b999b78f -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -58,7 +58,8 @@
 
     def get_data(self, fields):
         fields = ensure_list(fields)
-        rvs = self.source.get_data(fields, force_particle_read=True)
+        self.source.get_data(fields, force_particle_read=True)
+        rvs = [self.source[field] for field in fields]
         if len(fields) == 1: return rvs[0]
         return rvs
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/44cd68d145bf/
Changeset:   44cd68d145bf
Branch:      yt
User:        xarthisius
Date:        2013-05-08 20:34:03
Summary:     Fix typo in docstring
Affected #:  1 file

diff -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d -r 44cd68d145bf2241d65b43df7de65abad6928bc0 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -638,7 +638,7 @@
 
         Other Parameters
         ----------------
-        dyanmic_range : float (default: None)
+        dynamic_range : float (default: None)
             The dynamic range of the image.
             If zmin == None, will set zmin = zmax / dynamic_range
             If zmax == None, will set zmax = zmin * dynamic_range


https://bitbucket.org/yt_analysis/yt-3.0/commits/35eff80e8a87/
Changeset:   35eff80e8a87
Branch:      yt
User:        xarthisius
Date:        2013-05-08 20:39:51
Summary:     Fix another typo
Affected #:  1 file

diff -r 44cd68d145bf2241d65b43df7de65abad6928bc0 -r 35eff80e8a879e944e5274b7c8a75c0f8eb0a5ec yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -426,7 +426,7 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or arry of (float, unit) tuples.
+        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
              Width can have four different formats to support windows with variable
              x and y widths.  They are:
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/148a3db45843/
Changeset:   148a3db45843
Branch:      yt
User:        ngoldbaum
Date:        2013-05-07 21:20:57
Summary:     Removing the alias from velo to VelocityMagnitude, closes #560.
Affected #:  1 file

diff -r 9b5c690e791ab9195415188482b532fbab758208 -r 148a3db45843ee87f5c8b4a62418a27d62ebd7b7 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,7 +64,6 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
-                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 


https://bitbucket.org/yt_analysis/yt-3.0/commits/c27c264ed585/
Changeset:   c27c264ed585
Branch:      yt
User:        jzuhone
Date:        2013-05-08 20:46:49
Summary:     Merged in ngoldbaum/yt (pull request #492)

Removing the alias from velo to VelocityMagnitude, closes #560.
Affected #:  1 file

diff -r 35eff80e8a879e944e5274b7c8a75c0f8eb0a5ec -r c27c264ed585c6406269737053dac0da51af195d yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,7 +64,6 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
-                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 


https://bitbucket.org/yt_analysis/yt-3.0/commits/8e0f5a9ad390/
Changeset:   8e0f5a9ad390
Branch:      yt
User:        brittonsmith
Date:        2013-05-09 02:29:29
Summary:     Making mkdir_rec respect absolute paths.
Affected #:  1 file

diff -r cbdb1ea15403843d50f48ea55e09e934b58f2a6d -r 8e0f5a9ad390339a95510e322503ec0dd5c81c6f yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -613,7 +613,10 @@
     mkdir_rec("a/b/c")
     """
     dir_list = path.split("/")
-    basedir = "."
+    if path.startswith("/"):
+        basedir = ""
+    else:
+        basedir = "."
     for dir in dir_list:
         basedir = "%s/%s" % (basedir, dir)
         if not os.path.isdir(basedir): os.mkdir(basedir)
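
The reason for the fix: splitting an absolute path on "/" yields a leading
empty component, and rebuilding from basedir="." silently turned the path
into a relative one under the current directory:

    path = "/tmp/a/b"
    parts = path.split("/")
    print(parts)              # ['', 'tmp', 'a', 'b'] -- note the leading ''
    basedir = "."
    for d in parts:
        basedir = "%s/%s" % (basedir, d)
    print(basedir)            # './/tmp/a/b' -- created under cwd, not /tmp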


https://bitbucket.org/yt_analysis/yt-3.0/commits/40beb2f5266b/
Changeset:   40beb2f5266b
Branch:      yt
User:        brittonsmith
Date:        2013-05-09 17:33:15
Summary:     Replacing mkdir_rec with functionality from the os module and making it parallel-safe.
Affected #:  2 files

diff -r 8e0f5a9ad390339a95510e322503ec0dd5c81c6f -r 40beb2f5266b247ad3386fad7690a4b6f06bb246 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2010,8 +2010,9 @@
         >>> halos.write_out("HopAnalysis.out")
         """
         # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
+        my_dir = os.path.dirname(filename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
@@ -2033,8 +2034,9 @@
         >>> halos.write_particle_lists_txt("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
@@ -2060,8 +2062,9 @@
         >>> halos.write_particle_lists("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
@@ -2097,8 +2100,9 @@
         >>> halos.dump("MyHalos")
         """
         # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
+        my_dir = os.path.dirname(basename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)

diff -r 8e0f5a9ad390339a95510e322503ec0dd5c81c6f -r 40beb2f5266b247ad3386fad7690a4b6f06bb246 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -602,21 +602,3 @@
 def get_image_suffix(name):
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
-
-def mkdir_rec(path):
-    """
-    Recursive mkdir, so that if you mkdir two levels deep and the first 
-    one doesn't exist, it creates the first, and then any subsequent dirs.
-
-    Examples
-    --------
-    mkdir_rec("a/b/c")
-    """
-    dir_list = path.split("/")
-    if path.startswith("/"):
-        basedir = ""
-    else:
-        basedir = "."
-    for dir in dir_list:
-        basedir = "%s/%s" % (basedir, dir)
-        if not os.path.isdir(basedir): os.mkdir(basedir)
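
The replacement pattern, reduced to a standalone sketch: take the directory
part of the target filename and create it (recursively) only if it is
missing. In yt the creation is additionally wrapped in only_on_root so that a
single MPI process issues the makedirs call; the guard against an empty
dirname below is an extra safeguard for bare filenames:

    import os

    def ensure_parent_dir(filename):
        my_dir = os.path.dirname(filename)
        if my_dir and not os.path.exists(my_dir):
            os.makedirs(my_dir)   # only_on_root(os.makedirs, my_dir) in yt

    ensure_parent_dir("output/halos/HopAnalysis.out")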


https://bitbucket.org/yt_analysis/yt-3.0/commits/c2bb4cf6028a/
Changeset:   c2bb4cf6028a
Branch:      yt
User:        MatthewTurk
Date:        2013-05-09 17:47:36
Summary:     Merged in brittonsmith/yt (pull request #497)

Replacing mkdir_rec with functionality from the os module.
Affected #:  2 files

diff -r c27c264ed585c6406269737053dac0da51af195d -r c2bb4cf6028a62b8ae174a19f46803764cc6842d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2010,8 +2010,9 @@
         >>> halos.write_out("HopAnalysis.out")
         """
         # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
+        my_dir = os.path.dirname(filename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
@@ -2033,8 +2034,9 @@
         >>> halos.write_particle_lists_txt("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
@@ -2060,8 +2062,9 @@
         >>> halos.write_particle_lists("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
@@ -2097,8 +2100,9 @@
         >>> halos.dump("MyHalos")
         """
         # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
+        my_dir = os.path.dirname(basename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)

diff -r c27c264ed585c6406269737053dac0da51af195d -r c2bb4cf6028a62b8ae174a19f46803764cc6842d yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -602,18 +602,3 @@
 def get_image_suffix(name):
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
-
-def mkdir_rec(path):
-    """
-    Recursive mkdir, so that if you mkdir two levels deep and the first 
-    one doesn't exist, it creates the first, and then any subsequent dirs.
-
-    Examples
-    --------
-    mkdir_rec("a/b/c")
-    """
-    dir_list = path.split("/")
-    basedir = "."
-    for dir in dir_list:
-        basedir = "%s/%s" % (basedir, dir)
-        if not os.path.isdir(basedir): os.mkdir(basedir)


https://bitbucket.org/yt_analysis/yt-3.0/commits/924fad86026c/
Changeset:   924fad86026c
Branch:      yt
User:        atmyers
Date:        2013-05-07 08:29:47
Summary:     adding code to export yt datasets to RadMC3D
Affected #:  5 files

diff -r 34979d998227182ac38c2c51b876472755d650ff -r 924fad86026cb243c3110ad6279ad53842f7dcdd yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -117,3 +117,6 @@
 from .two_point_functions.api import \
     TwoPointFunctions, \
     FcnSet
+
+from .radmc3d_export.api import \
+    RadMC3DWriter

diff -r 34979d998227182ac38c2c51b876472755d650ff -r 924fad86026cb243c3110ad6279ad53842f7dcdd yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -0,0 +1,259 @@
+"""
+Code to export from yt to RadMC3D
+
+Author: Andrew Myers
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from yt.mods import *
+
+class RadMC3DLayer:
+    '''
+
+    This class represents an AMR 'layer' of the style described in
+    the radmc3d manual. Unlike grids, layers may not have more
+    than one parent, so level L grids will need to be split up
+    if they straddle two or more level L - 1 grids. 
+
+    '''
+    def __init__(self, level, parent, id, LE, RE, dim):
+        self.level = level
+        self.parent = parent
+        self.LeftEdge = LE
+        self.RightEdge = RE
+        self.ActiveDimensions = dim
+        self.id = id
+
+    def get_overlap_with(self, grid):
+        '''
+
+        Returns the overlapping region between two Layers,
+        or a layer and a grid. RE < LE means in any direction
+        means no overlap.
+
+        '''
+        LE = np.maximum(self.LeftEdge,  grid.LeftEdge)
+        RE = np.minimum(self.RightEdge, grid.RightEdge)
+        return LE, RE
+
+    def overlaps(self, grid):
+        '''
+
+        Returns whether or not this layer overlaps a given grid
+        
+        '''
+        LE, RE = self.get_overlap_with(grid)
+        if np.any(RE <= LE):
+            return False
+        else:
+            return True
+
+class RadMC3DWriter:
+    '''
+
+    This class provides a mechanism for writing out data files in a format
+    readable by radmc3d. Examples:
+    
+    from yt.mods import *
+    from RadMC3DInterface import *
+    
+    pf = load('../data.0199.3d.hdf5')
+    writer = RadMC3DWriter(pf)
+    
+    writer.write_amr_grid()
+    writer.write_dust_data('dust-density', 'dust_data.inp')
+    writer.write_dust_data('dust-temperature', 'dust_temperature.inp')
+    writer.write_line_data('number-density-co', 'numberdens_co.inp')
+    writer.write_line_data('gas-temperature', 'gas_temperature.inp')
+    writer.write_line_data( ['x-velocity', 'y-velocity', 'z-velocity'], 'gas_velocity.inp') 
+    '''
+
+    def __init__(self, pf, max_level=2):
+        self.max_level = max_level
+        self.cell_count = 0 
+        self.layers = []
+        self.domain_dimensions = pf.domain_dimensions
+        self.domain_left_edge  = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.grid_filename = 'amr_grid.inp'
+        self.pf = pf
+
+        base_layer = RadMC3DLayer(0, None, 0, \
+                                  self.domain_left_edge, \
+                                  self.domain_right_edge, \
+                                  self.domain_dimensions)
+
+        self.layers.append(base_layer)
+        self.cell_count += np.product(pf.domain_dimensions)
+
+        for grid in pf.h.grids:
+            if grid.Level <= self.max_level:
+                self._add_grid_to_layers(grid)
+
+    def _get_parents(self, grid):
+        parents = []  
+        for potential_parent in self.layers:
+            if potential_parent.level == grid.Level - 1:
+                if potential_parent.overlaps(grid):
+                    parents.append(potential_parent)
+        return parents
+
+    def _add_grid_to_layers(self, grid):
+        parents = self._get_parents(grid)
+        for parent in parents:
+            LE, RE = parent.get_overlap_with(grid)
+            N = (RE - LE) / grid.dds
+            N = np.array([int(n + 0.5) for n in N])
+            new_layer = RadMC3DLayer(grid.Level, parent.id, len(self.layers), \
+                                     LE, RE, N)
+            self.layers.append(new_layer)
+            self.cell_count += np.product(N)
+            
+    def write_amr_grid(self):
+        '''
+        This routine writes the 'amr_grid.inp' file that describes the mesh radmc3d
+        will use.
+
+        '''
+        dims = self.domain_dimensions
+        left_edge = self.domain_left_edge
+        right_edge = self.domain_right_edge
+
+        # calculate cell wall positions
+        xs = [str(x) for x in np.linspace(left_edge[0], right_edge[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(left_edge[1], right_edge[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(left_edge[2], right_edge[2], dims[2]+1)]
+
+        # writer file header
+        grid_file = open(self.grid_filename, 'w')
+        grid_file.write('1 \n') # iformat is always 1
+        if self.max_level == 0:
+            grid_file.write('0 \n')
+        else:
+            grid_file.write('10 \n') # only layer-style AMR files are supported
+        grid_file.write('1 \n') # only cartesian coordinates are supported
+        grid_file.write('0 \n') 
+        grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
+        grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
+        if self.max_level != 0:
+            grid_file.write(str(self.max_level) + '    ' + str(len(self.layers)) + '\n')
+
+        # write base grid cell wall positions
+        for x in xs:
+            grid_file.write(x + '    ')
+        grid_file.write('\n')
+
+        for y in ys:
+            grid_file.write(y + '    ')
+        grid_file.write('\n')
+
+        for z in zs:
+            grid_file.write(z + '    ')
+        grid_file.write('\n')
+
+        # write information about fine layers, skipping the base layer:
+        for layer in self.layers[1:]:
+            p = layer.parent
+            dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
+            if p == 0:
+                ind = (layer.LeftEdge - left_edge) / (2.0*dds) + 1
+            else:
+                LE = np.zeros(3)
+                for potential_parent in self.layers:
+                    if potential_parent.id == p:
+                        LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+            ix  = int(ind[0])
+            iy  = int(ind[1])
+            iz  = int(ind[2])
+            nx, ny, nz = layer.ActiveDimensions / 2
+            grid_file.write('{}    {}    {}    {}    {}    {}    {} \n'.format(p, ix, iy, iz, nx, ny, nz))
+
+        grid_file.close()
+
+    def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
+        import pyximport; pyximport.install()
+        from write_array import write_3D_array, write_3D_vector_array
+
+        cg = self.pf.h.covering_grid(level, LE, dim)
+        if type(field) == type([]):
+            data_x = cg[field[0]]
+            data_y = cg[field[1]]
+            data_z = cg[field[2]]
+            write_3D_vector_array(data_x, data_y, data_x, fhandle)
+        else:
+            data = cg[field]
+            write_3D_array(data, fhandle)
+
+    def write_dust_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        thermal dust emission. In particular, if you have a field called
+        "dust-density", you can write out a dust_density.inp file. 
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+        fhandle.write('1 \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+            
+        fhandle.close()
+
+    def write_line_file(self, field, filename):
+        '''
+        This method writes out fields in the format radmc3d needs to compute
+        line emission.
+
+        '''
+        fhandle = open(filename, 'w')
+
+        # write header
+        fhandle.write('1 \n')
+        fhandle.write(str(self.cell_count) + ' \n')
+
+        # now write fine layers:
+        for layer in self.layers:
+            lev = layer.level
+            if lev == 0:
+                LE = self.domain_left_edge
+                N  = self.domain_dimensions
+            else:
+                LE = layer.LeftEdge
+                N  = layer.ActiveDimensions
+
+            self._write_layer_data_to_file(fhandle, field, lev, LE, N)
+
+        fhandle.close()
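
A worked instance of the layer-header arithmetic in write_amr_grid above,
with made-up numbers: a layer runs at twice its parent's resolution, so its
position is reported in parent cells (1-indexed), which is what the
(LeftEdge - parent_LE) / (2.0*dds) + 1 expression computes. Assume a 32^3
base grid on the unit box and a level-1 layer of 16^3 fine cells:

    import numpy as np

    parent_LE  = np.array([0.0, 0.0, 0.0])
    layer_LE   = np.array([0.25, 0.25, 0.25])
    layer_RE   = np.array([0.5, 0.5, 0.5])
    layer_dims = np.array([16, 16, 16])

    dds = (layer_RE - layer_LE) / layer_dims    # fine cell width, 1/64
    ind = (layer_LE - parent_LE) / (2.0 * dds) + 1
    print(ind.astype(int))       # [9 9 9]  -> starts at parent cell (9,9,9)
    print(layer_dims // 2)       # [8 8 8]  -> spans 8 parent cells per axis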

diff -r 34979d998227182ac38c2c51b876472755d650ff -r 924fad86026cb243c3110ad6279ad53842f7dcdd yt/analysis_modules/radmc3d_export/api.py
--- /dev/null
+++ b/yt/analysis_modules/radmc3d_export/api.py
@@ -0,0 +1,30 @@
+"""
+API for RadMC3D Export code
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Andrew Myers <atmyers2 at gmail.com>
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .RadMC3DInterface import \
+    RadMC3DWriter

diff -r 34979d998227182ac38c2c51b876472755d650ff -r 924fad86026cb243c3110ad6279ad53842f7dcdd yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -20,4 +20,5 @@
     config.add_subpackage("spectral_integrator")
     config.add_subpackage("star_analysis")
     config.add_subpackage("two_point_functions")
+    config.add_subpackage("radmc3d_export")
     return config


https://bitbucket.org/yt_analysis/yt-3.0/commits/ed10fa819095/
Changeset:   ed10fa819095
Branch:      yt
User:        atmyers
Date:        2013-05-07 09:56:07
Summary:     adding Cython file IO routines
Affected #:  3 files

diff -r 924fad86026cb243c3110ad6279ad53842f7dcdd -r ed10fa819095703e9a8d1c9f431aff38a2d05f81 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -25,12 +25,13 @@
 """
 
 from yt.mods import *
+from yt.utilities.lib.write_array import write_3D_array, write_3D_vector_array
 
 class RadMC3DLayer:
     '''
 
     This class represents an AMR 'layer' of the style described in
-    the radmc3d manual. Unlike grids, layers may not have more
+    the radmc3d manual. Unlike yt grids, layers may not have more
     than one parent, so level L grids will need to be split up
     if they straddle two or more level L - 1 grids. 
 
@@ -74,7 +75,7 @@
     readable by radmc3d. Examples:
     
     from yt.mods import *
-    from RadMC3DInterface import *
+    from yt.analysis_modules.radmc3d_export.api import *
     
     pf = load('../data.0199.3d.hdf5')
     writer = RadMC3DWriter(pf)
@@ -191,9 +192,6 @@
         grid_file.close()
 
     def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
-        import pyximport; pyximport.install()
-        from write_array import write_3D_array, write_3D_vector_array
-
         cg = self.pf.h.covering_grid(level, LE, dim)
         if type(field) == type([]):
             data_x = cg[field[0]]

diff -r 924fad86026cb243c3110ad6279ad53842f7dcdd -r ed10fa819095703e9a8d1c9f431aff38a2d05f81 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -39,3 +39,4 @@
 from .grid_traversal import *
 from .marching_cubes import *
 from .GridTree import *
+from .write_array import *

diff -r 924fad86026cb243c3110ad6279ad53842f7dcdd -r ed10fa819095703e9a8d1c9f431aff38a2d05f81 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -204,6 +204,8 @@
                           "yt/utilities/lib/field_interpolation_tables.pxd",
                           ]
           )
+    config.add_extension("write_array",
+                         ["yt/utilities/lib/write_array.pyx"])
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])


https://bitbucket.org/yt_analysis/yt-3.0/commits/31cb0a3745b9/
Changeset:   31cb0a3745b9
Branch:      yt
User:        atmyers
Date:        2013-05-07 09:58:18
Summary:     forgot to actually add the file
Affected #:  1 file

diff -r ed10fa819095703e9a8d1c9f431aff38a2d05f81 -r 31cb0a3745b9cd89ef8741e5369a9b61468d75de yt/utilities/lib/write_array.pyx
--- /dev/null
+++ b/yt/utilities/lib/write_array.pyx
@@ -0,0 +1,66 @@
+"""
+Faster, cythonized file IO
+
+Author: Andrew Myers
+Affiliation: UCB
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Andrew Myers.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+DTYPE = np.float64
+ctypedef np.float64_t DTYPE_t
+
+ at cython.boundscheck(False)
+def write_3D_array(np.ndarray[DTYPE_t, ndim=3] data, fhandle):
+    assert data.dtype == DTYPE
+    cdef int Nx, Ny, Nz
+    Nx = data.shape[0]
+    Ny = data.shape[1]
+    Nz = data.shape[2]
+    cdef unsigned int i, j, k
+
+    for i in np.arange(Nz):
+        for j in np.arange(Ny):
+            for k in np.arange(Nx):
+                fhandle.write(str(data[k, j, i]) + '\n')
+
+ at cython.boundscheck(False)
+def write_3D_vector_array(np.ndarray[DTYPE_t, ndim=3] data_x, 
+                          np.ndarray[DTYPE_t, ndim=3] data_y,
+                          np.ndarray[DTYPE_t, ndim=3] data_z,
+                          fhandle):
+
+    assert data_x.dtype == DTYPE
+    cdef int Nx, Ny, Nz
+    Nx = data_x.shape[0]
+    Ny = data_x.shape[1]
+    Nz = data_x.shape[2]
+    cdef unsigned int i, j, k
+
+    for i in np.arange(Nz):
+        for j in np.arange(Ny):
+            for k in np.arange(Nx):
+                fx = data_x[k, j, i]
+                fy = data_y[k, j, i]
+                fz = data_z[k, j, i]
+                fhandle.write('{}    {}    {} \n'.format(fx, fy, fz))


https://bitbucket.org/yt_analysis/yt-3.0/commits/1365195231df/
Changeset:   1365195231df
Branch:      yt
User:        atmyers
Date:        2013-05-07 19:12:19
Summary:     Merged yt_analysis/yt into yt
Affected #:  1 file

diff -r 31cb0a3745b9cd89ef8741e5369a9b61468d75de -r 1365195231df34e5cb5def2d2e17780267c447f7 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)


https://bitbucket.org/yt_analysis/yt-3.0/commits/506793d5d284/
Changeset:   506793d5d284
Branch:      yt
User:        atmyers
Date:        2013-05-07 19:38:42
Summary:     update docstrings
Affected #:  1 file

diff -r 31cb0a3745b9cd89ef8741e5369a9b61468d75de -r 506793d5d2848007b898d9aeec24a039c6e63ff9 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -72,20 +72,64 @@
     '''
 
     This class provides a mechanism for writing out data files in a format
-    readable by radmc3d. Examples:
+    readable by radmc3d. Currently, only the ASCII format, "Layer" style
+    file format is supported. For more information please see the radmc3d
+    manual.
+
+    Parameters
+    ----------
+
+    pf : `StaticOutput`
+        This is the parameter file object corresponding to the
+        simulation output to be written out.
+
+    max_level : int
+        An int corresponding to the maximum number of levels of refinement
+        to include in the output. Often, this does not need to be very large
+        as information on very high levels is frequently unobservable.
+        Default = 2. 
+
+    Examples
+    --------
+
+    This will create a field called "DustDensity" and write it out to the
+    file "dust_data.inp" in a form readable by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> dust_to_gas = 0.01
+    >>> def _DustDensity(field, data):
+    >>>     return dust_to_gas*data['Density']
+    >>> add_field("DustDensity", function=_DustDensity)
     
-    from yt.mods import *
-    from yt.analysis_modules.radmc3d_export.api import *
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
     
-    pf = load('../data.0199.3d.hdf5')
-    writer = RadMC3DWriter(pf)
+    >>> writer.write_amr_grid()
+    >>> writer.write_dust_file("DustDensity", "dust_data.inp")
+
+    This will create a field called "NumberDensityCO and write it out to
+    the file "numberdens_co.inp". It will also write out information about
+    the gas velocity to "gas_velocity.inp" so that this broadening may be
+    included in the radiative transfer calculation by radmc3d:
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.radmc3d_export.api import *
+
+    >>> x_co = 1.0e-4
+    >>> mu_h = 2.34e-24
+    >>> def _NumberDensityCO(field, data):
+    >>>     return (x_co/mu_h)*data['Density']
+    >>> add_field("NumberDensityCO", function=_NumberDensityCO)
     
-    writer.write_amr_grid()
-    writer.write_dust_data('dust-density', 'dust_data.inp')
-    writer.write_dust_data('dust-temperature', 'dust_temperature.inp')
-    writer.write_line_data('number-density-co', 'numberdens_co.inp')
-    writer.write_line_data('gas-temperature', 'gas_temperature.inp')
-    writer.write_line_data( ['x-velocity', 'y-velocity', 'z-velocity'], 'gas_velocity.inp') 
+    >>> pf = load("galaxy0030/galaxy0030")
+    >>> writer = RadMC3DWriter(pf)
+    
+    >>> writer.write_amr_grid()
+    >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
+    >>> writer.write_line_file(['x-velocity', 'y-velocity', 'z-velocity'], 'gas_velocity.inp') 
+
     '''
 
     def __init__(self, pf, max_level=2):
@@ -206,7 +250,7 @@
         '''
         This method writes out fields in the format radmc3d needs to compute
         thermal dust emission. In particular, if you have a field called
-        "dust-density", you can write out a dust_density.inp file. 
+        "DustDensity", you can write out a dust_density.inp file. 
 
         '''
         fhandle = open(filename, 'w')


https://bitbucket.org/yt_analysis/yt-3.0/commits/92bcbe541132/
Changeset:   92bcbe541132
Branch:      yt
User:        atmyers
Date:        2013-05-07 19:39:40
Summary:     merging
Affected #:  1 file

diff -r 506793d5d2848007b898d9aeec24a039c6e63ff9 -r 92bcbe541132f873c84966eae2f7ebab11c26456 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -143,7 +143,7 @@
         Note that this is not a string, so no quotes. Default = HaloFinder.
     halo_finder_threshold : Float
         If using HaloFinder or parallelHF, the value of the density threshold
-        used when halo finding. Default = 80.0.
+        used when halo finding. Default = 160.0.
     FOF_link_length : Float
         If using FOFHaloFinder, the linking length between particles.
         Default = 0.2.
@@ -169,7 +169,7 @@
     ... halo_finder_function=parallelHF)
     """
     def __init__(self, restart_files=[], database='halos.db',
-            halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
+            halo_finder_function=HaloFinder, halo_finder_threshold=160.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         ParallelAnalysisInterface.__init__(self)


https://bitbucket.org/yt_analysis/yt-3.0/commits/89843d4cf5ac/
Changeset:   89843d4cf5ac
Branch:      yt
User:        atmyers
Date:        2013-05-07 19:41:12
Summary:     pointless formatting tweak
Affected #:  1 file

diff -r 92bcbe541132f873c84966eae2f7ebab11c26456 -r 89843d4cf5acf4a30b17dd2085f7d51d262d9c3c yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -72,9 +72,8 @@
     '''
 
     This class provides a mechanism for writing out data files in a format
-    readable by radmc3d. Currently, only the ASCII format, "Layer" style
-    file format is supported. For more information please see the radmc3d
-    manual.
+    readable by radmc3d. Currently, only the ASCII, "Layer" style file format
+    is supported. For more information please see the radmc3d manual.
 
     Parameters
     ----------


https://bitbucket.org/yt_analysis/yt-3.0/commits/510157e366b2/
Changeset:   510157e366b2
Branch:      yt
User:        atmyers
Date:        2013-05-07 19:52:02
Summary:     breaking up some overlong lines
Affected #:  1 file

diff -r 89843d4cf5acf4a30b17dd2085f7d51d262d9c3c -r 510157e366b2d5020c375cab743389731ffe3638 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -25,7 +25,8 @@
 """
 
 from yt.mods import *
-from yt.utilities.lib.write_array import write_3D_array, write_3D_vector_array
+from yt.utilities.lib.write_array import \
+    write_3D_array, write_3D_vector_array
 
 class RadMC3DLayer:
     '''
@@ -127,7 +128,8 @@
     
     >>> writer.write_amr_grid()
     >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
-    >>> writer.write_line_file(['x-velocity', 'y-velocity', 'z-velocity'], 'gas_velocity.inp') 
+    >>> velocity_fields = ['x-velocity', 'y-velocity', 'z-velocity']
+    >>> writer.write_line_file(velocity_fields, 'gas_velocity.inp') 
 
     '''
 
@@ -167,25 +169,26 @@
             LE, RE = parent.get_overlap_with(grid)
             N = (RE - LE) / grid.dds
             N = np.array([int(n + 0.5) for n in N])
-            new_layer = RadMC3DLayer(grid.Level, parent.id, len(self.layers), \
+            new_layer = RadMC3DLayer(grid.Level, parent.id, \
+                                     len(self.layers), \
                                      LE, RE, N)
             self.layers.append(new_layer)
             self.cell_count += np.product(N)
             
     def write_amr_grid(self):
         '''
-        This routine writes the 'amr_grid.inp' file that describes the mesh radmc3d
-        will use.
+        This routine writes the 'amr_grid.inp' file that describes the mesh
+        radmc3d will use.
 
         '''
         dims = self.domain_dimensions
-        left_edge = self.domain_left_edge
-        right_edge = self.domain_right_edge
+        LE   = self.domain_left_edge
+        RE   = self.domain_right_edge
 
         # calculate cell wall positions
-        xs = [str(x) for x in np.linspace(left_edge[0], right_edge[0], dims[0]+1)]
-        ys = [str(y) for y in np.linspace(left_edge[1], right_edge[1], dims[1]+1)]
-        zs = [str(z) for z in np.linspace(left_edge[2], right_edge[2], dims[2]+1)]
+        xs = [str(x) for x in np.linspace(LE[0], RE[0], dims[0]+1)]
+        ys = [str(y) for y in np.linspace(LE[1], RE[1], dims[1]+1)]
+        zs = [str(z) for z in np.linspace(LE[2], RE[2], dims[2]+1)]
 
         # writer file header
         grid_file = open(self.grid_filename, 'w')
@@ -199,7 +202,8 @@
         grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
         grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
         if self.max_level != 0:
-            grid_file.write(str(self.max_level) + '    ' + str(len(self.layers)) + '\n')
+            s = str(self.max_level) + '    ' + str(len(self.layers)) + '\n'
+            grid_file.write(s)
 
         # write base grid cell wall positions
         for x in xs:
@@ -219,7 +223,7 @@
             p = layer.parent
             dds = (layer.RightEdge - layer.LeftEdge) / (layer.ActiveDimensions)
             if p == 0:
-                ind = (layer.LeftEdge - left_edge) / (2.0*dds) + 1
+                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
                 LE = np.zeros(3)
                 for potential_parent in self.layers:
@@ -230,7 +234,9 @@
             iy  = int(ind[1])
             iz  = int(ind[2])
             nx, ny, nz = layer.ActiveDimensions / 2
-            grid_file.write('{}    {}    {}    {}    {}    {}    {} \n'.format(p, ix, iy, iz, nx, ny, nz))
+            s = '{}    {}    {}    {}    {}    {}    {} \n'
+            s = s.format(p, ix, iy, iz, nx, ny, nz)
+            grid_file.write(s)
 
         grid_file.close()
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/ad099ba81366/
Changeset:   ad099ba81366
Branch:      yt
User:        atmyers
Date:        2013-05-07 20:01:31
Summary:     forgot one instance of chombo->pluto
Affected #:  1 file

diff -r 510157e366b2d5020c375cab743389731ffe3638 -r ad099ba81366508c14a6b26a37c8c2159759dcab yt/frontends/pluto/data_structures.py
--- a/yt/frontends/pluto/data_structures.py
+++ b/yt/frontends/pluto/data_structures.py
@@ -99,7 +99,7 @@
 
     grid = PlutoGrid
 
-    def __init__(self,pf,data_style='chombo_hdf5'):
+    def __init__(self,pf,data_style='pluto_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
@@ -187,7 +187,7 @@
     _fieldinfo_fallback = PlutoFieldInfo
     _fieldinfo_known = KnownPlutoFields
 
-    def __init__(self, filename, data_style='chombo_hdf5',
+    def __init__(self, filename, data_style='pluto_hdf5',
                  storage_filename = None, ini_filename = None):
         self._handle = h5py.File(filename,'r')
         self.current_time = self._handle.attrs['time']


https://bitbucket.org/yt_analysis/yt-3.0/commits/081a3a75722c/
Changeset:   081a3a75722c
Branch:      yt
User:        atmyers
Date:        2013-05-07 20:37:55
Summary:     some more documentation
Affected #:  1 file

diff -r ad099ba81366508c14a6b26a37c8c2159759dcab -r 081a3a75722c9fc8e1d8ef5baa6d8472fb59ac2f yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -93,7 +93,8 @@
     --------
 
     This will create a field called "DustDensity" and write it out to the
-    file "dust_data.inp" in a form readable by radmc3d:
+    file "dust_data.inp" in a form readable by radmc3d. It will also write
+    a "dust_temperature.inp" file with everything set to 10.0 K: 
 
     >>> from yt.mods import *
     >>> from yt.analysis_modules.radmc3d_export.api import *
@@ -102,12 +103,17 @@
     >>> def _DustDensity(field, data):
     >>>     return dust_to_gas*data['Density']
     >>> add_field("DustDensity", function=_DustDensity)
+
+    >>> def _DustTemperature(field, data):
+    >>>     return 10.0*data['Ones']
+    >>> add_field("DustTemperature", function=_DustTemperature)
     
     >>> pf = load("galaxy0030/galaxy0030")
     >>> writer = RadMC3DWriter(pf)
     
     >>> writer.write_amr_grid()
     >>> writer.write_dust_file("DustDensity", "dust_data.inp")
+    >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
 
     This will create a field called "NumberDensityCO and write it out to
     the file "numberdens_co.inp". It will also write out information about
@@ -255,7 +261,17 @@
         '''
         This method writes out fields in the format radmc3d needs to compute
         thermal dust emission. In particular, if you have a field called
-        "DustDensity", you can write out a dust_density.inp file. 
+        "DustDensity", you can write out a dust_density.inp file.
+
+        Parameters
+        ----------
+
+        field : string
+            The name of the field to be written out
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operations are described in the
+            radmc3d manual.
 
         '''
         fhandle = open(filename, 'w')
@@ -284,6 +300,17 @@
         This method writes out fields in the format radmc3d needs to compute
         line emission.
 
+        Parameters
+        ----------
+
+        field : string or list of 3 strings
+            If a string, the name of the field to be written out. If a list,
+            three fields that will be written to the file as a vector quantity.
+        filename : string
+            The name of the file to write the data to. The filenames radmc3d
+            expects for its various modes of operations are described in the
+            radmc3d manual.
+
         '''
         fhandle = open(filename, 'w')
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/50eaf1b67738/
Changeset:   50eaf1b67738
Branch:      yt
User:        atmyers
Date:        2013-05-07 20:40:25
Summary:     add my email
Affected #:  2 files

diff -r 081a3a75722c9fc8e1d8ef5baa6d8472fb59ac2f -r 50eaf1b677384ec0236e23fee9cb57ab1e9a503c yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -1,7 +1,7 @@
 """
 Code to export from yt to RadMC3D
 
-Author: Andrew Myers
+Author: Andrew Myers <atmyers2 at gmail.com>
 Affiliation: UCB
 Homepage: http://yt-project.org/
 License:

diff -r 081a3a75722c9fc8e1d8ef5baa6d8472fb59ac2f -r 50eaf1b677384ec0236e23fee9cb57ab1e9a503c yt/utilities/lib/write_array.pyx
--- a/yt/utilities/lib/write_array.pyx
+++ b/yt/utilities/lib/write_array.pyx
@@ -1,7 +1,7 @@
 """
 Faster, cythonized file IO
 
-Author: Andrew Myers
+Author: Andrew Myers <atmyers2 at gmail.com>
 Affiliation: UCB
 Homepage: http://yt-project.org/
 License:


https://bitbucket.org/yt_analysis/yt-3.0/commits/1c1d77b43395/
Changeset:   1c1d77b43395
Branch:      yt
User:        atmyers
Date:        2013-05-07 20:56:14
Summary:     forgot to port over these bug fixes
Affected #:  1 file

diff -r 50eaf1b677384ec0236e23fee9cb57ab1e9a503c -r 1c1d77b4339533b489c453963bab8d689a9b3c7c yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -208,7 +208,7 @@
         grid_file.write('{}    {}    {} \n'.format(1, 1, 1)) # assume 3D
         grid_file.write('{}    {}    {} \n'.format(dims[0], dims[1], dims[2]))
         if self.max_level != 0:
-            s = str(self.max_level) + '    ' + str(len(self.layers)) + '\n'
+            s = str(self.max_level) + '    ' + str(len(self.layers)-1) + '\n'
             grid_file.write(s)
 
         # write base grid cell wall positions
@@ -236,9 +236,9 @@
                     if potential_parent.id == p:
                         LE = potential_parent.LeftEdge
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
-            ix  = int(ind[0])
-            iy  = int(ind[1])
-            iz  = int(ind[2])
+            ix  = int(ind[0]+0.5)
+            iy  = int(ind[1]+0.5)
+            iz  = int(ind[2]+0.5)
             nx, ny, nz = layer.ActiveDimensions / 2
             s = '{}    {}    {}    {}    {}    {}    {} \n'
             s = s.format(p, ix, iy, iz, nx, ny, nz)


https://bitbucket.org/yt_analysis/yt-3.0/commits/a6c0381686a4/
Changeset:   a6c0381686a4
Branch:      yt
User:        atmyers
Date:        2013-05-07 21:04:44
Summary:     a few typos in the docs
Affected #:  1 file

diff -r 1c1d77b4339533b489c453963bab8d689a9b3c7c -r a6c0381686a43cbe59976be6015004406e2ac847 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -115,7 +115,7 @@
     >>> writer.write_dust_file("DustDensity", "dust_data.inp")
     >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
 
-    This will create a field called "NumberDensityCO and write it out to
+    This will create a field called "NumberDensityCO" and write it out to
     the file "numberdens_co.inp". It will also write out information about
     the gas velocity to "gas_velocity.inp" so that this broadening may be
     included in the radiative transfer calculation by radmc3d:
@@ -308,7 +308,7 @@
             three fields that will be written to the file as a vector quantity.
         filename : string
             The name of the file to write the data to. The filenames radmc3d
-            expects for its various modes of operations are described in the
+            expects for its various modes of operation are described in the
             radmc3d manual.
 
         '''


https://bitbucket.org/yt_analysis/yt-3.0/commits/a863d2583159/
Changeset:   a863d2583159
Branch:      yt
User:        atmyers
Date:        2013-05-07 21:26:45
Summary:     fixing a bug in the code that writes vector quantities to disk, thanks to Kacper Kowalik
Affected #:  1 file

diff -r a6c0381686a43cbe59976be6015004406e2ac847 -r a863d25831599e3d07e2383915bf94dc07cbad00 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -252,7 +252,7 @@
             data_x = cg[field[0]]
             data_y = cg[field[1]]
             data_z = cg[field[2]]
-            write_3D_vector_array(data_x, data_y, data_x, fhandle)
+            write_3D_vector_array(data_x, data_y, data_z, fhandle)
         else:
             data = cg[field]
             write_3D_array(data, fhandle)


https://bitbucket.org/yt_analysis/yt-3.0/commits/2731a478f0d0/
Changeset:   2731a478f0d0
Branch:      yt
User:        atmyers
Date:        2013-05-09 06:35:01
Summary:     use one row of ghost zones so fields that need them can be written out as well
Affected #:  1 file

diff -r a863d25831599e3d07e2383915bf94dc07cbad00 -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -31,7 +31,7 @@
 class RadMC3DLayer:
     '''
 
-    This class represents an AMR 'layer' of the style described in
+    This class represents an AMR "layer" of the style described in
     the radmc3d manual. Unlike yt grids, layers may not have more
     than one parent, so level L grids will need to be split up
     if they straddle two or more level L - 1 grids. 
@@ -101,11 +101,11 @@
 
     >>> dust_to_gas = 0.01
     >>> def _DustDensity(field, data):
-    >>>     return dust_to_gas*data['Density']
+    >>>     return dust_to_gas*data["Density"]
     >>> add_field("DustDensity", function=_DustDensity)
 
     >>> def _DustTemperature(field, data):
-    >>>     return 10.0*data['Ones']
+    >>>     return 10.0*data["Ones"]
     >>> add_field("DustTemperature", function=_DustTemperature)
     
     >>> pf = load("galaxy0030/galaxy0030")
@@ -126,7 +126,7 @@
     >>> x_co = 1.0e-4
     >>> mu_h = 2.34e-24
     >>> def _NumberDensityCO(field, data):
-    >>>     return (x_co/mu_h)*data['Density']
+    >>>     return (x_co/mu_h)*data["Density"]
     >>> add_field("NumberDensityCO", function=_NumberDensityCO)
     
     >>> pf = load("galaxy0030/galaxy0030")
@@ -134,8 +134,8 @@
     
     >>> writer.write_amr_grid()
     >>> writer.write_line_file("NumberDensityCO", "numberdens_co.inp")
-    >>> velocity_fields = ['x-velocity', 'y-velocity', 'z-velocity']
-    >>> writer.write_line_file(velocity_fields, 'gas_velocity.inp') 
+    >>> velocity_fields = ["x-velocity", "y-velocity", "z-velocity"]
+    >>> writer.write_line_file(velocity_fields, "gas_velocity.inp") 
 
     '''
 
@@ -146,7 +146,7 @@
         self.domain_dimensions = pf.domain_dimensions
         self.domain_left_edge  = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
-        self.grid_filename = 'amr_grid.inp'
+        self.grid_filename = "amr_grid.inp"
         self.pf = pf
 
         base_layer = RadMC3DLayer(0, None, 0, \
@@ -183,7 +183,7 @@
             
     def write_amr_grid(self):
         '''
-        This routine writes the 'amr_grid.inp' file that describes the mesh
+        This routine writes the "amr_grid.inp" file that describes the mesh
         radmc3d will use.
 
         '''
@@ -247,7 +247,7 @@
         grid_file.close()
 
     def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
-        cg = self.pf.h.covering_grid(level, LE, dim)
+        cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
         if type(field) == type([]):
             data_x = cg[field[0]]
             data_y = cg[field[1]]
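
The effect of the change above, sketched against the 2013-era covering_grid
call (pf is an already-loaded parameter file; the edges and dimensions are
placeholders): with num_ghost_zones=1 the covering grid carries one extra
cell on each side, so derived fields that need a finite-difference stencil
can still be evaluated over the requested region.

    cg = pf.h.covering_grid(1, [0.0, 0.0, 0.0], [64, 64, 64],
                            num_ghost_zones=1)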


https://bitbucket.org/yt_analysis/yt-3.0/commits/5eab37202a58/
Changeset:   5eab37202a58
Branch:      yt
User:        atmyers
Date:        2013-05-09 06:35:44
Summary:     Merged yt_analysis/yt into yt
Affected #:  5 files

diff -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -64,7 +64,6 @@
 translation_dict = {"x-velocity": "velx",
                     "y-velocity": "vely",
                     "z-velocity": "velz",
-                    "VelocityMagnitude": "velo",
                     "Density": "dens",
                     "Temperature": "temp",
                     "Pressure" : "pres", 

diff -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e yt/gui/reason/widget_store.py
--- a/yt/gui/reason/widget_store.py
+++ b/yt/gui/reason/widget_store.py
@@ -76,7 +76,8 @@
         sl = pf.h.slice(axis, coord, center = center, periodic = True)
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), setup = False)
+        pw = PWViewerExtJS(sl, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]), 
+                           setup = False, plot_type='SlicePlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]
@@ -96,7 +97,7 @@
         xax, yax = x_dict[axis], y_dict[axis]
         DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
         pw = PWViewerExtJS(proj, (DLE[xax], DRE[xax], DLE[yax], DRE[yax]),
-                           setup = False)
+                           setup = False, plot_type='ProjectionPlot')
         pw.set_current_field(field)
         field_list = list(set(pf.h.field_list + pf.h.derived_field_list))
         field_list = [dict(text = f) for f in sorted(field_list)]

diff -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -31,12 +31,44 @@
 
 class UnilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 1D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple or array
+            If a tuple, this should specify the upper and lower bounds 
+            for the bins of the data table.  This assumes the bins are 
+            evenly spaced.  If an array, this specifies the bins 
+            explicitly.
+        field_names: str
+            Name of the field to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random(64)
+        interp = UnilinearFieldInterpolator(table_data, (0.0, 1.0), "x",
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1 = boundaries
         self.x_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-
+        if isinstance(boundaries, np.ndarray):
+            if boundaries.size != table.shape[0]:
+                mylog.error("Bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries
+        else:
+            x0, x1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')
@@ -57,12 +89,51 @@
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):
+        r"""Initialize a 2D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple
+            Either a tuple of lower and upper bounds for the x and y bins 
+            given as (x0, x1, y0, y1) or a tuple of two arrays containing the 
+            x and y bins.
+        field_names: list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64))
+        interp = BilinearFieldInterpolator(table_data, (0.0, 1.0, 0.0, 1.0), 
+                                           ["x", "y"],
+                                           truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1 = boundaries
         self.x_name, self.y_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        if len(boundaries) == 4:
+            x0, x1, y0, y1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+        elif len(boundaries) == 2:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1) or as (x_bins, y_bins)")
+            raise ValueError
 
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
@@ -90,14 +161,58 @@
 
 class TrilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate = False):
+        r"""Initialize a 3D interpolator for field data.
+
+        table : array
+            The data table over which interpolation is performed.
+        boundaries: tuple
+            Either a tuple of lower and upper bounds for the x, y, and z bins 
+            given as (x0, x1, y0, y1, z0, z1) or a tuple of three arrays 
+            containing the x, y, and z bins.
+        field_names: list
+            Names of the fields to be used as input data for interpolation.
+        truncate : bool
+            If False, an exception is raised if the input values are 
+            outside the bounds of the table.  If True, extrapolation is 
+            performed.
+        
+        Examples
+        --------
+
+        ad = pf.h.all_data()
+        table_data = np.random.random((64, 64, 64))
+        interp = TrilinearFieldInterpolator(table_data, 
+                                            (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), 
+                                            ["x", "y", "z"],
+                                            truncate=True)
+        field_data = interp(ad)
+        
+        """
         self.table = table.astype('float64')
         self.truncate = truncate
-        x0, x1, y0, y1, z0, z1 = boundaries
         self.x_name, self.y_name, self.z_name = field_names
-        self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
-        self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
-        self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
-
+        if len(boundaries) == 6:
+            x0, x1, y0, y1, z0, z1 = boundaries
+            self.x_bins = np.linspace(x0, x1, table.shape[0]).astype('float64')
+            self.y_bins = np.linspace(y0, y1, table.shape[1]).astype('float64')
+            self.z_bins = np.linspace(z0, z1, table.shape[2]).astype('float64')
+        elif len(boundaries) == 3:
+            if boundaries[0].size != table.shape[0]:
+                mylog.error("X bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[1].size != table.shape[1]:
+                mylog.error("Y bins array not the same length as the data.")
+                raise ValueError
+            if boundaries[2].size != table.shape[2]:
+                mylog.error("Z bins array not the same length as the data.")
+                raise ValueError
+            self.x_bins = boundaries[0]
+            self.y_bins = boundaries[1]
+            self.z_bins = boundaries[2]
+        else:
+            mylog.error("Boundaries must be given as (x0, x1, y0, y1, z0, z1) or as (x_bins, y_bins, z_bins)")
+            raise ValueError
+        
     def __call__(self, data_object):
         orig_shape = data_object[self.x_name].shape
         x_vals = data_object[self.x_name].ravel().astype('float64')

diff -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e yt/utilities/tests/test_interpolators.py
--- a/yt/utilities/tests/test_interpolators.py
+++ b/yt/utilities/tests/test_interpolators.py
@@ -7,21 +7,58 @@
 def test_linear_interpolator_1d():
     random_data = np.random.random(64)
     fv = {'x': np.mgrid[0.0:1.0:64j]}
+    # evenly spaced bins
     ufi = lin.UnilinearFieldInterpolator(random_data, (0.0, 1.0), "x", True)
-    assert_array_equal(ufi(fv), random_data)
+    yield assert_array_equal, ufi(fv), random_data
+    
+    # randomly spaced bins
+    size = 64
+    shift = (1. / size) * np.random.random(size) - (0.5 / size)
+    fv["x"] += shift
+    ufi = lin.UnilinearFieldInterpolator(random_data, 
+                                         np.linspace(0.0, 1.0, size) + shift, 
+                                         "x", True)
+    yield assert_array_almost_equal, ufi(fv), random_data, 15
 
 def test_linear_interpolator_2d():
     random_data = np.random.random((64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j]))
     bfi = lin.BilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0), "xy", True)
-    assert_array_equal(bfi(fv), random_data)
+    yield assert_array_equal, bfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xy")
+    fv["x"] += shifts["x"][:, np.newaxis]
+    fv["y"] += shifts["y"]
+    bfi = lin.BilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"]), "xy", True)
+    yield assert_array_almost_equal, bfi(fv), random_data, 15
 
 def test_linear_interpolator_3d():
     random_data = np.random.random((64, 64, 64))
+    # evenly spaced bins
     fv = dict((ax, v) for ax, v in zip("xyz",
                np.mgrid[0.0:1.0:64j, 0.0:1.0:64j, 0.0:1.0:64j]))
     tfi = lin.TrilinearFieldInterpolator(random_data,
             (0.0, 1.0, 0.0, 1.0, 0.0, 1.0), "xyz", True)
-    assert_array_equal(tfi(fv), random_data)
+    yield assert_array_equal, tfi(fv), random_data
+
+    # randomly spaced bins
+    size = 64
+    bins = np.linspace(0.0, 1.0, size)
+    shifts = dict((ax, (1. / size) * np.random.random(size) - (0.5 / size)) \
+                  for ax in "xyz")
+    fv["x"] += shifts["x"][:, np.newaxis, np.newaxis]
+    fv["y"] += shifts["y"][:, np.newaxis]
+    fv["z"] += shifts["z"]
+    tfi = lin.TrilinearFieldInterpolator(random_data,
+            (bins + shifts["x"], bins + shifts["y"], 
+             bins + shifts["z"]), "xyz", True)
+    yield assert_array_almost_equal, tfi(fv), random_data, 15
+    

diff -r 2731a478f0d0a5d21cc328ac75c5877f1eaa7fc4 -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -426,7 +426,7 @@
 
         parameters
         ----------
-        width : float, array of floats, (float, unit) tuple, or arry of (float, unit) tuples.
+        width : float, array of floats, (float, unit) tuple, or tuple of (float, unit) tuples.
              Width can have four different formats to support windows with variable
              x and y widths.  They are:
 
@@ -551,8 +551,11 @@
     """A viewer for PlotWindows.
 
     """
+    _plot_type = None
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
+        if self._plot_type is None:
+            self._plot_type = kwargs.pop("plot_type")
         PlotWindow.__init__(self, *args,**kwargs)
         self._axes_unit_names = None
         self._callbacks = []
@@ -635,7 +638,7 @@
 
         Other Parameters
         ----------------
-        dyanmic_range : float (default: None)
+        dynamic_range : float (default: None)
             The dynamic range of the image.
             If zmin == None, will set zmin = zmax / dynamic_range
             If zmax == None, will set zmax = zmin * dynamic_range
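
Note on the interpolator change above: the three interpolator classes
previously accepted only (lower, upper) bounds and assumed evenly spaced
bins; they now also accept explicit bin arrays whose lengths must match the
table shape. A minimal sketch of both calling conventions, assuming the
yt-2.x import path shown in the diff (the dict-style call mirrors the fv
objects used in test_interpolators.py):

    import numpy as np
    from yt.utilities.linear_interpolators import UnilinearFieldInterpolator

    table = np.random.random(64)

    # Old convention: (lower, upper) bounds, bins assumed evenly spaced.
    interp_even = UnilinearFieldInterpolator(table, (0.0, 1.0), "x",
                                             truncate=True)

    # New convention: an explicit bin array, one entry per table element.
    bins = np.sort(np.random.random(64))
    interp_uneven = UnilinearFieldInterpolator(table, bins, "x",
                                               truncate=True)

    # Either interpolator is called on any mapping supplying the "x" field.
    field_data = interp_uneven({"x": np.random.random(100)})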


https://bitbucket.org/yt_analysis/yt-3.0/commits/c8471258c0a1/
Changeset:   c8471258c0a1
Branch:      yt
User:        atmyers
Date:        2013-05-09 18:37:28
Summary:     Merged yt_analysis/yt into yt
Affected #:  2 files

diff -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e -r c8471258c0a1414170f046b56ecd2cfb141f9f45 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2010,8 +2010,9 @@
         >>> halos.write_out("HopAnalysis.out")
         """
         # if path denoted in filename, assure path exists
-        if len(filename.split('/')) > 1:
-            mkdir_rec('/'.join(filename.split('/')[:-1]))
+        my_dir = os.path.dirname(filename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
@@ -2033,8 +2034,9 @@
         >>> halos.write_particle_lists_txt("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
@@ -2060,8 +2062,9 @@
         >>> halos.write_particle_lists("halo-parts")
         """
         # if path denoted in prefix, assure path exists
-        if len(prefix.split('/')) > 1:
-            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+        my_dir = os.path.dirname(prefix)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
@@ -2097,8 +2100,9 @@
         >>> halos.dump("MyHalos")
         """
         # if path denoted in basename, assure path exists
-        if len(basename.split('/')) > 1:
-            mkdir_rec('/'.join(basename.split('/')[:-1]))
+        my_dir = os.path.dirname(basename)
+        if not os.path.exists(my_dir):
+            only_on_root(os.makedirs, my_dir)
 
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)

diff -r 5eab37202a58ab91e76ad2d0de6e1878fd3c532e -r c8471258c0a1414170f046b56ecd2cfb141f9f45 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -602,18 +602,3 @@
 def get_image_suffix(name):
     suffix = os.path.splitext(name)[1]
     return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''
-
-def mkdir_rec(path):
-    """
-    Recursive mkdir, so that if you mkdir two levels deep and the first 
-    one doesn't exist, it creates the first, and then any subsequent dirs.
-
-    Examples
-    --------
-    mkdir_rec("a/b/c")
-    """
-    dir_list = path.split("/")
-    basedir = "."
-    for dir in dir_list:
-        basedir = "%s/%s" % (basedir, dir)
-        if not os.path.isdir(basedir): os.mkdir(basedir)
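
Note on the change above: the ad-hoc mkdir_rec helper is replaced by the
standard-library pair os.path.dirname / os.makedirs, wrapped in only_on_root
so that only one process creates the directory in parallel runs. A minimal
standalone sketch of the idiom (the guard on an empty dirname is an addition
here: for a bare filename os.path.dirname returns "" and os.makedirs("")
would raise):

    import os

    def ensure_parent_dir(path):
        # Create the parent directory of `path` if it does not exist yet.
        parent = os.path.dirname(path)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)

    ensure_parent_dir("halos/run42/HopAnalysis.out")  # creates halos/run42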


https://bitbucket.org/yt_analysis/yt-3.0/commits/816702c2f14a/
Changeset:   816702c2f14a
Branch:      yt
User:        atmyers
Date:        2013-05-10 00:41:18
Summary:     dust_data.inp -> dust_density.inp
Affected #:  1 file

diff -r c8471258c0a1414170f046b56ecd2cfb141f9f45 -r 816702c2f14a7923ef5b4d67d2af703558af5120 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -93,7 +93,7 @@
     --------
 
     This will create a field called "DustDensity" and write it out to the
-    file "dust_data.inp" in a form readable by radmc3d. It will also write
+    file "dust_density.inp" in a form readable by radmc3d. It will also write
     a "dust_temperature.inp" file with everything set to 10.0 K: 
 
     >>> from yt.mods import *
@@ -112,7 +112,7 @@
     >>> writer = RadMC3DWriter(pf)
     
     >>> writer.write_amr_grid()
-    >>> writer.write_dust_file("DustDensity", "dust_data.inp")
+    >>> writer.write_dust_file("DustDensity", "dust_density.inp")
     >>> writer.write_dust_file("DustTemperature", "dust_temperature.inp")
 
     This will create a field called "NumberDensityCO" and write it out to


https://bitbucket.org/yt_analysis/yt-3.0/commits/66ab32b4e375/
Changeset:   66ab32b4e375
Branch:      yt
User:        atmyers
Date:        2013-05-10 00:42:20
Summary:     id -> unique_id
Affected #:  1 file

diff -r 816702c2f14a7923ef5b4d67d2af703558af5120 -r 66ab32b4e375aff4299a3fb90496dbb4de3a111c yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -37,13 +37,13 @@
     if they straddle two or more level L - 1 grids. 
 
     '''
-    def __init__(self, level, parent, id, LE, RE, dim):
+    def __init__(self, level, parent, unique_id, LE, RE, dim):
         self.level = level
         self.parent = parent
         self.LeftEdge = LE
         self.RightEdge = RE
         self.ActiveDimensions = dim
-        self.id = id
+        self.id = unique_id
 
     def get_overlap_with(self, grid):
         '''
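
Note on the rename above: the old parameter shadowed Python's builtin id()
inside __init__. A minimal illustration with hypothetical classes:

    class Shadowed(object):
        def __init__(self, id):        # `id` hides the builtin in this scope
            self.id = id

    class Clear(object):
        def __init__(self, unique_id): # builtin id() remains available
            self.id = unique_id
            self.token = id(self)      # e.g. still callable here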


https://bitbucket.org/yt_analysis/yt-3.0/commits/13e2bc0d3cad/
Changeset:   13e2bc0d3cad
Branch:      yt
User:        atmyers
Date:        2013-05-10 00:44:37
Summary:     add url for radmc3d homepage
Affected #:  1 file

diff -r 66ab32b4e375aff4299a3fb90496dbb4de3a111c -r 13e2bc0d3cadeb3f7588321066378ce25cbf34e3 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -74,7 +74,8 @@
 
     This class provides a mechanism for writing out data files in a format
     readable by radmc3d. Currently, only the ASCII, "Layer" style file format
-    is supported. For more information please see the radmc3d manual.
+    is supported. For more information please see the radmc3d manual at:
+    http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d
 
     Parameters
     ----------


https://bitbucket.org/yt_analysis/yt-3.0/commits/9d19a96471a6/
Changeset:   9d19a96471a6
Branch:      yt
User:        atmyers
Date:        2013-05-10 00:47:05
Summary:     >>> -> ... for multi-line inputs
Affected #:  1 file

diff -r 13e2bc0d3cadeb3f7588321066378ce25cbf34e3 -r 9d19a96471a6e1064140f603b1ed693881d542bb yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -102,11 +102,11 @@
 
     >>> dust_to_gas = 0.01
     >>> def _DustDensity(field, data):
-    >>>     return dust_to_gas*data["Density"]
+    ...     return dust_to_gas*data["Density"]
     >>> add_field("DustDensity", function=_DustDensity)
 
     >>> def _DustTemperature(field, data):
-    >>>     return 10.0*data["Ones"]
+    ...     return 10.0*data["Ones"]
     >>> add_field("DustTemperature", function=_DustTemperature)
     
     >>> pf = load("galaxy0030/galaxy0030")
@@ -127,7 +127,7 @@
     >>> x_co = 1.0e-4
     >>> mu_h = 2.34e-24
     >>> def _NumberDensityCO(field, data):
-    >>>     return (x_co/mu_h)*data["Density"]
+    ...     return (x_co/mu_h)*data["Density"]
     >>> add_field("NumberDensityCO", function=_NumberDensityCO)
     
     >>> pf = load("galaxy0030/galaxy0030")


https://bitbucket.org/yt_analysis/yt-3.0/commits/70ad437a12e2/
Changeset:   70ad437a12e2
Branch:      yt
User:        atmyers
Date:        2013-05-10 00:48:23
Summary:     using isinstance() instead of type()
Affected #:  1 file

diff -r 9d19a96471a6e1064140f603b1ed693881d542bb -r 70ad437a12e2b7593a998f6c47ab3bb3e48cb4d0 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -249,7 +249,7 @@
 
     def _write_layer_data_to_file(self, fhandle, field, level, LE, dim):
         cg = self.pf.h.covering_grid(level, LE, dim, num_ghost_zones=1)
-        if type(field) == type([]):
+        if isinstance(field, list):
             data_x = cg[field[0]]
             data_y = cg[field[1]]
             data_z = cg[field[2]]
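
Note on the change above: isinstance() is preferred over comparing type()
results because it also matches subclasses and reads more directly. A quick
standalone illustration:

    field = ["x-velocity", "y-velocity", "z-velocity"]

    print type(field) == type([])   # True, but brittle
    print isinstance(field, list)   # True

    class FieldList(list):
        pass

    fl = FieldList(field)
    print type(fl) == type([])      # False: exact-type comparison
    print isinstance(fl, list)      # True: subclasses still match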


https://bitbucket.org/yt_analysis/yt-3.0/commits/dd6acf314502/
Changeset:   dd6acf314502
Branch:      yt
User:        ngoldbaum
Date:        2013-05-09 08:50:32
Summary:     Adding an alias useful for duck typing on numeric data types.
Affected #:  1 file

diff -r c27c264ed585c6406269737053dac0da51af195d -r dd6acf314502e694380ba96df409cb86a8fdc653 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -64,3 +64,5 @@
                   'days'  : sec_per_day}
 
 axis_labels = [('y','z'),('x','z'),('x','y')]
+
+numeric = (int, long, float)


https://bitbucket.org/yt_analysis/yt-3.0/commits/c5d810237838/
Changeset:   c5d810237838
Branch:      yt
User:        ngoldbaum
Date:        2013-05-09 09:15:57
Summary:     Removing ProjectionPlot's max_level keyword.
Affected #:  1 file

diff -r dd6acf314502e694380ba96df409cb86a8fdc653 -r c5d8102378389d68a3ec3a0c5bceffa539e83c55 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1237,8 +1237,6 @@
          entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
-    max_level: int
-         The maximum level to project to.
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
@@ -1258,7 +1256,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=18,
+                 weight_field=None, origin='center-window', fontsize=18,
                  field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
@@ -1268,8 +1266,8 @@
         if axes_unit is None  and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
-        proj = pf.h.proj(axis, fields, weight_field=weight_field, max_level=max_level,
-                         center=center, source=data_source, **field_parameters)
+        proj = pf.h.proj(axis, fields, weight_field=weight_field, center=center,
+                         source=data_source, **field_parameters)
         PWViewerMPL.__init__(self, proj, bounds, origin=origin, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/a6ee22577cfb/
Changeset:   a6ee22577cfb
Branch:      yt
User:        ngoldbaum
Date:        2013-05-09 09:17:28
Summary:     Making set_width a bit more resilient to malformed user input.
Affected #:  3 files

diff -r c5d8102378389d68a3ec3a0c5bceffa539e83c55 -r a6ee22577cfb26dc7d49dd9005f2230e93b87906 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -208,3 +208,10 @@
         s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
         s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
         return s
+
+class YTInvalidWidthError(YTException):
+    def __init__(self, error):
+        self.error = error
+
+    def __str__(self):
+        return str(self.error)

diff -r c5d8102378389d68a3ec3a0c5bceffa539e83c55 -r a6ee22577cfb26dc7d49dd9005f2230e93b87906 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -60,12 +60,13 @@
     x_dict, x_names, \
     y_dict, y_names, \
     axis_names, \
-    axis_labels
+    axis_labels, \
+    numeric
 from yt.utilities.math_utils import \
     ortho_find
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     GroupOwnership
-from yt.utilities.exceptions import YTUnitNotRecognized
+from yt.utilities.exceptions import YTUnitNotRecognized, YTInvalidWidthError
 from yt.data_objects.time_series import \
     TimeSeriesData
 
@@ -152,6 +153,17 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
+def assert_valid_width_tuple(width):
+    try:
+        assert iterable(width) and len(width) == 2,
+               "width (%s) is not a two element tuple" % width
+        valid = isinstance(width[0], numeric) and isinstance(width[1], str)
+        msg = "width (%s) is invalid " % str(width)
+        msg += "valid widths look like this: (12, 'au')"
+        assert valid, msg
+    except AssertionError, e:
+        raise YTInvalidWidthError(e)
+
 def StandardWidth(axis, width, depth, pf):
     if width is None:
         # Default to code units
@@ -164,17 +176,25 @@
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
     elif iterable(width):
-        if isinstance(width[1], str):
+        if isinstance(width[0], tuple) and isinstance(width[1], tuple):
+            assert_valid_width_tuple(width[0])
+            assert_valid_width_tuple(width[1])
+        elif isinstance(width[0], numeric) and isinstance(width[1], numeric):
+            width = ((width[0], '1'), (width[1], '1'))
+        else:
+            assert_valid_width_tuple(width)
             width = (width, width)
-        elif isinstance(width[1], (long, int, float)):
-            width = ((width[0], '1'), (width[1], '1'))
     else:
+        try:
+            assert isinstance(width, numeric), "width (%s) is invalid" % str(width)
+        except AssertionError, e:
+            raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
     if depth is not None:
         if iterable(depth) and isinstance(depth[1], str):
             depth = (depth,)
         elif iterable(depth):
-            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+            assert_valid_width_tuple(depth)
         else:
             depth = ((depth, '1'),)
         width += depth
@@ -447,18 +467,31 @@
              in code units.  If units are provided the resulting plot axis labels will
              use the supplied units.
         unit : str
-             the unit the width has been specified in.
-             defaults to code units.  If width is a tuple this
-             argument is ignored
-
+             the unit the width has been specified in. If width is a tuple, this
+             argument is ignored. Defaults to code units.
         """
         if width is not None:
             set_axes_unit = True
         else:
             set_axes_unit = False
 
-        if isinstance(width, (int, long, float)):
+        if isinstance(width, numeric):
             width = (width, unit)
+        elif iterable(width):
+            if isinstance(width[0], tuple) and isinstance(width[1], tuple):
+                assert_valid_width_tuple(width[0])
+                assert_valid_width_tuple(width[1])
+            elif isinstance(width[0], numeric) and isinstance(width[1], numeric):
+                width = ((width[0], '1'), (width[1], '1'))
+            else:
+                assert_valid_width_tuple(width)
+                # If width and unit are both valid width tuples, we
+                # assume width controls x and unit controls y
+                try:
+                    assert_valid_width_tuple(unit)
+                    width = (width, unit)
+                except YTInvalidWidthError:
+                    width = (width, width)
 
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 

diff -r c5d8102378389d68a3ec3a0c5bceffa539e83c55 -r a6ee22577cfb26dc7d49dd9005f2230e93b87906 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -147,6 +147,13 @@
          (-5/pf['kpc'], 5/pf['kpc']),
          (15/pf['kpc'], 10/pf['kpc'])], 15
 
+    slc.set_width((15,'kpc'),(10000,'pc'))
+
+    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
+        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
+         (-5/pf['kpc'], 5/pf['kpc']),
+         (15/pf['kpc'], 10/pf['kpc'])], 15
+
 def test_save():
     """Test plot window creation and saving to disk."""
     # Perform I/O in safe place instead of yt main dir
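
Note on the width handling above: after this change, StandardWidth and
set_width accept several equivalent spellings and reject everything else
with YTInvalidWidthError. A sketch of the forms the validation logic is
meant to accept, assuming a slice plot built as in the test
(slc = SlicePlot(pf, 'x', 'Density'); note the assert continuation is
repaired in changeset 3edff4f5f43f below):

    slc.set_width(15, 'kpc')                    # scalar plus unit argument
    slc.set_width((15, 'kpc'))                  # one (float, unit) tuple, both axes
    slc.set_width(((15, 'kpc'), (10, 'kpc')))   # per-axis (float, unit) tuples
    slc.set_width((15, 'kpc'), (10000, 'pc'))   # x from width, y from unit

    # Malformed input now fails loudly:
    slc.set_width(('15', 'kpc'))                # raises YTInvalidWidthError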


https://bitbucket.org/yt_analysis/yt-3.0/commits/3edff4f5f43f/
Changeset:   3edff4f5f43f
Branch:      yt
User:        ngoldbaum
Date:        2013-05-09 10:16:49
Summary:     Making it possible to set the text color for a plot via set_font. Closes #536.
Affected #:  1 file

diff -r a6ee22577cfb26dc7d49dd9005f2230e93b87906 -r 3edff4f5f43fc1b974780bd79056513502ba3fb7 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -155,8 +155,8 @@
 
 def assert_valid_width_tuple(width):
     try:
-        assert iterable(width) and len(width) == 2,
-               "width (%s) is not a two element tuple" % width
+        assert iterable(width) and len(width) == 2, \
+            "width (%s) is not a two element tuple" % width
         valid = isinstance(width[0], numeric) and isinstance(width[1], str)
         msg = "width (%s) is invalid " % str(width)
         msg += "valid widths look like this: (12, 'au')"
@@ -776,6 +776,7 @@
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
+        self._font_color = None
         PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_origin(self):
@@ -921,6 +922,16 @@
 
             self.run_callbacks(f)
 
+            if self._font_color is not None:
+                ax = self.plots[f].axes
+                cbax = self.plots[f].cb.ax
+                labels = \
+                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
+                  cbax.yaxis.get_ticklabels() + \
+                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
+                for label in labels:
+                    label.set_color(self._font_color)
+
         self._plot_valid = True
 
     def run_callbacks(self, f):
@@ -942,28 +953,49 @@
         ----------
         font_dict : dict
         A dict of keyword parameters to be passed to
-        matplotlib.font_manager.FontProperties.  See the matplotlib font
-        manager documentation for more details.
+        matplotlib.font_manager.FontProperties.
+
+        Possible keys include
+        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
+          'monospace'.
+        * style - The font style. Either normal, italic or oblique.
+        * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
+          'orange'.
+        * variant: Either normal or small-caps.
+        * size: Either a relative value of xx-small, x-small, small, medium,
+          large, x-large, xx-large or an absolute font size, e.g. 12
+        * stretch: A numeric value in the range 0-1000 or one of
+          ultra-condensed, extra-condensed, condensed, semi-condensed, normal,
+          semi-expanded, expanded, extra-expanded or ultra-expanded
+        * weight: A numeric value in the range 0-1000 or one of ultralight,
+          light, normal, regular, book, medium, roman, semibold, demibold, demi,
+          bold, heavy, extra bold, or black
+
+        See the matplotlib font manager API documentation for more details.
         http://matplotlib.org/api/font_manager_api.html
 
         Notes
         -----
-        Mathtext axis labels will only obey the `size` keyword.
+        Mathtext axis labels will only obey the `size` and `color` keywords.
 
         Examples
         --------
-        This sets the font to be 24-pt, sans-serif, italic, and bold-face.
+        This sets the font to be 24-pt, blue, sans-serif, italic, and
+        bold-face.
 
         >>> slc = SlicePlot(pf, 'x', 'Density')
         >>> slc.set_font({'family':'sans-serif', 'style':'italic',
-                          'weight':'bold', 'size':24})
+                          'weight':'bold', 'size':24, 'color':'blue'})
 
         """
         if font_dict is None:
             font_dict = {}
+        if 'color' in font_dict:
+            self._font_color = font_dict.pop('color')
         self._font_properties = \
             FontProperties(**font_dict)
 
+
     @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields


https://bitbucket.org/yt_analysis/yt-3.0/commits/1f9bf12e30c6/
Changeset:   1f9bf12e30c6
Branch:      yt
User:        ngoldbaum
Date:        2013-05-09 10:24:03
Summary:     Very minor formatting change to an error message.
Affected #:  1 file

diff -r 3edff4f5f43fc1b974780bd79056513502ba3fb7 -r 1f9bf12e30c66d899c4a193e011589d21f4d81bf yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -158,8 +158,8 @@
         assert iterable(width) and len(width) == 2, \
             "width (%s) is not a two element tuple" % width
         valid = isinstance(width[0], numeric) and isinstance(width[1], str)
-        msg = "width (%s) is invalid " % str(width)
-        msg += "valid widths look like this: (12, 'au')"
+        msg = "width (%s) is invalid. " % str(width)
+        msg += "Valid widths look like this: (12, 'au')"
         assert valid, msg
     except AssertionError, e:
         raise YTInvalidWidthError(e)


https://bitbucket.org/yt_analysis/yt-3.0/commits/4f765ccba659/
Changeset:   4f765ccba659
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 01:26:24
Summary:     Using the numbers.Number abstract base class to validate.

See http://docs.python.org/2/library/numbers.html
and http://www.python.org/dev/peps/pep-3141/
Affected #:  2 files

diff -r 1f9bf12e30c66d899c4a193e011589d21f4d81bf -r 4f765ccba659e26ce0a59b0811032cffd860eb9e yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -64,5 +64,3 @@
                   'days'  : sec_per_day}
 
 axis_labels = [('y','z'),('x','z'),('x','y')]
-
-numeric = (int, long, float)

diff -r 1f9bf12e30c66d899c4a193e011589d21f4d81bf -r 4f765ccba659e26ce0a59b0811032cffd860eb9e yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -35,6 +35,7 @@
 from matplotlib.font_manager import FontProperties
 from distutils import version
 from functools import wraps
+from numbers import Number
 
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
@@ -60,8 +61,7 @@
     x_dict, x_names, \
     y_dict, y_names, \
     axis_names, \
-    axis_labels, \
-    numeric
+    axis_labels
 from yt.utilities.math_utils import \
     ortho_find
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -157,7 +157,7 @@
     try:
         assert iterable(width) and len(width) == 2, \
             "width (%s) is not a two element tuple" % width
-        valid = isinstance(width[0], numeric) and isinstance(width[1], str)
+        valid = isinstance(width[0], Number) and isinstance(width[1], str)
         msg = "width (%s) is invalid. " % str(width)
         msg += "Valid widths look like this: (12, 'au')"
         assert valid, msg
@@ -186,7 +186,7 @@
             width = (width, width)
     else:
         try:
-            assert isinstance(width, numeric), "width (%s) is invalid" % str(width)
+            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
         except AssertionError, e:
             raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
@@ -475,7 +475,7 @@
         else:
             set_axes_unit = False
 
-        if isinstance(width, numeric):
+        if isinstance(width, Number):
             width = (width, unit)
         elif iterable(width):
             if isinstance(width[0], tuple) and isinstance(width[1], tuple):
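
Note on the change above: numbers.Number is the root of Python's numeric
tower (PEP 3141), so one isinstance() check replaces the removed
`numeric = (int, long, float)` tuple and also covers any numeric type
registered with the ABC. A standalone Python 2 sketch:

    from numbers import Number
    from decimal import Decimal
    from fractions import Fraction

    for value in (12, 12L, 12.0, Decimal("12"), Fraction(1, 2)):
        assert isinstance(value, Number)        # all pass

    # The old tuple check misses anything outside those exact types:
    print isinstance(Decimal("12"), (int, long, float))   # False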


https://bitbucket.org/yt_analysis/yt-3.0/commits/de875bf4152c/
Changeset:   de875bf4152c
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 01:27:09
Summary:     Defining a new function to avoid repeating myself.
Affected #:  1 file

diff -r 4f765ccba659e26ce0a59b0811032cffd860eb9e -r de875bf4152cb3c47df932ccb39f0abc1e73b215 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -164,6 +164,22 @@
     except AssertionError, e:
         raise YTInvalidWidthError(e)
 
+def validate_iterable_width(width, unit=None):
+    if isinstance(width[0], tuple) and isinstance(width[1], tuple):
+        assert_valid_width_tuple(width[0])
+        assert_valid_width_tuple(width[1])
+    elif isinstance(width[0], Number) and isinstance(width[1], Number):
+        width = ((width[0], '1'), (width[1], '1'))
+    else:
+        assert_valid_width_tuple(width)
+        # If width and unit are both valid width tuples, we
+        # assume width controls x and unit controls y
+        try:
+            assert_valid_width_tuple(unit)
+            width = (width, unit)
+        except YTInvalidWidthError:
+            width = (width, width)
+
 def StandardWidth(axis, width, depth, pf):
     if width is None:
         # Default to code units
@@ -176,14 +192,7 @@
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
     elif iterable(width):
-        if isinstance(width[0], tuple) and isinstance(width[1], tuple):
-            assert_valid_width_tuple(width[0])
-            assert_valid_width_tuple(width[1])
-        elif isinstance(width[0], numeric) and isinstance(width[1], numeric):
-            width = ((width[0], '1'), (width[1], '1'))
-        else:
-            assert_valid_width_tuple(width)
-            width = (width, width)
+        validate_iterable_width(width)
     else:
         try:
             assert isinstance(width, Number), "width (%s) is invalid" % str(width)
@@ -196,6 +205,10 @@
         elif iterable(depth):
             assert_valid_width_tuple(depth)
         else:
+            try:
+                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+            except: AssertionError, e
+                raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
         width += depth
     return width
@@ -478,20 +491,7 @@
         if isinstance(width, Number):
             width = (width, unit)
         elif iterable(width):
-            if isinstance(width[0], tuple) and isinstance(width[1], tuple):
-                assert_valid_width_tuple(width[0])
-                assert_valid_width_tuple(width[1])
-            elif isinstance(width[0], numeric) and isinstance(width[1], numeric):
-                width = ((width[0], '1'), (width[1], '1'))
-            else:
-                assert_valid_width_tuple(width)
-                # If width and unit are both valid width tuples, we
-                # assume width controls x and unit controls y
-                try:
-                    assert_valid_width_tuple(unit)
-                    width = (width, unit)
-                except YTInvalidWidthError:
-                    width = (width, width)
+            validate_iterable_width(width, unit)
 
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/9ba87ebaed1e/
Changeset:   9ba87ebaed1e
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 01:27:25
Summary:     Using intersphinx to resolve the reference to a matplotlib class.
Affected #:  1 file

diff -r de875bf4152cb3c47df932ccb39f0abc1e73b215 -r 9ba87ebaed1e17f0b579d58c49261eb57785bafb yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -953,7 +953,7 @@
         ----------
         font_dict : dict
         A dict of keyword parameters to be passed to
-        matplotlib.font_manager.FontProperties.
+        :py:class:`matplotlib.font_manager.FontProperties`.
 
         Possible keys include
         * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or


https://bitbucket.org/yt_analysis/yt-3.0/commits/70016c04e488/
Changeset:   70016c04e488
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 01:42:17
Summary:     validate_iterable_width needs to return a width as well.
Affected #:  1 file

diff -r 9ba87ebaed1e17f0b579d58c49261eb57785bafb -r 70016c04e48870cfd1a27bdd6fadf1c628ba1b07 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -168,17 +168,18 @@
     if isinstance(width[0], tuple) and isinstance(width[1], tuple):
         assert_valid_width_tuple(width[0])
         assert_valid_width_tuple(width[1])
+        return width
     elif isinstance(width[0], Number) and isinstance(width[1], Number):
-        width = ((width[0], '1'), (width[1], '1'))
+        return ((width[0], '1'), (width[1], '1'))
     else:
         assert_valid_width_tuple(width)
         # If width and unit are both valid width tuples, we
         # assume width controls x and unit controls y
         try:
             assert_valid_width_tuple(unit)
-            width = (width, unit)
+            return (width, unit)
         except YTInvalidWidthError:
-            width = (width, width)
+            return (width, width)
 
 def StandardWidth(axis, width, depth, pf):
     if width is None:
@@ -192,7 +193,7 @@
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
     elif iterable(width):
-        validate_iterable_width(width)
+        width = validate_iterable_width(width)
     else:
         try:
             assert isinstance(width, Number), "width (%s) is invalid" % str(width)
@@ -207,7 +208,7 @@
         else:
             try:
                 assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
-            except: AssertionError, e
+            except AssertionError, e:
                 raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
         width += depth
@@ -491,7 +492,7 @@
         if isinstance(width, Number):
             width = (width, unit)
         elif iterable(width):
-            validate_iterable_width(width, unit)
+            width = validate_iterable_width(width, unit)
 
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/89c774429252/
Changeset:   89c774429252
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 04:16:57
Summary:     Backing out c5d810237838
Affected #:  1 file

diff -r 70016c04e48870cfd1a27bdd6fadf1c628ba1b07 -r 89c774429252921f1468456023c7c3bdd8668cb2 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1303,6 +1303,8 @@
          entire simulation.
     weight_field : string
          The name of the weighting field.  Set to None for no weight.
+    max_level: int
+         The maximum level to project to.
     fontsize : integer
          The size of the fonts for the axis, colorbar, and tick labels.
     field_parameters : dictionary
@@ -1322,7 +1324,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, origin='center-window', fontsize=18,
+                 weight_field=None, max_level=None, origin='center-window', fontsize=18,
                  field_parameters=None, data_source=None):
         ts = self._initialize_dataset(pf)
         self.ts = ts
@@ -1332,8 +1334,8 @@
         if axes_unit is None  and units != ('1', '1'):
             axes_unit = units
         if field_parameters is None: field_parameters = {}
-        proj = pf.h.proj(axis, fields, weight_field=weight_field, center=center,
-                         source=data_source, **field_parameters)
+        proj = pf.h.proj(axis, fields, weight_field=weight_field, max_level=max_level,
+                         center=center, source=data_source, **field_parameters)
         PWViewerMPL.__init__(self, proj, bounds, origin=origin, fontsize=fontsize)
         self.set_axes_unit(axes_unit)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/2bae91f5359d/
Changeset:   2bae91f5359d
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 04:28:20
Summary:     Setting the font for the 'offset text'.

See http://matplotlib.1069221.n5.nabble.com/Axis-font-and-exponent-questions-td11766.html
Affected #:  1 file

diff -r 89c774429252921f1468456023c7c3bdd8668cb2 -r 2bae91f5359db388ed1df9a3cb82771a76253d28 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -905,7 +905,9 @@
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
             for label in (self.plots[f].axes.get_xticklabels() +
-                          self.plots[f].axes.get_yticklabels()):
+                          self.plots[f].axes.get_yticklabels() +
+                          [self.plots[f].axes.xaxis.get_offset_text(),
+                           self.plots[f].axes.yaxis.get_offset_text()]):
                 label.set_fontproperties(fp)
 
             colorbar_label = image.info['label']
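
Note on the change above: matplotlib's "offset text" is the exponent or
offset label (e.g. "1e-7") drawn at the end of an axis when tick values are
rescaled; it is not included in get_xticklabels(), so it must be restyled
explicitly. A minimal standalone sketch of the same trick:

    import matplotlib.pyplot as plt
    from matplotlib.font_manager import FontProperties

    fig, ax = plt.subplots()
    ax.plot([0.0, 1.0e-7], [0.0, 1.0e-7])  # tiny values force an offset label

    fp = FontProperties(family='serif', size=18)
    for label in (ax.get_xticklabels() + ax.get_yticklabels() +
                  [ax.xaxis.get_offset_text(), ax.yaxis.get_offset_text()]):
        label.set_fontproperties(fp)
    fig.savefig('offset_text_demo.png')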


https://bitbucket.org/yt_analysis/yt-3.0/commits/16d8e27e4f93/
Changeset:   16d8e27e4f93
Branch:      yt
User:        ngoldbaum
Date:        2013-05-10 05:09:53
Summary:     Fixing a typo in the plot window docstrings.
Affected #:  1 file

diff -r 2bae91f5359db388ed1df9a3cb82771a76253d28 -r 16d8e27e4f93677707b52031eb1cccd3018a7422 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1133,7 +1133,7 @@
     fields : string
          The name of the field(s) to be plotted.
     center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
-         The coordinate of the center of the image.  If left blanck,
+         The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
          the middle of the domain.  If set to 'max', will be at the point
@@ -1243,7 +1243,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
-         The coordinate of the center of the image.  If left blanck,
+         The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
          the middle of the domain.  If set to 'max', will be at the point
@@ -1361,7 +1361,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or 'center'
-        The coordinate of the center of the image.  If left blanck,
+        The coordinate of the center of the image.  If left blank,
         the image centers on the location of the maximum density
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.
@@ -1444,7 +1444,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or 'center'
-        The coordinate of the center of the image.  If left blanck,
+        The coordinate of the center of the image.  If left blank,
         the image centers on the location of the maximum density
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.


https://bitbucket.org/yt_analysis/yt-3.0/commits/eba217216a22/
Changeset:   eba217216a22
Branch:      yt
User:        chummels
Date:        2013-05-11 00:32:50
Summary:     Merged in ngoldbaum/yt (pull request #496)

Plot window improvements: set_width, set_font, and no more max_level for ProjectionPlot.
Affected #:  4 files

diff -r 70ad437a12e2b7593a998f6c47ab3bb3e48cb4d0 -r eba217216a22b7f387685ec1dcf3b0c58ad186d0 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -208,3 +208,10 @@
         s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
         s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
         return s
+
+class YTInvalidWidthError(YTException):
+    def __init__(self, error):
+        self.error = error
+
+    def __str__(self):
+        return str(self.error)

diff -r 70ad437a12e2b7593a998f6c47ab3bb3e48cb4d0 -r eba217216a22b7f387685ec1dcf3b0c58ad186d0 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -35,6 +35,7 @@
 from matplotlib.font_manager import FontProperties
 from distutils import version
 from functools import wraps
+from numbers import Number
 
 from ._mpl_imports import \
     FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
@@ -65,7 +66,7 @@
     ortho_find
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     GroupOwnership
-from yt.utilities.exceptions import YTUnitNotRecognized
+from yt.utilities.exceptions import YTUnitNotRecognized, YTInvalidWidthError
 from yt.data_objects.time_series import \
     TimeSeriesData
 
@@ -152,6 +153,34 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
+def assert_valid_width_tuple(width):
+    try:
+        assert iterable(width) and len(width) == 2, \
+            "width (%s) is not a two element tuple" % width
+        valid = isinstance(width[0], Number) and isinstance(width[1], str)
+        msg = "width (%s) is invalid. " % str(width)
+        msg += "Valid widths look like this: (12, 'au')"
+        assert valid, msg
+    except AssertionError, e:
+        raise YTInvalidWidthError(e)
+
+def validate_iterable_width(width, unit=None):
+    if isinstance(width[0], tuple) and isinstance(width[1], tuple):
+        assert_valid_width_tuple(width[0])
+        assert_valid_width_tuple(width[1])
+        return width
+    elif isinstance(width[0], Number) and isinstance(width[1], Number):
+        return ((width[0], '1'), (width[1], '1'))
+    else:
+        assert_valid_width_tuple(width)
+        # If width and unit are both valid width tuples, we
+        # assume width controls x and unit controls y
+        try:
+            assert_valid_width_tuple(unit)
+            return (width, unit)
+        except YTInvalidWidthError:
+            return (width, width)
+
 def StandardWidth(axis, width, depth, pf):
     if width is None:
         # Default to code units
@@ -164,18 +193,23 @@
             width = ((pf.domain_width.min(), '1'),
                      (pf.domain_width.min(), '1'))
     elif iterable(width):
-        if isinstance(width[1], str):
-            width = (width, width)
-        elif isinstance(width[1], (long, int, float)):
-            width = ((width[0], '1'), (width[1], '1'))
+        width = validate_iterable_width(width)
     else:
+        try:
+            assert isinstance(width, Number), "width (%s) is invalid" % str(width)
+        except AssertionError, e:
+            raise YTInvalidWidthError(e)
         width = ((width, '1'), (width, '1'))
     if depth is not None:
         if iterable(depth) and isinstance(depth[1], str):
             depth = (depth,)
         elif iterable(depth):
-            raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
+            assert_valid_width_tuple(depth)
         else:
+            try:
+                assert isinstance(depth, Number), "width (%s) is invalid" % str(depth)
+            except AssertionError, e:
+                raise YTInvalidWidthError(e)
             depth = ((depth, '1'),)
         width += depth
     return width
@@ -447,18 +481,18 @@
              in code units.  If units are provided the resulting plot axis labels will
              use the supplied units.
         unit : str
-             the unit the width has been specified in.
-             defaults to code units.  If width is a tuple this
-             argument is ignored
-
+             the unit the width has been specified in. If width is a tuple, this
+             argument is ignored. Defaults to code units.
         """
         if width is not None:
             set_axes_unit = True
         else:
             set_axes_unit = False
 
-        if isinstance(width, (int, long, float)):
+        if isinstance(width, Number):
             width = (width, unit)
+        elif iterable(width):
+            width = validate_iterable_width(width, unit)
 
         width = StandardWidth(self._frb.axis, width, None, self.pf)
 
@@ -743,6 +777,7 @@
         font_size = kwargs.pop("fontsize", 18)
         font_path = matplotlib.get_data_path() + '/fonts/ttf/STIXGeneral.ttf'
         self._font_properties = FontProperties(size=font_size, fname=font_path)
+        self._font_color = None
         PWViewer.__init__(self, *args, **kwargs)
 
     def _setup_origin(self):
@@ -870,7 +905,9 @@
             self.plots[f].axes.set_ylabel(labels[1],fontproperties=fp)
 
             for label in (self.plots[f].axes.get_xticklabels() +
-                          self.plots[f].axes.get_yticklabels()):
+                          self.plots[f].axes.get_yticklabels() +
+                          [self.plots[f].axes.xaxis.get_offset_text(),
+                           self.plots[f].axes.yaxis.get_offset_text()]):
                 label.set_fontproperties(fp)
 
             colorbar_label = image.info['label']
@@ -888,6 +925,16 @@
 
             self.run_callbacks(f)
 
+            if self._font_color is not None:
+                ax = self.plots[f].axes
+                cbax = self.plots[f].cb.ax
+                labels = \
+                  ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels() + \
+                  cbax.yaxis.get_ticklabels() + \
+                  [ax.xaxis.label, ax.yaxis.label, cbax.yaxis.label]
+                for label in labels:
+                    label.set_color(self._font_color)
+
         self._plot_valid = True
 
     def run_callbacks(self, f):
@@ -909,28 +956,49 @@
         ----------
         font_dict : dict
         A dict of keyword parameters to be passed to
-        matplotlib.font_manager.FontProperties.  See the matplotlib font
-        manager documentation for more details.
+        :py:class:`matplotlib.font_manager.FontProperties`.
+
+        Possible keys include
+        * family - The font family. Can be serif, sans-serif, cursive, 'fantasy' or
+          'monospace'.
+        * style - The font style. Either normal, italic or oblique.
+        * color - A valid color string like 'r', 'g', 'red', 'cobalt', and
+          'orange'.
+        * variant: Either normal or small-caps.
+        * size: Either a relative value of xx-small, x-small, small, medium,
+          large, x-large, xx-large or an absolute font size, e.g. 12
+        * stretch: A numeric value in the range 0-1000 or one of
+          ultra-condensed, extra-condensed, condensed, semi-condensed, normal,
+          semi-expanded, expanded, extra-expanded or ultra-expanded
+        * weight: A numeric value in the range 0-1000 or one of ultralight,
+          light, normal, regular, book, medium, roman, semibold, demibold, demi,
+          bold, heavy, extra bold, or black
+
+        See the matplotlib font manager API documentation for more details.
         http://matplotlib.org/api/font_manager_api.html
 
         Notes
         -----
-        Mathtext axis labels will only obey the `size` keyword.
+        Mathtext axis labels will only obey the `size` and `color` keywords.
 
         Examples
         --------
-        This sets the font to be 24-pt, sans-serif, italic, and bold-face.
+        This sets the font to be 24-pt, blue, sans-serif, italic, and
+        bold-face.
 
         >>> slc = SlicePlot(pf, 'x', 'Density')
         >>> slc.set_font({'family':'sans-serif', 'style':'italic',
-                          'weight':'bold', 'size':24})
+                          'weight':'bold', 'size':24, 'color':'blue'})
 
         """
         if font_dict is None:
             font_dict = {}
+        if 'color' in font_dict:
+            self._font_color = font_dict.pop('color')
         self._font_properties = \
             FontProperties(**font_dict)
 
+
     @invalidate_plot
     def set_cmap(self, field, cmap):
         """set the colormap for one of the fields
@@ -1065,7 +1133,7 @@
     fields : string
          The name of the field(s) to be plotted.
     center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
-         The coordinate of the center of the image.  If left blanck,
+         The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
          the middle of the domain.  If set to 'max', will be at the point
@@ -1175,7 +1243,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : two or three-element vector of sequence floats, 'c', or 'center', or 'max'
-         The coordinate of the center of the image.  If left blanck,
+         The coordinate of the center of the image.  If left blank,
          the image centers on the location of the maximum density
          cell.  If set to 'c' or 'center', the plot is centered on
          the middle of the domain.  If set to 'max', will be at the point
@@ -1293,7 +1361,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or 'center'
-        The coordinate of the center of the image.  If left blanck,
+        The coordinate of the center of the image.  If left blank,
         the image centers on the location of the maximum density
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.
@@ -1376,7 +1444,7 @@
     fields : string
         The name of the field(s) to be plotted.
     center : A two or three-element vector of sequence floats, 'c', or 'center'
-        The coordinate of the center of the image.  If left blanck,
+        The coordinate of the center of the image.  If left blank,
         the image centers on the location of the maximum density
         cell.  If set to 'c' or 'center', the plot is centered on
         the middle of the domain.

diff -r 70ad437a12e2b7593a998f6c47ab3bb3e48cb4d0 -r eba217216a22b7f387685ec1dcf3b0c58ad186d0 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -147,6 +147,13 @@
          (-5/pf['kpc'], 5/pf['kpc']),
          (15/pf['kpc'], 10/pf['kpc'])], 15
 
+    slc.set_width((15,'kpc'),(10000,'pc'))
+
+    yield assert_rel_equal, [slc.xlim, slc.ylim, slc.width], \
+        [(-7.5/pf['kpc'], 7.5/pf['kpc']),
+         (-5/pf['kpc'], 5/pf['kpc']),
+         (15/pf['kpc'], 10/pf['kpc'])], 15
+
 def test_save():
     """Test plot window creation and saving to disk."""
     # Perform I/O in safe place instead of yt main dir


https://bitbucket.org/yt_analysis/yt-3.0/commits/3b0ab6c3816b/
Changeset:   3b0ab6c3816b
Branch:      yt
User:        jzuhone
Date:        2012-12-11 17:18:50
Summary:     Merging
Affected #:  1 file

diff -r 380f62e5f4f2cd4cfa2627dfd988bebf253af05b -r 3b0ab6c3816b7a73f38528859acc85fdbbe97980 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -43,11 +43,7 @@
 
 log_translation_dict = {}
 
-translation_dict = {"Density": "density",
-                    "Pressure": "pressure",
-                    "x-velocity": "velocity_x",
-                    "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z"}
+translation_dict = {"Density": "density"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -56,29 +52,37 @@
 add_athena_field = KnownAthenaFields.add_field
 
 add_athena_field("density", function=NullFunc, take_log=False,
-          units=r"",
-          projected_units =r"")
+                 units=r"", projected_units =r"")
 
 add_athena_field("pressure", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
 
 add_athena_field("velocity_x", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
 
 add_athena_field("velocity_y", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
 
 add_athena_field("velocity_z", function=NullFunc, take_log=False,
-          units=r"")
+                 units=r"")
+
+add_athena_field("momentum_x", function=NullFunc, take_log=False,
+                 units=r"")
+
+add_athena_field("momentum_y", function=NullFunc, take_log=False,
+                 units=r"")
+
+add_athena_field("momentum_z", function=NullFunc, take_log=False,
+                 units=r"")
 
 add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\ centered\ B_x}$")
+                 units=r"", display_name=r"$\rm{cell\ centered\ B_x}$")
 
 add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\ centered\ B_y}$")
+                 units=r"", display_name=r"$\rm{cell\ centered\ B_y}$")
 
 add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
-          units=r"", display_name=r"$\rm{cell\ centered\ B_z}$")
+                 units=r"", display_name=r"$\rm{cell\ centered\ B_z}$")
 
 for f,v in log_translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=True)
@@ -86,7 +90,39 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
-def _Temperature(fields, data):
+# In Athena, conservative or primitive variables may be written out.
+# By default, yt concerns itself with primitive variables. The following
+# field definitions allow for conversions to primitive variables in the
+# case that the file contains the conservative ones.
+
+def _xvelocity(field, data):
+    try:
+        xvel = data["velocity_x"]
+    except:
+        xvel = data["momentum_x"]/data["Density"]
+    return xvel
+add_field("x-velocity", function=_xvelocity, take_log=False,
+          units=r"")
+
+def _yvelocity(field, data):
+    try:
+        yvel = data["velocity_y"]
+    except:
+        yvel = data["momentum_y"]/data["Density"]
+    return yvel
+add_field("y-velocity", function=_yvelocity, take_log=False,
+          units=r"")
+
+def _zvelocity(field, data):
+    try:
+        zvel = data["velocity_z"]
+    except:
+        zvel = data["momentum_z"]/data["Density"]
+    return zvel
+add_field("z-velocity", function=_zvelocity, take_log=False,
+          units=r"")
+
+def _Temperature(field, data):
     if data.has_field_parameter("mu") :
         mu = data.get_field_parameter("mu")
     else:
@@ -95,19 +131,19 @@
 add_field("Temperature", function=_Temperature, take_log=False,
           units=r"\rm{K}")
 
-def _Bx(fields, data):
+def _Bx(field, data):
     factor = np.sqrt(4.*np.pi)
     return data['cell_centered_B_x']*factor
 add_field("Bx", function=_Bx, take_log=False,
           units=r"\rm{Gauss}", display_name=r"B_x")
 
-def _By(fields, data):
+def _By(field, data):
     factor = np.sqrt(4.*np.pi)
     return data['cell_centered_B_y']*factor
 add_field("By", function=_By, take_log=False,
           units=r"\rm{Gauss}", display_name=r"B_y")
 
-def _Bz(fields, data):
+def _Bz(field, data):
     factor = np.sqrt(4.*np.pi)
     return data['cell_centered_B_z']*factor
 add_field("Bz", function=_Bz, take_log=False,


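A usage sketch of the fallback these field definitions implement; the
dataset name below is hypothetical, but the access pattern is standard yt:

    from yt.mods import load

    pf = load("kh.0010.vtk")     # hypothetical Athena dump
    dd = pf.h.all_data()
    # Works whether the file wrote the primitive velocity_x or only the
    # conservative momentum_x; the field falls back to momentum/Density.
    vx = dd["x-velocity"]
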
https://bitbucket.org/yt_analysis/yt-3.0/commits/18607c558f38/
Changeset:   18607c558f38
Branch:      yt
User:        jzuhone
Date:        2012-12-11 17:23:39
Summary:     Merging
Affected #:  2 files

diff -r 3b0ab6c3816b7a73f38528859acc85fdbbe97980 -r 18607c558f38ff5486530de13fe216c07426cc30 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -330,7 +330,8 @@
         ParticleGenerator.__init__(self, pf, num_particles, field_list)
 
         num_cells = len(data_source["x"].flat)
-        max_density = data_source[density_field].max()
+        max_mass = (data_source[density_field]*
+                    data_source["CellVolume"]).max()
         num_particles_left = num_particles
         all_x = []
         all_y = []
@@ -341,12 +342,13 @@
         
         while num_particles_left > 0:
 
-            rho = np.random.uniform(high=1.01*max_density,
-                                    size=num_particles_left)
+            m = np.random.uniform(high=1.01*max_mass,
+                                  size=num_particles_left)
             idxs = np.random.random_integers(low=0, high=num_cells-1,
                                              size=num_particles_left)
-            rho_true = data_source[density_field].flat[idxs]
-            accept = rho <= rho_true
+            m_true = (data_source[density_field]*
+                      data_source["CellVolume"]).flat[idxs]
+            accept = m <= m_true
             num_accepted = accept.sum()
             accepted_idxs = idxs[accept]
             

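The hunk above switches the rejection step from density-weighted to
mass-weighted acceptance, so sampled particle counts track cell mass
(rho * V) rather than density alone. A self-contained toy sketch of the
same scheme, using made-up field values rather than the yt API:

    import numpy as np

    rho = np.random.random(1000)             # toy cell densities
    vol = np.random.random(1000) * 1.0e-3    # toy cell volumes
    mass = rho * vol

    # Draw candidate cells uniformly; accept each with probability
    # proportional to its cell mass.
    m = np.random.uniform(high=1.01 * mass.max(), size=100)
    idxs = np.random.random_integers(low=0, high=mass.size - 1, size=100)
    accepted_idxs = idxs[m <= mass[idxs]]
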
diff -r 3b0ab6c3816b7a73f38528859acc85fdbbe97980 -r 18607c558f38ff5486530de13fe216c07426cc30 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1059,7 +1059,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        slc = pf.h.slice(axis, center[axis], fields=fields)
+        slc = pf.h.slice(axis, center[axis], center=center, fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/bea5473873e8/
Changeset:   bea5473873e8
Branch:      yt
User:        jzuhone
Date:        2012-12-20 23:27:17
Summary:     Merging
Affected #:  23 files

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 distribute_setup.py
--- a/distribute_setup.py
+++ b/distribute_setup.py
@@ -14,11 +14,14 @@
 This file can also be run as a script to install or upgrade setuptools.
 """
 import os
+import shutil
 import sys
 import time
 import fnmatch
 import tempfile
 import tarfile
+import optparse
+
 from distutils import log
 
 try:
@@ -46,7 +49,7 @@
             args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.21"
+DEFAULT_VERSION = "0.6.32"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"
 
@@ -63,7 +66,7 @@
 """ % SETUPTOOLS_FAKED_VERSION
 
 
-def _install(tarball):
+def _install(tarball, install_args=()):
     # extracting the tarball
     tmpdir = tempfile.mkdtemp()
     log.warn('Extracting in %s', tmpdir)
@@ -81,11 +84,14 @@
 
         # installing
         log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
+        if not _python_cmd('setup.py', 'install', *install_args):
             log.warn('Something went wrong during the installation.')
             log.warn('See the error message above.')
+            # exitcode will be 2
+            return 2
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
 
 
 def _build_egg(egg, tarball, to_dir):
@@ -110,6 +116,7 @@
 
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
     # returning the result
     log.warn(egg)
     if not os.path.exists(egg):
@@ -144,7 +151,7 @@
         except ImportError:
             return _do_download(version, download_base, to_dir, download_delay)
         try:
-            pkg_resources.require("distribute>="+version)
+            pkg_resources.require("distribute>=" + version)
             return
         except pkg_resources.VersionConflict:
             e = sys.exc_info()[1]
@@ -167,6 +174,7 @@
         if not no_fake:
             _create_fake_setuptools_pkg_info(to_dir)
 
+
 def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                         to_dir=os.curdir, delay=15):
     """Download distribute from a specified location and return its filename
@@ -203,6 +211,7 @@
                 dst.close()
     return os.path.realpath(saveto)
 
+
 def _no_sandbox(function):
     def __no_sandbox(*args, **kw):
         try:
@@ -227,6 +236,7 @@
 
     return __no_sandbox
 
+
 def _patch_file(path, content):
     """Will backup the file then patch it"""
     existing_content = open(path).read()
@@ -245,15 +255,18 @@
 
 _patch_file = _no_sandbox(_patch_file)
 
+
 def _same_content(path, content):
     return open(path).read() == content
 
+
 def _rename_path(path):
     new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
+    log.warn('Renaming %s to %s', path, new_name)
     os.rename(path, new_name)
     return new_name
 
+
 def _remove_flat_installation(placeholder):
     if not os.path.isdir(placeholder):
         log.warn('Unkown installation at %s', placeholder)
@@ -267,7 +280,7 @@
         log.warn('Could not locate setuptools*.egg-info')
         return
 
-    log.warn('Removing elements out of the way...')
+    log.warn('Moving elements out of the way...')
     pkg_info = os.path.join(placeholder, file)
     if os.path.isdir(pkg_info):
         patched = _patch_egg_dir(pkg_info)
@@ -289,11 +302,13 @@
 
 _remove_flat_installation = _no_sandbox(_remove_flat_installation)
 
+
 def _after_install(dist):
     log.warn('After install bootstrap.')
     placeholder = dist.get_command_obj('install').install_purelib
     _create_fake_setuptools_pkg_info(placeholder)
 
+
 def _create_fake_setuptools_pkg_info(placeholder):
     if not placeholder or not os.path.exists(placeholder):
         log.warn('Could not find the install location')
@@ -307,7 +322,11 @@
         return
 
     log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
+    try:
+        f = open(pkg_info, 'w')
+    except EnvironmentError:
+        log.warn("Don't have permissions to write %s, skipping", pkg_info)
+        return
     try:
         f.write(SETUPTOOLS_PKG_INFO)
     finally:
@@ -321,7 +340,10 @@
     finally:
         f.close()
 
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+    _create_fake_setuptools_pkg_info
+)
+
 
 def _patch_egg_dir(path):
     # let's check if it's already patched
@@ -343,6 +365,7 @@
 
 _patch_egg_dir = _no_sandbox(_patch_egg_dir)
 
+
 def _before_install():
     log.warn('Before install bootstrap.')
     _fake_setuptools()
@@ -351,7 +374,7 @@
 def _under_prefix(location):
     if 'install' not in sys.argv:
         return True
-    args = sys.argv[sys.argv.index('install')+1:]
+    args = sys.argv[sys.argv.index('install') + 1:]
     for index, arg in enumerate(args):
         for option in ('--root', '--prefix'):
             if arg.startswith('%s=' % option):
@@ -359,7 +382,7 @@
                 return location.startswith(top_dir)
             elif arg == option:
                 if len(args) > index:
-                    top_dir = args[index+1]
+                    top_dir = args[index + 1]
                     return location.startswith(top_dir)
         if arg == '--user' and USER_SITE is not None:
             return location.startswith(USER_SITE)
@@ -376,11 +399,14 @@
         return
     ws = pkg_resources.working_set
     try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools', replacement=False)
+            )
     except TypeError:
         # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools')
+        )
 
     if setuptools_dist is None:
         log.warn('No setuptools distribution found')
@@ -414,7 +440,7 @@
         res = _patch_egg_dir(setuptools_location)
         if not res:
             return
-    log.warn('Patched done.')
+    log.warn('Patching complete.')
     _relaunch()
 
 
@@ -422,7 +448,9 @@
     log.warn('Relaunching...')
     # we have to relaunch the process
     # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
+    _cmd2 = ['-c', 'install', '--record']
+    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
         sys.argv[0] = 'setup.py'
     args = [sys.executable] + sys.argv
     sys.exit(subprocess.call(args))
@@ -448,7 +476,7 @@
             # Extract directories with a safe mode.
             directories.append(tarinfo)
             tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
+            tarinfo.mode = 448  # decimal for oct 0700
         self.extract(tarinfo, path)
 
     # Reverse sort directories.
@@ -475,11 +503,39 @@
                 self._dbg(1, "tarfile: %s" % e)
 
 
-def main(argv, version=DEFAULT_VERSION):
+def _build_install_args(options):
+    """
+    Build the arguments to 'python setup.py install' on the distribute package
+    """
+    install_args = []
+    if options.user_install:
+        if sys.version_info < (2, 6):
+            log.warn("--user requires Python 2.6 or later")
+            raise SystemExit(1)
+        install_args.append('--user')
+    return install_args
+
+def _parse_args():
+    """
+    Parse the command line for options
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        '--user', dest='user_install', action='store_true', default=False,
+        help='install in user site package (requires Python 2.6 or later)')
+    parser.add_option(
+        '--download-base', dest='download_base', metavar="URL",
+        default=DEFAULT_URL,
+        help='alternative URL from where to download the distribute package')
+    options, args = parser.parse_args()
+    # positional arguments are ignored
+    return options
+
+def main(version=DEFAULT_VERSION):
     """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
+    options = _parse_args()
+    tarball = download_setuptools(download_base=options.download_base)
+    return _install(tarball, _build_install_args(options))
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    sys.exit(main())

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,6 +399,14 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -407,6 +415,13 @@
     exit 1
 fi
 
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -706,7 +721,7 @@
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -30,4 +30,8 @@
 
 from .spectral_frequency_integrator import \
     SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+    create_table_from_textfiles, \
+    EmissivityIntegrator, \
+    add_xray_emissivity_field, \
+    add_xray_luminosity_field, \
+    add_xray_photon_emissivity_field

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -4,9 +4,11 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittons at origins.colorado.edu>
+Affiliation: Michigan State University
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2007-2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,16 +26,20 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from exceptions import IOError
+import h5py
 import numpy as np
+import os
 
 from yt.funcs import *
 
 from yt.data_objects.field_info_container import add_field
+from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
+    BilinearFieldInterpolator
 
+xray_data_version = 1
+    
 class SpectralFrequencyIntegrator(object):
     def __init__(self, table, field_names,
                  bounds, ev_bounds):
@@ -80,8 +86,8 @@
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
-                        units=r"\rm{ergs}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
-                        projected_units=r"\rm{ergs}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
+                        units=r"\rm{ergs}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+                        projected_units=r"\rm{ergs}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
         return name
 
 def create_table_from_textfiles(pattern, rho_spec, e_spec, T_spec):
@@ -100,3 +106,304 @@
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
     return table
 
+class EnergyBoundsException(YTException):
+    def __init__(self, lower, upper):
+        self.lower = lower
+        self.upper = upper
+
+    def __str__(self):
+        return "Energy bounds are %e to %e keV." % \
+          (self.lower, self.upper)
+
+class ObsoleteDataException(YTException):
+    def __str__(self):
+        return "X-ray emissivity data is out of data.\nDownload the latest data from http://yt-project.org/data/xray_emissivity.h5 and move it to %s." % \
+          os.path.join(os.environ["YT_DEST"], "data", "xray_emissivity.h5")
+          
+class EmissivityIntegrator(object):
+    r"""Class for making X-ray emissivity fields with hdf5 data tables 
+    from Cloudy.
+    """
+    def __init__(self, filename=None):
+        r"""Initialize an EmissivityIntegrator object.
+
+        Keyword Parameters
+        ------------------
+        filename: string
+            Path to data file containing emissivity values.  If None,
+            a file called xray_emissivity.h5 is used.  This file contains 
+            emissivity tables for primordial elements and for metals at 
+            solar metallicity for the energy range 0.1 to 100 keV.
+            Default: None.
+            
+        """
+
+        default_filename = False
+        if filename is None:
+            filename = os.path.join(os.environ["YT_DEST"], 
+                                    "data", "xray_emissivity.h5")
+            default_filename = True
+
+        if not os.path.exists(filename):
+            raise IOError("File does not exist: %s." % filename)
+        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
+        in_file = h5py.File(filename, "r")
+        if "info" in in_file.attrs:
+            only_on_root(mylog.info, in_file.attrs["info"])
+        if default_filename and \
+          in_file.attrs["version"] < xray_data_version:
+            raise ObsoleteDataException()
+        else:
+            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
+                         in_file.attrs["version"])
+
+        for field in ["emissivity_primordial", "emissivity_metals",
+                      "log_nH", "log_T", "log_E"]:
+            setattr(self, field, in_file[field][:])
+        in_file.close()
+
+        E_diff = np.diff(self.log_E)
+        self.E_bins = \
+                  np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
+                                               [self.log_E[-1] - 0.5 * E_diff[-1],
+                                                self.log_E[-1] + 0.5 * E_diff[-1]]]))
+        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+
+    def _get_interpolator(self, data, e_min, e_max):
+        r"""Create an interpolator for total emissivity in a 
+        given energy range.
+
+        Parameters
+        ----------
+        e_min: float
+            the minimum energy in keV for the energy band.
+        e_max: float
+            the maximum energy in keV for the energy band.
+
+        """
+        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
+          (e_max - self.E_bins[-1]) / e_max > 1e-3:
+            raise EnergyBoundsException(np.power(10, self.E_bins[0]),
+                                        np.power(10, self.E_bins[-1]))
+        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
+        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
+        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
+
+        my_dnu = np.copy(self.dnu[e_is: e_ie])
+        # clip edge bins if the requested range is smaller
+        my_dnu[0] -= e_min - self.E_bins[e_is]
+        my_dnu[-1] -= self.E_bins[e_ie] - e_max
+
+        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
+        return BilinearFieldInterpolator(np.log10(interp_data),
+                                         [self.log_nH[0], self.log_nH[-1],
+                                          self.log_T[0],  self.log_T[-1]],
+                                         ["log_nH", "log_T"], truncate=True)
+
+def add_xray_emissivity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+
+    em_0 = my_si._get_interpolator(my_si.emissivity_primordial, e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator(my_si.emissivity_metals, e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{erg}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_luminosity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray luminosity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Luminosity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_luminosity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> sp = pf.h.sphere('max', (2., 'mpc'))
+    >>> print sp.quantities['TotalQuantity']('Xray_Luminosity_0.5_2keV')
+    
+    """
+
+    em_field = add_xray_emissivity_field(e_min, e_max, filename=filename,
+                                         with_metals=with_metals,
+                                         constant_metallicity=constant_metallicity)
+
+    def _luminosity_field(field, data):
+        return data[em_field] * data["CellVolume"]
+    field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_luminosity_field,
+              display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
+                                     with_metals=True,
+                                     constant_metallicity=None):
+    r"""Create an X-ray photon emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Photon_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are photons s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+
+    em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
+                                   e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
+                                       e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{photons}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{photons}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name

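The constant 2.41799e17 in the dnu computation above is the keV-to-Hz
conversion nu = E/h; a quick consistency check using values quoted
elsewhere in this changeset (the keV-to-erg factor in the photon
emissivity field, Planck's constant in yt/utilities/physical_constants.py):

    keV_to_erg = 1.60217646e-9     # erg per keV
    h_cgs = 6.62606896e-27         # erg s
    print keV_to_erg / h_cgs       # ~2.41799e17 Hz per keV
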
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -59,9 +59,10 @@
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     ipython_notebook = 'False',
+    notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold004',
+    gold_standard_filename = 'gold005',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1781,10 +1781,16 @@
             self._okay_to_serialize = True
 
     def _get_tree(self, nvals):
-        xd = self.pf.domain_dimensions[x_dict[self.axis]]
-        yd = self.pf.domain_dimensions[y_dict[self.axis]]
+        xax = x_dict[self.axis]
+        yax = y_dict[self.axis]
+        xd = self.pf.domain_dimensions[xax]
+        yd = self.pf.domain_dimensions[yax]
+        bounds = (self.pf.domain_left_edge[xax],
+                  self.pf.domain_right_edge[xax],
+                  self.pf.domain_left_edge[yax],
+                  self.pf.domain_right_edge[yax])
         return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
-                        style = self.proj_style)
+                        bounds, style = self.proj_style)
 
     def _get_dls(self, grid, fields):
         # Place holder for a time when maybe we will not be doing just

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -280,7 +280,7 @@
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
-    def get_field_parameter(self, param):
+    def get_field_parameter(self, param, default = None):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return np.random.random(3) * 1e-2

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,9 +258,12 @@
         ...     SlicePlot(pf, "x", "Density").save()
 
         """
+        
         if isinstance(filenames, types.StringTypes):
             filenames = glob.glob(filenames)
             filenames.sort()
+        if len(filenames) == 0:
+            raise YTOutputNotIdentified(filenames, {})
         obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -506,11 +506,11 @@
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:
-            return data['dx']*data['dy']*data['dx']*\
+            return data['dx'] * data['dy'] * data['dz'] * \
                 np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
-            return data['dx']*data['dy']*data['dx']
-    return data["dx"]*data["dy"]*data["dz"]
+            return data['dx'] * data['dy'] * data['dz']
+    return data["dx"] * data["dy"] * data["dz"]
 def _ConvertCellVolumeMpc(data):
     return data.convert("mpc")**3.0
 def _ConvertCellVolumeCGS(data):

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -106,7 +106,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
-        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.directory = pf.fullpath
         self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
@@ -143,9 +143,6 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
         self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -185,7 +185,7 @@
 
     .. code-block:: python
 
-       @rootonly
+       @deprecate
        def some_really_old_function(...):
 
     """
@@ -205,7 +205,7 @@
 
     .. code-block:: python
 
-       @rootonly
+       @pdb_run
        def some_function_to_debug(...):
 
     """
@@ -574,5 +574,5 @@
     return inv_axis_names.get(axis, axis)
 
 def get_image_suffix(name):
-    suffix = os.path.splitext(name)[1]
+    suffix = os.path.splitext(name)[1].lstrip('.')
     return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''

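With the lstrip change, recognized suffixes come back without the
leading dot; a quick illustration:

    from yt.funcs import get_image_suffix

    assert get_image_suffix("proj.pdf") == "pdf"
    assert get_image_suffix("proj.tiff") == ""   # unrecognized suffix
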
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/imods.py
--- /dev/null
+++ b/yt/imods.py
@@ -0,0 +1,17 @@
+# This custom importer for yt will set up some IPython notebook-specific
+# helpers.  For instance, it will eventually add items to the menubar.
+
+import __builtin__
+if "__IPYTHON__" not in dir(__builtin__):
+    raise ImportError
+
+from IPython.core.interactiveshell import InteractiveShell
+from IPython.core.display import display, display_html
+inst = InteractiveShell.instance()
+ip = inst.get_ipython()
+ip.enable_pylab("inline", import_all=False)
+
+from yt.config import ytcfg
+ytcfg["yt", "ipython_notebook"] = "True"
+
+from yt.mods import *

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -30,7 +30,7 @@
 from yt.funcs import *
 from yt.utilities.minimal_representation import MinimalProjectDescription
 import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
-import urllib, urllib2, base64
+import urllib, urllib2, base64, os
 
 def _fix_pf(arg):
     if os.path.isdir("%s" % arg) and \
@@ -591,19 +591,6 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
-        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
-        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
-            print "Let's also add bitbucket.org to the known hosts, so hg"
-            print "doesn't warn us about bitbucket."
-            print "We will add this:"
-            print
-            print "   [hostfingerprints]"
-            print "   bitbucket.org = %s" % (bb_fp)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            cedit.config.setoption(uu, hgrc_path,
-                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
-
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
@@ -1398,6 +1385,67 @@
         import rpdb
         rpdb.run_rpdb(int(args.task))
 
+class YTNotebookCmd(YTCommand):
+    name = ["notebook"]
+    args = (
+            dict(short="-o", long="--open-browser", action="store_true",
+                 default = False, dest='open_browser',
+                 help="Open a web browser."),
+            dict(short="-p", long="--port", action="store",
+                 default = 0, dest='port',
+                 help="Port to listen on; defaults to auto-detection."),
+            dict(short="-n", long="--no-password", action="store_true",
+                 default = False, dest='no_password',
+                 help="If set, do not prompt or use a password."),
+            )
+    description = \
+        """
+        Run the IPython Notebook
+        """
+    def __call__(self, args):
+        kwargs = {}
+        from IPython.frontend.html.notebook.notebookapp import NotebookApp
+        pw = ytcfg.get("yt", "notebook_password")
+        if len(pw) == 0 and not args.no_password:
+            import IPython.lib
+            pw = IPython.lib.passwd()
+            print "If you would like to use this password in the future,"
+            print "place a line like this inside the [yt] section in your"
+            print "yt configuration file at ~/.yt/config"
+            print
+            print "notebook_password = %s" % pw
+            print
+        elif args.no_password:
+            pw = None
+        if args.port != 0:
+            kwargs['port'] = int(args.port)
+        if pw is not None:
+            kwargs['password'] = pw
+        app = NotebookApp(open_browser=args.open_browser,
+                          **kwargs)
+        app.initialize(argv=[])
+        print
+        print "***************************************************************"
+        print
+        print "The notebook is now live at:"
+        print
+        print "     http://127.0.0.1:%s/" % app.port
+        print
+        print "Recall you can create a new SSH tunnel dynamically by pressing"
+        print "~C and then typing -L%s:localhost:%s" % (app.port, app.port)
+        print
+        print "Additionally, while in the notebook, we recommend you start by"
+        print "replacing 'yt.mods' with 'yt.imods' like so:"
+        print
+        print "    from yt.imods import *"
+        print
+        print "This will enable some IPython-specific extensions to yt."
+        print
+        print "***************************************************************"
+        print
+        app.start()
+
+
 class YTGUICmd(YTCommand):
     name = ["serve", "reason"]
     args = (

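The stored hash can also be generated by hand, using the same call the
command makes when no password is configured (a sketch; run it in a
Python session and paste the output into ~/.yt/config under [yt]):

    import IPython.lib
    pw = IPython.lib.passwd()   # prompts for a password, returns a hash
    print "notebook_password = %s" % pw
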
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/utilities/grid_data_format/tests/test_writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -0,0 +1,66 @@
+"""
+Testsuite for writing yt data to GDF
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: Torun Center for Astronomy, NCU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import tempfile
+import shutil
+import os
+import h5py as h5
+from yt.testing import \
+    fake_random_pf, assert_equal
+from yt.utilities.grid_data_format.writer import \
+    write_to_gdf
+from yt.frontends.gdf.data_structures import \
+    GDFStaticOutput
+from yt.mods import \
+    load
+
+TEST_AUTHOR = "yt test runner"
+TEST_COMMENT = "Testing write_to_gdf"
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_write_gdf():
+    """Main test suite for write_gdf"""
+    tmpdir = tempfile.mkdtemp()
+    tmpfile = os.path.join(tmpdir, 'test_gdf.h5')
+
+    test_pf = fake_random_pf(64)
+    write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
+                 data_comment=TEST_COMMENT)
+    del test_pf
+
+    assert isinstance(load(tmpfile), GDFStaticOutput)
+
+    h5f = h5.File(tmpfile, 'r')
+    gdf = h5f['gridded_data_format'].attrs
+    assert_equal(gdf['data_author'], TEST_AUTHOR)
+    assert_equal(gdf['data_comment'], TEST_COMMENT)
+    h5f.close()
+
+    shutil.rmtree(tmpdir)

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -115,9 +115,11 @@
     cdef int merged
     cdef int num_cells
     cdef QTN_combine *combine
+    cdef np.float64_t bounds[4]
+    cdef np.float64_t dds[2]
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
-                  int nvals, style = "integrate"):
+                  int nvals, bounds, style = "integrate"):
         if style == "integrate":
             self.combine = QTN_add_value
         elif style == "mip":
@@ -133,9 +135,13 @@
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
         for i in range(nvals): vals[i] = 0.0
+        for i in range(4):
+            self.bounds[i] = bounds[i]
 
         self.top_grid_dims[0] = top_grid_dims[0]
         self.top_grid_dims[1] = top_grid_dims[1]
+        self.dds[0] = (self.bounds[1] - self.bounds[0])/self.top_grid_dims[0]
+        self.dds[1] = (self.bounds[3] - self.bounds[2])/self.top_grid_dims[1]
 
         # This wouldn't be necessary if we did bitshifting...
         for i in range(80):
@@ -404,6 +410,58 @@
             wtoadd -= node.weight_val
         return added
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_image(self, np.ndarray[np.float64_t, ndim=2] buffer, _bounds,
+                   int val_index = 0):
+        cdef np.float64_t dds[2], pos[2]
+        cdef int nn[2], i, j
+        cdef np.float64_t bounds[4]
+        for i in range(4):
+            bounds[i] = _bounds[i]
+        for i in range(2):
+            nn[i] = buffer.shape[i]
+            dds[i] = (bounds[i*2 + 1] - bounds[i*2])/nn[i]
+        cdef QuadTreeNode *node
+        pos[0] = bounds[0]
+        for i in range(nn[0]):
+            pos[1] = bounds[2]
+            for j in range(nn[1]):
+                # We start at level zero.  In the future we could optimize by
+                # retaining oct information from previous cells.
+                node = self.find_node_at_pos(pos)
+                buffer[i,j] = node.val[val_index]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef QuadTreeNode *find_node_at_pos(self, np.float64_t pos[2]):
+        cdef np.int64_t ind[2]
+        cdef np.float64_t cp[2]
+        cdef np.float64_t dds[2]
+        cdef QuadTreeNode *cur
+        for i in range(2):
+            ind[i] = <np.int64_t> (pos[i]/self.dds[i])
+            cp[i] = (ind[i] + 0.5) * self.dds[i]
+            dds[i] = self.dds[i]
+        cur = self.root_nodes[ind[0]][ind[1]]
+        while cur.children[0][0] != NULL:
+            for i in range(2):
+                # Note that below offset by half a dx for center, after
+                # updating to the next level
+                dds[i] = dds[i] / 2.0
+                if cp[i] < pos[i]:
+                    ind[i] = 0
+                    cp[i] -= dds[i] / 2.0 
+                else:
+                    ind[i] = 1
+                    cp[i] += dds[i] / 2.0
+            cur = cur.children[ind[0]][ind[1]]
+        return cur
+
     def __dealloc__(self):
         cdef int i, j
         for i in range(self.top_grid_dims[0]):

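A schematic construction reflecting the new signature; the grid shape,
bounds, and buffer size below are illustrative only:

    import numpy as np
    from yt.utilities.lib.QuadTree import QuadTree

    # bounds are (x0, x1, y0, y1), matching the dds computation above.
    tree = QuadTree(np.array([8, 8], dtype="int64"), 1,
                    (0.0, 1.0, 0.0, 1.0), style="integrate")
    buf = np.zeros((64, 64), dtype="float64")
    tree.fill_image(buf, (0.0, 1.0, 0.0, 1.0))  # sample node values onto buf
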
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -799,7 +799,7 @@
                 if target < size:
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
                     buf = self.recv_quadtree(target, tgd, args)
-                    qto = QuadTree(tgd, args[2])
+                    qto = QuadTree(tgd, args[2], qt.bounds)
                     qto.frombuffer(buf[0], buf[1], buf[2], merge_style)
                     merge_quadtrees(qt, qto, style = merge_style)
                     del qto
@@ -819,7 +819,7 @@
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
         self.refined = buf[0]
         if rank != 0:
-            qt = QuadTree(tgd, args[2])
+            qt = QuadTree(tgd, args[2], qt.bounds)
             qt.frombuffer(buf[0], buf[1], buf[2], merge_style)
         return qt
 

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,26 +1,31 @@
 #
 # Physical Constants and Units Conversion Factors
 #
+# Values for these constants are drawn from IAU and IUPAC data 
+# unless otherwise noted:
+# http://maia.usno.navy.mil/NSFA/IAU2009_consts.html
+# http://goldbook.iupac.org/list_goldbook_phys_constants_defs.html
 
 # Masses
-mass_hydrogen_cgs = 1.67e-24  # g
-mass_electron_cgs = 9.11e-28  # g
-amu_cgs           = 1.66053886e-24  # g
-mass_sun_cgs = 1.9891e33  # g
+mass_hydrogen_cgs = 1.674534e-24  # g
+mass_electron_cgs = 9.1093898e-28  # g
+amu_cgs           = 1.6605402e-24  # g
+mass_sun_cgs = 1.98841586e33  # g
 # Velocities
 speed_of_light_cgs = 2.99792458e10  # cm/s, exact
 
 # Cross Sections
-cross_section_thompson_cgs = 6.65e-25  # cm^2
+# 8*pi/3 (alpha*hbar*c/(2*pi))**2
+cross_section_thompson_cgs = 6.65245854533e-25  # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-10  # esu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.8032056e-10  # esu = 1.602176487e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
-stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
+stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
@@ -32,16 +37,16 @@
 mpc_per_mpc   = 1e0
 mpc_per_kpc   = 1e-3
 mpc_per_pc    = 1e-6
-mpc_per_au    = 4.847e-12
-mpc_per_rsun  = 2.253e-14
-mpc_per_miles = 5.216e-20
-mpc_per_cm    = 3.24e-25
+mpc_per_au    = 4.84813682e-12
+mpc_per_rsun  = 2.253962e-14
+mpc_per_miles = 5.21552871e-20
+mpc_per_cm    = 3.24077929e-25
 km_per_pc     = 1.3806504e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
-pc_per_cm     = 3.24e-19
+pc_per_cm     = 3.24077929e-19
 
-m_per_fpc     = 0.0324077649
+m_per_fpc     = 0.0324077929
 
 kpc_per_mpc   = 1.0 / mpc_per_kpc
 pc_per_mpc    = 1.0 / mpc_per_pc

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/visualization/base_plot_types.py
--- /dev/null
+++ b/yt/visualization/base_plot_types.py
@@ -0,0 +1,84 @@
+"""
+This is a place for base classes of the various plot types.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2012 Nathan Goldbaum.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import matplotlib
+from ._mpl_imports import \
+    FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+from yt.funcs import \
+    get_image_suffix, mylog
+
+
+class PlotMPL(object):
+    """A base class for all yt plots made using matplotlib.
+
+    """
+    def __init__(self, fsize, axrect):
+        """Initialize PlotMPL class"""
+        self._plot_valid = True
+        self.figure = matplotlib.figure.Figure(figsize=fsize,
+                                               frameon=True)
+        self.axes = self.figure.add_axes(axrect)
+
+    def save(self, name, mpl_kwargs, canvas=None):
+        """Choose backend and save image to disk"""
+        suffix = get_image_suffix(name)
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+
+        mylog.info("Saving plot %s", name)
+
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
+        else:
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
+
+        canvas.print_figure(name, **mpl_kwargs)
+        return name
+
+
+class ImagePlotMPL(PlotMPL):
+    """A base class for yt plots made using imshow
+
+    """
+    def __init__(self, fsize, axrect, caxrect, zlim):
+        """Initialize ImagePlotMPL class object"""
+        PlotMPL.__init__(self, fsize, axrect)
+        self.zmin, self.zmax = zlim
+        self.cax = self.figure.add_axes(caxrect)
+
+    def _init_image(self, data, cbnorm, cmap, extent, aspect=None):
+        """Store output of imshow in image variable"""
+        if (cbnorm == 'log10'):
+            norm = matplotlib.colors.LogNorm()
+        elif (cbnorm == 'linear'):
+            norm = matplotlib.colors.Normalize()
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect,
+                                      vmax=self.zmax, cmap=cmap)

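A minimal sketch of the save path; the figure size and axes rectangle
below are illustrative, not yt defaults:

    from yt.visualization.base_plot_types import PlotMPL

    plot = PlotMPL(fsize=(8.0, 8.0), axrect=(0.1, 0.1, 0.8, 0.8))
    plot.axes.plot([0.0, 1.0], [0.0, 1.0])
    plot.save("line", mpl_kwargs={})   # no suffix: '.png' appended, Agg canvas
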
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -159,14 +159,27 @@
         info['projected_units'] = \
                 self.data_source.pf.field_info[item].get_projected_units()
         info['center'] = self.data_source.center
+        
         try:
             info['coord'] = self.data_source.coord
         except AttributeError:
             pass
+        
         try:
             info['weight_field'] = self.data_source.weight_field
         except AttributeError:
             pass
+        
+        info['label'] = self.data_source.pf.field_info[item].display_name
+        if info['label'] is None:
+            info['label'] = r'$\rm{'+item+r'}$'
+        elif info['label'].find('$') == -1:
+            info['label'] = r'$\rm{'+info['label']+r'}$'
+        if info['units'] is None or info['units'] == '':
+            pass
+        else:
+            info['label'] += r'$\/\/('+info['units']+r')$'
+        
         return info
 
     def convert_to_pixel(self, coords):

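The block added to _get_info centralizes colorbar labeling that plot_window.py previously rebuilt per plot: a bare field name is wrapped in mathtext, a display_name that already contains '$' is trusted as-is, and non-empty units are appended in parentheses. The same rules as a standalone helper (a sketch; the real code pulls display_name and units out of pf.field_info):

    def make_label(item, display_name, units):
        # Wrap plain names in \rm{}, keep ready-made mathtext untouched.
        if display_name is None:
            label = r'$\rm{' + item + r'}$'
        elif display_name.find('$') == -1:
            label = r'$\rm{' + display_name + r'}$'
        else:
            label = display_name
        if units:  # covers both None and ''
            label += r'$\/\/(' + units + r')$'
        return label

    print make_label('Density', None, r'\rm{g}\/\rm{cm}^{-3}')
    # -> $\rm{Density}$$\/\/(\rm{g}\/\rm{cm}^{-3})$
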
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -658,24 +658,25 @@
 
         xf = axis_names[px_index]
         yf = axis_names[py_index]
+        dxf = "d%s" % xf
+        dyf = "d%s" % yf
 
         DomainRight = plot.data.pf.domain_right_edge
         DomainLeft = plot.data.pf.domain_left_edge
         DomainWidth = DomainRight - DomainLeft
-        
+
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
-
             xf_copy = clump[xf].copy()
             yf_copy = clump[yf].copy()
-            
-            temp = _MPL.Pixelize(xf_copy, yf_copy, 
-                                 clump['dx']/2.0,
-                                 clump['dy']/2.0,
-                                 clump['dx']*0.0+i+1, # inits inside Pixelize
+
+            temp = _MPL.Pixelize(xf_copy, yf_copy,
+                                 clump[dxf]/2.0,
+                                 clump[dyf]/2.0,
+                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)

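Besides swapping the hard-coded 'dx'/'dy' for the axis-appropriate widths (dxf/dyf), the callback composites the per-clump rasters with a running element-wise maximum, so a pixel always shows the highest contour id that touched it. A toy version of just the compositing step (the arrays stand in for the _MPL.Pixelize output):

    import numpy as np

    nx = ny = 64
    buff = np.zeros((nx, ny), dtype='float64')
    for i in range(3):
        # Stand-in raster: (i + 1) inside the clump footprint, 0 elsewhere.
        temp = np.zeros((nx, ny), dtype='float64')
        temp[10 * i:10 * i + 20, 10 * i:10 * i + 20] = i + 1
        buff = np.maximum(temp, buff)  # later clumps win where they overlap
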
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -25,29 +25,18 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 import base64
-import matplotlib.figure
-from matplotlib.mathtext import MathTextParser
-from distutils import version
+import numpy as np
 import matplotlib
-
-# Some magic for dealing with pyparsing being included or not
-# included in matplotlib (not in gentoo, yes in everything else)
-# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
-try:
-    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
-        from matplotlib.pyparsing import ParseFatalException
-    else:
-        from matplotlib.pyparsing_py2 import ParseFatalException
-except ImportError:
-    from pyparsing import ParseFatalException
-
 import cStringIO
 import types
 import __builtin__
+
+from matplotlib.mathtext import MathTextParser
+from distutils import version
 from functools import wraps
 
-import numpy as np
-from ._mpl_imports import *
+from ._mpl_imports import \
+    FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
 from .color_maps import yt_colormaps, is_colormap
 from .image_writer import \
     write_image, apply_colormap
@@ -58,10 +47,13 @@
 from .plot_modifications import get_smallest_appropriate_unit, \
     callback_registry
 from .tick_locators import LogLocator, LinearLocator
+from .base_plot_types import ImagePlotMPL
+
 from yt.utilities.delaunay.triangulate import Triangulation as triang
 from yt.config import ytcfg
-
-from yt.funcs import *
+from yt.funcs import \
+    mylog, defaultdict, iterable, ensure_list, \
+    fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
     x_dict, x_names, \
@@ -75,6 +67,17 @@
 from yt.data_objects.time_series import \
     TimeSeriesData
 
+# Some magic for dealing with pyparsing being included or not
+# included in matplotlib (not in gentoo, yes in everything else)
+# Also accounting for the fact that in 1.2.0, pyparsing got renamed.
+try:
+    if version.LooseVersion(matplotlib.__version__) < version.LooseVersion("1.2.0"):
+        from matplotlib.pyparsing import ParseFatalException
+    else:
+        from matplotlib.pyparsing_py2 import ParseFatalException
+except ImportError:
+    from pyparsing import ParseFatalException
+
 def invalidate_data(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
@@ -763,7 +766,6 @@
             fields = self._frb.keys()
         self._colorbar_valid = True
         for f in self.fields:
-            md = self.get_metadata(f, strip_mathml = False, return_string = False)
             axis_index = self.data_source.axis
 
             if self.origin == 'center-window':
@@ -781,7 +783,11 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            (unit_x, unit_y) = md['axes_unit_names']
+            if self._axes_unit_names is None:
+                unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
+                (unit_x, unit_y) = (unit, unit)
+            else:
+                (unit_x, unit_y) = self._axes_unit_names
 
             extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
             extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
@@ -807,8 +813,11 @@
             # Correct the aspect ratio in case unit_x and unit_y are different
             aspect = self.pf[unit_x]/self.pf[unit_y]
             
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
-                                          self._colormaps[f], size, zlim)
+            image = self._frb[f]
+
+            self.plots[f] = WindowPlotMPL(image, self._field_transform[f].name, 
+                                          self._colormaps[f], extent, aspect, 
+                                          zlim, size)
 
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
@@ -830,29 +839,15 @@
 
             self.plots[f].axes.tick_params(labelsize=self.fontsize)
 
-            field_name = self.data_source.pf.field_info[f].display_name
+            colorbar_label = image.info['label']
 
-            if field_name is None:
-                field_name = r'$\rm{'+f+r'}$'
-            elif field_name.find('$') == -1:
-                field_name = r'$\rm{'+field_name+r'}$'
-            
             parser = MathTextParser('Agg')
             try:
-                parser.parse(field_name)
+                parser.parse(colorbar_label)
             except ParseFatalException, err:
-                raise YTCannotParseFieldDisplayName(f,field_name,str(err))
-
-            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
-                label = field_name
-            else:
-                try:
-                    parser.parse(r'$'+md['colorbar_unit']+r'$')
-                except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
-                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
-
-            self.plots[f].cb.set_label(label,fontsize=self.fontsize)
+                raise YTCannotParseUnitDisplayName(f, colorbar_label, str(err))
+                
+            self.plots[f].cb.set_label(colorbar_label, fontsize=self.fontsize)
 
             self.plots[f].cb.ax.tick_params(labelsize=self.fontsize)
 
@@ -1468,51 +1463,19 @@
         else:
             self._field_transform[field] = linear_transform
 
-class PlotMPL(object):
-    """A base class for all yt plots made using matplotlib.
-
-    """
-    datalabel = None
-    figure = None
-    def __init__(self, field, size):
-        self._plot_valid = True
+class WindowPlotMPL(ImagePlotMPL):
+    def __init__(self, data, cbname, cmap, extent, aspect, zlim, size):
         fsize, axrect, caxrect = self._get_best_layout(size)
-        
         if np.any(np.array(axrect) < 0):
-            self.figure = matplotlib.figure.Figure(figsize = size, 
-                                                   frameon = True)
-            self.axes = self.figure.add_axes((.07,.10,.8,.8))
-            self.cax = self.figure.add_axes((.87,.10,.04,.8))
             mylog.warning('The axis ratio of the requested plot is very narrow.  '
                           'There is a good chance the plot will not look very good, '
                           'consider making the plot manually using FixedResolutionBuffer '
                           'and matplotlib.')
-        else:
-            self.figure = matplotlib.figure.Figure(figsize = fsize, 
-                                                   frameon = True)
-            self.axes = self.figure.add_axes(axrect)
-            self.cax = self.figure.add_axes(caxrect)
-            
-    def save(self, name, mpl_kwargs, canvas = None):
-        suffix = get_image_suffix(name)
-        
-        if suffix == '':
-            suffix = '.png'
-            name = "%s%s" % (name, suffix)
-        mylog.info("Saving plot %s", name)
-        if suffix == ".png":
-            canvas = FigureCanvasAgg(self.figure)
-        elif suffix == ".pdf":
-            canvas = FigureCanvasPdf(self.figure)
-        elif suffix in (".eps", ".ps"):
-            canvas = FigureCanvasPS(self.figure)
-        else:
-            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-            canvas = FigureCanvasAgg(self.figure)
-
-
-        canvas.print_figure(name,**mpl_kwargs)
-        return name
+            axrect  = (0.07, 0.10, 0.80, 0.80)
+            caxrect = (0.87, 0.10, 0.04, 0.80)
+        ImagePlotMPL.__init__(self, fsize, axrect, caxrect, zlim)
+        self._init_image(data, cbname, cmap, extent, aspect)
+        self.image.axes.ticklabel_format(scilimits=(-2,3))
 
     def _get_best_layout(self, size):
         aspect = 1.0*size[0]/size[1]
@@ -1551,26 +1514,3 @@
         axrect = (text_buffx, text_bottomy, xfrac, yfrac )
         caxrect = (text_buffx+xfrac, text_bottomy, cbar_frac/4., yfrac )
         return newsize, axrect, caxrect
-
-    def _repr_png_(self):
-        canvas = FigureCanvasAgg(self.figure)
-        f = cStringIO.StringIO()
-        canvas.print_figure(f)
-        f.seek(0)
-        return f.read()
-
-class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
-        self.zmin, self.zmax = zlim
-        PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, aspect, field_transform, cmap)
-
-    def __init_image(self, data, extent, aspect, field_transform, cmap):
-        if (field_transform.name == 'log10'):
-            norm = matplotlib.colors.LogNorm()
-        elif (field_transform.name == 'linear'):
-            norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent=extent,
-                                      norm=norm, vmin=self.zmin, aspect=aspect, 
-                                      vmax=self.zmax, cmap=cmap)
-        self.image.axes.ticklabel_format(scilimits=(-2,3))

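With the get_metadata() call gone, the axes-unit fallback is now computed inline: if no unit was set explicitly, get_smallest_appropriate_unit() derives one from the current plot width. A hypothetical standalone chooser in the same spirit (the unit table and the >= 1 threshold are assumptions, not yt's exact rules):

    def pick_axes_unit(width_in_cm):
        # Largest unit in which the plot width is still at least 1.
        units_in_cm = [('mpc', 3.0857e24), ('kpc', 3.0857e21),
                       ('pc', 3.0857e18), ('au', 1.496e13),
                       ('km', 1.0e5), ('cm', 1.0)]
        for name, size in units_in_cm:
            if width_in_cm / size >= 1.0:
                return name
        return 'cm'

    print pick_axes_unit(4.6e22)  # -> 'kpc'
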
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r bea5473873e824088584621372b1f3a6a70dbdc0 yt/visualization/tests/test_plotwindow.py
--- a/yt/visualization/tests/test_plotwindow.py
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -1,28 +1,97 @@
-from yt.testing import *
-from yt.mods import SlicePlot, ProjectionPlot, \
-    OffAxisSlicePlot, OffAxisProjectionPlot
+"""
+Testsuite for PlotWindow class
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Nathan Goldbaum.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import os
+import sys
+import tempfile
+import shutil
+from yt.testing import \
+    fake_random_pf
+from yt.mods import \
+    SlicePlot, ProjectionPlot, OffAxisSlicePlot, OffAxisProjectionPlot
+
+
+EXT_TO_TYPE = {
+    '.ps': 'PostScript document text conforming DSC level 3.0',
+    '.eps': 'PostScript document text conforming DSC level 3.0, type EPS',
+    '.pdf': 'PDF document, version 1.4',
+    '.png': 'PNG image data, 1070 x 1000, 8-bit/color RGBA, non-interlaced'
+}
+
 
 def setup():
+    """Test specific setup."""
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
 
-def teardown_func(fns):
-    for fn in fns:
-        os.remove(fn)
+
+def assert_fname(fname):
+    """Function that checks file type using libmagic"""
+    if fname is None:
+        return
+
+    try:
+        import magic
+    except ImportError:
+        # OS X doesn't come with libmagic
+        pass
+
+    if 'magic' in sys.modules:
+        ext = os.path.splitext(fname)[1]
+        mds = magic.open(magic.MAGIC_NONE)
+        mds.load()
+        magic_text = mds.file(fname)
+        mds.close()
+        assert magic_text == EXT_TO_TYPE[ext]
+
 
 def test_plotwindow():
-    pf = fake_random_pf(64)
-    fns = []
-    for dim in [0,1,2]:
-        slc = SlicePlot(pf, dim, 'Density')
-        fns.append(slc.save()[0])
-        prj = ProjectionPlot(pf, dim, 'Density')
-        fns.append(prj.save()[0])
-    normal = [1,1,1]
-    oaslc = OffAxisSlicePlot(pf, normal, 'Density')
-    fns.append(oaslc.save()[0])
-    oaprj = OffAxisProjectionPlot(pf, normal, 'Density')
-    fns.append(oaprj.save()[0])
-    teardown_func(fns)
-    
+    """Main test suite for PlotWindow."""
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    normal = [1, 1, 1]
+
+    test_pf = fake_random_pf(64)
+    test_flnms = [None, 'test.png', 'test.eps',
+                  'test.ps', 'test.pdf']
+    for fname in test_flnms:
+        for dim in [0, 1, 2]:
+            obj = SlicePlot(test_pf, dim, 'Density')
+            assert_fname(obj.save(fname)[0])
+
+            obj = ProjectionPlot(test_pf, dim, 'Density')
+            assert_fname(obj.save(fname)[0])
+
+        obj = OffAxisSlicePlot(test_pf, normal, 'Density')
+        assert_fname(obj.save(fname)[0])
+
+        obj = OffAxisProjectionPlot(test_pf, normal, 'Density')
+        assert_fname(obj.save(fname)[0])
+
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)

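The rewritten test does its I/O in a throwaway directory rather than the source tree; the mkdtemp/chdir/rmtree sequence is worth keeping as a reusable context manager (a sketch, not part of this changeset):

    import os
    import shutil
    import tempfile
    from contextlib import contextmanager

    @contextmanager
    def in_tmpdir():
        # chdir into a fresh temporary directory; restore and clean up after.
        tmpdir = tempfile.mkdtemp()
        curdir = os.getcwd()
        os.chdir(tmpdir)
        try:
            yield tmpdir
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)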

https://bitbucket.org/yt_analysis/yt-3.0/commits/b90af0249db1/
Changeset:   b90af0249db1
Branch:      yt
User:        jzuhone
Date:        2013-01-12 16:28:32
Summary:     Merging
Affected #:  5 files

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -399,6 +399,14 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -407,6 +415,13 @@
     exit 1
 fi
 
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 

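get_ytdata downloads a data file once and then validates it against the published .sha512 line with `${SHASUM} -c`. The equivalent check in pure Python, for a platform without a shasum binary (hashlib only; the function name is made up):

    import hashlib

    def verify_sha512(path, expected_hexdigest):
        # Stream the file through SHA-512 and compare hex digests.
        h = hashlib.sha512()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected_hexdigest
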
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -30,4 +30,8 @@
 
 from .spectral_frequency_integrator import \
     SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+    create_table_from_textfiles, \
+    EmissivityIntegrator, \
+    add_xray_emissivity_field, \
+    add_xray_luminosity_field, \
+    add_xray_photon_emissivity_field

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -4,9 +4,11 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittons at origins.colorado.edu>
+Affiliation: Michigan State University
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2007-2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,16 +26,20 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from exceptions import IOError
+import h5py
 import numpy as np
+import os
 
 from yt.funcs import *
 
 from yt.data_objects.field_info_container import add_field
+from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
+    BilinearFieldInterpolator
 
+xray_data_version = 1
+    
 class SpectralFrequencyIntegrator(object):
     def __init__(self, table, field_names,
                  bounds, ev_bounds):
@@ -80,8 +86,8 @@
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
-                        units=r"\rm{ergs}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
-                        projected_units=r"\rm{ergs}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
+                        units=r"\rm{ergs}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+                        projected_units=r"\rm{ergs}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
         return name
 
 def create_table_from_textfiles(pattern, rho_spec, e_spec, T_spec):
@@ -100,3 +106,304 @@
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
     return table
 
+class EnergyBoundsException(YTException):
+    def __init__(self, lower, upper):
+        self.lower = lower
+        self.upper = upper
+
+    def __str__(self):
+        return "Energy bounds are %e to %e keV." % \
+          (self.lower, self.upper)
+
+class ObsoleteDataException(YTException):
+    def __str__(self):
+        return "X-ray emissivity data is out of data.\nDownload the latest data from http://yt-project.org/data/xray_emissivity.h5 and move it to %s." % \
+          os.path.join(os.environ["YT_DEST"], "data", "xray_emissivity.h5")
+          
+class EmissivityIntegrator(object):
+    r"""Class for making X-ray emissivity fields with hdf5 data tables 
+    from Cloudy.
+    """
+    def __init__(self, filename=None):
+        r"""Initialize an EmissivityIntegrator object.
+
+        Keyword Parameters
+        ------------------
+        filename: string
+            Path to data file containing emissivity values.  If None,
+            a file called xray_emissivity.h5 is used.  This file contains 
+            emissivity tables for primordial elements and for metals at 
+            solar metallicity for the energy range 0.1 to 100 keV.
+            Default: None.
+            
+        """
+
+        default_filename = False
+        if filename is None:
+            filename = os.path.join(os.environ["YT_DEST"], 
+                                    "data", "xray_emissivity.h5")
+            default_filename = True
+
+        if not os.path.exists(filename):
+            raise IOError("File does not exist: %s." % filename)
+        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
+        in_file = h5py.File(filename, "r")
+        if "info" in in_file.attrs:
+            only_on_root(mylog.info, in_file.attrs["info"])
+        if default_filename and \
+          in_file.attrs["version"] < xray_data_version:
+            raise ObsoleteDataException()
+        else:
+            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
+                         in_file.attrs["version"])
+
+        for field in ["emissivity_primordial", "emissivity_metals",
+                      "log_nH", "log_T", "log_E"]:
+            setattr(self, field, in_file[field][:])
+        in_file.close()
+
+        E_diff = np.diff(self.log_E)
+        self.E_bins = \
+                  np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
+                                               [self.log_E[-1] - 0.5 * E_diff[-1],
+                                                self.log_E[-1] + 0.5 * E_diff[-1]]]))
+        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+
+    def _get_interpolator(self, data, e_min, e_max):
+        r"""Create an interpolator for total emissivity in a 
+        given energy range.
+
+        Parameters
+        ----------
+        e_min: float
+            the minimum energy in keV for the energy band.
+        e_max: float
+            the maximum energy in keV for the energy band.
+
+        """
+        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
+          (e_max - self.E_bins[-1]) / e_max > 1e-3:
+            # E_bins is already in keV (powers of 10 were applied above),
+            # so report the bounds directly.
+            raise EnergyBoundsException(self.E_bins[0],
+                                        self.E_bins[-1])
+        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
+        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
+        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
+
+        my_dnu = np.copy(self.dnu[e_is: e_ie])
+        # clip edge bins if the requested range is smaller
+        my_dnu[0] -= e_min - self.E_bins[e_is]
+        my_dnu[-1] -= self.E_bins[e_ie] - e_max
+
+        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
+        return BilinearFieldInterpolator(np.log10(interp_data),
+                                         [self.log_nH[0], self.log_nH[-1],
+                                          self.log_T[0],  self.log_T[-1]],
+                                         ["log_nH", "log_T"], truncate=True)
+
+def add_xray_emissivity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+
+    em_0 = my_si._get_interpolator(my_si.emissivity_primordial, e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator(my_si.emissivity_metals, e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{erg}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_luminosity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray luminosity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Luminosity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_luminosity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> sp = pf.h.sphere('max', (2., 'mpc'))
+    >>> print sp.quantities['TotalQuantity']('Xray_Luminosity_0.5_2keV')
+    
+    """
+
+    em_field = add_xray_emissivity_field(e_min, e_max, filename=filename,
+                                         with_metals=with_metals,
+                                         constant_metallicity=constant_metallicity)
+
+    def _luminosity_field(field, data):
+        return data[em_field] * data["CellVolume"]
+    field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_luminosity_field,
+              display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
+                                     with_metals=True,
+                                     constant_metallicity=None):
+    r"""Create an X-ray photon emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Photon_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are photons s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+
+    em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
+                                   e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
+                                       e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{photons}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{photons}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name

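EmissivityIntegrator converts the table's log-spaced energy centers (log_E) into bin edges by stepping half an interval below and above each center, then turns the keV bin widths into frequency widths with dnu = dE / h, where 2.41799e17 Hz/keV is 1/h. Worked on a tiny made-up table:

    import numpy as np

    log_E = np.array([-1.0, 0.0, 1.0])   # bin centers, log10(keV)
    E_diff = np.diff(log_E)              # [1.0, 1.0]
    edges = np.concatenate([log_E[:-1] - 0.5 * E_diff,
                            [log_E[-1] - 0.5 * E_diff[-1],
                             log_E[-1] + 0.5 * E_diff[-1]]])
    E_bins = np.power(10, edges)         # keV: [0.0316, 0.316, 3.16, 31.6]
    dnu = 2.41799e17 * np.diff(E_bins)   # bin widths in Hz
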
diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -258,9 +258,12 @@
         ...     SlicePlot(pf, "x", "Density").save()
 
         """
+        
         if isinstance(filenames, types.StringTypes):
             filenames = glob.glob(filenames)
             filenames.sort()
+        if len(filenames) == 0:
+            raise YTOutputNotIdentified(filenames, {})
         obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 

diff -r 18607c558f38ff5486530de13fe216c07426cc30 -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -233,7 +233,7 @@
         mu = data.get_field_parameter("mu")
     else:
         mu = 0.6
-    return kboltz*data["Density"]*data["Temperature"]/(mu*mh) / (data.pf["Gamma"] - 1.0)
+    return kboltz*data["NumberDensity"]*data["Temperature"] / (data.pf["Gamma"] - 1.0)
     
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{g}")
@@ -299,4 +299,34 @@
 add_field("DivB", function=_DivB, take_log=False,
           units=r"\rm{Gauss}\/\rm{cm}^{-1}")
 
+def _ConvertNumberDensity(data):
+    return 1.0/mh
 
+def _NumberDensity(field, data):
+    field_data = np.zeros(data["Density"].shape,
+                         dtype = data["Density"].dtype)
+    try:
+        mu = data.pf.parameters["eos_singleSpeciesA"]
+    except:
+        if data.has_field_parameter("mu"):
+            mu = data.get_field_parameter("mu")
+        else:
+            mu = 0.6
+    field_data += data["Density"] / mu
+    return field_data
+add_field("NumberDensity", units=r"\rm{cm}^{-3}",
+          function=_NumberDensity,
+          convert_function=_ConvertNumberDensity)
+
+def _H_NumberDensity(field, data):
+    field_data = np.zeros(data["Density"].shape,
+                          dtype=data["Density"].dtype)
+    if data.has_field_parameter("muh"):
+        muh = data.get_field_parameter("muh")
+    else:
+        muh = 0.75
+    field_data += data["Density"] / muh
+    return field_data
+add_field("H_NumberDensity", units=r"\rm{cm}^{-3}",
+          function=_H_NumberDensity,
+          convert_function=_ConvertNumberDensity)

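Both new FLASH fields use the same recipe: divide the mass density by a mean molecular weight (mu for all particles, muh for hydrogen) and let _ConvertNumberDensity supply the 1/mh so the result comes out in cm^-3. The arithmetic in isolation (mh and the weights are standard fiducial values; the densities are made up):

    import numpy as np

    mh = 1.67e-24                           # proton mass in g
    density = np.array([1.0e-24, 1.0e-22])  # g/cm^3
    mu, muh = 0.6, 0.75

    number_density = density / mu / mh      # total particles per cm^3
    h_number_density = density / muh / mh   # hydrogen per cm^3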

https://bitbucket.org/yt_analysis/yt-3.0/commits/4190004c06a0/
Changeset:   4190004c06a0
Branch:      yt
User:        jzuhone
Date:        2013-01-12 16:31:39
Summary:     Merging
Affected #:  37 files

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe .hgchurn
--- /dev/null
+++ b/.hgchurn
@@ -0,0 +1,11 @@
+stephenskory at yahoo.com = s at skory.us
+"Stephen Skory stephenskory at yahoo.com" = s at skory.us
+yuan at astro.columbia.edu = bear0980 at gmail.com
+juxtaposicion at gmail.com = cemoody at ucsc.edu
+chummels at gmail.com = chummels at astro.columbia.edu
+jwise at astro.princeton.edu = jwise at physics.gatech.edu
+atmyers = atmyers at berkeley.edu
+sam.skillman at gmail.com = samskillman at gmail.com
+casey at thestarkeffect.com = caseywstark at gmail.com
+chiffre = chiffre at posteo.de
+Christian Karch = chiffre at posteo.de

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,6 +4,7 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+yt_updater.log
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -26,6 +27,7 @@
 yt/utilities/lib/RayIntegrators.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
+yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 syntax: glob

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe distribute_setup.py
--- a/distribute_setup.py
+++ b/distribute_setup.py
@@ -14,11 +14,14 @@
 This file can also be run as a script to install or upgrade setuptools.
 """
 import os
+import shutil
 import sys
 import time
 import fnmatch
 import tempfile
 import tarfile
+import optparse
+
 from distutils import log
 
 try:
@@ -46,7 +49,7 @@
             args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.21"
+DEFAULT_VERSION = "0.6.32"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"
 
@@ -63,7 +66,7 @@
 """ % SETUPTOOLS_FAKED_VERSION
 
 
-def _install(tarball):
+def _install(tarball, install_args=()):
     # extracting the tarball
     tmpdir = tempfile.mkdtemp()
     log.warn('Extracting in %s', tmpdir)
@@ -81,11 +84,14 @@
 
         # installing
         log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
+        if not _python_cmd('setup.py', 'install', *install_args):
             log.warn('Something went wrong during the installation.')
             log.warn('See the error message above.')
+            # exitcode will be 2
+            return 2
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
 
 
 def _build_egg(egg, tarball, to_dir):
@@ -110,6 +116,7 @@
 
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
     # returning the result
     log.warn(egg)
     if not os.path.exists(egg):
@@ -144,7 +151,7 @@
         except ImportError:
             return _do_download(version, download_base, to_dir, download_delay)
         try:
-            pkg_resources.require("distribute>="+version)
+            pkg_resources.require("distribute>=" + version)
             return
         except pkg_resources.VersionConflict:
             e = sys.exc_info()[1]
@@ -167,6 +174,7 @@
         if not no_fake:
             _create_fake_setuptools_pkg_info(to_dir)
 
+
 def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                         to_dir=os.curdir, delay=15):
     """Download distribute from a specified location and return its filename
@@ -203,6 +211,7 @@
                 dst.close()
     return os.path.realpath(saveto)
 
+
 def _no_sandbox(function):
     def __no_sandbox(*args, **kw):
         try:
@@ -227,6 +236,7 @@
 
     return __no_sandbox
 
+
 def _patch_file(path, content):
     """Will backup the file then patch it"""
     existing_content = open(path).read()
@@ -245,15 +255,18 @@
 
 _patch_file = _no_sandbox(_patch_file)
 
+
 def _same_content(path, content):
     return open(path).read() == content
 
+
 def _rename_path(path):
     new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
+    log.warn('Renaming %s to %s', path, new_name)
     os.rename(path, new_name)
     return new_name
 
+
 def _remove_flat_installation(placeholder):
     if not os.path.isdir(placeholder):
         log.warn('Unknown installation at %s', placeholder)
@@ -267,7 +280,7 @@
         log.warn('Could not locate setuptools*.egg-info')
         return
 
-    log.warn('Removing elements out of the way...')
+    log.warn('Moving elements out of the way...')
     pkg_info = os.path.join(placeholder, file)
     if os.path.isdir(pkg_info):
         patched = _patch_egg_dir(pkg_info)
@@ -289,11 +302,13 @@
 
 _remove_flat_installation = _no_sandbox(_remove_flat_installation)
 
+
 def _after_install(dist):
     log.warn('After install bootstrap.')
     placeholder = dist.get_command_obj('install').install_purelib
     _create_fake_setuptools_pkg_info(placeholder)
 
+
 def _create_fake_setuptools_pkg_info(placeholder):
     if not placeholder or not os.path.exists(placeholder):
         log.warn('Could not find the install location')
@@ -307,7 +322,11 @@
         return
 
     log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
+    try:
+        f = open(pkg_info, 'w')
+    except EnvironmentError:
+        log.warn("Don't have permissions to write %s, skipping", pkg_info)
+        return
     try:
         f.write(SETUPTOOLS_PKG_INFO)
     finally:
@@ -321,7 +340,10 @@
     finally:
         f.close()
 
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+    _create_fake_setuptools_pkg_info
+)
+
 
 def _patch_egg_dir(path):
     # let's check if it's already patched
@@ -343,6 +365,7 @@
 
 _patch_egg_dir = _no_sandbox(_patch_egg_dir)
 
+
 def _before_install():
     log.warn('Before install bootstrap.')
     _fake_setuptools()
@@ -351,7 +374,7 @@
 def _under_prefix(location):
     if 'install' not in sys.argv:
         return True
-    args = sys.argv[sys.argv.index('install')+1:]
+    args = sys.argv[sys.argv.index('install') + 1:]
     for index, arg in enumerate(args):
         for option in ('--root', '--prefix'):
             if arg.startswith('%s=' % option):
@@ -359,7 +382,7 @@
                 return location.startswith(top_dir)
             elif arg == option:
                 if len(args) > index:
-                    top_dir = args[index+1]
+                    top_dir = args[index + 1]
                     return location.startswith(top_dir)
         if arg == '--user' and USER_SITE is not None:
             return location.startswith(USER_SITE)
@@ -376,11 +399,14 @@
         return
     ws = pkg_resources.working_set
     try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools', replacement=False)
+            )
     except TypeError:
         # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools')
+        )
 
     if setuptools_dist is None:
         log.warn('No setuptools distribution found')
@@ -414,7 +440,7 @@
         res = _patch_egg_dir(setuptools_location)
         if not res:
             return
-    log.warn('Patched done.')
+    log.warn('Patching complete.')
     _relaunch()
 
 
@@ -422,7 +448,9 @@
     log.warn('Relaunching...')
     # we have to relaunch the process
     # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
+    _cmd2 = ['-c', 'install', '--record']
+    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
         sys.argv[0] = 'setup.py'
     args = [sys.executable] + sys.argv
     sys.exit(subprocess.call(args))
@@ -448,7 +476,7 @@
             # Extract directories with a safe mode.
             directories.append(tarinfo)
             tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
+            tarinfo.mode = 448  # decimal for oct 0700
         self.extract(tarinfo, path)
 
     # Reverse sort directories.
@@ -475,11 +503,39 @@
                 self._dbg(1, "tarfile: %s" % e)
 
 
-def main(argv, version=DEFAULT_VERSION):
+def _build_install_args(options):
+    """
+    Build the arguments to 'python setup.py install' on the distribute package
+    """
+    install_args = []
+    if options.user_install:
+        if sys.version_info < (2, 6):
+            log.warn("--user requires Python 2.6 or later")
+            raise SystemExit(1)
+        install_args.append('--user')
+    return install_args
+
+def _parse_args():
+    """
+    Parse the command line for options
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        '--user', dest='user_install', action='store_true', default=False,
+        help='install in user site package (requires Python 2.6 or later)')
+    parser.add_option(
+        '--download-base', dest='download_base', metavar="URL",
+        default=DEFAULT_URL,
+        help='alternative URL from where to download the distribute package')
+    options, args = parser.parse_args()
+    # positional arguments are ignored
+    return options
+
+def main(version=DEFAULT_VERSION):
     """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
+    options = _parse_args()
+    tarball = download_setuptools(download_base=options.download_base)
+    return _install(tarball, _build_install_args(options))
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    sys.exit(main())

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -721,7 +721,7 @@
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -269,9 +269,9 @@
     field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
     add_field(field_name, function=_emissivity_field,
               projection_conversion="cm",
-              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
-              units=r"\rm{erg}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
-              projected_units=r"\rm{erg}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+              display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+              units=r"\rm{erg}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
+              projected_units=r"\rm{erg}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
     return field_name
 
 def add_xray_luminosity_field(e_min, e_max, filename=None,
@@ -327,8 +327,8 @@
         return data[em_field] * data["CellVolume"]
     field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
     add_field(field_name, function=_luminosity_field,
-              display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
-              units=r"\rm{erg}\ \rm{s}^{-1}")
+              display_name=r"\rm{L}_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+              units=r"\rm{erg}\/\rm{s}^{-1}")
     return field_name
 
 def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
@@ -403,7 +403,7 @@
     field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
     add_field(field_name, function=_emissivity_field,
               projection_conversion="cm",
-              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
-              units=r"\rm{photons}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
-              projected_units=r"\rm{photons}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+              display_name=r"\epsilon_{X}\/(%s-%s\/keV)" % (e_min, e_max),
+              units=r"\rm{photons}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
+              projected_units=r"\rm{photons}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
     return field_name

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -59,9 +59,10 @@
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     ipython_notebook = 'False',
+    notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold004',
+    gold_standard_filename = 'gold005',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1781,10 +1781,16 @@
             self._okay_to_serialize = True
 
     def _get_tree(self, nvals):
-        xd = self.pf.domain_dimensions[x_dict[self.axis]]
-        yd = self.pf.domain_dimensions[y_dict[self.axis]]
+        xax = x_dict[self.axis]
+        yax = y_dict[self.axis]
+        xd = self.pf.domain_dimensions[xax]
+        yd = self.pf.domain_dimensions[yax]
+        bounds = (self.pf.domain_left_edge[xax],
+                  self.pf.domain_right_edge[xax],
+                  self.pf.domain_left_edge[yax],
+                  self.pf.domain_right_edge[yax])
         return QuadTree(np.array([xd,yd], dtype='int64'), nvals,
-                        style = self.proj_style)
+                        bounds, style = self.proj_style)
 
     def _get_dls(self, grid, fields):
         # Place holder for a time when maybe we will not be doing just
@@ -2589,10 +2595,14 @@
                self.pf.field_info[field].particle_type and \
                self.pf.h.io._particle_reader and \
                not isinstance(self, AMRBooleanRegionBase):
-                self.particles.get_data(field)
-                if field not in self.field_data:
-                    if self._generate_field(field): continue
-                continue
+                try:
+                    self.particles.get_data(field)
+                    if field not in self.field_data:
+                        self._generate_field(field)
+                    continue
+                except KeyError:
+                    # This happens for fields like ParticleRadiuskpc
+                    pass
             if field not in self.hierarchy.field_list and not in_grids:
                 if self._generate_field(field):
                     continue # True means we already assigned it

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -204,6 +204,29 @@
 add_quantity("WeightedAverageQuantity", function=_WeightedAverageQuantity,
              combine_function=_combWeightedAverageQuantity, n_ret = 2)
 
+def _WeightedVariance(data, field, weight):
+    """
+    This function returns the variance of a field.
+
+    :param field: The target field
+    :param weight: The field to weight by
+
+    Returns the weighted variance and the weighted mean.
+    """
+    my_weight = data[weight].sum()
+    if my_weight == 0:
+        return 0.0, 0.0, 0.0
+    my_mean = (data[field] * data[weight]).sum() / my_weight
+    my_var2 = (data[weight] * (data[field] - my_mean)**2).sum() / my_weight
+    return my_weight, my_mean, my_var2
+def _combWeightedVariance(data, my_weight, my_mean, my_var2):
+    all_weight = my_weight.sum()
+    all_mean = (my_weight * my_mean).sum() / all_weight
+    return [np.sqrt((my_weight * (my_var2 + (my_mean - all_mean)**2)).sum() / 
+                    all_weight), all_mean]
+add_quantity("WeightedVariance", function=_WeightedVariance,
+             combine_function=_combWeightedVariance, n_ret=3)
+
 def _BulkVelocity(data):
     """
     This function returns the mass-weighted average velocity in the object.

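_WeightedVariance is shaped for parallel reduction: each chunk returns its total weight, weighted mean, and weighted variance, and _combWeightedVariance merges them with the law of total variance, i.e. the within-chunk variances plus the weighted spread of the chunk means. A plain-NumPy check of that algebra against a single-pass computation:

    import numpy as np

    values = np.random.random(1000)
    weights = np.random.random(1000)

    ws, ms, vs = [], [], []
    for idx in np.array_split(np.arange(1000), 4):   # four fake chunks
        w = weights[idx].sum()
        m = (values[idx] * weights[idx]).sum() / w
        ws.append(w)
        ms.append(m)
        vs.append((weights[idx] * (values[idx] - m)**2).sum() / w)
    ws, ms, vs = np.array(ws), np.array(ms), np.array(vs)

    all_weight = ws.sum()
    all_mean = (ws * ms).sum() / all_weight
    all_std = np.sqrt((ws * (vs + (ms - all_mean)**2)).sum() / all_weight)

    # Single-pass reference values agree to roundoff:
    ref_mean = (values * weights).sum() / weights.sum()
    ref_std = np.sqrt((weights * (values - ref_mean)**2).sum() / weights.sum())
    assert abs(all_std - ref_std) < 1e-10
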
diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -280,7 +280,7 @@
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
-    def get_field_parameter(self, param):
+    def get_field_parameter(self, param, default = None):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return np.random.random(3) * 1e-2

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -117,27 +117,32 @@
         """
         Returns the (objects, indices) of leaf grids containing a number of (x,y,z) points
         """
-        num_points = len(x)
+        x = ensure_numpy_array(x)
+        y = ensure_numpy_array(y)
+        z = ensure_numpy_array(z)
+        if not len(x) == len(y) == len(z):
+            raise AssertionError("Arrays of indices must be of the same size")
+
         grid_tree = self.get_grid_tree()
-        pts = MatchPointsToGrids(grid_tree,num_points,x,y,z)
-        ind = pts.find_points_in_tree() 
+        pts = MatchPointsToGrids(grid_tree, len(x), x, y, z)
+        ind = pts.find_points_in_tree()
         return self.grids[ind], ind
-    
+
     def find_field_value_at_point(self, fields, coord):
         r"""Find the value of fields at a point.
-        
+
         Returns the values [field1, field2,...] of the fields at the given
         (x,y,z) point. Returns a list of field values in the same order
         as the input *fields*.
-        
+
         Parameters
         ----------
         fields : string or list of strings
             The field(s) that will be returned.
-        
+
         coord : list or array of floats
             The location for which field values will be returned.
-        
+
         Examples
         --------
         >>> pf.h.find_field_value_at_point(['Density', 'Temperature'],

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/tests/test_derived_quantities.py
--- a/yt/data_objects/tests/test_derived_quantities.py
+++ b/yt/data_objects/tests/test_derived_quantities.py
@@ -22,3 +22,31 @@
         (mi, ma), = dd.quantities["Extrema"]("RadialVelocity")
         yield assert_equal, mi, np.nanmin(dd["RadialVelocity"])
         yield assert_equal, ma, np.nanmax(dd["RadialVelocity"])
+
+def test_average():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density",))
+        ad = pf.h.all_data()
+        
+        my_mean = ad.quantities["WeightedAverageQuantity"]("Density", "Ones")
+        yield assert_rel_equal, my_mean, ad["Density"].mean(), 12
+
+        my_mean = ad.quantities["WeightedAverageQuantity"]("Density", "CellMass")
+        a_mean = (ad["Density"] * ad["CellMass"]).sum() / ad["CellMass"].sum()
+        yield assert_rel_equal, my_mean, a_mean, 12
+
+def test_variance():
+    for nprocs in [1, 2, 4, 8]:
+        pf = fake_random_pf(16, nprocs = nprocs, fields = ("Density", ))
+        ad = pf.h.all_data()
+        
+        my_std, my_mean = ad.quantities["WeightedVariance"]("Density", "Ones")
+        yield assert_rel_equal, my_mean, ad["Density"].mean(), 12
+        yield assert_rel_equal, my_std, ad["Density"].std(), 12
+
+        my_std, my_mean = ad.quantities["WeightedVariance"]("Density", "CellMass")        
+        a_mean = (ad["Density"] * ad["CellMass"]).sum() / ad["CellMass"].sum()
+        yield assert_rel_equal, my_mean, a_mean, 12
+        a_std = np.sqrt((ad["CellMass"] * (ad["Density"] - a_mean)**2).sum() / 
+                        ad["CellMass"].sum())
+        yield assert_rel_equal, my_std, a_std, 12

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -346,10 +346,10 @@
 
         # Figure out the starting and stopping times and redshift.
         self._calculate_simulation_bounds()
-        self.print_key_parameters()
-
         # Get all possible datasets.
         self._get_all_outputs(find_outputs=find_outputs)
+        
+        self.print_key_parameters()
 
     def __repr__(self):
         return self.parameter_filename
@@ -377,3 +377,5 @@
                     continue
                 v = getattr(self, a)
                 mylog.info("Parameters: %-25s = %s", a, v)
+        mylog.info("Total datasets: %d." % len(self.all_outputs))
+

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -506,11 +506,11 @@
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:
-            return data['dx']*data['dy']*data['dx']*\
+            return data['dx'] * data['dy'] * data['dz'] * \
                 np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
-            return data['dx']*data['dy']*data['dx']
-    return data["dx"]*data["dy"]*data["dz"]
+            return data['dx'] * data['dy'] * data['dz']
+    return data["dx"] * data["dy"] * data["dz"]
 def _ConvertCellVolumeMpc(data):
     return data.convert("mpc")**3.0
 def _ConvertCellVolumeCGS(data):

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -76,13 +76,13 @@
                  units=r"")
 
 add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
-                 units=r"", display_name=r"$\rm{cell\ centered\ B_x}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_x}$")
 
 add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
-                 units=r"", display_name=r"$\rm{cell\ centered\ B_y}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_y}$")
 
 add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
-                 units=r"", display_name=r"$\rm{cell\ centered\ B_z}$")
+                 units=r"", display_name=r"$\rm{cell\/centered\/B_z}$")
 
 for f,v in log_translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=True)

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,7 @@
      pluto2enzoDict, \
      yt2plutoFieldsDict, \
      parameterDict \
-     
+
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -74,7 +74,7 @@
         """
         Return the integer starting index for each dimension at the current
         level.
-        
+
         """
         if self.start_index != None:
             return self.start_index
@@ -96,7 +96,7 @@
 class ChomboHierarchy(AMRHierarchy):
 
     grid = ChomboGrid
-    
+
     def __init__(self,pf,data_style='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
@@ -106,7 +106,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
-        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.directory = pf.fullpath
         self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
@@ -127,7 +127,7 @@
                 particle_position_z = float(line.split(' ')[3])
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py                                                                                                             
+                # copied from object_finding_mixin.py
                 mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
@@ -143,13 +143,10 @@
                     self.grid_particle_count[ind] += 1
                     self.grids[ind].NumberOfParticles += 1
 
-    def _initialize_data_storage(self):
-        pass
-
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
         self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
-    
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
@@ -159,10 +156,10 @@
         self.num_grids = 0
         for lev in self._levels:
             self.num_grids += self._fhandle[lev]['Processors'].len()
-        
+
     def _parse_hierarchy(self):
         f = self._fhandle # shortcut
-        
+
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
         levels = f.keys()[1:]
@@ -214,7 +211,7 @@
     _hierarchy_class = ChomboHierarchy
     _fieldinfo_fallback = ChomboFieldInfo
     _fieldinfo_known = KnownChomboFields
-    
+
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
         fileh = h5py.File(filename,'r')
@@ -223,7 +220,8 @@
         self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-        
+        fileh.close()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -277,6 +275,7 @@
             self.dimensionality = 3
             fileh = h5py.File(self.parameter_filename,'r')
             self.refine_by = fileh['/level_0'].attrs['ref_ratio']
+            fileh.close()
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -291,7 +290,7 @@
         lines = open(self.ini_filename).readlines()
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
-            try: 
+            try:
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
@@ -319,13 +318,14 @@
         RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                  
+
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
         L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        fileh.close()
         return R_index - L_index
- 
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -294,7 +294,7 @@
 for dim in range(1,4):
     f = KnownEnzoFields["RadAccel%d" % dim]
     f._convert_function = _convertRadiationAccel
-    f._units=r"\rm{cm}\ \rm{s}^{-2}"
+    f._units=r"\rm{cm}\/\rm{s}^{-2}"
     f.take_log=False
 def _RadiationAccelerationMagnitude(field, data):
     return ( data["RadAccel1"]**2 + data["RadAccel2"]**2 +
@@ -302,7 +302,7 @@
 add_field("RadiationAcceleration", 
           function=_RadiationAccelerationMagnitude,
           validators=ValidateDataField(["RadAccel1", "RadAccel2", "RadAccel3"]),
-          display_name="Radiation\ Acceleration", units=r"\rm{cm} \rm{s}^{-2}")
+          display_name="Radiation\/Acceleration", units=r"\rm{cm} \rm{s}^{-2}")
 
 # Now we override
 
@@ -318,18 +318,18 @@
           convert_function=_convertDensity,
           validators=[ValidateDataField("Dark_Matter_Density"),
                       ValidateSpatial(0)],
-          display_name = "Dark\ Matter\ Density",
+          display_name = "Dark\/Matter\/Density",
           not_in_all = True)
 
 def _Dark_Matter_Mass(field, data):
     return data['Dark_Matter_Density'] * data["CellVolume"]
 add_field("Dark_Matter_Mass", function=_Dark_Matter_Mass,
           validators=ValidateDataField("Dark_Matter_Density"),
-          display_name="Dark\ Matter\ Mass", units=r"\rm{g}")
+          display_name="Dark\/Matter\/Mass", units=r"\rm{g}")
 add_field("Dark_Matter_MassMsun", function=_Dark_Matter_Mass,
           convert_function=_convertCellMassMsun,
           validators=ValidateDataField("Dark_Matter_Density"),
-          display_name="Dark\ Matter\ Mass", units=r"M_{\odot}")
+          display_name="Dark\/Matter\/Mass", units=r"M_{\odot}")
 
 KnownEnzoFields["Temperature"]._units = r"\rm{K}"
 KnownEnzoFields["Temperature"].units = r"K"

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -40,6 +40,8 @@
     InvalidSimulationTimeSeries, \
     MissingParameter, \
     NoStoppingCondition
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects
 
 from yt.convenience import \
     load
@@ -196,7 +198,7 @@
 
         if not my_all_outputs:
             TimeSeriesData.__init__(self, outputs=[], parallel=parallel)
-            mylog.info("%d outputs loaded into time series." % 0)
+            mylog.info("0 outputs loaded into time series.")
             return
 
         # Apply selection criteria to the set.
@@ -251,7 +253,7 @@
                 init_outputs.append(output['filename'])
             
         TimeSeriesData.__init__(self, outputs=init_outputs, parallel=parallel)
-        mylog.info("%d outputs loaded into time series." % len(init_outputs))
+        mylog.info("%d outputs loaded into time series.", len(init_outputs))
 
     def _parse_parameter_file(self):
         """
@@ -415,7 +417,7 @@
 
         elif self.parameters['dtDataDump'] > 0 and \
           self.parameters['CycleSkipDataDump'] > 0:
-            mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set." % self.parameter_filename )
+            mylog.info("Simulation %s has both dtDataDump and CycleSkipDataDump set.", self.parameter_filename )
             mylog.info("    Unable to calculate datasets.  Attempting to search in the current directory")
             self._find_outputs()
 
@@ -433,8 +435,6 @@
             if self.parameters['CycleSkipDataDump'] <= 0:
                 self.all_outputs.sort(key=lambda obj:obj['time'])
 
-        mylog.info("Total datasets: %d." % len(self.all_outputs))
-
     def _calculate_simulation_bounds(self):
         """
         Figure out the starting and stopping time and redshift for the simulation.
@@ -471,7 +471,7 @@
                     'StopCycle' in self.parameters):
                 raise NoStoppingCondition(self.parameter_filename)
             if self.final_time is None:
-                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.',
                            self.parameter_filename)
 
     def _set_parameter_defaults(self):
@@ -530,16 +530,23 @@
 
         self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
         self.all_outputs.sort(key=lambda obj: obj['time'])
-        mylog.info("Located %d total outputs." % len(self.all_outputs))
+        only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))
+
+        # manually set final time and redshift with last output
+        if self.all_outputs:
+            self.final_time = self.all_outputs[-1]['time']
+            if self.cosmological_simulation:
+                self.final_redshift = self.all_outputs[-1]['redshift']
 
     def _check_for_outputs(self, potential_outputs):
         r"""Check a list of files to see if they are valid datasets."""
 
-        mylog.info("Checking %d potential outputs." %
-                   len(potential_outputs))
+        only_on_root(mylog.info, "Checking %d potential outputs.", 
+                     len(potential_outputs))
 
-        my_outputs = []
-        for output in potential_outputs:
+        my_outputs = {}
+        for my_storage, output in parallel_objects(potential_outputs, 
+                                                   storage=my_outputs):
             if self.parameters['DataDumpDir'] in output:
                 dir_key = self.parameters['DataDumpDir']
                 output_key = self.parameters['DataDumpName']
@@ -554,12 +561,14 @@
                 try:
                     pf = load(filename)
                     if pf is not None:
-                        my_outputs.append({'filename': filename,
-                                           'time': pf.current_time})
+                        my_storage.result = {'filename': filename,
+                                             'time': pf.current_time}
                         if pf.cosmological_simulation:
-                            my_outputs[-1]['redshift'] = pf.current_redshift
+                            my_storage.result['redshift'] = pf.current_redshift
                 except YTOutputNotIdentified:
-                    mylog.error('Failed to load %s' % filename)
+                    mylog.error('Failed to load %s', filename)
+        my_outputs = [my_output for my_output in my_outputs.values() \
+                      if my_output is not None]
 
         return my_outputs
 
@@ -601,7 +610,7 @@
                     and outputs[0] not in my_outputs:
                 my_outputs.append(outputs[0])
             else:
-                mylog.error("No dataset added for %s = %f." % (key, value))
+                mylog.error("No dataset added for %s = %f.", key, value)
 
         outputs.sort(key=lambda obj: obj['time'])
         return my_outputs
@@ -668,7 +677,7 @@
         r"""Write cosmology output parameters for a cosmology splice.
         """
 
-        mylog.info("Writing redshift output list to %s." % filename)
+        mylog.info("Writing redshift output list to %s.", filename)
         f = open(filename, 'w')
         for q, output in enumerate(outputs):
             z_string = "%%s[%%d] = %%.%df" % decimals
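The _check_for_outputs rewrite above adopts the parallel_objects storage idiom: each processor fills a per-item storage object, and the results dict is reassembled across all processors after the loop. A minimal sketch of the pattern (with a toy squaring task standing in for the dataset check):

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        parallel_objects

    results = {}
    for my_storage, item in parallel_objects(range(16), storage=results):
        # each task handles a subset of the items; setting .result
        # stores the value under this item's key for later gathering
        my_storage.result = item ** 2
    results = [r for r in results.values() if r is not None]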

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -390,13 +390,8 @@
                     if vn in self.parameters and self.parameters[vn] != pval:
                         mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
                     self.parameters[vn] = pval
-        self.domain_left_edge = np.array(
-            [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
-        self.domain_right_edge = np.array(
-            [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
-        self.min_level = self.parameters.get("lrefine_min", 1) - 1
-
-        # Determine domain dimensions
+        
+        # Determine block size
         try:
             nxb = self.parameters["nxb"]
             nyb = self.parameters["nyb"]
@@ -404,6 +399,8 @@
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz'] # FLASH2 only!
+        
+        # Determine dimensionality
         try:
             dimensionality = self.parameters["dimensionality"]
         except KeyError:
@@ -412,7 +409,10 @@
             if nyb == 1: dimensionality = 1
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
+        
+        self.dimensionality = dimensionality
 
+        # Determine base grid parameters
         if 'lrefine_min' in self.parameters.keys() : # PARAMESH
             nblockx = self.parameters["nblockx"]
             nblocky = self.parameters["nblocky"]
@@ -423,20 +423,34 @@
             nblockz = self.parameters["kprocs"]
 
         # In case the user wasn't careful
-        if dimensionality <= 2 : nblockz = 1
-        if dimensionality == 1 : nblocky = 1
+        if self.dimensionality <= 2 : nblockz = 1
+        if self.dimensionality == 1 : nblocky = 1
 
-        self.dimensionality = dimensionality
+        # Determine domain boundaries
+        self.domain_left_edge = np.array(
+            [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
+        self.domain_right_edge = np.array(
+            [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
+        if self.dimensionality < 3:
+            for d in range(dimensionality, 3):
+                if self.domain_left_edge[d] == self.domain_right_edge[d]:
+                    mylog.warning('Identical domain left and right edges '
+                                  'along dummy dimension (%i), attempting to read anyway' % d)
+                    self.domain_right_edge[d] = self.domain_left_edge[d] + 1.0
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+
+        # Try to determine Gamma
         try:
             self.parameters["Gamma"] = self.parameters["gamma"]
         except:
             mylog.warning("Cannot find Gamma")
             pass
 
+        # Get the simulation time
         self.current_time = self.parameters["time"]
 
+        # Determine cosmological parameters.
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -247,8 +247,8 @@
         try:
             fileh = h5py.File(args[0],'r')
             if "gridded_data_format" in fileh:
+                fileh.close()
                 return True
-                fileh.close()
             fileh.close()
         except:
             pass

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -671,6 +671,8 @@
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
+    bbox = np.array( [ (pf.domain_left_edge[i], pf.domain_right_edge[i])
+                       for i in range(3) ])
     while pf.h.max_level < max_level and last_gc != cur_gc:
         mylog.info("Refining another level.  Current max level: %s",
                   pf.h.max_level)
@@ -691,7 +693,7 @@
             fg = FlaggingGrid(g, refinement_criteria)
             nsg = fg.find_subgrids()
             for sg in nsg:
-                LE = sg.left_index * g.dds
+                LE = sg.left_index * g.dds + pf.domain_left_edge
                 dims = sg.dimensions * pf.refine_by
                 grid = pf.h.smoothed_covering_grid(g.Level + 1, LE, dims)
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
@@ -700,7 +702,9 @@
                     if not pf.field_info[field].particle_type :
                         gd[field] = grid[field]
                 grid_data.append(gd)
-        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
+        
+        pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0,
+                            bbox = bbox)
         cur_gc = pf.h.num_grids
 
     # Now reassign particle data to grids
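Note why the left-edge offset above matters: subgrid indices are relative to the domain, so for a domain that does not start at the origin the old unit-cube assumption places grids incorrectly. For example, with a left edge of -1.0 and dds = 0.125, integer index 8 corresponds to 0.0, not 1.0:

    import numpy as np

    domain_left_edge = np.array([-1.0, -1.0, -1.0])  # assumed non-unit domain
    dds = np.array([0.125, 0.125, 0.125])
    left_index = np.array([8, 8, 8])

    LE = left_index * dds + domain_left_edge  # -> [ 0.  0.  0.]

Passing bbox through to load_amr_grids likewise keeps the regenerated hierarchy on the original domain instead of the default unit cube.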

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -27,6 +27,7 @@
 import time, types, signal, inspect, traceback, sys, pdb, os
 import contextlib
 import warnings, struct, subprocess
+import numpy as np
 from distutils import version
 from math import floor, ceil
 
@@ -61,6 +62,18 @@
         return [obj]
     return obj
 
+def ensure_numpy_array(obj):
+    """
+    This function ensures that *obj* is a numpy array. Typically used to
+    convert scalar, list or tuple argument passed to functions using Cython.
+    """
+    if isinstance(obj, np.ndarray):
+        return obj
+    elif isinstance(obj, (types.ListType, types.TupleType)):
+        return np.asarray(obj)
+    else:
+        return np.asarray([obj])
+
 def read_struct(f, fmt):
     """
     This reads a struct, and only that struct, from an open file.
@@ -168,7 +181,7 @@
     @wraps(func)
     def check_parallel_rank(*args, **kwargs):
         if ytcfg.getint("yt","__topcomm_parallel_rank") > 0:
-            return 
+            return
         return func(*args, **kwargs)
     return check_parallel_rank
 
@@ -185,7 +198,7 @@
 
     .. code-block:: python
 
-       @rootonly
+       @deprecate
        def some_really_old_function(...):
 
     """
@@ -205,7 +218,7 @@
 
     .. code-block:: python
 
-       @rootonly
+       @pdb_run
        def some_function_to_debug(...):
 
     """
@@ -569,10 +582,10 @@
     if nt < 0:
         return os.environ.get("OMP_NUM_THREADS", 0)
     return nt
-        
+
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
 
 def get_image_suffix(name):
-    suffix = os.path.splitext(name)[1]
+    suffix = os.path.splitext(name)[1].lstrip('.')
     return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''
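As a quick illustration of the new ensure_numpy_array helper, the expected behavior is (a sketch, not doctest output):

    import numpy as np
    from yt.funcs import ensure_numpy_array

    ensure_numpy_array(1.0)           # -> array([ 1.])
    ensure_numpy_array((1, 2, 3))     # -> array([1, 2, 3])
    ensure_numpy_array(np.arange(3))  # returned as-is, no copy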

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/imods.py
--- /dev/null
+++ b/yt/imods.py
@@ -0,0 +1,17 @@
+# This custom importer for yt will set up some IPython notebook-specific
+# helpers.  For instance, it will eventually add items to the menubar.
+
+import __builtin__
+if "__IPYTHON__" not in dir(__builtin__):
+    raise ImportError
+
+from IPython.core.interactiveshell import InteractiveShell
+from IPython.core.display import display, display_html
+inst = InteractiveShell.instance()
+ip = inst.get_ipython()
+ip.enable_pylab("inline", import_all=False)
+
+from yt.config import ytcfg
+ytcfg["yt", "ipython_notebook"] = "True"
+
+from yt.mods import *

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -27,7 +27,7 @@
 from numpy.testing import assert_array_equal, assert_almost_equal, \
     assert_approx_equal, assert_array_almost_equal, assert_equal, \
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
-    assert_allclose
+    assert_allclose, assert_raises
 
 def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
@@ -43,7 +43,7 @@
                                verbose=verbose)
 
 def amrspace(extent, levels=7, cells=8):
-    """Creates two numpy arrays representing the left and right bounds of 
+    """Creates two numpy arrays representing the left and right bounds of
     an AMR grid as well as an array for the AMR level of each cell.
 
     Parameters
@@ -57,7 +57,7 @@
         length ndims), then each dimension will be refined down to this level.
         All values in this array must be the same or zero.  A zero valued dimension
         indicates that this dim should not be refined.  Taking the 3D cylindrical
-        example above if we don't want refine theta but want r and z at 5 we would 
+        example above, if we don't want to refine theta but want r and z at 5, we would
         set levels=(5, 5, 0).
     cells : int, optional
         This is the number of cells per refinement level.

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -30,7 +30,7 @@
 from yt.funcs import *
 from yt.utilities.minimal_representation import MinimalProjectDescription
 import argparse, os, os.path, math, sys, time, subprocess, getpass, tempfile
-import urllib, urllib2, base64
+import urllib, urllib2, base64, os
 
 def _fix_pf(arg):
     if os.path.isdir("%s" % arg) and \
@@ -119,7 +119,11 @@
     log     = dict(short="-l", long="--log",
                    action="store_true",
                    dest="takelog", default=True,
-                   help="Take the log of the field?"),
+                   help="Use logarithmic scale for image"),
+    linear  = dict(long="--linear",
+                   action="store_false",
+                   dest="takelog",
+                   help="Use linear scale for image"),
     text    = dict(short="-t", long="--text",
                    action="store", type=str,
                    dest="text", default=None,
@@ -180,7 +184,7 @@
                    dest="skip", default=1,
                    help="Skip factor for outputs"),
     proj    = dict(short="-p", long="--projection",
-                   action="store_true", 
+                   action="store_true",
                    dest="projection", default=False,
                    help="Use a projection rather than a slice"),
     maxw    = dict(long="--max-width",
@@ -221,7 +225,7 @@
                    dest="threshold", default=None,
                    help="Density threshold"),
     dm_only = dict(long="--all-particles",
-                   action="store_false", 
+                   action="store_false",
                    dest="dm_only", default=True,
                    help="Use all particles"),
     grids   = dict(long="--show-grids",
@@ -591,19 +595,6 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
-        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
-        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
-            print "Let's also add bitbucket.org to the known hosts, so hg"
-            print "doesn't warn us about bitbucket."
-            print "We will add this:"
-            print
-            print "   [hostfingerprints]"
-            print "   bitbucket.org = %s" % (bb_fp)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            cedit.config.setoption(uu, hgrc_path,
-                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
-
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
@@ -643,7 +634,7 @@
         print "At any time in advance of the upload of the bug, you should feel free"
         print "to ctrl-C out and submit the bug report manually by going here:"
         print "   http://hg.yt-project.org/yt/issues/new"
-        print 
+        print
         print "Also, in order to submit a bug through this interface, you"
         print "need a Bitbucket account. If you don't have one, exit this "
         print "bugreport now and run the 'yt bootstrap_dev' command to create one."
@@ -711,7 +702,7 @@
         data['content'] = content
         print
         print "==============================================================="
-        print 
+        print
         print "Okay, we're going to submit with this:"
         print
         print "Summary: %s" % (data['title'])
@@ -731,7 +722,7 @@
         import json
         retval = json.loads(retval)
         url = "http://hg.yt-project.org/yt/issue/%s" % retval['local_id']
-        print 
+        print
         print "==============================================================="
         print
         print "Thanks for your bug report!  Together we'll make yt totally bug free!"
@@ -798,7 +789,7 @@
         if len(email) == 0: sys.exit(1)
         print
         print "Please choose a password:"
-        print 
+        print
         while 1:
             password1 = getpass.getpass("Password? ")
             password2 = getpass.getpass("Confirm? ")
@@ -817,7 +808,7 @@
         print
         loki = raw_input()
         data = dict(name = name, email = email, username = username,
-                    password = password1, password2 = password2, 
+                    password = password1, password2 = password2,
                     url = url, zap = "rowsdower")
         data = urllib.urlencode(data)
         hub_url = "https://hub.yt-project.org/create_user"
@@ -1007,7 +998,7 @@
         print
         loki = raw_input()
 
-        mpd = MinimalProjectDescription(title, bb_url, summary, 
+        mpd = MinimalProjectDescription(title, bb_url, summary,
                 categories[cat_id], image_url)
         mpd.upload()
 
@@ -1054,7 +1045,7 @@
             print
             if "site-packages" not in path:
                 print "This installation CAN be automatically updated."
-                if opts.update_source:  
+                if opts.update_source:
                     update_hg(path)
                 print "Updated successfully."
         elif opts.update_source:
@@ -1106,7 +1097,7 @@
             # prepend sys.path with current working directory
             sys.path.insert(0,'')
             IPython.embed(config=cfg,user_ns=local_ns)
-            
+
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
@@ -1116,7 +1107,7 @@
                    dest="host", default=None, help="IP Address to bind on"),
             "pf",
             )
-    
+
     name = "mapserver"
     description = \
         """
@@ -1191,7 +1182,7 @@
     name = "pastebin_grab"
     description = \
         """
-        Print an online pastebin to STDOUT for local use. 
+        Print an online pastebin to STDOUT for local use.
         """
 
     def __call__(self, args):
@@ -1233,16 +1224,15 @@
         print
 
 class YTPlotCmd(YTCommand):
-    args = ("width", "unit", "bn", "proj", "center",
-            "zlim", "axis", "field", "weight", "skip",
-            "cmap", "output", "grids", "time", "pf",
-            "max")
-    
+    args = ("width", "unit", "bn", "proj", "center", "zlim", "axis", "field",
+            "weight", "skip", "cmap", "output", "grids", "time", "pf", "max",
+            "log", "linear")
+
     name = "plot"
-    
+
     description = \
         """
-        Create a set of images 
+        Create a set of images
 
         """
 
@@ -1281,21 +1271,22 @@
                                 width=width)
             if args.grids:
                 plt.annotate_grids()
-            if args.time: 
+            if args.time:
                 time = pf.current_time*pf['Time']*pf['years']
                 plt.annotate_text((0.2,0.8), 't = %5.2e yr'%time)
 
             plt.set_cmap(args.field, args.cmap)
+            plt.set_log(args.field, args.takelog)
             if args.zlim:
                 plt.set_zlim(args.field,*args.zlim)
             if not os.path.isdir(args.output): os.makedirs(args.output)
             plt.save(os.path.join(args.output,"%s" % (pf)))
 
 class YTRenderCmd(YTCommand):
-        
+
     args = ("width", "unit", "center","enhance",'outputfn',
-            "field", "cmap", "contours", "viewpoint",
-            "pixels","up","valrange","log","contour_width", "pf")
+            "field", "cmap", "contours", "viewpoint", "linear",
+            "pixels", "up", "valrange", "log","contour_width", "pf")
     name = "render"
     description = \
         """
@@ -1327,16 +1318,16 @@
 
         N = args.pixels
         if N is None:
-            N = 512 
-        
+            N = 512
+
         up = args.up
         if up is None:
             up = [0.,0.,1.]
-            
+
         field = args.field
         if field is None:
             field = 'Density'
-        
+
         log = args.takelog
         if log is None:
             log = True
@@ -1369,7 +1360,7 @@
             for i in range(3):
                 image[:,:,i] = image[:,:,i]/(image[:,:,i].mean() + 5.*image[:,:,i].std())
             image[image>1.0]=1.0
-            
+
         save_name = args.output
         if save_name is None:
             save_name = "%s"%pf+"_"+field+"_rendering.png"
@@ -1398,6 +1389,67 @@
         import rpdb
         rpdb.run_rpdb(int(args.task))
 
+class YTNotebookCmd(YTCommand):
+    name = ["notebook"]
+    args = (
+            dict(short="-o", long="--open-browser", action="store_true",
+                 default = False, dest='open_browser',
+                 help="Open a web browser."),
+            dict(short="-p", long="--port", action="store",
+                 default = 0, dest='port',
+                 help="Port to listen on; defaults to auto-detection."),
+            dict(short="-n", long="--no-password", action="store_true",
+                 default = False, dest='no_password',
+                 help="If set, do not prompt or use a password."),
+            )
+    description = \
+        """
+        Run the IPython Notebook
+        """
+    def __call__(self, args):
+        kwargs = {}
+        from IPython.frontend.html.notebook.notebookapp import NotebookApp
+        pw = ytcfg.get("yt", "notebook_password")
+        if len(pw) == 0 and not args.no_password:
+            import IPython.lib
+            pw = IPython.lib.passwd()
+            print "If you would like to use this password in the future,"
+            print "place a line like this inside the [yt] section in your"
+            print "yt configuration file at ~/.yt/config"
+            print
+            print "notebook_password = %s" % pw
+            print
+        elif args.no_password:
+            pw = None
+        if args.port != 0:
+            kwargs['port'] = int(args.port)
+        if pw is not None:
+            kwargs['password'] = pw
+        app = NotebookApp(open_browser=args.open_browser,
+                          **kwargs)
+        app.initialize(argv=[])
+        print
+        print "***************************************************************"
+        print
+        print "The notebook is now live at:"
+        print
+        print "     http://127.0.0.1:%s/" % app.port
+        print
+        print "Recall you can create a new SSH tunnel dynamically by pressing"
+        print "~C and then typing -L%s:localhost:%s" % (app.port, app.port)
+        print
+        print "Additionally, while in the notebook, we recommend you start by"
+        print "replacing 'yt.mods' with 'yt.imods' like so:"
+        print
+        print "    from yt.imods import *"
+        print
+        print "This will enable some IPython-specific extensions to yt."
+        print
+        print "***************************************************************"
+        print
+        app.start()
+
+
 class YTGUICmd(YTCommand):
     name = ["serve", "reason"]
     args = (

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/grid_data_format/tests/test_writer.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/tests/test_writer.py
@@ -0,0 +1,66 @@
+"""
+Testsuite for writing yt data to GDF
+
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: Torun Center for Astronomy, NCU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Kacper Kowalik.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import tempfile
+import shutil
+import os
+import h5py as h5
+from yt.testing import \
+    fake_random_pf, assert_equal
+from yt.utilities.grid_data_format.writer import \
+    write_to_gdf
+from yt.frontends.gdf.data_structures import \
+    GDFStaticOutput
+from yt.mods import \
+    load
+
+TEST_AUTHOR = "yt test runner"
+TEST_COMMENT = "Testing write_to_gdf"
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_write_gdf():
+    """Main test suite for write_gdf"""
+    tmpdir = tempfile.mkdtemp()
+    tmpfile = os.path.join(tmpdir, 'test_gdf.h5')
+
+    test_pf = fake_random_pf(64)
+    write_to_gdf(test_pf, tmpfile, data_author=TEST_AUTHOR,
+                 data_comment=TEST_COMMENT)
+    del test_pf
+
+    assert isinstance(load(tmpfile), GDFStaticOutput)
+
+    h5f = h5.File(tmpfile, 'r')
+    gdf = h5f['gridded_data_format'].attrs
+    assert_equal(gdf['data_author'], TEST_AUTHOR)
+    assert_equal(gdf['data_comment'], TEST_COMMENT)
+    h5f.close()
+
+    shutil.rmtree(tmpdir)

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -115,9 +115,11 @@
     cdef int merged
     cdef int num_cells
     cdef QTN_combine *combine
+    cdef np.float64_t bounds[4]
+    cdef np.float64_t dds[2]
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
-                  int nvals, style = "integrate"):
+                  int nvals, bounds, style = "integrate"):
         if style == "integrate":
             self.combine = QTN_add_value
         elif style == "mip":
@@ -133,9 +135,13 @@
         cdef np.float64_t weight_val = 0.0
         self.nvals = nvals
         for i in range(nvals): vals[i] = 0.0
+        for i in range(4):
+            self.bounds[i] = bounds[i]
 
         self.top_grid_dims[0] = top_grid_dims[0]
         self.top_grid_dims[1] = top_grid_dims[1]
+        self.dds[0] = (self.bounds[1] - self.bounds[0])/self.top_grid_dims[0]
+        self.dds[1] = (self.bounds[3] - self.bounds[2])/self.top_grid_dims[1]
 
         # This wouldn't be necessary if we did bitshifting...
         for i in range(80):
@@ -404,6 +410,58 @@
             wtoadd -= node.weight_val
         return added
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def fill_image(self, np.ndarray[np.float64_t, ndim=2] buffer, _bounds,
+                   int val_index = 0):
+        cdef np.float64_t dds[2], pos[2]
+        cdef int nn[2], i, j
+        cdef np.float64_t bounds[4]
+        for i in range(4):
+            bounds[i] = _bounds[i]
+        for i in range(2):
+            nn[i] = buffer.shape[i]
+            dds[i] = (bounds[i*2 + 1] - bounds[i*2])/nn[i]
+        cdef QuadTreeNode *node
+        pos[0] = bounds[0]
+        for i in range(nn[0]):
+            pos[1] = bounds[2]
+            for j in range(nn[1]):
+                # We start at level zero.  In the future we could optimize by
+                # retaining oct information from previous cells.
+                node = self.find_node_at_pos(pos)
+                buffer[i,j] = node.val[val_index]
+                pos[1] += dds[1]
+            pos[0] += dds[0]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef QuadTreeNode *find_node_at_pos(self, np.float64_t pos[2]):
+        cdef np.int64_t ind[2]
+        cdef np.float64_t cp[2]
+        cdef np.float64_t dds[2]
+        cdef QuadTreeNode *cur
+        for i in range(2):
+            ind[i] = <np.int64_t> (pos[i]/self.dds[i])
+            cp[i] = (ind[i] + 0.5) * self.dds[i]
+            dds[i] = self.dds[i]
+        cur = self.root_nodes[ind[0]][ind[1]]
+        while cur.children[0][0] != NULL:
+            for i in range(2):
+                # Offset the center by half the new dx after
+                # descending to the next level
+                dds[i] = dds[i] / 2.0
+                if cp[i] < pos[i]:
+                    ind[i] = 1
+                    cp[i] += dds[i] / 2.0
+                else:
+                    ind[i] = 0
+                    cp[i] -= dds[i] / 2.0
+            cur = cur.children[ind[0]][ind[1]]
+        return cur
+
     def __dealloc__(self):
         cdef int i, j
         for i in range(self.top_grid_dims[0]):
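For clarity, find_node_at_pos above is a standard top-down point query: start in the root node containing the position, then repeatedly halve the cell width and step the running center toward the point until a leaf is reached. The same walk in pure Python (hypothetical node objects with a .children attribute, illustration only):

    def find_node_at_pos(root_nodes, dds0, pos):
        # locate the top-level node and its center; this assumes, as the
        # Cython version does, that the grid's lower-left corner is at 0
        ind = [int(pos[i] / dds0[i]) for i in range(2)]
        cp = [(ind[i] + 0.5) * dds0[i] for i in range(2)]
        dds = list(dds0)
        cur = root_nodes[ind[0]][ind[1]]
        while cur.children[0][0] is not None:
            for i in range(2):
                dds[i] /= 2.0
                if cp[i] < pos[i]:   # point above center: upper child
                    ind[i] = 1
                    cp[i] += dds[i] / 2.0
                else:                # point at/below center: lower child
                    ind[i] = 0
                    cp[i] -= dds[i] / 2.0
            cur = cur.children[ind[0]][ind[1]]
        return cur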

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/lib/tests/test_grid_tree.py
--- a/yt/utilities/lib/tests/test_grid_tree.py
+++ b/yt/utilities/lib/tests/test_grid_tree.py
@@ -1,75 +1,128 @@
+"""
+Tests for GridTree
+
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+Copyright (C) 2012 John ZuHone.  All Rights Reserved.
+
+This file is part of yt.
+
+yt is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import numpy as np
+import random
 
-from yt.testing import *
-from yt.frontends.stream.api import load_amr_grids
+from yt.testing import \
+    assert_equal, assert_raises
+from yt.frontends.stream.api import \
+    load_amr_grids
+
 
 def setup():
+    """Prepare setup specific environment"""
+    global test_pf
 
-    global pf
-    
     grid_data = [
-        dict(left_edge = [0.0, 0.0, 0.0], right_edge = [1.0, 1.0, 1.],
-             level = 0, dimensions = [16, 16, 16]),
-        dict(left_edge = [0.25, 0.25, 0.25], right_edge = [0.75, 0.75, 0.75],
-             level = 1, dimensions = [16, 16, 16]),
-        dict(left_edge = [0.25, 0.25, 0.375], right_edge = [0.5, 0.5, 0.625],
-             level = 2, dimensions = [16, 16, 16]),
-        dict(left_edge = [0.5, 0.5, 0.375], right_edge = [0.75, 0.75, 0.625],
-             level = 2, dimensions = [16, 16, 16]),
-        dict(left_edge = [0.3125, 0.3125, 0.4375], right_edge = [0.4375, 0.4375, 0.5625],
-             level = 3, dimensions = [16, 16, 16]),
-        dict(left_edge = [0.5625, 0.5625, 0.4375], right_edge = [0.6875, 0.6875, 0.5625],
-             level = 3, dimensions = [16, 16, 16])
-        ]
+        dict(left_edge=[0.0, 0.0, 0.0], right_edge=[1.0, 1.0, 1.],
+             level=0, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.25, 0.25, 0.25], right_edge=[0.75, 0.75, 0.75],
+             level=1, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.25, 0.25, 0.375], right_edge=[0.5, 0.5, 0.625],
+             level=2, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.5, 0.5, 0.375], right_edge=[0.75, 0.75, 0.625],
+             level=2, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.3125, 0.3125, 0.4375],
+             right_edge=[0.4375, 0.4375, 0.5625],
+             level=3, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.5625, 0.5625, 0.4375],
+             right_edge=[0.6875, 0.6875, 0.5625],
+             level=3, dimensions=[16, 16, 16])
+    ]
 
-    for g in grid_data: g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
-    pf = load_amr_grids(grid_data, [16, 16, 16], 1.0)
+    for grid in grid_data:
+        grid["Density"] = \
+            np.random.random(grid["dimensions"]) * 2 ** grid["level"]
+    test_pf = load_amr_grids(grid_data, [16, 16, 16], 1.0)
 
-def test_grid_tree() :
 
-    grid_tree = pf.h.get_grid_tree()
+def test_grid_tree():
+    """Main test suite for GridTree"""
+    grid_tree = test_pf.h.get_grid_tree()
     indices, levels, nchild, children = grid_tree.return_tree_info()
 
-    grid_levels = [grid.Level for grid in pf.h.grids]
-    grid_indices = [grid.id-grid._id_offset for grid in pf.h.grids]
-    grid_nchild = [len(grid.Children) for grid in pf.h.grids]
+    grid_levels = [grid.Level for grid in test_pf.h.grids]
 
-    print levels, grid_levels
-    assert_equal(levels, grid_levels)
-    assert_equal(indices, grid_indices)
-    assert_equal(nchild, grid_nchild)
+    grid_indices = [grid.id - grid._id_offset for grid in test_pf.h.grids]
+    grid_nchild = [len(grid.Children) for grid in test_pf.h.grids]
 
-    for i, grid in enumerate(pf.h.grids) :
+    yield assert_equal, levels, grid_levels
+    yield assert_equal, indices, grid_indices
+    yield assert_equal, nchild, grid_nchild
+
+    for i, grid in enumerate(test_pf.h.grids):
         if grid_nchild[i] > 0:
-            grid_children = np.array([child.id-child._id_offset
+            grid_children = np.array([child.id - child._id_offset
                                       for child in grid.Children])
-            assert_equal(grid_children, children[i])
+            yield assert_equal, grid_children, children[i]
 
-def test_find_points() :
-    
+
+def test_find_points():
+    """Main test suite for MatchPoints"""
     num_points = 100
+    randx = np.random.uniform(low=test_pf.domain_left_edge[0],
+                              high=test_pf.domain_right_edge[0],
+                              size=num_points)
+    randy = np.random.uniform(low=test_pf.domain_left_edge[1],
+                              high=test_pf.domain_right_edge[1],
+                              size=num_points)
+    randz = np.random.uniform(low=test_pf.domain_left_edge[2],
+                              high=test_pf.domain_right_edge[2],
+                              size=num_points)
 
-    x = np.random.uniform(low=pf.domain_left_edge[0],
-                          high=pf.domain_right_edge[0], size=num_points)
-    y = np.random.uniform(low=pf.domain_left_edge[1],
-                          high=pf.domain_right_edge[1], size=num_points)
-    z = np.random.uniform(low=pf.domain_left_edge[2],
-                          high=pf.domain_right_edge[2], size=num_points)
-
-    point_grids, point_grid_inds = pf.h.find_points(x,y,z)
+    point_grids, point_grid_inds = test_pf.h.find_points(randx, randy, randz)
 
     grid_inds = np.zeros((num_points), dtype='int64')
 
-    for i, xx, yy, zz in zip(range(num_points), x, y, z) :
+    for ind, ixx, iyy, izz in zip(range(num_points), randx, randy, randz):
 
         pt_level = -1
-        
-        for grid in pf.h.grids:
 
-            if grid.is_in_grid(xx, yy, zz) :
-            
-                if grid.Level > pt_level :
+        for grid in test_pf.h.grids:
+
+            if grid.is_in_grid(ixx, iyy, izz):
+
+                if grid.Level > pt_level:
                     pt_level = grid.Level
-                    grid_inds[i] = grid.id-grid._id_offset
-                    
-    assert_equal(point_grid_inds, grid_inds)
+                    grid_inds[ind] = grid.id - grid._id_offset
+
+    yield assert_equal, point_grid_inds, grid_inds
+
+    # Test whether find_points works for lists
+    point_grids, point_grid_inds = test_pf.h.find_points(randx.tolist(),
+                                                         randy.tolist(),
+                                                         randz.tolist())
+    yield assert_equal, point_grid_inds, grid_inds
+
+    # Test if find_points works for scalar
+    ind = random.randint(0, num_points - 1)
+    point_grids, point_grid_inds = test_pf.h.find_points(randx[ind],
+                                                         randy[ind],
+                                                         randz[ind])
+    yield assert_equal, point_grid_inds, grid_inds[ind]
+
+    # Test that find_points fails properly for input arrays of unequal size
+    yield assert_raises, AssertionError, test_pf.h.find_points, \
+        [0], 1.0, [2, 3]

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/orientation.py
--- a/yt/utilities/orientation.py
+++ b/yt/utilities/orientation.py
@@ -58,6 +58,8 @@
         if north_vector is not None: self.steady_north = True
         self.north_vector = north_vector
         self._setup_normalized_vectors(normal_vector, north_vector)
+        if self.north_vector is None:
+            self.north_vector = self.unit_vectors[1] 
 
     def _setup_normalized_vectors(self, normal_vector, north_vector):
         # Now we set up our various vectors

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -799,7 +799,7 @@
                 if target < size:
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
                     buf = self.recv_quadtree(target, tgd, args)
-                    qto = QuadTree(tgd, args[2])
+                    qto = QuadTree(tgd, args[2], qt.bounds)
                     qto.frombuffer(buf[0], buf[1], buf[2], merge_style)
                     merge_quadtrees(qt, qto, style = merge_style)
                     del qto
@@ -819,7 +819,7 @@
         self.comm.Bcast([buf[2], MPI.DOUBLE], root=0)
         self.refined = buf[0]
         if rank != 0:
-            qt = QuadTree(tgd, args[2])
+            qt = QuadTree(tgd, args[2], qt.bounds)
             qt.frombuffer(buf[0], buf[1], buf[2], merge_style)
         return qt
 

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,26 +1,31 @@
 #
 # Physical Constants and Units Conversion Factors
 #
+# Values for these constants are drawn from IAU and IUPAC data 
+# unless otherwise noted:
+# http://maia.usno.navy.mil/NSFA/IAU2009_consts.html
+# http://goldbook.iupac.org/list_goldbook_phys_constants_defs.html
 
 # Masses
-mass_hydrogen_cgs = 1.67e-24  # g
-mass_electron_cgs = 9.11e-28  # g
-amu_cgs           = 1.66053886e-24  # g
-mass_sun_cgs = 1.9891e33  # g
+mass_hydrogen_cgs = 1.674534e-24  # g
+mass_electron_cgs = 9.1093898e-28  # g
+amu_cgs           = 1.6605402e-24  # g
+mass_sun_cgs = 1.98841586e33  # g
 # Velocities
 speed_of_light_cgs = 2.99792458e10  # cm/s, exact
 
 # Cross Sections
-cross_section_thompson_cgs = 6.65e-25  # cm^2
+# 8*pi/3 (alpha*hbar*c/(2*pi))**2
+cross_section_thompson_cgs = 6.65245854533e-25  # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-10  # esu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.8032056e-10  # esu = 1.602176487e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
-stefan_boltzmann_constant_cgs = 5.670373e-5 # erg cm^-2 s^-1 K^-4
+stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
@@ -32,16 +37,16 @@
 mpc_per_mpc   = 1e0
 mpc_per_kpc   = 1e-3
 mpc_per_pc    = 1e-6
-mpc_per_au    = 4.847e-12
-mpc_per_rsun  = 2.253e-14
-mpc_per_miles = 5.216e-20
-mpc_per_cm    = 3.24e-25
+mpc_per_au    = 4.84813682e-12
+mpc_per_rsun  = 2.253962e-14
+mpc_per_miles = 5.21552871e-20
+mpc_per_cm    = 3.24077929e-25
 km_per_pc     = 3.08567758e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
-pc_per_cm     = 3.24e-19
+pc_per_cm     = 3.24077929e-19
 
-m_per_fpc     = 0.0324077649
+m_per_fpc     = 0.0324077929
 
 kpc_per_mpc   = 1.0 / mpc_per_kpc
 pc_per_mpc    = 1.0 / mpc_per_pc
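Since every length factor here is expressed per megaparsec, pairwise conversions follow by simple division, e.g.:

    cm_per_au = mpc_per_au / mpc_per_cm  # ~1.496e13, the AU in cm
    km_per_au = cm_per_au * km_per_cm    # ~1.496e8 km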

diff -r b90af0249db1f1fef85913ba2f58719ac8ff88f0 -r 4190004c06a0ff83bd5805438875ccbb0c5208fe yt/visualization/base_plot_types.py
--- /dev/null
+++ b/yt/visualization/base_plot_types.py
@@ -0,0 +1,84 @@
+"""
+This is a place for base classes of the various plot types.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2012 Nathan Goldbaum.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import matplotlib
+from ._mpl_imports import \
+    FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+from yt.funcs import \
+    get_image_suffix, mylog
+
+
+class PlotMPL(object):
+    """A base class for all yt plots made using matplotlib.
+
+    """
+    def __init__(self, fsize, axrect):
+        """Initialize PlotMPL class"""
+        self._plot_valid = True
+        self.figure = matplotlib.figure.Figure(figsize=fsize,
+                                               frameon=True)
+        self.axes = self.figure.add_axes(axrect)
+
+    def save(self, name, mpl_kwargs, canvas=None):
+        """Choose backend and save image to disk"""
+        suffix = get_image_suffix(name)
+        if suffix == '':
+            suffix = '.png'
+            name = "%s%s" % (name, suffix)
+
+        mylog.info("Saving plot %s", name)
+
+        if suffix == ".png":
+            canvas = FigureCanvasAgg(self.figure)
+        elif suffix == ".pdf":
+            canvas = FigureCanvasPdf(self.figure)
+        elif suffix in (".eps", ".ps"):
+            canvas = FigureCanvasPS(self.figure)
+        else:
+            mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+            canvas = FigureCanvasAgg(self.figure)
+
+        canvas.print_figure(name, **mpl_kwargs)
+        return name
+
+
+class ImagePlotMPL(PlotMPL):
+    """A base class for yt plots made using imshow
+
+    """
+    def __init__(self, fsize, axrect, caxrect, zlim):
+        """Initialize ImagePlotMPL class object"""
+        PlotMPL.__init__(self, fsize, axrect)
+        self.zmin, self.zmax = zlim
+        self.cax = self.figure.add_axes(caxrect)
+
+    def _init_image(self, data, cbnorm, cmap, extent, aspect=None):
+        """Store output of imshow in image variable"""
+        if (cbnorm == 'log10'):
+            norm = matplotlib.colors.LogNorm()
+        elif (cbnorm == 'linear'):
+            norm = matplotlib.colors.Normalize()
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect,
+                                      vmax=self.zmax, cmap=cmap)
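A rough sketch of driving these base classes directly (the figure geometry and limits here are made-up values, not yt defaults):

    import numpy as np
    from yt.visualization.base_plot_types import ImagePlotMPL

    data = np.random.random((64, 64))
    plot = ImagePlotMPL(fsize=(8, 8),
                        axrect=(0.10, 0.10, 0.80, 0.80),
                        caxrect=(0.92, 0.10, 0.02, 0.80),
                        zlim=(data.min(), data.max()))
    plot._init_image(data, cbnorm='linear', cmap='gray',
                     extent=[0.0, 1.0, 0.0, 1.0])
    plot.save('test_image.png', mpl_kwargs={})

In practice the plot window machinery presumably constructs these objects itself; the point of the refactor is that backend selection (Agg/Pdf/PS by suffix) lives in one place.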

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/f3b4fcb6ba47/
Changeset:   f3b4fcb6ba47
Branch:      yt
User:        jzuhone
Date:        2013-01-22 02:59:54
Summary:     Merged yt_analysis/yt into yt
Affected #:  11 files

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -835,6 +835,9 @@
         self.bin_count = None
         self.overdensity = None
         self.indices = np.array([])  # Never used for a LoadedHalo.
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
         # A supplementary data dict.
         if supp is None:
             self.supp = {}

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -281,10 +281,10 @@
         # Create output directories.
         self.output_dir = output_dir
         if output_dir is None:
-            output_dir = '.'
+            self.output_dir = self.pf.fullpath
         else:
             self.__check_directory(output_dir)
-        self.output_dir = os.path.join(output_dir, os.path.basename(self.pf.fullpath))
+            self.output_dir = os.path.join(output_dir, self.pf.directory)
         self.__check_directory(self.output_dir)
         self.profile_output_dir = os.path.join(self.output_dir, profile_output_dir)
         self.projection_output_dir = os.path.join(self.output_dir, projection_output_dir)

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3674,7 +3674,13 @@
         Inside = np.zeros(grid["x"].shape, dtype = 'float64')
         dim = grid["x"].shape
         # need this to take into account non-cube root grid tiles
-        dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+        if (len(dim) == 1):
+            dot_evec = np.zeros([3, dim[0]])
+        elif (len(dim) == 2):
+            dot_evec = np.zeros([3, dim[0], dim[1]])
+        elif (len(dim) == 3):
+            dot_evec = np.zeros([3, dim[0], dim[1], dim[2]])
+
         for i, ax in enumerate('xyz'):
             # distance to center
             ar  = grid[ax]-self.center[i]
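Since grid["x"].shape is already a tuple, the three-way branch above could also be written generically; a compact equivalent (same behavior, assuming dim is that shape tuple):

    dot_evec = np.zeros((3,) + tuple(dim))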

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -389,7 +389,7 @@
     # in code.
     G = 6.67e-8 / data.convert("cm") # cm^3 g^-1 s^-2
     # Check for periodicity of the clump.
-    two_root = 2. / np.array(data.pf.domain_dimensions)
+    two_root = 2. * np.array(data.pf.domain_width) / np.array(data.pf.domain_dimensions)
     domain_period = data.pf.domain_right_edge - data.pf.domain_left_edge
     periodic = np.array([0., 0., 0.])
     for i,dim in enumerate(["x", "y", "z"]):
@@ -432,7 +432,7 @@
         # Calculate the binding energy using the treecode method.
         # Faster but less accurate.
         # The octree doesn't like uneven root grids, so we will make it cubical.
-        root_dx = 1./np.array(data.pf.domain_dimensions).astype('float64')
+        root_dx = (data.pf.domain_width/np.array(data.pf.domain_dimensions)).astype('float64')
         left = min([np.amin(local_data['x']), np.amin(local_data['y']),
             np.amin(local_data['z'])])
         right = max([np.amax(local_data['x']), np.amax(local_data['y']),
@@ -443,8 +443,8 @@
         # edges for making indexes.
         cover_min = cover_min - cover_min % root_dx
         cover_max = cover_max - cover_max % root_dx
-        cover_imin = (cover_min * np.array(data.pf.domain_dimensions)).astype('int64')
-        cover_imax = (cover_max * np.array(data.pf.domain_dimensions) + 1).astype('int64')
+        cover_imin = (cover_min / root_dx).astype('int64')
+        cover_imax = (cover_max / root_dx + 1).astype('int64')
         cover_ActiveDimensions = cover_imax - cover_imin
         # Create the octree with these dimensions.
         # One value (mass) with incremental=True.
@@ -456,7 +456,7 @@
         dyes = np.unique(data['dy']) # so these will all have the same
         dzes = np.unique(data['dz']) # order.
         # We only need one dim to figure out levels, we'll use x.
-        dx = 1./data.pf.domain_dimensions[0]
+        dx = data.pf.domain_width[0]/data.pf.domain_dimensions[0]
         levels = (np.log(dx / dxes) / np.log(data.pf.refine_by)).astype('int')
         lsort = levels.argsort()
         levels = levels[lsort]

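Each hunk above replaces the assumption of a unit-width domain, where a root cell has width 1/N, with the general width domain_width/N. A quick numeric check for a box spanning 100 code units on a side with a 64^3 root grid:

    import numpy as np

    domain_width = np.array([100.0, 100.0, 100.0])
    domain_dimensions = np.array([64, 64, 64])

    root_dx = domain_width / domain_dimensions
    print(root_dx)                   # [ 1.5625  1.5625  1.5625]
    print(1.0 / domain_dimensions)   # the old value, wrong for this box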
diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,8 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None, particle_types = {}):
+                 fields, io = None, particle_types = None):
+        if particle_types is None: particle_types = {}
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions

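Switching particle_types from a {} default to a None sentinel avoids Python's shared-mutable-default pitfall: the default dict is created once at definition time, so mutated state leaks between calls. A minimal illustration, independent of yt:

    def bad(d={}):            # one dict, shared by every call
        d['n'] = d.get('n', 0) + 1
        return d

    def good(d=None):         # fresh dict per call, as StreamHandler now does
        if d is None:
            d = {}
        d['n'] = d.get('n', 0) + 1
        return d

    bad(); print(bad())       # {'n': 2} -- state leaked from the first call
    good(); print(good())     # {'n': 1}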
diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -587,5 +587,5 @@
     return inv_axis_names.get(axis, axis)
 
 def get_image_suffix(name):
-    suffix = os.path.splitext(name)[1].lstrip('.')
-    return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''
+    suffix = os.path.splitext(name)[1]
+    return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''

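With this change get_image_suffix keeps the leading dot in the returned suffix, so callers can test and append it directly. A quick check of the new behavior, using the function exactly as rewritten above:

    import os

    def get_image_suffix(name):
        suffix = os.path.splitext(name)[1]
        return suffix if suffix in ['.png', '.eps', '.ps', '.pdf'] else ''

    print(get_image_suffix('slice.png'))    # '.png' (previously 'png')
    print(get_image_suffix('data.h5'))      # ''  -- not an image suffix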
diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -143,7 +143,7 @@
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
-    ortho_find, quartiles
+    ortho_find, quartiles, periodic_position 
 
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1437,6 +1437,9 @@
         print
         print "Recall you can create a new SSH tunnel dynamically by pressing"
         print "~C and then typing -L%s:localhost:%s" % (app.port, app.port)
+        print "where the first number is the port on your local machine. "
+        print
+        print "If you are using %s on your machine already, try -L8889:localhost:%s" % (app.port, app.port)
         print
         print "Additionally, while in the notebook, we recommend you start by"
         print "replacing 'yt.mods' with 'yt.imods' like so:"

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -30,6 +30,30 @@
 import numpy as np
 import math
 
+def periodic_position(pos, pf):
+    r"""Assuming periodicity, find the periodic position within the domain.
+    
+    Parameters
+    ----------
+    pos : array
+        An array of floats.
+    
+    pf : StaticOutput
+        A simulation static output.
+    
+    Examples
+    --------
+    >>> a = np.array([1.1, 0.5, 0.5])
+    >>> data = {'Density':np.ones([32,32,32])}
+    >>> pf = load_uniform_grid(data, [32,32,32], 1.0)
+    >>> ppos = periodic_position(a, pf)
+    >>> ppos
+    array([ 0.1,  0.5,  0.5])
+    """
+ 
+    off = (pos - pf.domain_left_edge) % pf.domain_width
+    return pf.domain_left_edge + off
+
 def periodic_dist(a, b, period):
     r"""Find the Euclidian periodic distance between two points.
     

diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -758,6 +758,64 @@
         if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
+        
+    def _setup_origin(self):
+        origin = self.origin
+        axis_index = self.data_source.axis
+        if isinstance(origin, basestring):
+            origin = tuple(origin.split('-'))[:3]
+        if 1 == len(origin):
+            origin = ('lower', 'left') + origin
+        elif 2 == len(origin) and origin[0] in set(['left','right','center']):
+            o0map = {'left': 'lower', 'right': 'upper', 'center': 'center'}
+            origin = (o0map[origin[0]],) + origin
+        elif 2 == len(origin) and origin[0] in set(['lower','upper','center']):
+            origin = (origin[0], 'center', origin[-1])
+        assert origin[-1] in ['window', 'domain', 'native']
+
+        if origin[2] == 'window':
+            xllim, xrlim = self.xlim
+            yllim, yrlim = self.ylim
+        elif origin[2] == 'domain':
+            xllim = self.pf.domain_left_edge[x_dict[axis_index]]
+            xrlim = self.pf.domain_right_edge[x_dict[axis_index]]
+            yllim = self.pf.domain_left_edge[y_dict[axis_index]]
+            yrlim = self.pf.domain_right_edge[y_dict[axis_index]]
+        elif origin[2] == 'native':
+            return 0.0, 0.0
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "window", '
+                   '"domain", or "native" as the last term in origin.').format(self.origin)
+            raise RuntimeError(msg)
+
+        if origin[0] == 'lower':
+            yc = yllim
+        elif origin[0] == 'upper':
+            yc = yrlim
+        elif origin[0] == 'center':
+            yc = (yllim + yrlim)/2.0
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "lower" '
+                   '"upper" or "center" as the first term in origin.')
+            msg = msg.format(self.origin)
+            raise RuntimeError(msg)
+
+        if origin[1] == 'left':
+            xc = xllim
+        elif origin[1] == 'right':
+            xc = xrlim
+        elif origin[1] == 'center':
+            xc = (xllim + xrlim)/2.0
+        else:
+            mylog.warn("origin = {0}".format(origin))
+            msg = ('origin keyword "{0}" not recognized, must declare "left" '
+                   '"right" or "center" as the second term in origin.')
+            msg = msg.format(self.origin)
+            raise RuntimeError(msg)
+
+        return xc, yc
 
     def _setup_plots(self):
         if self._current_field is not None:
@@ -768,20 +826,7 @@
         for f in self.fields:
             axis_index = self.data_source.axis
 
-            if self.origin == 'center-window':
-                xc = (self.xlim[0]+self.xlim[1])/2
-                yc = (self.ylim[0]+self.ylim[1])/2
-            elif self.origin == 'center-domain':
-                xc = (self.pf.domain_left_edge[x_dict[axis_index]]+
-                      self.pf.domain_right_edge[x_dict[axis_index]])/2
-                yc = (self.pf.domain_left_edge[y_dict[axis_index]]+
-                      self.pf.domain_right_edge[y_dict[axis_index]])/2
-            elif self.origin == 'left-domain':
-                xc = self.pf.domain_left_edge[x_dict[axis_index]]
-                yc = self.pf.domain_left_edge[y_dict[axis_index]]
-            else:
-                raise RuntimeError(
-                    'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
+            xc, yc = self._setup_origin()
 
             if self._axes_unit_names is None:
                 unit = get_smallest_appropriate_unit(self.xlim[1] - self.xlim[0], self.pf)
@@ -976,7 +1021,7 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 origin='center-window', fontsize=15):
+                 origin='center-window', fontsize=15, field_parameters=None):
         r"""Creates a slice plot from a parameter file
         
         Given a pf object, an axis to slice along, and a field name
@@ -1027,14 +1072,36 @@
              Defaults to None, which automatically picks an appropriate unit.
              If axes_unit is '1', 'u', or 'unitary', it will not display the 
              units, and only show the axes name.
-        origin : string
-             The location of the origin of the plot coordinate system.
-             Currently, can be set to three options: 'left-domain', corresponding
-             to the bottom-left hand corner of the simulation domain, 'center-domain',
-             corresponding the center of the simulation domain, or 'center-window' for 
-             the center of the plot window.
+        origin : string or length 1, 2, or 3 sequence of strings
+             The location of the origin of the plot coordinate system.  This is
+             represented by a '-' separated string or a tuple of strings.  In the
+             first index the y-location is given by 'lower', 'upper', or 'center'.
+             The second index is the x-location, given as 'left', 'right', or
+             'center'.  Finally, whether the origin is applied in 'domain' space,
+             plot 'window' space, or the 'native' simulation coordinate system is
+             given.  For example, both 'upper-right-domain' and
+             ['upper', 'right', 'domain'] place the origin in the upper right hand
+             corner of domain space.  If x or y are not given, a value is inferred.
+             For instance, 'left-domain' corresponds to the lower-left hand corner
+             of the simulation domain, 'center-domain' corresponds to the center of
+             the simulation domain, and 'center-window' to the center of the plot
+             window. Further examples:
+
+             ==================================     ============================
+             format                                 example                
+             ==================================     ============================
+             '{space}'                              'domain'
+             '{xloc}-{space}'                       'left-window'
+             '{yloc}-{space}'                       'upper-domain'
+             '{yloc}-{xloc}-{space}'                'lower-right-window'
+             ('{space}',)                           ('window',)
+             ('{xloc}', '{space}')                  ('right', 'domain')
+             ('{yloc}', '{space}')                  ('lower', 'window')
+             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
+             ==================================     ============================
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
+        field_parameters : dictionary
+             A dictionary of field parameters that can be accessed by derived fields.
              
         Examples
         --------
@@ -1054,7 +1121,8 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        slc = pf.h.slice(axis, center[axis], center=center, fields=fields)
+        if field_parameters is None: field_parameters = {}
+        slc = pf.h.slice(axis, center[axis], center=center, fields=fields, **field_parameters)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 
@@ -1063,7 +1131,8 @@
     _frb_generator = FixedResolutionBuffer
 
     def __init__(self, pf, axis, fields, center='c', width=None, axes_unit=None,
-                 weight_field=None, max_level=None, origin='center-window', fontsize=15):
+                 weight_field=None, max_level=None, origin='center-window', fontsize=15, 
+                 field_parameters=None):
         r"""Creates a projection plot from a parameter file
         
         Given a pf object, an axis to project along, and a field name
@@ -1114,19 +1183,42 @@
              Defaults to None, which automatically picks an appropriate unit.
              If axes_unit is '1', 'u', or 'unitary', it will not display the 
              units, and only show the axes name.
-        origin : A string
-             The location of the origin of the plot coordinate system.
-             Currently, can be set to three options: 'left-domain', corresponding
-             to the bottom-left hand corner of the simulation domain, 'center-domain',
-             corresponding the center of the simulation domain, or 'center-window' for 
-             the center of the plot window.
+        origin : string or length 1, 2, or 3 sequence of strings
+             The location of the origin of the plot coordinate system.  This is
+             represented by a '-' separated string or a tuple of strings.  In the
+             first index the y-location is given by 'lower', 'upper', or 'center'.
+             The second index is the x-location, given as 'left', 'right', or
+             'center'.  Finally, whether the origin is applied in 'domain' space,
+             plot 'window' space, or the 'native' simulation coordinate system is
+             given.  For example, both 'upper-right-domain' and
+             ['upper', 'right', 'domain'] place the origin in the upper right hand
+             corner of domain space.  If x or y are not given, a value is inferred.
+             For instance, 'left-domain' corresponds to the lower-left hand corner
+             of the simulation domain, 'center-domain' corresponds to the center of
+             the simulation domain, and 'center-window' to the center of the plot
+             window. Further examples:
+
+             ==================================     ============================
+             format                                 example
+             ==================================     ============================ 
+             '{space}'                              'domain'
+             '{xloc}-{space}'                       'left-window'
+             '{yloc}-{space}'                       'upper-domain'
+             '{yloc}-{xloc}-{space}'                'lower-right-window'
+             ('{space}',)                           ('window',)
+             ('{xloc}', '{space}')                  ('right', 'domain')
+             ('{yloc}', '{space}')                  ('lower', 'window')
+             ('{yloc}', '{xloc}', '{space}')        ('lower', 'right', 'window')
+             ==================================     ============================
+             
         weight_field : string
              The name of the weighting field.  Set to None for no weight.
         max_level: int
              The maximum level to project to.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
-        
+        field_parameters : dictionary
+             A dictionary of field parameters that can be accessed by derived fields.
+
         Examples
         --------
         
@@ -1144,7 +1236,9 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
         if axes_unit is None  and units != ('1', '1'):
             axes_unit = units
-        proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
+        if field_parameters is None: field_parameters = {}
+        proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,
+                         center=center, **field_parameters)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
 
@@ -1153,7 +1247,8 @@
     _frb_generator = ObliqueFixedResolutionBuffer
 
     def __init__(self, pf, normal, fields, center='c', width=None, 
-                 axes_unit=None, north_vector=None, fontsize=15):
+                 axes_unit=None, north_vector=None, fontsize=15,
+                 field_parameters=None):
         r"""Creates an off axis slice plot from a parameter file
 
         Given a pf object, a normal vector defining a slicing plane, and
@@ -1192,11 +1287,14 @@
             set, an arbitrary grid-aligned north-vector is chosen.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
+        field_parameters : dictionary
+             A dictionary of field parameters that can be accessed by derived fields.
         """
         (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
+        if field_parameters is None: field_parameters = {}
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector, **field_parameters)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)

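Taken together, the plot_window changes let origin be given as a string or a tuple and let field parameters flow through to the underlying data object. A hedged usage sketch; the dataset path is a placeholder and bulk_velocity is just one example of a field parameter:

    from yt.mods import *

    pf = load("MyDataset/data0001")    # placeholder dataset

    # New-style origin: (y-location, x-location, coordinate space).
    slc = SlicePlot(pf, 'z', 'Density', origin=('lower', 'right', 'window'))

    # Field parameters are forwarded to the slice for use by derived fields.
    slc = SlicePlot(pf, 'z', 'Density',
                    field_parameters={'bulk_velocity': [1e5, 0.0, 0.0]})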
diff -r 4190004c06a0ff83bd5805438875ccbb0c5208fe -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -118,7 +118,7 @@
             lead to large speed improvements, but at a loss of
             accuracy/smoothness in resulting image.  The effects are
             less notable when the transfer function is smooth and
-            broad. Default: False
+            broad. Default: True
         tree_type: string, optional
             Specifies the type of kd-Tree to be constructed/cast.
             There are three options, the default being 'domain'. Only


https://bitbucket.org/yt_analysis/yt-3.0/commits/1ef8ad7f9081/
Changeset:   1ef8ad7f9081
Branch:      yt
User:        jzuhone
Date:        2013-02-10 07:02:47
Summary:     Merged yt_analysis/yt into yt
Affected #:  51 files

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,6 +5152,7 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
-0000000000000000000000000000000000000000 hop callback

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -13,7 +13,7 @@
  * Do not use nested classes unless you have a very good reason to, such as
    requiring a namespace or class-definition modification.  Classes should live
    at the top level.  __metaclass__ is exempt from this.
- * Do not use unecessary parenthesis in conditionals.  if((something) and
+ * Do not use unnecessary parenthesis in conditionals.  if((something) and
    (something_else)) should be rewritten as if something and something_else.
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
@@ -50,7 +50,7 @@
    replace the old class.  Too many options makes for a confusing user
    experience.
  * Parameter files are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannoted
+ * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -8,9 +8,7 @@
 # that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
 # installation you can set HDF5_DIR, or if you want to use some other
 # subversion checkout of YT, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up.
-#
-# NOTE: If you have trouble with wxPython, set INST_WXPYTHON=0 .
+# check the current directory and one up).
 #
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
@@ -19,11 +17,16 @@
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
 
+if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+then
+    DEST_DIR=${YT_DEST}
+fi
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
@@ -44,6 +47,7 @@
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
+INST_SCIPY=0    # Install scipy?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -155,18 +159,6 @@
         echo "   $ module swap PE-pgi PE-gnu"
         echo
     fi
-    if [ "${MYHOSTLONG%%ranger}" != "${MYHOSTLONG}" ]
-    then
-        echo "Looks like you're on Ranger."
-        echo
-        echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
-        echo "These commands should take care of that for you:"
-        echo
-        echo "   $ module unload mvapich2"
-        echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich2"
-        echo
-    fi
     if [ "${MYHOST##steele}" != "${MYHOST}" ]
     then
         echo "Looks like you're on Steele."
@@ -184,24 +176,53 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
-        echo "website"
+	echo "The instructions for obtaining these tools varies according"
+	echo "to your exact OS version.  On older versions of OS X, you"
+	echo "must register for an account on the apple developer tools"
+	echo "website: https://developer.apple.com/downloads to obtain the"
+	echo "download link."
+	echo 
+	echo "We have gathered some additional instructions for each"
+	echo "version of OS X below. If you have trouble installing yt"
+	echo "after following these instructions, don't hesitate to contact"
+	echo "the yt user's e-mail list."
+	echo
+	echo "You can see which version of OSX you are running by clicking"
+	echo "'About This Mac' in the apple menu on the left hand side of"
+	echo "menu bar.  We're assuming that you've installed all operating"
+	echo "system updates; if you have an older version, we suggest"
+	echo "running software update and installing all available updates."
+	echo 
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo "Apple developer tools website."
         echo
-        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
-        echo "website"
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+	echo "developer tools website.  You can either download the"
+	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+	echo "Software Update to update to XCode 3.2.6 or" 
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "bundle (4.1 GB)."
         echo
-        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
-        echo "alternatively download the Xcode command line tools from"
-        echo "the mac developer tools website"
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+	echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
         echo
-        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
-        echo "Leopard) or newer.  If you do, please set the following"
-        echo "environment variables, remove any broken installation tree, and"
-        echo "re-run this script verbatim."
+	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "(search for Xcode)."
+	echo "Additionally, you will have to manually install the Xcode"
+	echo "command line tools, see:" 
+	echo "http://stackoverflow.com/questions/9353444"
+	echo "Alternatively, download the Xcode command line tools from"
+	echo "the Apple developer tools website."
+	echo
+        echo "NOTE: It's possible that the installation will fail, if so," 
+	echo "please set the following environment variables, remove any" 
+	echo "broken installation tree, and re-run this script verbatim."
         echo
         echo "$ export CC=gcc-4.2"
         echo "$ export CXX=g++-4.2"
-        echo
+	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
@@ -240,6 +261,20 @@
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
+    if [ $INST_SCIPY -eq 1 ]
+    then
+	echo
+	echo "Looks like you've requested that the install script build SciPy."
+	echo
+	echo "If the SciPy build fails, please uncomment one of the lines"
+	echo "at the top of the install script that sets NUMPY_ARGS, delete"
+	echo "any broken installation tree, and re-run the install script"
+	echo "verbatim."
+	echo
+	echo "If that doesn't work, don't hesitate to ask for help on the yt"
+	echo "user's mailing list."
+	echo
+    fi
     if [ ! -z "${CFLAGS}" ]
     then
         echo "******************************************"
@@ -298,6 +333,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
+get_willwont ${INST_SCIPY}
+echo "be installing scipy"
+
 printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
@@ -450,6 +489,9 @@
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -461,6 +503,9 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.2.0.tar.gz
@@ -656,7 +701,40 @@
 echo "Installing pip"
 ( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
-do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+if [ $INST_SCIPY -eq 0 ]
+then
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+else
+    if [ ! -e scipy-0.11.0/done ]
+    then
+	if [ ! -e BLAS/done ]
+	then
+	    tar xfz blas.tar.gz
+	    echo "Building BLAS"
+	    cd BLAS
+	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
+	    ar r libfblas.a *.o 1>> ${LOG_FILE}
+	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    rm -rf *.o
+	    touch done
+	    cd ..
+	fi
+	if [ ! -e lapack-3.4.2/done ]
+	then
+	    tar xfz lapack-3.4.2.tar.gz
+	    echo "Building LAPACK"
+	    cd lapack-3.4.2/
+	    cp INSTALL/make.inc.gfortran make.inc
+	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    touch done
+	    cd ..
+	fi
+    fi
+    export BLAS=$PWD/BLAS/libfblas.a
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
 then
@@ -846,3 +924,6 @@
 
 print_afterword
 print_afterword >> ${LOG_FILE}
+
+echo "yt dependencies were last updated on" > ${DEST_DIR}/.yt_update
+date >> ${DEST_DIR}/.yt_update

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -188,6 +188,8 @@
         rh = RockstarHaloFinder(ts)
         rh.run()
         """
+        mylog.warning("The citation for the Rockstar halo finder can be found at")
+        mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3782,6 +3782,11 @@
                     continue
                 except NeedsOriginalGrid, ngt_exception:
                     pass
+            elif self.pf.field_info[field].particle_type:
+                region = self.pf.h.region(self.center,
+                            self.left_edge, self.right_edge)
+                self.field_data[field] = region[field]
+                continue
             obtain_fields.append(field)
             self[field] = np.zeros(self.ActiveDimensions, dtype='float64') -999
         if len(obtain_fields) == 0: return
@@ -3922,6 +3927,11 @@
                     continue
                 except NeedsOriginalGrid, ngt_exception:
                     pass
+            elif self.pf.field_info[field].particle_type:
+                region = self.pf.h.region(self.center,
+                            self.left_edge, self.right_edge)
+                self.field_data[field] = region[field]
+                continue
             fields_to_get.append(field)
         if len(fields_to_get) == 0: return
         # Note that, thanks to some trickery, we have different dimensions
@@ -4371,6 +4381,16 @@
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
 
+    @property
+    def triangles(self):
+        if self.vertices is None:
+            self.get_data()
+        vv = np.empty((self.vertices.shape[1]/3, 3, 3), dtype="float64")
+        for i in range(3):
+            for j in range(3):
+                vv[:,i,j] = self.vertices[j,i::3]
+        return vv
+
     def export_ply(self, filename, bounds = None, color_field = None,
                    color_map = "algae", color_log = True, sample_type = "face"):
         r"""This exports the surface to the PLY format, suitable for visualization

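The new triangles property unpacks the flat (3, 3*Ntri) vertex storage, where vertices[j, i::3] holds coordinate j of vertex i of each triangle, into an (Ntri, 3, 3) array indexed as (triangle, vertex, coordinate). A standalone check of that reshaping, assuming the same layout:

    import numpy as np

    n_tri = 2
    vertices = np.arange(3 * 3 * n_tri, dtype="float64").reshape(3, 3 * n_tri)

    vv = np.empty((vertices.shape[1] // 3, 3, 3), dtype="float64")
    for i in range(3):
        for j in range(3):
            vv[:, i, j] = vertices[j, i::3]

    print(vv.shape)   # (2, 3, 3): triangle, vertex, xyz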
diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -226,6 +226,7 @@
             pf.domain_left_edge = np.zeros(3, 'float64')
             pf.domain_right_edge = np.ones(3, 'float64')
             pf.dimensionality = 3
+            pf.periodicity = (True, True, True)
         self.pf = pf
 
         class fake_hierarchy(object):

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -7,7 +7,7 @@
 
 _fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
 
-def test_covering_grid():
+def test_streamlines():
     # We decompose in different ways
     cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
     cs = np.array([a.ravel() for a in cs]).T

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -66,7 +66,8 @@
     get_cyl_theta_component, \
     get_cyl_r, get_cyl_theta, \
     get_cyl_z, get_sph_r, \
-    get_sph_theta, get_sph_phi
+    get_sph_theta, get_sph_phi, \
+    periodic_dist, euclidean_dist
      
 # Note that, despite my newfound efforts to comply with PEP-8,
 # I violate it here in order to keep the name/func_name relationship
@@ -783,25 +784,23 @@
          units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
          validators=[ValidateParameter('center')])
 
+def get_radius(positions, data):
+    c = data.get_field_parameter("center")
+    n_tup = tuple([1 for i in range(positions.ndim-1)])
+    center = np.tile(np.reshape(c, (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
+    periodicity = data.pf.periodicity
+    if any(periodicity):
+        period = data.pf.domain_right_edge - data.pf.domain_left_edge
+        return periodic_dist(positions, center, period, periodicity)
+    else:
+        return euclidean_dist(positions, center)
+def _ParticleRadius(field, data):
+    positions = np.array([data["particle_position_%s" % ax] for ax in 'xyz'])
+    return get_radius(positions, data)
+def _Radius(field, data):
+    positions = np.array([data['x'], data['y'], data['z']])
+    return get_radius(positions, data)
 
-def _ParticleRadius(field, data):
-    center = data.get_field_parameter("center")
-    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = np.zeros(data["particle_position_x"].shape, dtype='float64')
-    for i, ax in enumerate('xyz'):
-        r = np.abs(data["particle_position_%s" % ax] - center[i])
-        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
-    np.sqrt(radius, radius)
-    return radius
-def _Radius(field, data):
-    center = data.get_field_parameter("center")
-    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
-    radius = np.zeros(data["x"].shape, dtype='float64')
-    for i, ax in enumerate('xyz'):
-        r = np.abs(data[ax] - center[i])
-        radius += np.minimum(r, np.abs(DW[i]-r))**2.0
-    np.sqrt(radius, radius)
-    return radius
 def _ConvertRadiusCGS(data):
     return data.convert("cm")
 add_field("ParticleRadius", function=_ParticleRadius,
@@ -973,12 +972,12 @@
     return data.convert("Density")
 def _pdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return blank
+    if data["particle_position_x"].size == 0: return blank
     CICDeposit_3(data["particle_position_x"].astype(np.float64),
                  data["particle_position_y"].astype(np.float64),
                  data["particle_position_z"].astype(np.float64),
                  data["particle_mass"].astype(np.float32),
-                 np.int64(data.NumberOfParticles),
+                 data["particle_position_x"].size,
                  blank, np.array(data.LeftEdge).astype(np.float64),
                  np.array(data.ActiveDimensions).astype(np.int32),
                  np.float64(data['dx']))

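The refactored get_radius dispatches on pf.periodicity: with any periodic axis it uses the minimum-image periodic distance, otherwise a plain Euclidean distance, replacing the duplicated loops in the old _ParticleRadius/_Radius. A small numeric check of the periodic case, mirroring what periodic_dist computes for simple inputs (a sketch, not yt's implementation):

    import numpy as np

    def min_image_dist(a, b, period):
        # Per-axis minimum of the direct and wrapped separations.
        d = np.abs(a - b)
        return np.sqrt(np.sum(np.minimum(d, period - d) ** 2))

    a = np.array([0.05, 0.5, 0.5])
    b = np.array([0.95, 0.5, 0.5])
    period = np.array([1.0, 1.0, 1.0])
    print(min_image_dist(a, b, period))   # 0.1, not the naive 0.9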
diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -137,6 +137,7 @@
         #   self.domain_right_edge      <= array of float64
         #   self.dimensionality         <= int
         #   self.domain_dimensions      <= array of int64
+        #   self.periodicity            <= three-element tuple of booleans
         #   self.current_time           <= simulation time in code units
         #
         # We also set up cosmological information.  Set these to zero if

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -610,6 +610,7 @@
         """
         self.dimensionality = 3
         self.refine_by = 2
+        self.periodicity = (True, True, True)
         self.cosmological_simulation = True
         self.parameters = {}
         self.unique_identifier = \

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -402,6 +402,10 @@
         self.num_ghost_zones = 0
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
+        if 'periodicity' in self.specified_parameters:
+            self.periodicity = ensure_tuple(self.specified_parameters['periodicity'])
+        else:
+            self.periodicity = (True,)*self.dimensionality
 
         dname = self.parameter_filename
         gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -621,6 +621,7 @@
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
+        self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.refine_by = self.parameters.get("RefineBy", 2)
 

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -819,6 +819,7 @@
         for k, v in data_label_factors.items():
             self.conversion_factors[data_labels[k]] = v
         self.refine_by = self.parameters["RefineBy"]
+        self.periodicity = ensure_tuple(self.parameters["LeftFaceBoundaryCondition"] == 3)
         self.dimensionality = self.parameters["TopGridRank"]
         if self.dimensionality > 1:
             self.domain_dimensions = self.parameters["TopGridDimensions"]
@@ -826,6 +827,7 @@
                 tmp = self.domain_dimensions.tolist()
                 tmp.append(1)
                 self.domain_dimensions = np.array(tmp)
+                self.periodicity += (False,)
             self.domain_left_edge = np.array(self.parameters["DomainLeftEdge"],
                                              "float64").copy()
             self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
@@ -836,6 +838,7 @@
             self.domain_right_edge = np.array(self.parameters["DomainRightEdge"],
                                              "float64")
             self.domain_dimensions = np.array([self.parameters["TopGridDimensions"],1,1])
+            self.periodicity += (False, False)
 
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -346,7 +346,7 @@
 
 def _spdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return blank
+    if data["particle_position_x"].size == 0: return blank
     filter = data['creation_time'] > 0.0
     if not filter.any(): return blank
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
@@ -363,18 +363,19 @@
 
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return blank
+    if data["particle_position_x"].size == 0: return blank
     if 'creation_time' in data.pf.field_info:
         filter = data['creation_time'] <= 0.0
         if not filter.any(): return blank
+        num = filter.sum()
     else:
-        filter = np.ones(data.NumberOfParticles, dtype='bool')
-    if not filter.any(): return blank
+        filter = None
+        num = data["particle_position_x"].size
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
                            data["particle_mass"][filter].astype(np.float32),
-                           np.int64(np.where(filter)[0].size),
+                           num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
@@ -389,13 +390,13 @@
     """
     particle_field = field.name[4:]
     top = np.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return top
+    if data["particle_position_x"].size == 0: return top
     particle_field_data = data[particle_field] * data['particle_mass']
     amr_utils.CICDeposit_3(data["particle_position_x"].astype(np.float64),
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
                            particle_field_data.astype(np.float32),
-                           np.int64(data.NumberOfParticles),
+                           data["particle_position_x"].size,
                            top, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
@@ -406,7 +407,7 @@
                            data["particle_position_y"].astype(np.float64),
                            data["particle_position_z"].astype(np.float64),
                            data["particle_mass"].astype(np.float32),
-                           np.int64(data.NumberOfParticles),
+                           data["particle_position_x"].size,
                            bottom, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
@@ -428,7 +429,7 @@
     """
     particle_field = field.name[5:]
     top = np.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return top
+    if data["particle_position_x"].size == 0: return top
     filter = data['creation_time'] > 0.0
     if not filter.any(): return top
     particle_field_data = data[particle_field][filter] * data['particle_mass'][filter]

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -450,6 +450,10 @@
         # Get the simulation time
         self.current_time = self.parameters["time"]
 
+        # Determine if this is a periodic box
+        p = [self.parameters.get("%sl_boundary_type" % ax, None) == "periodic" for ax in 'xyz']
+        self.periodicity = tuple(p)
+
         # Determine cosmological parameters.
         try: 
             self.parameters["usecosmology"]

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -229,6 +229,8 @@
         self.num_ghost_zones = sp["num_ghost_zones"]
         self.field_ordering = sp["field_ordering"]
         self.boundary_conditions = sp["boundary_conditions"][:]
+        p = [bnd == 0 for bnd in self.boundary_conditions[::2]]
+        self.periodicity = ensure_tuple(p)
         if self.cosmological_simulation:
             self.current_redshift = sp["current_redshift"]
             self.omega_lambda = sp["omega_lambda"]

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -605,6 +605,8 @@
                 self.domain_left_edge = np.array([float(i) for i in vals])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals[0])
+            elif param.startswith("nyx.lo_bc"):
+                self.periodicity = ensure_tuple([i == 0 for i in vals])
 
         # aliases we need
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -549,6 +549,8 @@
             elif param.startswith("geometry.prob_lo"):
                 self.domain_left_edge = \
                     np.array([float(i) for i in vals.split()])
+            elif param.startswith("Prob.lo_bc"):
+                self.periodicity = ensure_tuple([i == 0 for i in vals])
 
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -354,6 +354,7 @@
         # distinguish them.
         mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")
         self.cosmological_simulation = 1
+        self.periodicity = (True, True, True)
         self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
         self.omega_lambda = rheader["omega_l"]
         self.omega_matter = rheader["omega_m"]

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,8 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None, particle_types = None):
+                 fields, io = None, particle_types = None, 
+                 periodicity = (True, True, True)):
         if particle_types is None: particle_types = {}
         self.left_edges = left_edges
         self.right_edges = right_edges
@@ -118,6 +119,7 @@
         self.fields = fields
         self.io = io
         self.particle_types = particle_types
+        self.periodicity = periodicity
             
     def get_fields(self):
         return self.fields.all_fields
@@ -313,6 +315,7 @@
         self.domain_right_edge = self.stream_handler.domain_right_edge[:]
         self.refine_by = self.stream_handler.refine_by
         self.dimensionality = self.stream_handler.dimensionality
+        self.periodicity = self.stream_handler.periodicity
         self.domain_dimensions = self.stream_handler.domain_dimensions
         self.current_time = self.stream_handler.simulation_time
         if self.stream_handler.cosmology_simulation:
@@ -398,7 +401,7 @@
     pf.h.update_data(grid_pdata)
                                         
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0):
+                      nprocs=1, sim_time=0.0, periodicity=(True, True, True)):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -427,6 +430,9 @@
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
 
     Examples
     --------
@@ -492,7 +498,8 @@
         np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
-        particle_types=particle_types
+        particle_types=particle_types,
+        periodicity=periodicity
     )
 
     handler.name = "UniformGridData"

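A short usage sketch of the new periodicity keyword (in-memory data, so self-contained apart from the yt import):

    import numpy as np
    from yt.mods import load_uniform_grid

    data = {'Density': np.ones([32, 32, 32])}
    # Periodic in x and y but not z, e.g. a slab geometry.
    pf = load_uniform_grid(data, [32, 32, 32], 1.0,
                           periodicity=(True, True, False))
    print(pf.periodicity)    # (True, True, False)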
diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -74,6 +74,19 @@
     else:
         return np.asarray([obj])
 
+def ensure_tuple(obj):
+    """
+    This function ensures that *obj* is a tuple.  Typically used to convert
+    scalar, list, or array arguments specified by a user in a context where
+    we assume a tuple internally.
+    """
+    if isinstance(obj, types.TupleType):
+        return obj
+    elif isinstance(obj, (types.ListType, np.ndarray)):
+        return tuple(obj)
+    else:
+        return (obj,)
+
 def read_struct(f, fmt):
     """
     This reads a struct, and only that struct, from an open file.

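Several hunks above apply ensure_tuple to boolean arrays produced by comparisons (e.g. the Enzo and Nyx boundary conditions). Its behavior, restated with builtin type checks so the demo is self-contained (the committed version uses the Python 2 types module):

    import numpy as np

    def ensure_tuple(obj):
        # Same logic as the yt.funcs version above.
        if isinstance(obj, tuple):
            return obj
        elif isinstance(obj, (list, np.ndarray)):
            return tuple(obj)
        else:
            return (obj,)

    print(ensure_tuple(True))                    # (True,)
    print(ensure_tuple([False, True, True]))     # (False, True, True)
    print(ensure_tuple(np.array([3, 3]) == 3))   # (True, True)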
diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -22,6 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import itertools as it
 import numpy as np
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
@@ -163,3 +164,100 @@
                  for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
+
+def expand_keywords(keywords, full=False):
+    """
+    expand_keywords is a means for testing all possible keyword
+    arguments in the nosetests.  Simply pass it a dictionary of all the
+    keyword arguments and all of the values for these arguments that you
+    want to test.
+
+    It will return a list of **kwargs dicts containing combinations of
+    the various kwarg values you passed it.  These can then be passed
+    to the appropriate function in nosetests. 
+
+    If full=True, then every possible combination of keywords is produced,
+    otherwise, every keyword option is included at least once in the output
+    list.  Be careful: by using full=True, you may be in for an exponentially
+    larger number of tests! 
+
+    keywords : dict
+        a dictionary where the keys are the keywords for the function,
+        and the values of each key are the possible values that this key
+        can take in the function
+
+    full : bool
+        if set to True, every possible combination of given keywords is 
+        returned
+
+    Returns
+    -------
+    array of dicts
+        An array of **kwargs dictionaries to be individually passed to
+        the appropriate function matching these kwargs.
+
+    Examples
+    --------
+    >>> keywords = {}
+    >>> keywords['dpi'] = (50, 100, 200)
+    >>> keywords['cmap'] = ('algae', 'jet')
+    >>> list_of_kwargs = expand_keywords(keywords)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'jet', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}], dtype=object)
+
+    >>> list_of_kwargs = expand_keywords(keywords, full=True)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'algae', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}, 
+           {'cmap': 'jet', 'dpi': 50},
+           {'cmap': 'jet', 'dpi': 100}, 
+           {'cmap': 'jet', 'dpi': 200}], dtype=object)
+
+    >>> for kwargs in list_of_kwargs:
+    ...     write_projection(*args, **kwargs)
+    """
+
+    # if we want every possible combination of keywords, use iter magic
+    if full:
+        keys = sorted(keywords)
+        list_of_kwarg_dicts = np.array([dict(zip(keys, prod)) for prod in \
+                              it.product(*(keywords[key] for key in keys))])
+            
+    # if we just want to probe each keyword, but not necessarily every 
+    # combination
+    else:
+        # Determine the maximum number of values any of the keywords has
+        num_lists = 0
+        for val in keywords.values():
+            if isinstance(val, str):
+                num_lists = max(1.0, num_lists)
+            else:
+                num_lists = max(len(val), num_lists)
+    
+        # Construct array of kwargs dicts, each element of the list is a different
+        # **kwargs dict.  each kwargs dict gives a different combination of
+        # the possible values of the kwargs
+    
+        # initialize array
+        list_of_kwarg_dicts = np.array([dict() for x in range(num_lists)])
+    
+        # fill in array
+        for i in np.arange(num_lists):
+            list_of_kwarg_dicts[i] = {}
+            for key in keywords.keys():
+                # if it's a string, use it (there's only one)
+                if isinstance(keywords[key], str):
+                    list_of_kwarg_dicts[i][key] = keywords[key]
+                # if there are more options, use the i'th val
+                elif i < len(keywords[key]):
+                    list_of_kwarg_dicts[i][key] = keywords[key][i]
+                # if there are not more options, use the 0'th val
+                else:
+                    list_of_kwarg_dicts[i][key] = keywords[key][0]
+
+    return list_of_kwarg_dicts

diff -r f3b4fcb6ba474caf05d24ba1cbfdcd97d45aa499 -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 yt/utilities/amr_kdtree/amr_kdtools.py
--- /dev/null
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -0,0 +1,402 @@
+"""
+AMR kD-Tree Tools 
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import numpy as np
+from yt.funcs import *
+from yt.utilities.lib import kdtree_get_choices
+
+def _lchild_id(node_id): return (node_id<<1)
+def _rchild_id(node_id): return (node_id<<1) + 1
+def _parent_id(node_id): return node_id >> 1  # inverse of both child ids above
+
+class Node(object):
+    def __init__(self, parent, left, right,
+            left_edge, right_edge, grid_id, node_id):
+        self.left = left
+        self.right = right
+        self.left_edge = left_edge
+        self.right_edge = right_edge
+        self.grid = grid_id
+        self.parent = parent
+        self.id = node_id
+        self.data = None
+        self.split = None
+
+class Split(object):
+    def __init__(self, dim, pos):
+        self.dim = dim
+        self.pos = pos
+
+def should_i_build(node, rank, size):
+    # Nodes above the rank level (id < size) are built by every rank; each
+    # id in [size, 2*size) is owned by exactly one rank (id - size); deeper
+    # nodes are only ever reached through their owning rank's subtree, so
+    # they can always be built.
+    if (node.id < size) or (node.id >= 2*size):
+        return True
+    elif node.id - size == rank:
+        return True
+    else:
+        return False
+
+def add_grids(node, gles, gres, gids, rank, size):
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, gles, gres, gids, rank, size)
+    else:
+        less_ids = gles[:,node.split.dim] < node.split.pos
+        if np.any(less_ids):
+            add_grids(node.left, gles[less_ids], gres[less_ids],
+                      gids[less_ids], rank, size)
+
+        greater_ids = gres[:,node.split.dim] > node.split.pos
+        if np.any(greater_ids):
+            add_grids(node.right, gles[greater_ids], gres[greater_ids],
+                      gids[greater_ids], rank, size)
+
+def should_i_split(node, rank, size):
+    return node.id < size
+
+def geo_split(node, gles, gres, grid_ids, rank, size):
+    big_dim = np.argmax(gres[0]-gles[0])
+    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+    old_gre = gres[0].copy()
+    new_gle = gles[0].copy()
+    new_gle[big_dim] = new_pos
+    gres[0][big_dim] = new_pos
+    gles = np.append(gles, np.array([new_gle]), axis=0)
+    gres = np.append(gres, np.array([old_gre]), axis=0)
+    grid_ids = np.append(grid_ids, grid_ids, axis=0)
+
+    split = Split(big_dim, new_pos)
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[:1], gres[:1],
+            grid_ids[:1], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[1:], gres[1:],
+            grid_ids[1:], rank, size)
+    return
+
+def insert_grids(node, gles, gres, grid_ids, rank, size):
+    if not should_i_build(node, rank, size) or grid_ids.size == 0:
+        return
+
+    if len(grid_ids) == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles, gres, grid_ids, rank, size)
+            return
+
+        if np.all(gles[0] <= node.left_edge) and \
+                np.all(gres[0] >= node.right_edge):
+            node.grid = grid_ids[0]
+            assert(node.grid is not None)
+            return
+
+    # Split the grids
+    check = split_grids(node, gles, gres, grid_ids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = None
+    return
+
+def split_grids(node, gles, gres, grid_ids, rank, size):
+    # Find a Split
+    data = np.array([(gles[i,:], gres[i,:]) for i in
+        xrange(grid_ids.shape[0])], copy=False)
+    best_dim, split_pos, less_ids, greater_ids = \
+        kdtree_get_choices(data, node.left_edge, node.right_edge)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        return -1
+
+    split = Split(best_dim, split_pos)
+
+    del data, best_dim, split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[less_ids], gres[less_ids],
+                 grid_ids[less_ids], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
+                 grid_ids[greater_ids], rank, size)
+
+    return
+
+def new_right(node, split):
+    new_right = node.right_edge.copy()
+    new_right[split.dim] = split.pos
+    return new_right
+
+def new_left(node, split):
+    new_left = node.left_edge.copy()
+    new_left[split.dim] = split.pos
+    return new_left
+
+def divide(node, split):
+    # Create a Split
+    node.split = split
+    node.left = Node(node, None, None,
+            node.left_edge, new_right(node, split), node.grid,
+                     _lchild_id(node.id))
+    node.right = Node(node, None, None,
+            new_left(node, split), node.right_edge, node.grid,
+                      _rchild_id(node.id))
+    return
+
+def kd_sum_volume(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_sum_cells(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        # recurse with kd_sum_cells, not kd_sum_volume
+        return kd_sum_cells(node.left) + kd_sum_cells(node.right)
+
+
+def kd_node_check(node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid is not None:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(node):
+    has_l_child = node.left is None
+    has_r_child = node.right is None
+    assert has_l_child == has_r_child
+    return has_l_child
+
+def step_depth(current, previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+
+def depth_traverse(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree, yielding each node
+    only the first time it is touched (not again on the way back up).
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(tree):
+    '''
+    Yields a traversal of the kd tree always going to the left child
+    before the right.  Note that this reuses step_depth, so the order
+    is depth-first despite the name.
+    '''
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(current, previous, viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+
+def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+    arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+        (image.shape[0], image.shape[1], image.shape[2]))
+
+    if add_to_front:
+        front = arr2
+        back = image
+    else:
+        front = image
+        back = arr2
+
+    if image.shape[2] == 3:
+        # Assume Projection Camera: just sum the two halves.  Add back and
+        # front (one of the two aliases image) so the result is image + arr2
+        # regardless of add_to_front.
+        np.add(back, front, image)
+        return image
+
+    ta = 1.0 - front[:,:,3]
+    np.maximum(ta, 0.0, ta)
+    # This now does the following calculation, but in a memory
+    # conservative fashion
+    # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
+    image = back.copy()
+    for i in range(4):
+        np.multiply(image[:,:,i], ta, image[:,:,i])
+    np.add(image, front, image)
+    return image
+
+def send_to_parent(comm, outgoing_rank, image):
+    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    comm.send_array(image, outgoing_rank, tag=comm.rank)
+
+def scatter_image(comm, root, image):
+    mylog.debug( 'Scattering from %04i' % root)
+    image = comm.mpi_bcast(image, root=root)
+    return image
+
+def find_node(node, pos):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(np.all(node.left_edge <= pos))
+    assert(np.all(node.right_edge > pos))
+    while not kd_is_leaf(node):
+        if pos[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/9ca5bb56ea08/
Changeset:   9ca5bb56ea08
Branch:      yt
User:        jzuhone
Date:        2013-02-12 18:00:29
Summary:     Simple FLASH answer tests. This is just to get the answer testing framework for FLASH data going, more will be added later.
Affected #:  1 file

diff -r 1ef8ad7f9081b46d7cc2a3abbf28510196c94314 -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 yt/frontends/flash/tests/test_outputs.py
--- /dev/null
+++ b/yt/frontends/flash/tests/test_outputs.py
@@ -0,0 +1,58 @@
+"""
+FLASH frontend tests
+
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 John ZuHone.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.flash.api import FLASHStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+sloshing = "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0300"
+@requires_pf(sloshing)
+def test_sloshing():
+    pf = data_dir_load(sloshing)
+    yield assert_equal, str(pf), "sloshing_low_res_hdf5_plt_cnt_0300"
+    for test in small_patch_amr(sloshing, _fields):
+        yield test
+
+wt = "WindTunnel/windtunnel_4lev_hdf5_plt_cnt_0030"
+@requires_pf(wt)
+def test_wind_tunnel():
+    pf = data_dir_load(wt)
+    yield assert_equal, str(pf), "windtunnel_4lev_hdf5_plt_cnt_0030"
+    for test in small_patch_amr(wt, _fields):
+        yield test
+
+gcm = "GalaxyClusterMerger/fiducial_1to10_b0.273d_hdf5_plt_cnt_0245.gz"
+@requires_pf(gcm, big_data=True)
+def test_galaxy_cluster_merger():
+    pf = data_dir_load(gcm)
+    for test in big_patch_amr(gcm, _fields):
+        yield test
+
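
These tests rely on nose's generator-test support: each yield hands nose one
independent test, and requires_pf can disable an entire test function at
decoration time when the named dataset is not available locally.  A
simplified sketch of that gating idea; the environment variable names below
are illustrative, and the real logic lives in
yt.utilities.answer_testing.framework:

    import os

    def requires_pf_sketch(pf_name, big_data=False):
        # Decide at decoration time whether the test can run; if not, hand
        # back a do-nothing function so the collector finds nothing to run.
        data_dir = os.environ.get("YT_TEST_DATA_DIR", ".")      # assumed name
        run_big = os.environ.get("YT_ANSWER_BIG_DATA") == "1"   # assumed name
        def ffalse(func):
            return lambda: None
        def ftrue(func):
            return func
        if big_data and not run_big:
            return ffalse
        if not os.path.exists(os.path.join(data_dir, pf_name)):
            return ffalse
        return ftrue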


https://bitbucket.org/yt_analysis/yt-3.0/commits/813b806f980e/
Changeset:   813b806f980e
Branch:      yt
User:        jzuhone
Date:        2013-02-16 04:55:27
Summary:     Merged yt_analysis/yt into yt
Affected #:  62 files

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,29 +1,41 @@
 YT is a group effort.
 
-Contributors:                   Matthew Turk (matthewturk at gmail.com)
-                                Britton Smith (brittonsmith at gmail.com)
-                                Jeff Oishi (jsoishi at gmail.com)
-                                Stephen Skory (s at skory.us)
-                                Sam Skillman (samskillman at gmail.com)
-                                Devin Silvia (devin.silvia at gmail.com)
-                                John Wise (jwise at astro.princeton.edu)
-                                David Collins (dcollins at physics.ucsd.edu)
-                                Christopher Moody (cemoody at ucsc.edu)
-                                Oliver Hahn (ohahn at stanford.edu)
-                                John ZuHone (jzuhone at cfa.harvard.edu)
-                                Chris Malone (cmalone at mail.astro.sunysb.edu)
-                                Cameron Hummels (chummels at astro.columbia.edu)
-                                Stefan Klemer (sklemer at phys.uni-goettingen.de)
-                                Tom Abel (tabel at stanford.edu)
-                                Andrew Myers (atmyers at astro.berkeley.edu)
-                                Michael Kuhlen (mqk at astro.berkeley.edu)
-                                Casey Stark (caseywstark at gmail.com)
-                                JC Passy (jcpassy at gmail.com)
-                                Eve Lee (elee at cita.utoronto.ca)
-                                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-                                Kacper Kowalik (xarthisius.kk at gmail.com)
-                                Nathan Goldbaum (goldbaum at ucolick.org)
-                                Anna Rosen (rosen at ucolick.org)
+Contributors:                   Tom Abel (tabel at stanford.edu)
+				David Collins (dcollins at physics.ucsd.edu)
+				Brian Crosby (crosby.bd at gmail.com)
+				Andrew Cunningham (ajcunn at gmail.com)
+				Nathan Goldbaum (goldbaum at ucolick.org)
+				Markus Haider (markus.haider at uibk.ac.at)
+				Cameron Hummels (chummels at gmail.com)
+				Christian Karch (chiffre at posteo.de)
+				Ji-hoon Kim (me at jihoonkim.org)
+				Steffen Klemer (sklemer at phys.uni-goettingen.de)
+				Kacper Kowalik (xarthisius.kk at gmail.com)
+				Michael Kuhlen (mqk at astro.berkeley.edu)
+				Eve Lee (elee at cita.utoronto.ca)
+				Yuan Li (yuan at astro.columbia.edu)
+				Chris Malone (chris.m.malone at gmail.com)
+				Josh Maloney (joshua.moloney at colorado.edu)
+				Chris Moody (cemoody at ucsc.edu)
+				Andrew Myers (atmyers at astro.berkeley.edu)
+				Jeff Oishi (jsoishi at gmail.com)
+				Jean-Claude Passy (jcpassy at uvic.ca)
+				Mark Richardson (Mark.L.Richardson at asu.edu)
+				Thomas Robitaille (thomas.robitaille at gmail.com)
+				Anna Rosen (rosen at ucolick.org)
+				Anthony Scopatz (scopatz at gmail.com)
+				Devin Silvia (devin.silvia at colorado.edu)
+				Sam Skillman (samskillman at gmail.com)
+				Stephen Skory (s at skory.us)
+				Britton Smith (brittonsmith at gmail.com)
+				Geoffrey So (gsiisg at gmail.com)
+				Casey Stark (caseywstark at gmail.com)
+				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+				Stephanie Tonnesen (stonnes at gmail.com)
+				Matthew Turk (matthewturk at gmail.com)
+				Rich Wagner (rwagner at physics.ucsd.edu)
+				John Wise (jwise at physics.gatech.edu)
+				John ZuHone (jzuhone at gmail.com)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -7,8 +7,8 @@
 # There are a few options, but you only need to set *one* of them.  And
 # that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
 # installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up).
+# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
+# check the current directory and one up).
 #
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
@@ -49,7 +49,7 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
 YT_DIR=""
 
 # If you need to pass anything to matplotlib, do so here.
@@ -230,6 +230,27 @@
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+    then
+        echo "Looks like you're on an OpenSUSE-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * devel_C_C++"
+        echo "  * libopenssl-devel"
+        echo "  * libuuid-devel"
+        echo "  * zip"
+        echo "  * gcc-c++"
+        echo
+        echo "You can accomplish this by executing:"
+        echo
+        echo "$ sudo zypper install -t pattern devel_C_C++"
+        echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+        echo
+        echo "I am also setting special configure arguments to Python to"
+        echo "specify control lib/lib64 issues."
+        PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+    fi
     if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
     then
         echo "Looks like you're on an Ubuntu-compatible machine."
@@ -293,9 +314,9 @@
 echo
 echo "========================================================================"
 echo
-echo "Hi there!  This is the YT installation script.  We're going to download"
+echo "Hi there!  This is the yt installation script.  We're going to download"
 echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
 echo
 echo "Inside the installation script you can set a few variables.  Here's what"
 echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
@@ -476,7 +497,7 @@
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
 echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -509,7 +530,7 @@
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.4.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
 get_ytproject h5py-2.1.0.tar.gz
 get_ytproject Cython-0.17.1.tar.gz
@@ -636,10 +657,10 @@
 
 if [ ! -e Python-2.7.3/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
+    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
     [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
     cd Python-2.7.3
-    ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -654,7 +675,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.4
+    do_setup_py mercurial-2.5.1
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -36,14 +36,20 @@
 speed_of_light_kms = speed_of_light_cgs * km_per_cm
 
 class AbsorptionSpectrum(object):
+    r"""Create an absorption spectrum object.
+
+    Parameters
+    ----------
+
+    lambda_min : float
+       lower wavelength bound in angstroms.
+    lambda_max : float
+       upper wavelength bound in angstroms.
+    n_lambda : int
+       number of wavelength bins.
+    """
+
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        """
-        Create an absorption spectrum object.
-        :param lambda_min (float): lower wavelength bound in angstroms.
-        :param lambda_max (float): upper wavelength bound in angstroms.
-        :param n_lambda (float): number of wavelength bins.
-        """
-
         self.n_lambda = n_lambda
         self.tau_field = None
         self.flux_field = None
@@ -56,16 +62,24 @@
     def add_line(self, label, field_name, wavelength,
                  f_value, gamma, atomic_mass,
                  label_threshold=None):
+        r"""Add an absorption line to the list of lines included in the spectrum.
+
+        Parameters
+        ----------
+        
+        label : string
+           label for the line.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        f_value : float
+           line f-value.
+        gamma : float
+           line gamma value.
+        atomic_mass : float
+           mass of atom in amu.
         """
-        Add an absorption line to the list of lines included in the spectrum.
-        :param label (string): label for the line.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param f_value (float): line f-value.
-        :param gamma (float): line gamme value.
-        :param atomic_mass (float): mass of atom in amu.
-        """
-
         self.line_list.append({'label': label, 'field_name': field_name,
                                'wavelength': wavelength, 'f_value': f_value,
                                'gamma': gamma, 'atomic_mass': atomic_mass,
@@ -75,11 +89,20 @@
                       normalization, index):
         """
         Add a continuum feature that follows a power-law.
-        :param label (string): label for the feature.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param normalization (float): the column density normalization.
-        :param index (float): the power-law index for the wavelength dependence.
+
+        Parameters
+        ----------
+
+        label : string
+           label for the feature.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        normalization : float
+           the column density normalization.
+        index : float
+           the power-law index for the wavelength dependence.
         """
 
         self.continuum_list.append({'label': label, 'field_name': field_name,
@@ -92,14 +115,17 @@
                       use_peculiar_velocity=True):
         """
         Make spectrum from ray data using the line list.
-        :param input_file (string): path to input ray data.
-        :param output_file (string): path for output file.
-               File formats are chosen based on the filename extension.
-                    - .h5: hdf5.
-                    - .fits: fits.
-                    - anything else: ascii.
-        :param use_peculiar_velocity (bool): if True, include line of sight
-        velocity for shifting lines.
+
+        Parameters
+        ----------
+
+        input_file : string
+           path to input ray data.
+        output_file : string
+           path for output file.  File formats are chosen based on the filename extension.
+           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+        use_peculiar_velocity : bool
+           if True, include line of sight velocity for shifting lines.
         """
 
         input_fields = ['dl', 'redshift', 'Temperature']
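
Pieced together from the docstrings converted above, a minimal usage sketch
(the ray file and the column-density field name are placeholders; the
Lyman-alpha numbers are the standard atomic data):

    sp = AbsorptionSpectrum(lambda_min=1150.0, lambda_max=1300.0,
                            n_lambda=10000)
    sp.add_line("Ly a", "HI_NumberDensity", 1215.67,
                f_value=0.4164, gamma=6.265e8, atomic_mass=1.00794)
    sp.make_spectrum("my_ray.h5", output_file="spectrum.h5",
                     use_peculiar_velocity=True)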

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -106,8 +106,9 @@
     RadialColumnDensity
 
 from .spectral_integrator.api import \
-    SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+     add_xray_emissivity_field, \
+     add_xray_luminosity_field, \
+     add_xray_photon_emissivity_field
 
 from .star_analysis.api import \
     StarFormationRate, \

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -49,6 +49,64 @@
      _light_cone_projection
 
 class LightCone(CosmologySplice):
+    """
+    Initialize a LightCone object.
+
+    Parameters
+    ----------
+    parameter_filename : string
+        The simulation parameter file.
+    simulation_type : string
+        The simulation type.
+    near_redshift : float
+        The near (lowest) redshift for the light cone.
+    far_redshift : float
+        The far (highest) redshift for the light cone.
+    observer_redshift : float
+        The redshift of the observer.
+        Default: 0.0.
+    field_of_view_in_arcminutes : float
+        The field of view of the image in units of arcminutes.
+        Default: 600.0.
+    image_resolution_in_arcseconds : float
+        The size of each image pixel in units of arcseconds.
+        Default: 60.0.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the initial
+        and final redshift.  If false, the light cone solution will contain
+        as many entries as possible within the redshift interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive datasets in
+        the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter specifies
+        the fraction of the total box size to be traversed before rerandomizing
+        the projection axis and center.  This was invented to allow light cones
+        with thin slices to sample coherent large scale structure, but in
+        practice does not work so well.  Try setting this parameter to 1 and
+        see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+    set_parameters : dict
+        Dictionary of parameters to attach to pf.parameters.
+        Default: None.
+    output_dir : string
+        The directory in which images and data files will be written.
+        Default: 'LC'.
+    output_prefix : string
+        The prefix of all images and data files.
+        Default: 'LightCone'.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  observer_redshift=0.0,
@@ -59,64 +117,6 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
-        """
-        Initialize a LightCone object.
-
-        Parameters
-        ----------
-        near_redshift : float
-            The near (lowest) redshift for the light cone.
-        far_redshift : float
-            The far (highest) redshift for the light cone.
-        observer_redshift : float
-            The redshift of the observer.
-            Default: 0.0.
-        field_of_view_in_arcminutes : float
-            The field of view of the image in units of arcminutes.
-            Default: 600.0.
-        image_resolution_in_arcseconds : float
-            The size of each image pixel in units of arcseconds.
-            Default: 60.0.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the initial
-            and final redshift.  If false, the light cone solution will contain
-            as many entries as possible within the redshift interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive datasets in
-            the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter specifies
-            the fraction of the total box size to be traversed before rerandomizing
-            the projection axis and center.  This was invented to allow light cones
-            with thin slices to sample coherent large scale structure, but in
-            practice does not work so well.  Try setting this parameter to 1 and
-            see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-        set_parameters : dict
-            Dictionary of parameters to attach to pf.parameters.
-            Default: None.
-        output_dir : string
-            The directory in which images and data files will be written.
-            Default: 'LC'.
-        output_prefix : string
-            The prefix of all images and data files.
-            Default: 'LightCone'.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
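
For orientation, a constructor call matching the signature and defaults
documented above (the parameter file name is a placeholder):

    lc = LightCone("simulation.par", "Enzo", 0.0, 0.1,
                   observer_redshift=0.0,
                   use_minimum_datasets=True,
                   output_dir="LC", output_prefix="LightCone")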

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -40,66 +40,66 @@
     parallel_root_only
 
 class LightRay(CosmologySplice):
+    """
+    Create a LightRay object.  A light ray is much like a light cone,
+    in that it stacks together multiple datasets in order to extend a
+    redshift interval.  Unlike a light cone, which does randomly
+    oriented projections for each dataset, a light ray consists of
+    randomly oriented single rays.  The purpose of these is to create
+    synthetic QSO lines of sight.
+
+    Once the LightRay object is set up, use LightRay.make_light_ray to
+    begin making rays.  Different randomizations can be created with a
+    single object by providing different random seeds to make_light_ray.
+
+    Parameters
+    ----------
+    parameter_filename : string
+        The simulation parameter file.
+    simulation_type : string
+        The simulation type.
+    near_redshift : float
+        The near (lowest) redshift for the light ray.
+    far_redshift : float
+        The far (highest) redshift for the light ray.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the
+        initial and final redshift.  If false, the light ray solution
+        will contain as many entries as possible within the redshift
+        interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive
+        datasets in the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter
+        specifies the fraction of the total box size to be traversed
+        before rerandomizing the projection axis and center.  This
+        was invented to allow light rays with thin slices to sample
+        coherent large scale structure, but in practice does not work
+        so well.  Try setting this parameter to 1 and see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
                  find_outputs=False):
-        """
-        Create a LightRay object.  A light ray is much like a light cone,
-        in that it stacks together multiple datasets in order to extend a
-        redshift interval.  Unlike a light cone, which does randomly
-        oriented projections for each dataset, a light ray consists of
-        randomly oriented single rays.  The purpose of these is to create
-        synthetic QSO lines of sight.
-
-        Once the LightRay object is set up, use LightRay.make_light_ray to
-        begin making rays.  Different randomizations can be created with a
-        single object by providing different random seeds to make_light_ray.
-
-        Parameters
-        ----------
-        parameter_filename : string
-            The simulation parameter file.
-        simulation_type : string
-            The simulation type.
-        near_redshift : float
-            The near (lowest) redshift for the light ray.
-        far_redshift : float
-            The far (highest) redshift for the light ray.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the
-            initial and final redshift.  If false, the light ray solution
-            will contain as many entries as possible within the redshift
-            interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive
-            datasets in the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter
-            specifies the fraction of the total box size to be traversed
-            before rerandomizing the projection axis and center.  This
-            was invented to allow light rays with thin slices to sample
-            coherent large scale structure, but in practice does not work
-            so well.  Try setting this parameter to 1 and see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -270,47 +270,43 @@
         Examples
         --------
 
-        from yt.mods import *
-        from yt.analysis_modules.halo_profiler.api import *
-        from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
-
-        halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
-
-        halo_profiler_actions = []
-        # Add a virial filter.
-        halo_profiler_actions.append({'function': add_halo_filter,
-                                      'args': VirialFilter,
-                                      'kwargs': {'overdensity_field': 'ActualOverdensity',
-                                                 'virial_overdensity': 200,
-                                                 'virial_filters': \
-                                                     [['TotalMassMsun','>=','1e14']],
-                                                 'virial_quantities': \
-                                                     ['TotalMassMsun','RadiusMpc']}})
-        # Make the profiles.
-        halo_profiler_actions.append({'function': make_profiles,
-                                      'args': None,
-                                      'kwargs': {'filename': 'VirializedHalos.out'}})
-
-        halo_list = 'filtered'
-
-        halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                                        halo_profiler_actions=halo_profiler_actions,
-                                        halo_list=halo_list)
-
-        my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
-                          use_minimum_datasets=True,
-                          time_data=False)
-
-        my_ray.make_light_ray(seed=12345,
-                              solution_filename='solution.txt',
-                              data_filename='my_ray.h5',
-                              fields=['Temperature', 'Density'],
-                              get_nearest_halo=True,
-                              nearest_halo_fields=['TotalMassMsun_100',
-                                                   'RadiusMpc_100'],
-                              halo_profiler_parameters=halo_profiler_parameters,
-                              get_los_velocity=True)
-
+        >>> from yt.mods import *
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
+        >>> halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
+        >>> halo_profiler_actions = []
+        >>> # Add a virial filter.
+        >>> halo_profiler_actions.append({'function': add_halo_filter,
+        ...                           'args': VirialFilter,
+        ...                           'kwargs': {'overdensity_field': 'ActualOverdensity',
+        ...                                      'virial_overdensity': 200,
+        ...                                      'virial_filters': [['TotalMassMsun','>=','1e14']],
+        ...                                      'virial_quantities': ['TotalMassMsun','RadiusMpc']}})
+        ...
+        >>> # Make the profiles.
+        >>> halo_profiler_actions.append({'function': make_profiles,
+        ...                           'args': None,
+        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...
+        >>> halo_list = 'filtered'
+        >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
+        ...                             halo_profiler_actions=halo_profiler_actions,
+        ...                             halo_list=halo_list)
+        ...
+        >>> my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
+        ...                use_minimum_datasets=True,
+        ...                time_data=False)
+        ...
+        >>> my_ray.make_light_ray(seed=12345,
+        ...                   solution_filename='solution.txt',
+        ...                   data_filename='my_ray.h5',
+        ...                   fields=['Temperature', 'Density'],
+        ...                   get_nearest_halo=True,
+        ...                   nearest_halo_fields=['TotalMassMsun_100',
+        ...                                        'RadiusMpc_100'],
+        ...                   halo_profiler_parameters=halo_profiler_parameters,
+        ...                   get_los_velocity=True)
+        
         """
 
         if halo_profiler_parameters is None:

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -142,18 +142,30 @@
         if self.CoM is not None:
             return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        if isinstance(self, FOFHalo):
-            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
-        else:
-            c_vec = self.maximum_density_location() - self.pf.domain_center
-        cx = (cx - c_vec[0])
-        cy = (cy - c_vec[1])
-        cz = (cz - c_vec[2])
-        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
-        return (com * pm).sum(axis=1) / pm.sum() + c_vec
+        c = {}
+        c[0] = self["particle_position_x"]
+        c[1] = self["particle_position_y"]
+        c[2] = self["particle_position_z"]
+        c_vec = np.zeros(3)
+        com = []
+        for i in range(3):
+            # A halo likely wraps around a periodic boundary if the distance
+            # between the max and min particle positions is larger than half
+            # the box, so skip the wrapping below when it is not.
+            # Note we might make a change here when periodicity-handling is
+            # fully implemented.
+            if (c[i].max() - c[i].min()) < (self.pf.domain_width[i] / 2.):
+                com.append(c[i])
+                continue
+            # Now we want to flip around only those close to the left boundary.
+            d_left = c[i] - self.pf.domain_left_edge[i]
+            sel = (d_left <= (self.pf.domain_width[i]/2))
+            c[i][sel] += self.pf.domain_width[i]
+            com.append(c[i])
+        com = np.array(com)
+        c = (com * pm).sum(axis=1) / pm.sum()
+        return c%self.pf.domain_width
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -809,7 +821,6 @@
     _radjust = 1.05
 
     def __init__(self, pf, id, size=None, CoM=None,
-
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
         e1_vec=None, tilt=None, supp=None):
@@ -843,6 +854,10 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
+
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -1044,7 +1059,7 @@
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True):
+    def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1059,6 +1074,7 @@
         mylog.info("Parsing outputs")
         self._parse_output()
         mylog.debug("Finished. (%s)", len(self))
+        self.redshift = redshift
 
     def __obtain_particles(self):
         if self.dm_only:
@@ -1242,6 +1258,7 @@
         else:
             f = open(filename, "w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
+        f.write("# REDSHIFT OF OUTPUT = %f\n" % (self.redshift))
 
         if not ellipsoid_data:
             f.write("\t".join(["# Group","Mass","# part","max dens"
@@ -1438,18 +1455,17 @@
         pass
 
 class HOPHaloList(HaloList):
-
+    """
+    Run hop on *data_source* with a given density *threshold*.  If
+    *dm_only* is set, only run it on the dark matter particles, otherwise
+    on all particles.  Returns an iterable collection of *HopGroup* items.
+    """
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
               ["ParticleMassMsun"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
-        """
-        Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
-        """
         self.threshold = threshold
         mylog.info("Initializing HOP")
         HaloList.__init__(self, data_source, dm_only)
@@ -1487,10 +1503,10 @@
     _name = "FOF"
     _halo_class = FOFHalo
 
-    def __init__(self, data_source, link=0.2, dm_only=True):
+    def __init__(self, data_source, link=0.2, dm_only=True, redshift=-1):
         self.link = link
         mylog.info("Initializing FOF")
-        HaloList.__init__(self, data_source, dm_only)
+        HaloList.__init__(self, data_source, dm_only, redshift=redshift)
 
     def _run_finder(self):
         self.tags = \
@@ -1638,6 +1654,11 @@
 
 
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
+    """
+    Run hop on *data_source* with a given density *threshold*.  If
+    *dm_only* is set, only run it on the dark matter particles, otherwise
+    on all particles.  Returns an iterable collection of *HopGroup* items.
+    """
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
@@ -1646,11 +1667,6 @@
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
         tree='F'):
-        """
-        Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
-        """
         ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.num_neighbors = num_neighbors
@@ -1992,6 +2008,10 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
+        # if path denoted in filename, assure path exists
+        if len(filename.split('/')) > 1:
+            mkdir_rec('/'.join(filename.split('/')[:-1]))
+
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
 
@@ -2011,6 +2031,10 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
+        # if path denoted in prefix, assure path exists
+        if len(prefix.split('/')) > 1:
+            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
@@ -2034,6 +2058,10 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
+        # if path denoted in prefix, assure path exists
+        if len(prefix.split('/')) > 1:
+            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2067,94 +2095,98 @@
         --------
         >>> halos.dump("MyHalos")
         """
+        # if path denoted in basename, assure path exists
+        if len(basename.split('/')) > 1:
+            mkdir_rec('/'.join(basename.split('/')[:-1]))
+
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
 
 
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
+    r"""Parallel HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+    particles to higher.
+    3. Geometrically proximate chains are identified and
+    4. merged into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    This is very similar to HOP, but it does not produce precisely the
+    same halos due to unavoidable numerical differences.
+
+    Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
+    Cosmological Data Sets." arXiv (2010) 1001.3411
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = False.
+    resize : bool
+        Turns load-balancing on or off. Default = True.
+    kdtree : string
+        Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
+        faster, but uses more memory. The Cython one (kdtree = 'C') is
+        slower but is more memory efficient.
+        Default = 'F'
+    rearrange : bool
+        Turns on faster nearest neighbor searches at the cost of increased
+        memory usage.
+        This option only applies when using the Fortran tree.
+        Default = True.
+    fancy_padding : bool
+        True calculates padding independently for each face of each
+        subvolume. Default = True.
+    safety : float
+        Due to variances in inter-particle spacing in the volume, the
+        padding may need to be increased above the raw calculation.
+        This number is multiplied to the calculated padding, and values
+        >1 increase the padding. Default = 1.5.
+    premerge : bool
+        True merges chains in two steps (rather than one with False), which
+        can speed up halo finding by 25% or more. However, True can result
+        in small (<<1%) variations in the final halo masses when compared
+        to False. Default = True.
+    sample : float
+        The fraction of the full dataset on which load-balancing is
+        performed. Default = 0.03.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding is performed on a subvolume, this still corresponds
+        to the mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+    num_particles : integer
+        The total number of particles in the volume, in the same fashion
+        as `total_mass` is calculated. Specifying this turns off
+        fancy_padding.
+        Default = None, which means the number of particles is
+        automatically calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = parallelHF(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
         fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
         total_mass=None, num_particles=None, tree='F'):
-        r"""Parallel HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        This is very similar to HOP, but it does not produce precisely the
-        same halos due to unavoidable numerical differences.
-
-        Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
-        Cosmological Data Sets." arXiv (2010) 1001.3411
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        resize : bool
-            Turns load-balancing on or off. Default = True.
-        kdtree : string
-            Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
-            faster, but uses more memory. The Cython one (kdtree = 'C') is
-            slower but is more memory efficient.
-            Default = 'F'
-        rearrange : bool
-            Turns on faster nearest neighbor searches at the cost of increased
-            memory usage.
-            This option only applies when using the Fortran tree.
-            Default = True.
-        fancy_padding : bool
-            True calculates padding independently for each face of each
-            subvolume. Default = True.
-        safety : float
-            Due to variances in inter-particle spacing in the volume, the
-            padding may need to be increased above the raw calculation.
-            This number is multiplied to the calculated padding, and values
-            >1 increase the padding. Default = 1.5.
-        premerge : bool
-            True merges chains in two steps (rather than one with False), which
-            can speed up halo finding by 25% or more. However, True can result
-            in small (<<1%) variations in the final halo masses when compared
-            to False. Default = True.
-        sample : float
-            The fraction of the full dataset on which load-balancing is
-            performed. Default = 0.03.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-        num_particles : integer
-            The total number of particles in the volume, in the same fashion
-            as `total_mass` is calculated. Specifying this turns off
-            fancy_padding.
-            Default = None, which means the number of particles is
-            automatically calculated.
-
-        Examples
-        -------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = parallelHF(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2401,58 +2433,58 @@
 
 
 class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
+    r"""HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+    particles to higher.
+    3. Geometrically proximate chains are identified and
+    4. merged into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
+    Simulations." ApJ (1998) vol. 498 pp. 137-142
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which HOP will be run, which can be used to run HOP
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = True.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding is run on a subvolume, this still corresponds to
+        the mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = HaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
             padding=0.02, total_mass=None):
-        r"""HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
-        Simulations." ApJ (1998) vol. 498 pp. 137-142
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = HaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2506,53 +2538,54 @@
 
 
 class FOFHaloFinder(GenericHaloFinder, FOFHaloList):
+    r"""Friends-of-friends halo finder.
+
+    Halos are found by linking together all pairs of particles closer than
+    some distance from each other. Particles may have multiple links,
+    and halos are found by recursively linking together all such pairs.
+
+    Larger linking lengths produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Davis et al. "The evolution of large-scale structure in a universe
+    dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which FOF will be run, which can be used to run FOF
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    link : float
+        If positive, the interparticle distance (compared to the overall
+        average) used to build the halos. If negative, this is taken to be
+        the *actual* linking length, and no other calculations will be
+        applied.  Default = 0.2.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = True.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = FOFHaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, link=0.2, dm_only=True,
         padding=0.02):
-        r"""Friends-of-friends halo finder.
-
-        Halos are found by linking together all pairs of particles closer than
-        some distance from each other. Particles may have multiple links,
-        and halos are found by recursively linking together all such pairs.
-
-        Larger linking lengths produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Davis et al. "The evolution of large-scale structure in a universe
-        dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        link : float
-            If positive, the interparticle distance (compared to the overall
-            average) used to build the halos. If negative, this is taken to be
-            the *actual* linking length, and no other calculations will be
-            applied.  Default = 0.2.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = FOFHaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
+        self.redshift = pf.current_redshift
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding)
@@ -2587,7 +2620,8 @@
         #self._reposition_particles((LE, RE))
         # here is where the FOF halo finder is run
         mylog.info("Using a linking length of %0.3e", linking_length)
-        FOFHaloList.__init__(self, self._data_source, linking_length, dm_only)
+        FOFHaloList.__init__(self, self._data_source, linking_length, dm_only,
+                             redshift=self.redshift)
         self._parse_halolist(1.)
         self._join_halolists()
 
@@ -2595,84 +2629,84 @@
 
 
 class LoadHaloes(GenericHaloFinder, LoadedHaloList):
+    r"""Load the full halo data into memory.
+
+    This function takes the output of `GenericHaloFinder.dump` and
+    re-establishes the list of halos in memory. This enables the full set
+    of halo analysis features without running the halo finder again. To
+    be precise, the particle data for each halo is only read in when
+    necessary, so examining a single halo will not require as much memory
+    as is required for halo finding.
+
+    Parameters
+    ----------
+    basename : str
+        The base name of the files that will be read in. This should match
+        what was used when `GenericHaloFinder.dump` was called. Default =
+        "HopAnalysis".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadHaloes(pf, "HopAnalysis")
+    """
     def __init__(self, pf, basename):
-        r"""Load the full halo data into memory.
-
-        This function takes the output of `GenericHaloFinder.dump` and
-        re-establishes the list of halos in memory. This enables the full set
-        of halo analysis features without running the halo finder again. To
-        be precise, the particle data for each halo is only read in when
-        necessary, so examining a single halo will not require as much memory
-        as is required for halo finding.
-
-        Parameters
-        ----------
-        basename : String
-            The base name of the files that will be read in. This should match
-            what was used when `GenericHaloFinder.dump` was called. Default =
-            "HopAnalysis".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadHaloes(pf, "HopAnalysis")
-        """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
+    r"""Load a text file of halos.
+    
+    Like LoadHaloes, but for when all that is available is a plain
+    text file. The text file is assumed to contain the three position
+    coordinates of each halo along with a radius. The halo objects
+    created are spheres.
+
+    Parameters
+    ----------
+    filename : str
+        The name of the text file to read in.
+    
+    columns : dict
+        A dict listing the column name : column number pairs for data
+        in the text file. It is zero-based (like Python).
+        An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
+        Any column name outside of ['x', 'y', 'z', 'r'] will be attached
+        to each halo object in the supplementary dict 'supp'. See
+        example.
+    
+    comment : str
+        If the first character of a line is equal to this, the line is
+        skipped. Default = "#".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadTextHaloes(pf, "list.txt",
+    ...     {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
+    ...     comment = ";")
+    >>> halos[0].supp['m']
+    3.28392048e14
+    """
     def __init__(self, pf, filename, columns, comment = "#"):
-        r"""Load a text file of halos.
-        
-        Like LoadHaloes, but when all that is available is a plain
-        text file. This assumes the text file has the 3-positions of halos
-        along with a radius. The halo objects created are spheres.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the text file to read in.
-        
-        columns : dict
-            A dict listing the column name : column number pairs for data
-            in the text file. It is zero-based (like Python).
-            An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
-            Any column name outside of ['x', 'y', 'z', 'r'] will be attached
-            to each halo object in the supplementary dict 'supp'. See
-            example.
-        
-        comment : String
-            If the first character of a line is equal to this, the line is
-            skipped. Default = "#".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadTextHaloes(pf, "list.txt",
-            {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
-            comment = ";")
-        >>> halos[0].supp['m']
-            3.28392048e14
-        """
         TextHaloList.__init__(self, pf, filename, columns, comment)
 
 LoadTextHalos = LoadTextHaloes
 
 class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    r"""Load Rockstar halos off disk from Rockstar-output format.
+
+    Parameters
+    ----------
+    filename : str
+        The name of the Rockstar file to read in. Default =
+        "rockstar_halos/out_0.list".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadRockstarHalos(pf, "other_name.out")
+    """
     def __init__(self, pf, filename = None):
-        r"""Load Rockstar halos off disk from Rockstar-output format.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the Rockstar file to read in. Default = 
-            "rockstar_halos/out_0.list'.
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadRockstarHalos(pf, "other_name.out")
-        """
         if filename is None:
             filename = 'rockstar_halos/out_0.list'
         RockstarHaloList.__init__(self, pf, filename)
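
Taken together, the finders and loaders documented above form a find, dump,
and reload round trip. A minimal sketch of that workflow, with a hypothetical
dataset name and the usual yt 2.x import paths assumed:

    from yt.mods import *
    from yt.analysis_modules.halo_finding.api import HaloFinder, LoadHaloes

    pf = load("RedshiftOutput0000")     # hypothetical dataset
    halos = HaloFinder(pf, threshold=160.0, dm_only=True)
    halos.dump("HopAnalysis")           # write halo lists and particle data
    # Later: re-establish the halo list without re-running the finder.
    halos = LoadHaloes(pf, "HopAnalysis")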

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -114,80 +114,80 @@
         return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
+    r"""Spawns the Rockstar Halo finder, distributes dark matter
+    particles and finds halos.
+
+    The halo finder requires dark matter particles of a fixed size.
+    Rockstar has three main processes: reader, writer, and the 
+    server which coordinates reader/writer processes.
+
+    Parameters
+    ----------
+    ts   : TimeSeriesData, StaticOutput
+        This is the data source containing the DM particles. Because 
+        halo IDs may change from one snapshot to the next, the only
+        way to keep a consistent halo ID across time is to feed 
+        Rockstar a set of snapshots, i.e., via TimeSeriesData.
+    num_readers: int
+        The number of readers can be increased from the default
+        of 1 in the event that a single snapshot is split among
+        many files. This can help in cases where performance is
+        IO-limited. Default is 1. If run inline, it is
+        equal to the number of MPI threads.
+    num_writers: int
+        The number of writers determines the number of processing threads
+        as well as the number of threads writing output data.
+        The default is set to comm.size-num_readers-1. If run inline,
+        the default is equal to the number of MPI threads.
+    outbase: str
+        This is where the out*list files that Rockstar makes should be
+        placed. Default is 'rockstar_halos'.
+    dm_type: int
+        In order to exclude stars and other particle types, define
+        the dm_type. Default is 1, as Enzo has the DM particle type=1.
+    force_res: float
+        This parameter specifies the force resolution that Rockstar uses
+        in units of Mpc/h.
+        If no value is provided, this parameter is automatically set to
+        the width of the smallest grid element in the simulation from the
+        last data snapshot (i.e. the one where time has evolved the
+        longest) in the time series:
+        ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+    total_particles : int
+        If supplied, this is a pre-calculated total number of dark matter
+        particles present in the simulation. For example, this is useful
+        when analyzing a series of snapshots where the number of dark
+        matter particles should not change and this will save some disk
+        access time. If left unspecified, it will
+        be calculated automatically. Default: ``None``.
+    dm_only : boolean
+        If set to ``True``, it will be assumed that there are only dark
+        matter particles present in the simulation. This can save analysis
+        time if this is indeed the case. Default: ``False``.
+        
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    To use the script below you must run it using MPI:
+    mpirun -np 3 python test_rockstar.py --parallel
+
+    test_rockstar.py:
+
+    from yt.mods import *
+    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+
+    ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
+    rh = RockstarHaloFinder(ts)
+    rh.run()
+    """
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
             force_res=None, total_particles=None, dm_only=False):
-        r"""Spawns the Rockstar Halo finder, distributes dark matter
-        particles and finds halos.
-
-        The halo finder requires dark matter particles of a fixed size.
-        Rockstar has three main processes: reader, writer, and the 
-        server which coordinates reader/writer processes.
-
-        Parameters
-        ----------
-        ts   : TimeSeriesData, StaticOutput
-            This is the data source containing the DM particles. Because 
-            halo IDs may change from one snapshot to the next, the only
-            way to keep a consistent halo ID across time is to feed 
-            Rockstar a set of snapshots, ie, via TimeSeriesData.
-        num_readers: int
-            The number of reader can be increased from the default
-            of 1 in the event that a single snapshot is split among
-            many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
-        num_writers: int
-            The number of writers determines the number of processing threads
-            as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
-        outbase: str
-            This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
-        dm_type: 1
-            In order to exclude stars and other particle types, define
-            the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-        total_particles : int
-            If supplied, this is a pre-calculated total number of dark matter
-            particles present in the simulation. For example, this is useful
-            when analyzing a series of snapshots where the number of dark
-            matter particles should not change and this will save some disk
-            access time. If left unspecified, it will
-            be calculated automatically. Default: ``None``.
-        dm_only : boolean
-            If set to ``True``, it will be assumed that there are only dark
-            matter particles present in the simulation. This can save analysis
-            time if this is indeed the case. Default: ``False``.
-            
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        To use the script below you must run it using MPI:
-        mpirun -np 3 python test_rockstar.py --parallel
-
-        test_rockstar.py:
-
-        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
-        from yt.mods import *
-        import sys
-
-        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
-        pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts)
-        rh.run()
-        """
         mylog.warning("The citation for the Rockstar halo finder can be found at")
         mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
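
The docstring example above ends at rh.run(); reading the resulting catalog
back in goes through the LoadRockstarHalos loader documented earlier. A
minimal sketch, assuming Rockstar's default output location and the usual
api import path:

    from yt.mods import *
    from yt.analysis_modules.halo_finding.api import LoadRockstarHalos

    pf = load("RedshiftOutput0000")   # hypothetical dataset
    halos = LoadRockstarHalos(pf)     # reads rockstar_halos/out_0.list
    print len(halos), "halos loaded from the Rockstar catalog"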

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -33,52 +33,52 @@
     parallel_blocking_call
 
 class HaloMassFcn(ParallelAnalysisInterface):
+    """
+    Initialize a HaloMassFcn object to analyze the distribution of haloes
+    as a function of mass.
+    :param halo_file (str): The filename of the output of the Halo Profiler.
+    Default=None.
+    :param omega_matter0 (float): The fraction of the universe made up of
+    matter (dark and baryonic). Default=None.
+    :param omega_lambda0 (float): The fraction of the universe made up of
+    dark energy. Default=None.
+    :param omega_baryon0 (float): The fraction of the universe made up of
+    ordinary baryonic matter. This should match the value
+    used to create the initial conditions, using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.05.
+    :param hubble0 (float): The expansion rate of the universe in units of
+    100 km/s/Mpc. Default=None.
+    :param sigma8input (float): The amplitude of the linear power
+    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
+    in a top-hat sphere of radius 8 Mpc/h. This should match the value
+    used to create the initial conditions, using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.86.
+    :param primordial_index (float): This is the index of the mass power
+    spectrum before modification by the transfer function. A value of 1
+    corresponds to the scale-free primordial spectrum. This should match
+    the value used to make the initial conditions using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=1.0.
+    :param this_redshift (float): The current redshift. Default=None.
+    :param log_mass_min (float): The log10 of the mass of the minimum of the
+    halo mass range. Default=None.
+    :param log_mass_max (float): The log10 of the mass of the maximum of the
+    halo mass range. Default=None.
+    :param num_sigma_bins (int): The number of bins (points) to use for
+    the calculations and generated fit. Default=360.
+    :param fitting_function (int): Which fitting function to use.
+    1 = Press-Schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit,
+    5 = Tinker
+    Default=4.
+    :param mass_column (int): The column of halo_file that contains the
+    masses of the haloes. Default=5.
+    """
     def __init__(self, pf, halo_file=None, omega_matter0=None, omega_lambda0=None,
     omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
     this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
     fitting_function=4, mass_column=5):
-        """
-        Initalize a HaloMassFcn object to analyze the distribution of haloes
-        as a function of mass.
-        :param halo_file (str): The filename of the output of the Halo Profiler.
-        Default=None.
-        :param omega_matter0 (float): The fraction of the universe made up of
-        matter (dark and baryonic). Default=None.
-        :param omega_lambda0 (float): The fraction of the universe made up of
-        dark energy. Default=None.
-        :param omega_baryon0 (float): The fraction of the universe made up of
-        ordinary baryonic matter. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.05.
-        :param hubble0 (float): The expansion rate of the universe in units of
-        100 km/s/Mpc. Default=None.
-        :param sigma8input (float): The amplitude of the linear power
-        spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-        in a top-hat sphere of radius 8 Mpc/h. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.86.
-        :param primoridal_index (float): This is the index of the mass power
-        spectrum before modification by the transfer function. A value of 1
-        corresponds to the scale-free primordial spectrum. This should match
-        the value used to make the initial conditions using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=1.0.
-        :param this_redshift (float): The current redshift. Default=None.
-        :param log_mass_min (float): The log10 of the mass of the minimum of the
-        halo mass range. Default=None.
-        :param log_mass_max (float): The log10 of the mass of the maximum of the
-        halo mass range. Default=None.
-        :param num_sigma_bins (float): The number of bins (points) to use for
-        the calculations and generated fit. Default=360.
-        :param fitting_function (int): Which fitting function to use.
-        1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-        5 = Tinker
-        Default=4.
-        :param mass_column (int): The column of halo_file that contains the
-        masses of the haloes. Default=4.
-        """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
@@ -132,7 +132,6 @@
         not stored in enzo datasets, so must be entered by hand.
         sigma8input=%f primordial_index=%f omega_baryon0=%f
         """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        time.sleep(1)
         
         # Do the calculations.
         self.sigmaM()
@@ -544,22 +543,22 @@
 """
 
 class TransferFunction(object):
+    """
+    /* This routine takes cosmological parameters and a redshift and sets up
+    all the internal scalar quantities needed to compute the transfer function. */
+    /* INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
+                    in units of the critical density. */
+    /* 	  omega_baryon -- Density of baryons, in units of critical. */
+    /* 	  omega_hdm    -- Density of massive neutrinos, in units of critical */
+    /* 	  degen_hdm    -- (Int) Number of degenerate massive neutrino species */
+    /*        omega_lambda -- Cosmological constant */
+    /* 	  hubble       -- Hubble constant, in units of 100 km/s/Mpc */
+    /*        redshift     -- The redshift at which to evaluate */
+    /* OUTPUT: Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
+        sets many global variables for use in TFmdm_onek_mpc() */
+    """
     def __init__(self, omega_matter, omega_baryon, omega_hdm,
 	    degen_hdm, omega_lambda, hubble, redshift):
-        """
-        /* This routine takes cosmological parameters and a redshift and sets up
-        all the internal scalar quantities needed to compute the transfer function. */
-        /* INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
-                        in units of the critical density. */
-        /* 	  omega_baryon -- Density of baryons, in units of critical. */
-        /* 	  omega_hdm    -- Density of massive neutrinos, in units of critical */
-        /* 	  degen_hdm    -- (Int) Number of degenerate massive neutrino species */
-        /*        omega_lambda -- Cosmological constant */
-        /* 	  hubble       -- Hubble constant, in units of 100 km/s/Mpc */
-        /*        redshift     -- The redshift at which to evaluate */
-        /* OUTPUT: Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
-            sets many global variables for use in TFmdm_onek_mpc() */
-        """
         self.qwarn = 0;
         self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K
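
For the HaloMassFcn docstring above, a minimal construction sketch; the halo
catalog filename, mass range, and api import path are assumptions here:

    from yt.mods import *
    from yt.analysis_modules.halo_mass_function.api import HaloMassFcn

    pf = load("data0005")                       # hypothetical dataset
    hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out",  # hypothetical
                      sigma8input=0.86, primordial_index=1.0,
                      omega_baryon0=0.05, fitting_function=4,   # 4 = Warren
                      log_mass_min=9.0, log_mass_max=15.0)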
     

diff -r 9ca5bb56ea0815d0804c0d75b4a1d2c6c6621bb3 -r 813b806f980e0a8f9acfd1956493d4221ec3d04c yt/analysis_modules/halo_merger_tree/api.py
--- a/yt/analysis_modules/halo_merger_tree/api.py
+++ b/yt/analysis_modules/halo_merger_tree/api.py
@@ -38,5 +38,7 @@
     MergerTreeTextOutput
 
 from .enzofof_merger_tree import \
+    HaloCatalog, \
     find_halo_relationships, \
-    EnzoFOFMergerTree
+    EnzoFOFMergerTree, \
+    plot_halo_evolution

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/700cd9eb832e/
Changeset:   700cd9eb832e
Branch:      yt
User:        jzuhone
Date:        2013-03-18 17:15:18
Summary:     Merged yt_analysis/yt into yt
Affected #:  78 files

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -837,16 +837,11 @@
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,62 @@
 import sys
 import time
 import subprocess
+import shutil
+import glob
 import distribute_setup
 distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
 from numpy.distutils import log
 from distutils import version
 
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+def find_fortran_deps():
+    return (find_executable("Forthon"),
+            find_executable("gfortran"))
+
+class BuildForthon(Command):
+
+    """Command for building Forthon modules"""
+
+    description = "Build Forthon modules"
+    user_options = []
+
+    def initialize_options(self):
+
+        """init options"""
+
+        pass
+
+    def finalize_options(self):
+
+        """finalize options"""
+
+        pass
+
+    def run(self):
+
+        """runner"""
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            sys.stderr.write(
+                "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+            )
+            return
+
+        cwd = os.getcwd()
+        os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+        cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+               "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+               "fKD_source.f90"]
+        subprocess.check_call(cmd, shell=False)
+        shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+        os.chdir(cwd)
+
 REASON_FILES = []
 REASON_DIRS = [
     "",
@@ -36,7 +84,7 @@
     files = []
     for ext in ["js", "html", "css", "png", "ico", "gif"]:
         files += glob.glob("%s/*.%s" % (dir_name, ext))
-    REASON_FILES.append( (dir_name, files) )
+    REASON_FILES.append((dir_name, files))
 
 # Verify that we have Cython installed
 try:
@@ -93,10 +141,10 @@
             language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
-                                                   options=options)
+                                                     options=options)
         if cython_result.num_errors != 0:
-            raise DistutilsError("%d errors while compiling %r with Cython" \
-                  % (cython_result.num_errors, source))
+            raise DistutilsError("%d errors while compiling %r with Cython"
+                                 % (cython_result.num_errors, source))
     return target_file
 
 
@@ -107,9 +155,11 @@
 
 import setuptools
 
-VERSION = "2.5dev"
+VERSION = "2.6dev"
 
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+    os.remove('MANIFEST')
+
 
 def get_mercurial_changeset_id(target_dir):
     """adapted from a script by Jason F. Harris, published at
@@ -123,11 +173,11 @@
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE,
                                      shell=True)
-        
+
     if (get_changeset.stderr.read() != ""):
         print "Error in obtaining current changeset of the Mercurial repository"
         changeset = None
-        
+
     changeset = get_changeset.stdout.read().strip()
     if (not re.search("^[0-9a-f]{12}", changeset)):
         print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +185,30 @@
 
     return changeset
 
+
+class my_build_src(build_src.build_src):
+    def run(self):
+        self.run_command("build_forthon")
+        build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+    def run(self):
+        (Forthon_exe, gfortran_exe) = find_fortran_deps()
+        if None in (Forthon_exe, gfortran_exe):
+            pass
+        else:
+            self.distribution.data_files.append(
+                ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+                )
+        np_install_data.install_data.run(self)
+
 class my_build_py(build_py):
     def run(self):
         # honor the --dry-run flag
         if not self.dry_run:
-            target_dir = os.path.join(self.build_lib,'yt')
-            src_dir =  os.getcwd() 
+            target_dir = os.path.join(self.build_lib, 'yt')
+            src_dir = os.getcwd()
             changeset = get_mercurial_changeset_id(src_dir)
             self.mkpath(target_dir)
             with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +216,7 @@
 
             build_py.run(self)
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
@@ -158,7 +227,7 @@
                        quiet=True)
 
     config.make_config_py()
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
@@ -176,25 +245,25 @@
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
         classifiers=["Development Status :: 5 - Production/Stable",
-            "Environment :: Console",
-            "Intended Audience :: Science/Research",
-            "License :: OSI Approved :: GNU General Public License (GPL)",
-            "Operating System :: MacOS :: MacOS X",
-            "Operating System :: POSIX :: AIX",
-            "Operating System :: POSIX :: Linux",
-            "Programming Language :: C",
-            "Programming Language :: Python",
-            "Topic :: Scientific/Engineering :: Astronomy",
-            "Topic :: Scientific/Engineering :: Physics",
-            "Topic :: Scientific/Engineering :: Visualization"],
-        keywords='astronomy astrophysics visualization ' + \
-            'amr adaptivemeshrefinement',
+                     "Environment :: Console",
+                     "Intended Audience :: Science/Research",
+                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "Operating System :: MacOS :: MacOS X",
+                     "Operating System :: POSIX :: AIX",
+                     "Operating System :: POSIX :: Linux",
+                     "Programming Language :: C",
+                     "Programming Language :: Python",
+                     "Topic :: Scientific/Engineering :: Astronomy",
+                     "Topic :: Scientific/Engineering :: Physics",
+                     "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' +
+        'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
-                            'yt = yt.utilities.command_line:run_main',
-                      ],
-                      'nose.plugins.0.10': [
-                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
-                      ]
+        'yt = yt.utilities.command_line:run_main',
+        ],
+            'nose.plugins.0.10': [
+                'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+            ]
         },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
@@ -203,8 +272,9 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
-        cmdclass = {'build_py': my_build_py},
-        )
+        cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+                  'build_src': my_build_src, 'install_data': my_install_data},
+    )
     return
 
 if __name__ == '__main__':
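
The graceful degradation in the new BuildForthon command rests entirely on
find_fortran_deps(): when either executable is missing, fKDpy.so is skipped
both at build time and at install_data time. The check is plain distutils
and can be exercised standalone; a minimal sketch:

    from distutils.spawn import find_executable

    # Mirrors find_fortran_deps() above; None means "not on PATH".
    forthon = find_executable("Forthon")
    gfortran = find_executable("gfortran")
    if None in (forthon, gfortran):
        print "fKDpy.so would be skipped (Forthon/gfortran missing)"
    else:
        print "Forthon build would proceed:", forthon, gfortran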

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
             If True, use dynamic load balancing to create the projections.
             Default: False.
 
-        Getting the Nearest Galaxies
-        ----------------------------
+        Notes
+        -----
+
         The light ray tool will use the HaloProfiler to calculate the
         distance and mass of the nearest halo to that pixel.  In order
         to do this, a dictionary called halo_profiler_parameters is used

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -164,6 +164,13 @@
         If set to ``True``, it will be assumed that there are only dark
         matter particles present in the simulation. This can save analysis
         time if this is indeed the case. Default: ``False``.
+    hires_dm_mass : float
+        If supplied, use only the highest resolution dark matter
+        particles, with a mass less than (1.1*hires_dm_mass), in units
+        of ParticleMassMsun. This is useful for multi-dm-mass
+        simulations. Note that this will only give sensible results for
+        halos that are not "polluted" by lower resolution
+        particles. Default: ``None``.
         
     Returns
     -------
@@ -187,7 +194,8 @@
     """
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
-            force_res=None, total_particles=None, dm_only=False):
+            force_res=None, total_particles=None, dm_only=False,
+            hires_dm_mass=None):
         mylog.warning("The citation for the Rockstar halo finder can be found at")
         mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
@@ -217,6 +225,7 @@
             self.force_res = force_res
         self.total_particles = total_particles
         self.dm_only = dm_only
+        self.hires_dm_mass = hires_dm_mass
         # Setup pool and workgroups.
         self.pool, self.workgroup = self.runner.setup_pool()
         p = self._setup_parameters(ts)
@@ -227,28 +236,52 @@
     def _setup_parameters(self, ts):
         if self.workgroup.name != "readers": return None
         tpf = ts[0]
+
         def _particle_count(field, data):
-            if self.dm_only:
-                return np.prod(data["particle_position_x"].shape)
+            if data.NumberOfParticles == 0: return 0
             try:
-                return (data["particle_type"]==self.dm_type).sum()
+                data["particle_type"]
+                has_particle_type=True
             except KeyError:
-                return np.prod(data["particle_position_x"].shape)
+                has_particle_type=False
+                
+            if (self.dm_only or (not has_particle_type)):
+                if self.hires_dm_mass is None:
+                    return np.prod(data["particle_position_x"].shape)
+                else:
+                    return (data['ParticleMassMsun'] < self.hires_dm_mass*1.1).sum()
+            elif has_particle_type:
+                if self.hires_dm_mass is None:
+                    return (data["particle_type"]==self.dm_type).sum()
+                else:
+                    return ( (data["particle_type"]==self.dm_type) & 
+                             (data['ParticleMassMsun'] < self.hires_dm_mass*1.1) ).sum()
+            else:                
+                raise RuntimeError() # should never get here
+
         add_field("particle_count", function=_particle_count,
                   not_in_all=True, particle_type=True)
         dd = tpf.h.all_data()
         # Get DM particle mass.
         all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
-        for g in tpf.h._get_objs("grids"):
-            if g.NumberOfParticles == 0: continue
-            if self.dm_only:
-                iddm = Ellipsis
-            elif "particle_type" in all_fields:
-                iddm = g["particle_type"] == self.dm_type
-            else:
-                iddm = Ellipsis
-            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
-            break
+        has_particle_type = ("particle_type" in all_fields)
+
+        if self.hires_dm_mass is None:
+            for g in tpf.h._get_objs("grids"):
+                if g.NumberOfParticles == 0: continue
+
+                if (self.dm_only or (not has_particle_type)):
+                    iddm = Ellipsis
+                elif has_particle_type:
+                    iddm = g["particle_type"] == self.dm_type
+                else:                    
+                    iddm = Ellipsis # should never get here
+
+                particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+                break
+        else:
+            particle_mass = self.hires_dm_mass / tpf.hubble_constant
+
         p = {}
         if self.total_particles is None:
             # Get total_particles in parallel.
@@ -302,6 +335,7 @@
                     force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
                     dm_only = int(self.dm_only),
+                    hires_only = (self.hires_dm_mass is not None),
                     **kwargs)
         # Make the directory to store the halo lists in.
         if self.comm.rank == 0:
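
Per grid, the hires_dm_mass selection above reduces to a boolean mask over
the particle arrays. A minimal numpy sketch with made-up values; the 1.1
factor is the tolerance used in the diff:

    import numpy as np

    # Hypothetical per-grid particle data.
    particle_type = np.array([1, 1, 2, 1])      # type 1 == DM for Enzo
    particle_mass_msun = np.array([1.0e8, 1.0e8, 5.0e6, 8.0e9])
    hires_dm_mass, dm_type = 1.0e8, 1

    # Matches the selection in _particle_count above.
    iddm = ((particle_type == dm_type) &
            (particle_mass_msun < hires_dm_mass * 1.1))
    print iddm.sum(), "high-resolution DM particles selected"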

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -163,6 +163,7 @@
     SCALE_NOW = 1.0/(pf.current_redshift+1.0)
     # Now we want to grab data from only a subset of the grids for each reader.
     all_fields = set(pf.h.derived_field_list + pf.h.field_list)
+    has_particle_type = ("particle_type" in all_fields)
 
     # First we need to find out how many this reader is going to read in
     # if the number of readers > 1.
@@ -170,12 +171,19 @@
         local_parts = 0
         for g in pf.h._get_objs("grids"):
             if g.NumberOfParticles == 0: continue
-            if rh.dm_only:
-                iddm = Ellipsis
-            elif "particle_type" in all_fields:
-                iddm = g["particle_type"] == rh.dm_type
+            if (rh.dm_only or (not has_particle_type)):
+                if rh.hires_only:
+                    iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
+                else:
+                    iddm = Ellipsis
+            elif has_particle_type:
+                if rh.hires_only:
+                    iddm = ( (g["particle_type"]==rh.dm_type) &
+                             (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )                    
+                else:
+                    iddm = g["particle_type"] == rh.dm_type
             else:
-                iddm = Ellipsis
+                iddm = Ellipsis # should never get here
             arri = g["particle_index"].astype("int64")
             arri = arri[iddm] #pick only DM
             local_parts += arri.size
@@ -195,12 +203,19 @@
     pi = 0
     for g in pf.h._get_objs("grids"):
         if g.NumberOfParticles == 0: continue
-        if rh.dm_only:
-            iddm = Ellipsis
-        elif "particle_type" in all_fields:
-            iddm = g["particle_type"] == rh.dm_type
-        else:
-            iddm = Ellipsis
+        if (rh.dm_only or (not has_particle_type)):
+            if rh.hires_only:
+                iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
+            else:
+                iddm = Ellipsis
+        elif has_particle_type:
+            if rh.hires_only:
+                iddm = ( (g["particle_type"]==rh.dm_type) &
+                         (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )                    
+            else:
+                iddm = g["particle_type"] == rh.dm_type
+        else:            
+            iddm = Ellipsis # should never get here
         arri = g["particle_index"].astype("int64")
         arri = arri[iddm] #pick only DM
         npart = arri.size
@@ -230,6 +245,7 @@
     cdef public int dm_type
     cdef public int total_particles
     cdef public int dm_only
+    cdef public int hires_only
 
     def __cinit__(self, ts):
         self.ts = ts
@@ -244,7 +260,7 @@
                        int writing_port = -1, int block_ratio = 1,
                        int periodic = 1, force_res=None,
                        int min_halo_size = 25, outbase = "None",
-                       int dm_only = 0):
+                       int dm_only = 0, int hires_only = False):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -276,6 +292,7 @@
         TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
         self.dm_only = dm_only
+        self.hires_only = hires_only
         
         tpf = self.ts[0]
         h0 = tpf.hubble_constant

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
         halonum : int
             Halo number at the last output to trace.
 
-        Output
-        ------
+        Returns
+        -------
         output : dict
             Dictionary of redshifts, cycle numbers, and halo numbers
             of the most massive progenitor.  keys = {redshift, cycle,
@@ -810,6 +810,6 @@
         ax.set_xscale("log")
     if y_log:
         ax.set_yscale("log")
-    ofn = "%s_%s_%s.png" % (basename, fields[0], fields[1])
+    ofn = "%s/%s_%s_%s.png" % (FOF_directory, basename, fields[0], fields[1])
     plt.savefig(ofn)
     plt.clf()
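
The one-line change above routes plot_halo_evolution output into the FOF
output directory rather than the current working directory. A sketch of the
resulting filename, with hypothetical values:

    FOF_directory = "FOF"
    basename = "halo_0"
    fields = ("redshift", "halo_mass")
    ofn = "%s/%s_%s_%s.png" % (FOF_directory, basename, fields[0], fields[1])
    print ofn    # FOF/halo_0_redshift_halo_mass.png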

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -758,17 +758,19 @@
     
     def query(self, string):
         r"""Performs a query of the database and returns the results as a list
-        of tuple(s), even if the result is singular.
+        of tuples, even if the result is singular.
         
         Parameters
         ----------
-        string : String
+        
+        string : str
             The SQL query of the database.
         
         Examples
-        -------
+        --------
+
         >>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
-        ... SnapZ = 0;")
+        ...    SnapZ = 0;")
         """
         # Query the database and return a list of tuples.
         if string is None:
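
The docstring fix above pins down the contract that query() returns a list
of tuples even for a single row; that is the fetchall() behavior of the
sqlite3 database the merger tree is built on. A self-contained sketch:

    import sqlite3

    # Hypothetical in-memory stand-in for the merger tree database.
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE Halos "
                 "(GlobalHaloID INTEGER, SnapHaloID INTEGER, SnapZ REAL)")
    conn.execute("INSERT INTO Halos VALUES (42, 0, 0.0)")
    results = conn.execute("SELECT GlobalHaloID FROM Halos "
                           "WHERE SnapHaloID = 0 AND SnapZ = 0").fetchall()
    print results    # [(42,)] -- a list of tuples, even when singular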

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
         After all the calls to `add_profile`, this will trigger the actual
         calculations and output the profiles to disk.
 
-        Paramters
-        ---------
+        Parameters
+        ----------
 
         filename : str
             If set, a file will be written with all of the filtered halos

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
     
     Initialize an EmissivityIntegrator object.
 
-    Keyword Parameters
-    ------------------
-    filename: string
+    Parameters
+    ----------
+    filename: string, default None
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
         emissivity tables for primordial elements and for metals at 
@@ -146,8 +146,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -220,8 +220,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 
@@ -277,8 +277,8 @@
     e_min: float
         the maximum energy in keV for the energy band.
 
-    Keyword Parameters
-    ------------------
+    Other Parameters
+    ----------------
     filename: string
         Path to data file containing emissivity values.  If None,
         a file called xray_emissivity.h5 is used.  This file contains 

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -178,7 +178,7 @@
         self.child_mask = 1
         self.ActiveDimensions = self.field_data['x'].shape
         self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-        
+
     def __getitem__(self, field):
         if field not in self.field_data.keys():
             if field == "RadiusCode":
@@ -424,7 +424,7 @@
         return grids
 
     def select_grid_indices(self, level):
-        return np.where(self.grid_levels == level)
+        return np.where(self.grid_levels[:,0] == level)
 
     def __get_grid_left_edge(self):
         if self.__grid_left_edge == None:
@@ -461,6 +461,7 @@
     def __get_grid_levels(self):
         if self.__grid_levels == None:
             self.__grid_levels = np.array([g.Level for g in self._grids])
+            self.__grid_levels.shape = (self.__grid_levels.size, 1)
         return self.__grid_levels
 
     def __del_grid_levels(self):
@@ -474,7 +475,6 @@
     grid_levels = property(__get_grid_levels, __set_grid_levels,
                              __del_grid_levels)
 
-
     def __get_grid_dimensions(self):
         if self.__grid_dimensions == None:
             self.__grid_dimensions = np.array([g.ActiveDimensions for g in self._grids])
@@ -491,6 +491,19 @@
     grid_dimensions = property(__get_grid_dimensions, __set_grid_dimensions,
                              __del_grid_dimensions)
 
+    @property
+    def grid_corners(self):
+        return np.array([
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_left_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_left_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_right_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+          [self.grid_left_edge[:,0], self.grid_right_edge[:,1], self.grid_right_edge[:,2]],
+        ], dtype='float64')
+
 
 class AMR1DData(AMRData, GridPropertiesMixin):
     _spatial = False
@@ -530,7 +543,7 @@
             # generated it above.  This way, fields that are grabbed from the
             # grids are sorted properly.
             self[field] = self[field][self._sortkey]
-       
+
 class AMROrthoRayBase(AMR1DData):
     """
     This is an orthogonal ray cast through the entire domain, at a specific
@@ -673,9 +686,9 @@
             vs = self._get_line_at_coord(RE[:,i], i)
             p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
                     & ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
-        p = p | ( np.all( LE <= self.start_point, axis=1 ) 
+        p = p | ( np.all( LE <= self.start_point, axis=1 )
                 & np.all( RE >= self.start_point, axis=1 ) )
-        p = p | ( np.all( LE <= self.end_point,   axis=1 ) 
+        p = p | ( np.all( LE <= self.end_point,   axis=1 )
                 & np.all( RE >= self.end_point,   axis=1 ) )
         self._grids = self.hierarchy.grids[p]
 
@@ -695,7 +708,7 @@
         if not iterable(gf):
             gf = gf * np.ones(grid.child_mask.shape)
         return gf[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -738,11 +751,11 @@
     --------
 
     >>> from yt.visualization.api import Streamlines
-    >>> streamlines = Streamlines(pf, [0.5]*3) 
+    >>> streamlines = Streamlines(pf, [0.5]*3)
     >>> streamlines.integrate_through_volume()
     >>> stream = streamlines.path(0)
     >>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-    
+
     """
     _type_name = "streamline"
     _con_args = ('positions')
@@ -775,16 +788,16 @@
     @restore_grid_state
     def _get_data_from_grid(self, grid, field):
         # No child masking here; it happens inside the mask cut
-        mask = self._get_cut_mask(grid) 
+        mask = self._get_cut_mask(grid)
         if field == 'dts': return self._dts[grid.id]
         if field == 't': return self._ts[grid.id]
         return grid[field].flat[mask]
-        
+
     @cache_mask
     def _get_cut_mask(self, grid):
         #pdb.set_trace()
         points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
-                         np.all(self.positions <= grid.RightEdge, axis=1) 
+                         np.all(self.positions <= grid.RightEdge, axis=1)
         pids = np.where(points_in_grid)[0]
         mask = np.zeros(points_in_grid.sum(), dtype='int')
         dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -819,7 +832,7 @@
         AMRData.__init__(self, pf, fields, **kwargs)
         self.field = ensure_list(fields)[0]
         self.set_field_parameter("axis",axis)
-        
+
     def _convert_field_name(self, field):
         return field
 
@@ -838,7 +851,6 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(fields)
-        temp_data = {}
         for field in fields_to_get:
             if self.field_data.has_key(field): continue
             if field not in self.hierarchy.field_list:
@@ -848,18 +860,13 @@
             # we're going to have to set the same thing several times
             data = [self._get_data_from_grid(grid, field)
                     for grid in self._get_grids()]
-            if len(data) == 0: data = np.array([])
-            else: data = np.concatenate(data)
-            temp_data[field] = data
+            if len(data) == 0:
+                data = np.array([])
+            else:
+                data = np.concatenate(data)
             # Now the next field can use this field
-            self[field] = temp_data[field] 
-        # We finalize
-        if temp_data != {}:
-            temp_data = self.comm.par_combine_object(temp_data,
-                    datatype='dict', op='cat')
-        # And set, for the next group
-        for field in temp_data.keys():
-            self[field] = temp_data[field]
+            self[field] = self.comm.par_combine_object(data, op='cat',
+                                                       datatype='array')
 
     def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
         axis = self.axis
@@ -874,7 +881,7 @@
         (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
         if axes_unit is None and units != ('1', '1'):
             axes_unit = units
-        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
+        pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
         return pw
@@ -980,7 +987,7 @@
         for field in fields:
             #mylog.debug("Trying to obtain %s from node %s",
                 #self._convert_field_name(field), node_name)
-            fdata=self.hierarchy.get_data(node_name, 
+            fdata=self.hierarchy.get_data(node_name,
                 self._convert_field_name(field))
             if fdata is not None:
                 #mylog.debug("Got %s from node %s", field, node_name)
@@ -1138,7 +1145,7 @@
         t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
         # calculate ypoints array
         ind = cmI[1, :].ravel()   # yind
-        del cmI   # no longer needed 
+        del cmI   # no longer needed
         t = np.vstack( (t, points * ind[cm] * dy + \
                 (grid.LeftEdge[yaxis] + 0.5 * dy))
             )
@@ -1197,7 +1204,7 @@
     def hub_upload(self):
         self._mrep.upload()
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1477,7 +1484,7 @@
         self.dims = dims
         self.dds = self.width / self.dims
         self.bounds = np.array([0.0,1.0,0.0,1.0])
-        
+
         self.set_field_parameter('center', center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
@@ -1563,7 +1570,7 @@
 
             # Mark these pixels to speed things up
             self._pixelmask[pointI] = 0
-            
+
             return
         else:
             raise SyntaxError("Making a fixed resolution slice with "
@@ -1651,7 +1658,7 @@
         L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
-        
+
 class AMRQuadTreeProjBase(AMR2DData):
     """
     This is a data object corresponding to a line integral through the
@@ -1809,7 +1816,7 @@
             convs[:] = 1.0
         return dls, convs
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -1850,7 +1857,7 @@
                                  if g.Level == level],
                               self.get_dependencies(fields), self.hierarchy.io)
             self._add_level_to_tree(tree, level, fields)
-            mylog.debug("End of projecting level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
         if self.proj_style == "mip":
@@ -1942,7 +1949,7 @@
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
         to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
-        tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
+        tree.add_array_to_tree(grid.Level, xpoints, ypoints,
                     to_add, weight_proj[used_points].ravel())
 
     def _add_level_to_tree(self, tree, level, fields):
@@ -2068,6 +2075,7 @@
                  source=None, node_name = None, field_cuts = None,
                  preload_style='level', serialize=True,**kwargs):
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
+        self.proj_style = "integrate"
         self.weight_field = weight_field
         self._field_cuts = field_cuts
         self.serialize = serialize
@@ -2282,7 +2290,7 @@
                 del self.__retval_coords[grid.id]
                 del self.__retval_fields[grid.id]
                 del self.__overlap_masks[grid.id]
-            mylog.debug("End of projecting level %s, memory usage %0.3e", 
+            mylog.debug("End of projecting level %s, memory usage %0.3e",
                         level, get_memory_usage()/1024.)
         coord_data = np.concatenate(coord_data, axis=1)
         field_data = np.concatenate(field_data, axis=1)
@@ -2313,7 +2321,7 @@
     def add_fields(self, fields, weight = "CellMassMsun"):
         pass
 
-    def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 
+    def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
                origin='center-window'):
         r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
         object.
@@ -2521,7 +2529,7 @@
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
@@ -2682,9 +2690,9 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.  It is very useful for applying 
+        fly with a set of field_cuts.  It is very useful for applying
         conditions to the fields in your data object.
-        
+
         Examples
         --------
         To find the total mass of gas above 10^6 K in your volume:
@@ -2725,7 +2733,7 @@
         useful for calculating, for instance, total isocontour area, or
         visualizing in an external program (such as `MeshLab
         <http://meshlab.sf.net>`_.)
-        
+
         Parameters
         ----------
         field : string
@@ -2839,7 +2847,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field : string
@@ -2896,7 +2904,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -2989,7 +2997,7 @@
     ----------------
     force_refresh : bool
        Force a refresh of the data. Defaults to True.
-    
+
     Examples
     --------
     """
@@ -3229,7 +3237,7 @@
         if self._grids is not None: return
         GLE = self.pf.h.grid_left_edge
         GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center, 
+        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
                                            GLE, GRE)
         cgrids = self.pf.h.grids[goodI.astype('bool')]
        # find_grids_in_inclined_box seems to be broken.
@@ -3237,13 +3245,13 @@
         grids = []
         for i,grid in enumerate(cgrids):
             v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge, 
+                                      self._rot_mat, grid.LeftEdge,
                                       grid.RightEdge, grid.dds,
                                       grid.child_mask, 1)
             if v: grids.append(grid)
         self._grids = np.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self._grids[gi] = g
-            
+
 
     def _is_fully_enclosed(self, grid):
         # This should be written at some point.
@@ -3256,10 +3264,10 @@
             return True
         pm = np.zeros(grid.ActiveDimensions, dtype='int32')
         grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge, 
+                              self._rot_mat, grid.LeftEdge,
                               grid.RightEdge, grid.dds, pm, 0)
         return pm
-        
+
 
 class AMRRegionBase(AMR3DData):
     """A 3D region of data with an arbitrary center.
@@ -3395,9 +3403,9 @@
     _dx_pad = 0.0
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
                                        fields = None, pf = None, **kwargs)
-    
+
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3564,7 +3572,7 @@
         self._C = C
         self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
         self._tilt = tilt
-        
+
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = np.arctan(e0[1] / e0[0])
         # rotate e0 by -t1
@@ -3574,7 +3582,7 @@
         t2 = np.arctan(-r1[2] / r1[0])
         """
         calculate the original e1
-        given the tilt about the x axis when e0 was aligned 
+        given the tilt about the x axis when e0 was aligned
         to x after t1, t2 rotations about z, y
         """
         RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
@@ -3598,7 +3606,7 @@
         self._refresh_data()
 
         """
-        Having another function find_ellipsoid_grids is too much work, 
+        Having another function find_ellipsoid_grids is too much work,
         can just use the sphere one and forget about checking orientation
         but feed in the A parameter for radius
         """
@@ -3686,7 +3694,7 @@
 class AMRCoveringGridBase(AMR3DData):
     """A 3D region with all data extracted to a single, specified
     resolution.
-    
+
     Parameters
     ----------
     level : int
@@ -3784,7 +3792,7 @@
             n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
             mylog.error("Covering problem: %s cells are uncovered", n_bad)
             raise KeyError(n_bad)
-            
+
     def _generate_field(self, field):
         if self.pf.field_info.has_key(field):
             # First we check the validator; this might even raise!
@@ -3812,13 +3820,13 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 0)
         return count
@@ -3834,7 +3842,7 @@
         c_fields = [self[field] for field in fields]
         FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self.ActiveDimensions, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, ll, 1)
 
@@ -3855,7 +3863,7 @@
     fill the region to level 1, replacing any cells actually
     covered by level 1 data, and then recursively repeating this
     process until it reaches the specified `level`.
-    
+
     Parameters
     ----------
     level : int
@@ -3867,10 +3875,11 @@
     fields : array_like, optional
         A list of fields that you'd like pre-generated for your object
 
-    Example
-    -------
-    cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
-                              dims=[128, 128, 128])
+    Examples
+    --------
+
+    >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+    ...                          dims=[128, 128, 128])
     """
     _type_name = "smoothed_covering_grid"
     def __init__(self, *args, **kwargs):
@@ -3975,7 +3984,7 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
+        input_left = (self._old_global_startindex + 0.5) * rf
         dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
         output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
         self._cur_dims = output_dims
@@ -3989,13 +3998,13 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        g_fields = [gf.astype("float64") 
+        g_fields = [gf.astype("float64")
                     if gf.dtype != "float64"
                     else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
-            c_fields, g_fields, 
+            c_fields, g_fields,
             self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
@@ -4007,14 +4016,14 @@
     """
     This will build a hybrid region based on the boolean logic
     of the regions.
-    
+
     Parameters
     ----------
     regions : list
         A list of region objects and strings describing the boolean logic
         to use when building the hybrid region. The boolean logic can be
         nested using parentheses.
-    
+
     Examples
     --------
     >>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4027,7 +4036,7 @@
         sp1, ")"])
     """
     _type_name = "boolean"
-    _con_args = ("regions")
+    _con_args = ("regions",)
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         # Center is meaningless, but we'll define it all the same.
         AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4039,7 +4048,7 @@
         self._get_all_regions()
         self._make_overlaps()
         self._get_list_of_grids()
-    
+
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
         # of this process, uniquely.
@@ -4049,7 +4058,7 @@
             # So cut_masks don't get messed up.
             item._boolean_touched = True
         self._all_regions = np.unique(self._all_regions)
-    
+
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
@@ -4083,7 +4092,7 @@
                     continue
             pbar.update(i)
         pbar.finish()
-    
+
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4096,7 +4105,7 @@
             if i < (len(self.regions) - 1): s += ", "
         s += "]"
         return s
-    
+
     def _is_fully_enclosed(self, grid):
         return (grid in self._all_overlap)
 
@@ -4183,7 +4192,7 @@
     <http://meshlab.sf.net>`_.)  The object has the properties .vertices
     and will sample values if a field is requested.  The values are
     interpolated to the center of a given face.
-    
+
     Parameters
     ----------
     data_source : AMR3DDataObject
@@ -4258,7 +4267,7 @@
                 self[fields] = samples
             elif sample_type == "vertex":
                 self.vertex_samples[fields] = samples
-        
+
 
     @restore_grid_state
     def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4295,7 +4304,7 @@
 
         Additionally, the returned flux is defined as flux *into* the surface,
         not flux *out of* the surface.
-        
+
         Parameters
         ----------
         field_x : string
@@ -4342,7 +4351,7 @@
         return flux
 
     @restore_grid_state
-    def _calculate_flux_in_grid(self, grid, 
+    def _calculate_flux_in_grid(self, grid,
                     field_x, field_y, field_z, fluxing_field = None):
         mask = self.data_source._get_cut_mask(grid) * grid.child_mask
         vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4350,7 +4359,7 @@
             ff = np.ones(vals.shape, dtype="float64")
         else:
             ff = grid.get_vertex_centered_data(fluxing_field)
-        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
                      [field_x, field_y, field_z]]
         return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
                     ff, mask, grid.LeftEdge, grid.dds)
@@ -4468,7 +4477,7 @@
             w = bounds[i][1] - bounds[i][0]
             np.divide(tmp, w, tmp)
             np.subtract(tmp, 0.5, tmp) # Center at origin.
-            v[ax][:] = tmp 
+            v[ax][:] = tmp
         f.write("end_header\n")
         v.tofile(f)
         arr["ni"][:] = 3
@@ -4597,22 +4606,46 @@
             mylog.error("Problem uploading.")
         return upload_id
 
+# Many of these items are set up specifically to ensure that
+# we are not breaking old pickle files.  This means we must only call the
+# _reconstruct_object and that we cannot mandate any additional arguments to
+# the reconstruction function.
+#
+# In the future, this would be better off being set up to more directly
+# reference objects or retain state, perhaps with a context manager.
+#
+# One final detail: time series or multiple parameter files in a single pickle
+# seems problematic.
+
+class ReconstructedObject(tuple):
+    pass
+
+def _check_nested_args(arg, ref_pf):
+    if not isinstance(arg, (tuple, list, ReconstructedObject)):
+        return arg
+    elif isinstance(arg, ReconstructedObject) and ref_pf == arg[0]:
+        return arg[1]
+    narg = [_check_nested_args(a, ref_pf) for a in arg]
+    return narg
+
+def _get_pf_by_hash(hash):
+    from yt.data_objects.static_output import _cached_pfs
+    for pf in _cached_pfs.values():
+        if pf._hash() == hash: return pf
+    return None
 
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
+    pf = _get_pf_by_hash(pfid)
+    if not pf:
+        pfs = ParameterFileStore()
+        pf = pfs.get_pf_hash(pfid)
     field_parameters = args[-1]
     # will be much nicer when we can do pfid, *a, fp = args
-    args, new_args = args[2:-1], []
-    for arg in args:
-        if iterable(arg) and len(arg) == 2 \
-           and not isinstance(arg, types.DictType) \
-           and isinstance(arg[1], AMRData):
-            new_args.append(arg[1])
-        else: new_args.append(arg)
-    pfs = ParameterFileStore()
-    pf = pfs.get_pf_hash(pfid)
+    args = args[2:-1]
+    new_args = [_check_nested_args(a, pf) for a in args]
     cls = getattr(pf.h, dtype)
     obj = cls(*new_args)
     obj.field_parameters.update(field_parameters)
-    return pf, obj
+    return ReconstructedObject((pf, obj))

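The compatibility notes above amount to _reconstruct_object now returning
a ReconstructedObject, a tuple subclass, so existing (pf, obj) unpacking
keeps working. A minimal round-trip sketch, assuming a parameter file pf
is already loaded:

    import cPickle
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)  # any pickleable data object
    s = cPickle.dumps(sp)                    # stores (pf hash, type, args)
    pf2, sp2 = cPickle.loads(s)              # ReconstructedObject unpacks
                                             # like the old (pf, obj) tuple
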
diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -209,7 +209,7 @@
         pf = self.parameter_file
         if find_max: c = self.find_max("Density")[1]
         else: c = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        return self.region(c, 
+        return self.region(c,
             pf.domain_left_edge, pf.domain_right_edge)
 
     def clear_all_data(self):
@@ -308,7 +308,7 @@
             self.save_data = self._save_data
         else:
             self.save_data = parallel_splitter(self._save_data, self._reload_data_file)
-    
+
     save_data = parallel_splitter(_save_data, _reload_data_file)
 
     def save_object(self, obj, name):
@@ -367,7 +367,7 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        return self.select_grids(self.grid_levels.max())[0].dds[0]
+        return self.select_grids(self.grid_levels.max())[0].dds[:].min()
 
     def _add_object_class(self, name, class_name, base, dd):
         self.object_types.append(name)

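The get_smallest_dx change matters for anisotropic cells, where dds[0]
alone can overestimate the smallest spacing. A toy illustration with
hypothetical per-axis widths:

    import numpy as np
    dds = np.array([0.5, 0.25, 0.125])  # hypothetical cell widths
    print dds[0]        # 0.5   -- old behavior: x-spacing only
    print dds[:].min()  # 0.125 -- new behavior: smallest over all axes
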
diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/image_array.py
--- a/yt/data_objects/image_array.py
+++ b/yt/data_objects/image_array.py
@@ -71,12 +71,12 @@
 
     >>> im = np.zeros([64,128,3])
     >>> for i in xrange(im.shape[0]):
-    >>>     for k in xrange(im.shape[2]):
-    >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+    ...     for k in xrange(im.shape[2]):
+    ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
     >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-    >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-    >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+    ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+    ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
     >>> im_arr = ImageArray(im, info=myinfo)
     >>> im_arr.save('test_ImageArray')
@@ -112,12 +112,12 @@
         -------- 
         >>> im = np.zeros([64,128,3])
         >>> for i in xrange(im.shape[0]):
-        >>>     for k in xrange(im.shape[2]):
-        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
 
         >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_hdf5('test_ImageArray.h5')
@@ -133,38 +133,191 @@
             d.attrs.create(k, v)
         f.close()
 
-    def write_png(self, filename, clip_ratio=None):
+    def add_background_color(self, background='black', inline=True):
+        r"""Adds a background color to a 4-channel ImageArray
+
+        This adds a background color to a 4-channel ImageArray, by default
+        doing so inline.  The ImageArray must already be normalized to the
+        [0,1] range.
+
+        Parameters
+        ----------
+        background: 
+            This can be used to set a background color for the image, and can
+            take several types of values:
+
+               * ``white``: white background, opaque
+               * ``black``: black background, opaque
+               * ``None``: transparent background
+               * 4-element array [r,g,b,a]: arbitrary rgba setting.
+
+            Default: 'black'
+        inline: boolean, optional
+            If True, original ImageArray is modified. If False, a copy is first
+            created, then modified. Default: True
+
+        Returns
+        -------
+        out: ImageArray
+            The modified ImageArray with a background color added.
+       
+        Examples
+        --------
+        >>> im = np.zeros([64,128,4])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.rescale()
+        >>> new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
+        >>> new_im.write_png('red_bg.png')
+        >>> im_arr.add_background_color('black')
+        >>> im_arr.write_png('black_bg.png')
+        """
+        assert(self.shape[-1] == 4)
+        
+        if background == None:
+            background = (0., 0., 0., 0.)
+        elif background == 'white':
+            background = (1., 1., 1., 1.)
+        elif background == 'black':
+            background = (0., 0., 0., 1.)
+
+        # Alpha blending to background
+        if inline:
+            out = self
+        else:
+            out = self.copy()
+
+        for i in range(3):
+            out[:,:,i] = self[:,:,i]*self[:,:,3] + \
+                    background[i]*background[3]*(1.0-self[:,:,3])
+        out[:,:,3] = self[:,:,3] + background[3]*(1.0-self[:,:,3]) 
+        return out 
+
+
+    def rescale(self, cmax=None, amax=None, inline=True):
+        r"""Rescales the image to be in [0,1] range.
+
+        Parameters
+        ----------
+        cmax: float, optional
+            Normalization value to use for the rgb channels. Defaults to
+            None, corresponding to the maximum of the summed rgb channels.
+        amax: float, optional
+            Normalization value to use for alpha channel. Defaults to None,
+            corresponding to using the maximum value in the alpha channel.
+        inline: boolean, optional
+            Specifies whether or not the rescaling is done inline. If False,
+            a new copy of the ImageArray is created and returned.
+            Default: True.
+
+        Returns
+        -------
+        out: ImageArray
+            The rescaled ImageArray, clipped to the [0,1] range.
+
+        Notes
+        -----
+        This requires the shape of the ImageArray to have a length of 3,
+        and the third dimension to be >= 3.  If the third dimension has
+        a size of 4, the alpha channel will also be rescaled.
+       
+        Examples
+        -------- 
+        >>> im = np.zeros([64,128,4])
+        >>> for i in xrange(im.shape[0]):
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.write_png('original.png')
+        >>> im_arr.rescale()
+        >>> im_arr.write_png('normalized.png')
+
+        """
+        assert(len(self.shape) == 3)
+        assert(self.shape[2] >= 3)
+        if inline:
+            out = self
+        else:
+            out = self.copy()
+        if cmax is None: 
+            cmax = self[:,:,:3].sum(axis=2).max()
+
+        np.multiply(self[:,:,:3], 1./cmax, out[:,:,:3])
+
+        if self.shape[2] == 4:
+            if amax is None:
+                amax = self[:,:,3].max()
+            if amax > 0.0:
+                np.multiply(self[:,:,3], 1./amax, out[:,:,3])
+        
+        np.clip(out, 0.0, 1.0, out)
+        return out
+
+    def write_png(self, filename, clip_ratio=None, background='black',
+                 rescale=True):
         r"""Writes ImageArray to png file.
 
         Parameters
         ----------
         filename: string
             Note that a '.png' extension is appended if the filename lacks one.
+        clip_ratio: float, optional
+            If set, the image is clipped before saving at the mean of the
+            nonzero pixel values plus this value times their standard
+            deviation.  Useful for enhancing images. Default: None
+        background: 
+            This can be used to set a background color for the image, and can
+            take several types of values:
+
+               * ``white``: white background, opaque
+               * ``black``: black background, opaque
+               * ``None``: transparent background
+               * 4-element array [r,g,b,a]: arbitrary rgba setting.
+
+            Default: 'black'
+        rescale: boolean, optional
+            If True, will write out a rescaled image (without modifying the
+            original image). Default: True
        
         Examples
         --------
-        
-        >>> im = np.zeros([64,128,3])
+        >>> im = np.zeros([64,128,4])
         >>> for i in xrange(im.shape[0]):
-        >>>     for k in xrange(im.shape[2]):
-        >>>         im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     for k in xrange(im.shape[2]):
+        ...         im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
 
-        >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
-
-        >>> im_arr = ImageArray(im, info=myinfo)
-        >>> im_arr.write_png('test_ImageArray.png')
+        >>> im_arr = ImageArray(im)
+        >>> im_arr.write_png('standard.png')
+        >>> im_arr.write_png('non-scaled.png', rescale=False)
+        >>> im_arr.write_png('black_bg.png', background='black')
+        >>> im_arr.write_png('white_bg.png', background='white')
+        >>> im_arr.write_png('green_bg.png', background=[0,1,0,1])
+        >>> im_arr.write_png('transparent_bg.png', background=None)
 
         """
+        if rescale:
+            scaled = self.rescale(inline=False)
+        else:
+            scaled = self
+
+        if self.shape[-1] == 4:
+            out = scaled.add_background_color(background, inline=False)
+        else:
+            out = scaled
+
         if filename[-4:] != '.png': 
             filename += '.png'
 
         if clip_ratio is not None:
-            return write_bitmap(self.swapaxes(0, 1), filename,
-                                clip_ratio * self.std())
+            nz = out[:,:,:3][out[:,:,:3].nonzero()]
+            return write_bitmap(out.swapaxes(0, 1), filename,
+                                nz.mean() + \
+                                clip_ratio * nz.std())
         else:
-            return write_bitmap(self.swapaxes(0, 1), filename)
+            return write_bitmap(out.swapaxes(0, 1), filename)
 
     def write_image(self, filename, color_bounds=None, channel=None,  cmap_name="algae", func=lambda x: x):
         r"""Writes a single channel of the ImageArray to a png file.
@@ -197,11 +350,11 @@
         
         >>> im = np.zeros([64,128])
         >>> for i in xrange(im.shape[0]):
-        >>>     im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+        ...     im[i,:] = np.linspace(0., 0.3, im.shape[1])
 
         >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        >>>     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        >>>     'width':0.245, 'units':'cm', 'type':'rendering'}
+        ...     'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        ...     'width':0.245, 'units':'cm', 'type':'rendering'}
 
         >>> im_arr = ImageArray(im, info=myinfo)
         >>> im_arr.write_image('test_ImageArray.png')
@@ -245,27 +398,3 @@
 
     __doc__ += np.ndarray.__doc__
 
-if __name__ == "__main__":
-    im = np.zeros([64,128,3])
-    for i in xrange(im.shape[0]):
-        for k in xrange(im.shape[2]):
-            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
-
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
-
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_3d_ImageArray')
-
-    im = np.zeros([64,128])
-    for i in xrange(im.shape[0]):
-        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
-
-    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
-        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
-        'width':0.245, 'units':'cm', 'type':'rendering'}
-
-    im_arr = ImageArray(im, info=myinfo)
-    im_arr.save('test_2d_ImageArray')
-

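The alpha blending in add_background_color is the standard "over"
compositing of an rgba image onto an rgba background. A self-contained
sketch of the same arithmetic on a plain NumPy array; src and bg are
hypothetical inputs normalized to [0, 1]:

    import numpy as np

    def blend_background(src, bg):
        # src: (N, M, 4) rgba image; bg: (r, g, b, a) background tuple.
        out = src.copy()
        for i in range(3):
            out[:, :, i] = src[:, :, i] * src[:, :, 3] + \
                bg[i] * bg[3] * (1.0 - src[:, :, 3])
        out[:, :, 3] = src[:, :, 3] + bg[3] * (1.0 - src[:, :, 3])
        return out
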
diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -198,8 +198,10 @@
         """
         Gets back all the grids between a left edge and right edge
         """
-        grid_i = np.where((np.all(self.grid_right_edge > left_edge, axis=1)
-                         & np.all(self.grid_left_edge < right_edge, axis=1)) == True)
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1)
+                         & np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):

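The epsilon guard makes the overlap test strict, so grids that merely
touch the requested box at a face are no longer returned. A toy check
with a single hypothetical grid:

    import numpy as np
    eps = np.finfo(np.float64).eps
    grid_left_edge = np.array([[0.0, 0.0, 0.0]])   # one hypothetical grid
    grid_right_edge = np.array([[0.5, 0.5, 0.5]])
    left_edge = np.array([0.5, 0.5, 0.5])          # box sharing one face
    right_edge = np.array([1.0, 1.0, 1.0])
    overlap = (np.all((grid_right_edge - left_edge) > eps, axis=1) &
               np.all((right_edge - grid_left_edge) > eps, axis=1))
    print overlap  # [False]: face-touching grids are now excluded
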
diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/tests/test_image_array.py
--- /dev/null
+++ b/yt/data_objects/tests/test_image_array.py
@@ -0,0 +1,130 @@
+from yt.testing import *
+from yt.data_objects.image_array import ImageArray
+import numpy as np
+import os
+import tempfile
+import shutil
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+    np.seterr(all = 'ignore')
+
+def test_rgba_rescale():
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+    im_arr = ImageArray(im)
+
+    new_im = im_arr.rescale(inline=False)
+    yield assert_equal, im_arr[:,:,:3].max(), 2*10.
+    yield assert_equal, im_arr[:,:,3].max(), 3*10.
+    yield assert_equal, new_im[:,:,:3].sum(axis=2).max(), 1.0 
+    yield assert_equal, new_im[:,:,3].max(), 1.0
+
+    im_arr.rescale()
+    yield assert_equal, im_arr[:,:,:3].sum(axis=2).max(), 1.0
+    yield assert_equal, im_arr[:,:,3].max(), 1.0
+
+def test_image_array_hdf5():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_3d_ImageArray')
+
+    im = np.zeros([64,128])
+    for i in xrange(im.shape[0]):
+        im[i,:] = np.linspace(0.,0.3*k, im.shape[1])
+
+    myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), 
+        'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]),  
+        'width':0.245, 'units':'cm', 'type':'rendering'}
+
+    im_arr = ImageArray(im, info=myinfo)
+    im_arr.save('test_2d_ImageArray')
+
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+
+def test_image_array_rgb_png():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,3])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.write_png('standard.png')
+
+def test_image_array_rgba_png():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.write_png('standard.png')
+    im_arr.write_png('non-scaled.png', rescale=False)
+    im_arr.write_png('black_bg.png', background='black')
+    im_arr.write_png('white_bg.png', background='white')
+    im_arr.write_png('green_bg.png', background=[0.,1.,0.,1.])
+    im_arr.write_png('transparent_bg.png', background=None)
+
+
+def test_image_array_background():
+    # Perform I/O in safe place instead of yt main dir
+    tmpdir = tempfile.mkdtemp()
+    curdir = os.getcwd()
+    os.chdir(tmpdir)
+
+    im = np.zeros([64,128,4])
+    for i in xrange(im.shape[0]):
+        for k in xrange(im.shape[2]):
+            im[i,:,k] = np.linspace(0.,10.*k, im.shape[1])
+
+    im_arr = ImageArray(im)
+    im_arr.rescale()
+    new_im = im_arr.add_background_color([1.,0.,0.,1.], inline=False)
+    new_im.write_png('red_bg.png')
+    im_arr.add_background_color('black')
+    im_arr.write_png('black_bg2.png')
+ 
+    os.chdir(curdir)
+    # clean up
+    shutil.rmtree(tmpdir)
+

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/tests/test_pickle.py
--- /dev/null
+++ b/yt/data_objects/tests/test_pickle.py
@@ -0,0 +1,69 @@
+"""
+Testsuite for pickling yt objects.
+
+Author: Elizabeth Tasker <tasker at astro1.sci.hokudai.ac.jp>
+Affiliation: Hokkaido University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Elizabeth Tasker. All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import cPickle
+import os
+import tempfile
+from yt.testing \
+    import fake_random_pf, assert_equal
+
+
+def setup():
+    """Test specific setup."""
+    from yt.config import ytcfg
+    ytcfg["yt", "__withintesting"] = "True"
+
+
+def test_save_load_pickle():
+    """Main test for loading pickled objects"""
+    test_pf = fake_random_pf(64)
+
+    # create extracted region from boolean (fairly complex object)
+    center = (test_pf.domain_left_edge + test_pf.domain_right_edge) / 2
+    sp_outer = test_pf.h.sphere(center, test_pf.domain_width[0])
+    sp_inner = test_pf.h.sphere(center, test_pf.domain_width[0] / 10.0)
+    sp_boolean = test_pf.h.boolean([sp_outer, "NOT", sp_inner])
+
+    minv, maxv = sp_boolean.quantities["Extrema"]("Density")[0]
+    contour_threshold = min(minv * 10.0, 0.9 * maxv)
+
+    contours = sp_boolean.extract_connected_sets(
+        "Density", 1, contour_threshold, maxv + 1, log_space=True, cache=True)
+
+    # save object
+    cpklfile = tempfile.NamedTemporaryFile(delete=False)
+    cPickle.dump(contours[1][0], cpklfile)
+    cpklfile.close()
+
+    # load object
+    test_load = cPickle.load(open(cpklfile.name, "rb"))
+
+    assert_equal.description = \
+        "%s: File was pickle-loaded successfully" % __name__
+    yield assert_equal, test_load is not None, True
+    assert_equal.description = \
+        "%s: Length of pickle-loaded connected set object" % __name__
+    yield assert_equal, len(contours[1][0]), len(test_load)
+
+    os.remove(cpklfile.name)

diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,24 +1,60 @@
-from yt.testing import *
+"""
+Tests for AMRSlice
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Kacper Kowalik <xarthisius.kk at gmail.com>
+Affiliation: CA UMK
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Samuel Skillman.  All Rights Reserved.
+  Copyright (C) 2013 Kacper Kowalik.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
 import os
+import numpy as np
+from nose.tools import raises
+from yt.testing import \
+    fake_random_pf, assert_equal, assert_array_equal
+from yt.utilities.definitions import \
+    x_dict, y_dict
+from yt.utilities.exceptions import \
+    YTNoDataInObjectError
 
 def setup():
     from yt.config import ytcfg
-    ytcfg["yt","__withintesting"] = "True"
+    ytcfg["yt", "__withintesting"] = "True"
+
 
 def teardown_func(fns):
     for fn in fns:
         os.remove(fn)
 
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
         # parallelism isn't broken
-        pf = fake_random_pf(64, nprocs = nprocs)
+        pf = fake_random_pf(64, nprocs=nprocs)
         dims = pf.domain_dimensions
         xn, yn, zn = pf.domain_dimensions
-        xi, yi, zi = pf.domain_left_edge + 1.0/(pf.domain_dimensions * 2)
-        xf, yf, zf = pf.domain_right_edge - 1.0/(pf.domain_dimensions * 2)
-        coords = np.mgrid[xi:xf:xn*1j, yi:yf:yn*1j, zi:zf:zn*1j]
+        xi, yi, zi = pf.domain_left_edge + 1.0 / (pf.domain_dimensions * 2)
+        xf, yf, zf = pf.domain_right_edge - 1.0 / (pf.domain_dimensions * 2)
+        coords = np.mgrid[xi:xf:xn * 1j, yi:yf:yn * 1j, zi:zf:zn * 1j]
         uc = [np.unique(c) for c in coords]
         slc_pos = 0.5
         # Some simple slice tests with single grids
@@ -33,31 +69,45 @@
                 yield assert_equal, slc["Ones"].max(), 1.0
                 yield assert_equal, np.unique(slc["px"]), uc[xax]
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
-                yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
-                yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                yield assert_equal, np.unique(slc["pdx"]), 0.5 / dims[xax]
+                yield assert_equal, np.unique(slc["pdy"]), 0.5 / dims[yax]
                 pw = slc.to_pw()
                 fns += pw.save()
-                frb = slc.to_frb((1.0,'unitary'), 64)
+                frb = slc.to_frb((1.0, 'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
-                            slc.__str__()
+                        slc.__str__()
                     yield assert_equal, frb[slc_field].info['axis'], \
-                            ax
+                        ax
                     yield assert_equal, frb[slc_field].info['field'], \
-                            slc_field
+                        slc_field
                     yield assert_equal, frb[slc_field].info['units'], \
-                            pf.field_info[slc_field].get_units()
+                        pf.field_info[slc_field].get_units()
                     yield assert_equal, frb[slc_field].info['xlim'], \
-                            frb.bounds[:2]
+                        frb.bounds[:2]
                     yield assert_equal, frb[slc_field].info['ylim'], \
-                            frb.bounds[2:]
+                        frb.bounds[2:]
                     yield assert_equal, frb[slc_field].info['length_to_cm'], \
-                            pf['cm']
+                        pf['cm']
                     yield assert_equal, frb[slc_field].info['center'], \
-                            slc.center
+                        slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
-                            slc_pos
+                        slc_pos
                 teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
+
+def test_slice_over_edges():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+
+    slc = pf.h.slice(0, 0.0, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 0], np.zeros((4))
+    slc = pf.h.slice(1, 0.5, "Density")
+    yield assert_array_equal, slc.grid_left_edge[:, 1], np.ones((4)) * 0.5
+
+
+@raises(YTNoDataInObjectError)
+def test_slice_over_outer_boundary():
+    pf = fake_random_pf(64, nprocs=8, fields=["Density"], negative=[False])
+    slc = pf.h.slice(2, 1.0, "Density")

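A condensed sketch of the slicing workflow these tests exercise, reusing
the fake_random_pf helper they import; the coordinate and resolution are
illustrative:

    from yt.testing import fake_random_pf
    pf = fake_random_pf(64, nprocs=8)
    slc = pf.h.slice(0, 0.5, "Density")      # axis, coordinate, field
    frb = slc.to_frb((1.0, 'unitary'), 64)   # fixed-resolution buffer
    print frb["Density"].shape               # -> (64, 64)
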
diff -r 813b806f980e0a8f9acfd1956493d4221ec3d04c -r 700cd9eb832eea4673cc4467164e1547834bf454 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -55,7 +55,7 @@
      G, \
      rho_crit_now, \
      speed_of_light_cgs, \
-     km_per_cm
+     km_per_cm, keV_per_K
 
 from yt.utilities.math_utils import \
     get_sph_r_component, \
@@ -216,18 +216,25 @@
            data["Density"] * data["ThermalEnergy"]
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
+def _TempkeV(field, data):
+    return data["Temperature"] * keV_per_K
+add_field("TempkeV", function=_TempkeV, units=r"\rm{keV}",
+          display_name="Temperature")
+
 def _Entropy(field, data):
     if data.has_field_parameter("mu"):
         mw = mh*data.get_field_parameter("mu")
     else :
         mw = mh
+    try:
+        gammam1 = data.pf["Gamma"] - 1.0
+    except:
+        gammam1 = 5./3. - 1.0
     return kboltz * data["Temperature"] / \
-           ((data["Density"]/mw)**(data.pf["Gamma"] - 1.0))
+           ((data["Density"]/mw)**gammam1)
 add_field("Entropy", units=r"\rm{ergs}\ \rm{cm}^{3\gamma-3}",
           function=_Entropy)
 
-
-
 ### spherical coordinates: r (radius)
 def _sph_r(field, data):
     center = data.get_field_parameter("center")
@@ -784,22 +791,28 @@
          units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
          validators=[ValidateParameter('center')])
 
-def get_radius(positions, data):
-    c = data.get_field_parameter("center")
-    n_tup = tuple([1 for i in range(positions.ndim-1)])
-    center = np.tile(np.reshape(c, (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
-    periodicity = data.pf.periodicity
-    if any(periodicity):
-        period = data.pf.domain_right_edge - data.pf.domain_left_edge
-        return periodic_dist(positions, center, period, periodicity)
-    else:
-        return euclidean_dist(positions, center)
+def get_radius(data, field_prefix):
+    center = data.get_field_parameter("center")
+    DW = data.pf.domain_right_edge - data.pf.domain_left_edge
+    radius = np.zeros(data[field_prefix+"x"].shape, dtype='float64')
+    r = radius.copy()
+    if any(data.pf.periodicity):
+        rdw = radius.copy()
+    for i, ax in enumerate('xyz'):
+        np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+        if data.pf.periodicity[i] == True:
+            np.subtract(DW[i], r, rdw)
+            np.abs(r, r)
+            np.minimum(r, rdw, r)
+        np.power(r, 2.0, r)
+        np.add(radius, r, radius)
+    np.sqrt(radius, radius)
+    return radius
+
 def _ParticleRadius(field, data):
-    positions = np.array([data["particle_position_%s" % ax] for ax in 'xyz'])
-    return get_radius(positions, data)
+    return get_radius(data, "particle_position_")
 def _Radius(field, data):
-    positions = np.array([data['x'], data['y'], data['z']])
-    return get_radius(positions, data)
+    return get_radius(data, "")
 
 def _ConvertRadiusCGS(data):
     return data.convert("cm")

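The rewritten get_radius accumulates squared per-axis displacements in
place, applying the minimum-image convention along periodic axes. A
standalone sketch of the same idea (positions, center, DW, and
periodicity are hypothetical inputs; this version takes the absolute
displacement before the wrap comparison):

    import numpy as np

    def periodic_radius(positions, center, DW, periodicity):
        # positions: (N, 3); center: (3,); DW: (3,) domain widths.
        radius = np.zeros(positions.shape[0], dtype="float64")
        for i in range(3):
            r = np.abs(positions[:, i] - center[i])
            if periodicity[i]:
                r = np.minimum(r, DW[i] - r)  # minimum-image convention
            radius += r * r
        return np.sqrt(radius)
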
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/ea7c50bf715d/
Changeset:   ea7c50bf715d
Branch:      yt
User:        jzuhone
Date:        2013-03-01 19:56:15
Summary:     Merging
Affected #:  108 files

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 .hgchurn
--- /dev/null
+++ b/.hgchurn
@@ -0,0 +1,11 @@
+stephenskory at yahoo.com = s at skory.us
+"Stephen Skory stephenskory at yahoo.com" = s at skory.us
+yuan at astro.columbia.edu = bear0980 at gmail.com
+juxtaposicion at gmail.com = cemoody at ucsc.edu
+chummels at gmail.com = chummels at astro.columbia.edu
+jwise at astro.princeton.edu = jwise at physics.gatech.edu
+atmyers = atmyers at berkeley.edu
+sam.skillman at gmail.com = samskillman at gmail.com
+casey at thestarkeffect.com = caseywstark at gmail.com
+chiffre = chiffre at posteo.de
+Christian Karch = chiffre at posteo.de

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,6 +4,7 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+yt_updater.log
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -26,6 +27,7 @@
 yt/utilities/lib/RayIntegrators.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
+yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 syntax: glob

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,6 +5152,7 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
-0000000000000000000000000000000000000000 hop callback

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,29 +1,41 @@
 YT is a group effort.
 
-Contributors:                   Matthew Turk (matthewturk at gmail.com)
-                                Britton Smith (brittonsmith at gmail.com)
-                                Jeff Oishi (jsoishi at gmail.com)
-                                Stephen Skory (s at skory.us)
-                                Sam Skillman (samskillman at gmail.com)
-                                Devin Silvia (devin.silvia at gmail.com)
-                                John Wise (jwise at astro.princeton.edu)
-                                David Collins (dcollins at physics.ucsd.edu)
-                                Christopher Moody (cemoody at ucsc.edu)
-                                Oliver Hahn (ohahn at stanford.edu)
-                                John ZuHone (jzuhone at cfa.harvard.edu)
-                                Chris Malone (cmalone at mail.astro.sunysb.edu)
-                                Cameron Hummels (chummels at astro.columbia.edu)
-                                Stefan Klemer (sklemer at phys.uni-goettingen.de)
-                                Tom Abel (tabel at stanford.edu)
-                                Andrew Myers (atmyers at astro.berkeley.edu)
-                                Michael Kuhlen (mqk at astro.berkeley.edu)
-                                Casey Stark (caseywstark at gmail.com)
-                                JC Passy (jcpassy at gmail.com)
-                                Eve Lee (elee at cita.utoronto.ca)
-                                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-                                Kacper Kowalik (xarthisius.kk at gmail.com)
-                                Nathan Goldbaum (goldbaum at ucolick.org)
-                                Anna Rosen (rosen at ucolick.org)
+Contributors:                   Tom Abel (tabel at stanford.edu)
+				David Collins (dcollins at physics.ucsd.edu)
+				Brian Crosby (crosby.bd at gmail.com)
+				Andrew Cunningham (ajcunn at gmail.com)
+				Nathan Goldbaum (goldbaum at ucolick.org)
+				Markus Haider (markus.haider at uibk.ac.at)
+				Cameron Hummels (chummels at gmail.com)
+				Christian Karch (chiffre at posteo.de)
+				Ji-hoon Kim (me at jihoonkim.org)
+				Steffen Klemer (sklemer at phys.uni-goettingen.de)
+				Kacper Kowalik (xarthisius.kk at gmail.com)
+				Michael Kuhlen (mqk at astro.berkeley.edu)
+				Eve Lee (elee at cita.utoronto.ca)
+				Yuan Li (yuan at astro.columbia.edu)
+				Chris Malone (chris.m.malone at gmail.com)
+				Josh Maloney (joshua.moloney at colorado.edu)
+				Chris Moody (cemoody at ucsc.edu)
+				Andrew Myers (atmyers at astro.berkeley.edu)
+				Jeff Oishi (jsoishi at gmail.com)
+				Jean-Claude Passy (jcpassy at uvic.ca)
+				Mark Richardson (Mark.L.Richardson at asu.edu)
+				Thomas Robitaille (thomas.robitaille at gmail.com)
+				Anna Rosen (rosen at ucolick.org)
+				Anthony Scopatz (scopatz at gmail.com)
+				Devin Silvia (devin.silvia at colorado.edu)
+				Sam Skillman (samskillman at gmail.com)
+				Stephen Skory (s at skory.us)
+				Britton Smith (brittonsmith at gmail.com)
+				Geoffrey So (gsiisg at gmail.com)
+				Casey Stark (caseywstark at gmail.com)
+				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+				Stephanie Tonnesen (stonnes at gmail.com)
+				Matthew Turk (matthewturk at gmail.com)
+				Rich Wagner (rwagner at physics.ucsd.edu)
+				John Wise (jwise at physics.gatech.edu)
+				John ZuHone (jzuhone at gmail.com)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -13,7 +13,7 @@
  * Do not use nested classes unless you have a very good reason to, such as
    requiring a namespace or class-definition modification.  Classes should live
    at the top level.  __metaclass__ is exempt from this.
- * Do not use unecessary parenthesis in conditionals.  if((something) and
+ * Do not use unnecessary parenthesis in conditionals.  if((something) and
    (something_else)) should be rewritten as if something and something_else.
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
@@ -50,7 +50,7 @@
    replace the old class.  Too many options makes for a confusing user
    experience.
  * Parameter files are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannoted
+ * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
 

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -7,11 +7,9 @@
 # There are a few options, but you only need to set *one* of them.  And
 # that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
 # installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too.  (It'll already
+# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# NOTE: If you have trouble with wxPython, set INST_WXPYTHON=0 .
-#
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
@@ -19,11 +17,16 @@
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
 
+if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+then
+    DEST_DIR=${YT_DEST}
+fi
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
@@ -44,8 +47,9 @@
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
+INST_SCIPY=0    # Install scipy?
 
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
 YT_DIR=""
 
 # If you need to pass anything to matplotlib, do so here.
@@ -155,18 +159,6 @@
         echo "   $ module swap PE-pgi PE-gnu"
         echo
     fi
-    if [ "${MYHOSTLONG%%ranger}" != "${MYHOSTLONG}" ]
-    then
-        echo "Looks like you're on Ranger."
-        echo
-        echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
-        echo "These commands should take care of that for you:"
-        echo
-        echo "   $ module unload mvapich2"
-        echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich2"
-        echo
-    fi
     if [ "${MYHOST##steele}" != "${MYHOST}" ]
     then
         echo "Looks like you're on Steele."
@@ -184,24 +176,53 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
-        echo "website"
+	echo "The instructions for obtaining these tools varies according"
+	echo "to your exact OS version.  On older versions of OS X, you"
+	echo "must register for an account on the apple developer tools"
+	echo "website: https://developer.apple.com/downloads to obtain the"
+	echo "download link."
+	echo 
+	echo "We have gathered some additional instructions for each"
+	echo "version of OS X below. If you have trouble installing yt"
+	echo "after following these instructions, don't hesitate to contact"
+	echo "the yt user's e-mail list."
+	echo
+	echo "You can see which version of OSX you are running by clicking"
+	echo "'About This Mac' in the apple menu on the left hand side of"
+	echo "menu bar.  We're assuming that you've installed all operating"
+	echo "system updates; if you have an older version, we suggest"
+	echo "running software update and installing all available updates."
+	echo 
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo "Apple developer tools website."
         echo
-        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
-        echo "website"
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+	echo "developer tools website.  You can either download the"
+	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+	echo "Software Update to update to XCode 3.2.6 or" 
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "bundle (4.1 GB)."
         echo
-        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
-        echo "alternatively download the Xcode command line tools from"
-        echo "the mac developer tools website"
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+	echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
         echo
-        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
-        echo "Leopard) or newer.  If you do, please set the following"
-        echo "environment variables, remove any broken installation tree, and"
-        echo "re-run this script verbatim."
+	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "(search for Xcode)."
+	echo "Additionally, you will have to manually install the Xcode"
+	echo "command line tools, see:" 
+	echo "http://stackoverflow.com/questions/9353444"
+	echo "Alternatively, download the Xcode command line tools from"
+	echo "the Apple developer tools website."
+	echo
+        echo "NOTE: It's possible that the installation will fail, if so," 
+	echo "please set the following environment variables, remove any" 
+	echo "broken installation tree, and re-run this script verbatim."
         echo
         echo "$ export CC=gcc-4.2"
         echo "$ export CXX=g++-4.2"
-        echo
+	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
@@ -209,6 +230,27 @@
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+    then
+        echo "Looks like you're on an OpenSUSE-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * devel_C_C++"
+        echo "  * libopenssl-devel"
+        echo "  * libuuid-devel"
+        echo "  * zip"
+        echo "  * gcc-c++"
+        echo
+        echo "You can accomplish this by executing:"
+        echo
+        echo "$ sudo zypper install -t pattern devel_C_C++"
+        echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+        echo
+        echo "I am also setting special configure arguments to Python to"
+        echo "specify control lib/lib64 issues."
+        PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+    fi
     if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
     then
         echo "Looks like you're on an Ubuntu-compatible machine."
@@ -240,6 +282,20 @@
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
+    if [ $INST_SCIPY -eq 1 ]
+    then
+	echo
+	echo "Looks like you've requested that the install script build SciPy."
+	echo
+	echo "If the SciPy build fails, please uncomment one of the lines"
+	echo "at the top of the install script that sets NUMPY_ARGS, delete"
+	echo "any broken installation tree, and re-run the install script"
+	echo "verbatim."
+	echo
+	echo "If that doesn't work, don't hesitate to ask for help on the yt"
+	echo "user's mailing list."
+	echo
+    fi
     if [ ! -z "${CFLAGS}" ]
     then
         echo "******************************************"
@@ -258,9 +314,9 @@
 echo
 echo "========================================================================"
 echo
-echo "Hi there!  This is the YT installation script.  We're going to download"
+echo "Hi there!  This is the yt installation script.  We're going to download"
 echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
 echo
 echo "Inside the installation script you can set a few variables.  Here's what"
 echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
@@ -298,6 +354,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
+get_willwont ${INST_SCIPY}
+echo "be installing scipy"
+
 printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
@@ -437,7 +497,7 @@
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
 echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -450,6 +510,9 @@
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -461,10 +524,13 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.4.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
 get_ytproject h5py-2.1.0.tar.gz
 get_ytproject Cython-0.17.1.tar.gz
@@ -591,10 +657,10 @@
 
 if [ ! -e Python-2.7.3/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
+    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
     [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
     cd Python-2.7.3
-    ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -609,7 +675,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.4
+    do_setup_py mercurial-2.5.1
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -656,7 +722,40 @@
 echo "Installing pip"
 ( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
-do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+if [ $INST_SCIPY -eq 0 ]
+then
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+else
+    if [ ! -e scipy-0.11.0/done ]
+    then
+	if [ ! -e BLAS/done ]
+	then
+	    tar xfz blas.tar.gz
+	    echo "Building BLAS"
+	    cd BLAS
+	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
+	    ar r libfblas.a *.o 1>> ${LOG_FILE}
+	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    rm -rf *.o
+	    touch done
+	    cd ..
+	fi
+	if [ ! -e lapack-3.4.2/done ]
+	then
+	    tar xfz lapack-3.4.2.tar.gz
+	    echo "Building LAPACK"
+	    cd lapack-3.4.2/
+	    cp INSTALL/make.inc.gfortran make.inc
+	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    touch done
+	    cd ..
+	fi
+    fi
+    export BLAS=$PWD/BLAS/libfblas.a
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
 then
@@ -846,3 +945,6 @@
 
 print_afterword
 print_afterword >> ${LOG_FILE}
+
+echo "yt dependencies were last updated on" > ${DEST_DIR}/.yt_update
+date >> ${DEST_DIR}/.yt_update
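
For context, the echo lines above record "<sha512>  <tarball>" pairs that the download step can check against the fetched files. A minimal Python sketch of such a verification (a hypothetical helper; the install script itself does this in shell):

    import hashlib

    def verify_sha512(filename, expected_hexdigest):
        # Stream the file in 1 MB chunks so large tarballs do not
        # have to fit in memory at once.
        h = hashlib.sha512()
        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_hexdigest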

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -36,14 +36,20 @@
 speed_of_light_kms = speed_of_light_cgs * km_per_cm
 
 class AbsorptionSpectrum(object):
+    r"""Create an absorption spectrum object.
+
+    Parameters
+    ----------
+
+    lambda_min : float
+       lower wavelength bound in angstroms.
+    lambda_max : float
+       upper wavelength bound in angstroms.
+    n_lambda : float
+       number of wavelength bins.
+    """
+
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        """
-        Create an absorption spectrum object.
-        :param lambda_min (float): lower wavelength bound in angstroms.
-        :param lambda_max (float): upper wavelength bound in angstroms.
-        :param n_lambda (float): number of wavelength bins.
-        """
-
         self.n_lambda = n_lambda
         self.tau_field = None
         self.flux_field = None
@@ -56,16 +62,24 @@
     def add_line(self, label, field_name, wavelength,
                  f_value, gamma, atomic_mass,
                  label_threshold=None):
+        r"""Add an absorption line to the list of lines included in the spectrum.
+
+        Parameters
+        ----------
+        
+        label : string
+           label for the line.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        f_value : float
+           line f-value.
+        gamma : float
+           line gamma value.
+        atomic_mass : float
+           mass of atom in amu.
         """
-        Add an absorption line to the list of lines included in the spectrum.
-        :param label (string): label for the line.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param f_value (float): line f-value.
-        :param gamma (float): line gamme value.
-        :param atomic_mass (float): mass of atom in amu.
-        """
-
         self.line_list.append({'label': label, 'field_name': field_name,
                                'wavelength': wavelength, 'f_value': f_value,
                                'gamma': gamma, 'atomic_mass': atomic_mass,
@@ -75,11 +89,20 @@
                       normalization, index):
         """
         Add a continuum feature that follows a power-law.
-        :param label (string): label for the feature.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param normalization (float): the column density normalization.
-        :param index (float): the power-law index for the wavelength dependence.
+
+        Parameters
+        ----------
+
+        label : string
+           label for the feature.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        normalization : float
+           the column density normalization.
+        index : float
+           the power-law index for the wavelength dependence.
         """
 
         self.continuum_list.append({'label': label, 'field_name': field_name,
@@ -92,14 +115,17 @@
                       use_peculiar_velocity=True):
         """
         Make spectrum from ray data using the line list.
-        :param input_file (string): path to input ray data.
-        :param output_file (string): path for output file.
-               File formats are chosen based on the filename extension.
-                    - .h5: hdf5.
-                    - .fits: fits.
-                    - anything else: ascii.
-        :param use_peculiar_velocity (bool): if True, include line of sight
-        velocity for shifting lines.
+
+        Parameters
+        ----------
+
+        input_file : string
+           path to input ray data.
+        output_file : string
+           path for output file.  File formats are chosen based on the filename extension.
+           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+        use_peculiar_velocity : bool
+           if True, include line of sight velocity for shifting lines.
         """
 
         input_fields = ['dl', 'redshift', 'Temperature']
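
Putting the converted docstrings together, a usage sketch consistent with the signatures above (the field name and the Lyman-alpha atomic data are illustrative, not taken from this diff, and this assumes input_file and output_file are make_spectrum's leading arguments as the parameter list suggests):

    >>> sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
    >>> sp.add_line("Lya", "HI_NumberDensity", 1215.67,
    ...             f_value=0.4164, gamma=6.265e8, atomic_mass=1.00794)
    >>> sp.make_spectrum("my_ray.h5", "spectrum.h5")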

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -106,8 +106,9 @@
     RadialColumnDensity
 
 from .spectral_integrator.api import \
-    SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+     add_xray_emissivity_field, \
+     add_xray_luminosity_field, \
+     add_xray_photon_emissivity_field
 
 from .star_analysis.api import \
     StarFormationRate, \

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -49,6 +49,64 @@
      _light_cone_projection
 
 class LightCone(CosmologySplice):
+    """
+    Initialize a LightCone object.
+
+    Parameters
+    ----------
+    near_redshift : float
+        The near (lowest) redshift for the light cone.
+    far_redshift : float
+        The far (highest) redshift for the light cone.
+    observer_redshift : float
+        The redshift of the observer.
+        Default: 0.0.
+    field_of_view_in_arcminutes : float
+        The field of view of the image in units of arcminutes.
+        Default: 600.0.
+    image_resolution_in_arcseconds : float
+        The size of each image pixel in units of arcseconds.
+        Default: 60.0.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the initial
+        and final redshift.  If false, the light cone solution will contain
+        as many entries as possible within the redshift interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive datasets in
+        the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter specifies
+        the fraction of the total box size to be traversed before rerandomizing
+        the projection axis and center.  This was invented to allow light cones
+        with thin slices to sample coherent large scale structure, but in
+        practice does not work so well.  Try setting this parameter to 1 and
+        see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+    set_parameters : dict
+        Dictionary of parameters to attach to pf.parameters.
+        Default: None.
+    output_dir : string
+        The directory in which images and data files will be written.
+        Default: 'LC'.
+    output_prefix : string
+        The prefix of all images and data files.
+        Default: 'LightCone'.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  observer_redshift=0.0,
@@ -59,64 +117,6 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
-        """
-        Initialize a LightCone object.
-
-        Parameters
-        ----------
-        near_redshift : float
-            The near (lowest) redshift for the light cone.
-        far_redshift : float
-            The far (highest) redshift for the light cone.
-        observer_redshift : float
-            The redshift of the observer.
-            Default: 0.0.
-        field_of_view_in_arcminutes : float
-            The field of view of the image in units of arcminutes.
-            Default: 600.0.
-        image_resolution_in_arcseconds : float
-            The size of each image pixel in units of arcseconds.
-            Default: 60.0.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the initial
-            and final redshift.  If false, the light cone solution will contain
-            as many entries as possible within the redshift interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive datasets in
-            the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter specifies
-            the fraction of the total box size to be traversed before rerandomizing
-            the projection axis and center.  This was invented to allow light cones
-            with thin slices to sample coherent large scale structure, but in
-            practice does not work so well.  Try setting this parameter to 1 and
-            see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-        set_parameters : dict
-            Dictionary of parameters to attach to pf.parameters.
-            Default: None.
-        output_dir : string
-            The directory in which images and data files will be written.
-            Default: 'LC'.
-        output_prefix : string
-            The prefix of all images and data files.
-            Default: 'LightCone'.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
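
A construction sketch matching the signature and defaults documented above (the parameter file name and simulation type mirror the LightRay doctest in the following file's diff):

    >>> lc = LightCone("simulation.par", "Enzo", 0.0, 0.1,
    ...                field_of_view_in_arcminutes=600.0,
    ...                image_resolution_in_arcseconds=60.0,
    ...                output_dir='LC', output_prefix='LightCone')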

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -40,66 +40,66 @@
     parallel_root_only
 
 class LightRay(CosmologySplice):
+    """
+    Create a LightRay object.  A light ray is much like a light cone,
+    in that it stacks together multiple datasets in order to extend a
+    redshift interval.  Unlike a light cone, which does randomly
+    oriented projections for each dataset, a light ray consists of
+    randomly oriented single rays.  The purpose of these is to create
+    synthetic QSO lines of sight.
+
+    Once the LightRay object is set up, use LightRay.make_light_ray to
+    begin making rays.  Different randomizations can be created with a
+    single object by providing different random seeds to make_light_ray.
+
+    Parameters
+    ----------
+    parameter_filename : string
+        The simulation parameter file.
+    simulation_type : string
+        The simulation type.
+    near_redshift : float
+        The near (lowest) redshift for the light ray.
+    far_redshift : float
+        The far (highest) redshift for the light ray.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the
+        initial and final redshift.  If false, the light ray solution
+        will contain as many entries as possible within the redshift
+        interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive
+        datasets in the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter
+        specifies the fraction of the total box size to be traversed
+        before rerandomizing the projection axis and center.  This
+        was invented to allow light rays with thin slices to sample
+        coherent large scale structure, but in practice does not work
+        so well.  Try setting this parameter to 1 and see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
                  find_outputs=False):
-        """
-        Create a LightRay object.  A light ray is much like a light cone,
-        in that it stacks together multiple datasets in order to extend a
-        redshift interval.  Unlike a light cone, which does randomly
-        oriented projections for each dataset, a light ray consists of
-        randomly oriented single rays.  The purpose of these is to create
-        synthetic QSO lines of sight.
-
-        Once the LightRay object is set up, use LightRay.make_light_ray to
-        begin making rays.  Different randomizations can be created with a
-        single object by providing different random seeds to make_light_ray.
-
-        Parameters
-        ----------
-        parameter_filename : string
-            The simulation parameter file.
-        simulation_type : string
-            The simulation type.
-        near_redshift : float
-            The near (lowest) redshift for the light ray.
-        far_redshift : float
-            The far (highest) redshift for the light ray.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the
-            initial and final redshift.  If false, the light ray solution
-            will contain as many entries as possible within the redshift
-            interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive
-            datasets in the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter
-            specifies the fraction of the total box size to be traversed
-            before rerandomizing the projection axis and center.  This
-            was invented to allow light rays with thin slices to sample
-            coherent large scale structure, but in practice does not work
-            so well.  Try setting this parameter to 1 and see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -270,47 +270,43 @@
         Examples
         --------
 
-        from yt.mods import *
-        from yt.analysis_modules.halo_profiler.api import *
-        from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
-
-        halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
-
-        halo_profiler_actions = []
-        # Add a virial filter.
-        halo_profiler_actions.append({'function': add_halo_filter,
-                                      'args': VirialFilter,
-                                      'kwargs': {'overdensity_field': 'ActualOverdensity',
-                                                 'virial_overdensity': 200,
-                                                 'virial_filters': \
-                                                     [['TotalMassMsun','>=','1e14']],
-                                                 'virial_quantities': \
-                                                     ['TotalMassMsun','RadiusMpc']}})
-        # Make the profiles.
-        halo_profiler_actions.append({'function': make_profiles,
-                                      'args': None,
-                                      'kwargs': {'filename': 'VirializedHalos.out'}})
-
-        halo_list = 'filtered'
-
-        halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                                        halo_profiler_actions=halo_profiler_actions,
-                                        halo_list=halo_list)
-
-        my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
-                          use_minimum_datasets=True,
-                          time_data=False)
-
-        my_ray.make_light_ray(seed=12345,
-                              solution_filename='solution.txt',
-                              data_filename='my_ray.h5',
-                              fields=['Temperature', 'Density'],
-                              get_nearest_halo=True,
-                              nearest_halo_fields=['TotalMassMsun_100',
-                                                   'RadiusMpc_100'],
-                              halo_profiler_parameters=halo_profiler_parameters,
-                              get_los_velocity=True)
-
+        >>> from yt.mods import *
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
+        >>> halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
+        >>> halo_profiler_actions = []
+        >>> # Add a virial filter.
+        >>> halo_profiler_actions.append({'function': add_halo_filter,
+        ...                           'args': VirialFilter,
+        ...                           'kwargs': {'overdensity_field': 'ActualOverdensity',
+        ...                                      'virial_overdensity': 200,
+        ...                                      'virial_filters': [['TotalMassMsun','>=','1e14']],
+        ...                                      'virial_quantities': ['TotalMassMsun','RadiusMpc']}})
+        ...
+        >>> # Make the profiles.
+        >>> halo_profiler_actions.append({'function': make_profiles,
+        ...                           'args': None,
+        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...
+        >>> halo_list = 'filtered'
+        >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
+        ...                             halo_profiler_actions=halo_profiler_actions,
+        ...                             halo_list=halo_list)
+        ...
+        >>> my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
+        ...                use_minimum_datasets=True,
+        ...                time_data=False)
+        ...
+        >>> my_ray.make_light_ray(seed=12345,
+        ...                   solution_filename='solution.txt',
+        ...                   data_filename='my_ray.h5',
+        ...                   fields=['Temperature', 'Density'],
+        ...                   get_nearest_halo=True,
+        ...                   nearest_halo_fields=['TotalMassMsun_100',
+        ...                                        'RadiusMpc_100'],
+        ...                   halo_profiler_parameters=halo_profiler_parameters,
+        ...                   get_los_velocity=True)
+        
         """
 
         if halo_profiler_parameters is None:

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -142,18 +142,30 @@
         if self.CoM is not None:
             return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        if isinstance(self, FOFHalo):
-            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
-        else:
-            c_vec = self.maximum_density_location() - self.pf.domain_center
-        cx = (cx - c_vec[0])
-        cy = (cy - c_vec[1])
-        cz = (cz - c_vec[2])
-        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
-        return (com * pm).sum(axis=1) / pm.sum() + c_vec
+        c = {}
+        c[0] = self["particle_position_x"]
+        c[1] = self["particle_position_y"]
+        c[2] = self["particle_position_z"]
+        c_vec = np.zeros(3)
+        com = []
+        for i in range(3):
+            # A halo is likely wrapped around a periodic boundary if the
+            # distance between the max and min particle positions is
+            # larger than half the box. So skip the rest if the
+            # converse is true.
+            # Note we might make a change here when periodicity-handling is
+            # fully implemented.
+            if (c[i].max() - c[i].min()) < (self.pf.domain_width[i] / 2.):
+                com.append(c[i])
+                continue
+            # Now we want to flip around only those close to the left boundary.
+            d_left = c[i] - self.pf.domain_left_edge[i]
+            sel = (d_left <= (self.pf.domain_width[i]/2))
+            c[i][sel] += self.pf.domain_width[i]
+            com.append(c[i])
+        com = np.array(com)
+        c = (com * pm).sum(axis=1) / pm.sum()
+        return c%self.pf.domain_width
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
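
The new center-of-mass logic above can be read as the following standalone sketch (a paraphrase for clarity, not yt code): on any axis where the particle extent exceeds half the box, particles within half a box width of the left edge are shifted up by one box length before the mass-weighted mean is taken, and the result is wrapped back into the domain.

    import numpy as np

    def periodic_com(pos, mass, left_edge, domain_width):
        # pos is a (3, N) array of particle positions, mass has length N.
        pos = pos.copy()
        for i in range(3):
            # If the halo spans less than half the box on this axis,
            # it cannot wrap around the periodic boundary.
            if (pos[i].max() - pos[i].min()) < (domain_width[i] / 2.0):
                continue
            near_left = (pos[i] - left_edge[i]) <= (domain_width[i] / 2.0)
            pos[i][near_left] += domain_width[i]
        com = (pos * mass).sum(axis=1) / mass.sum()
        return com % domain_width
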
@@ -809,7 +821,6 @@
     _radjust = 1.05
 
     def __init__(self, pf, id, size=None, CoM=None,
-
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
         e1_vec=None, tilt=None, supp=None):
@@ -835,11 +846,18 @@
         self.bin_count = None
         self.overdensity = None
         self.indices = np.array([])  # Never used for a LoadedHalo.
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
         # A supplementary data dict.
         if supp is None:
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
+
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -1041,7 +1059,7 @@
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True):
+    def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1056,6 +1074,7 @@
         mylog.info("Parsing outputs")
         self._parse_output()
         mylog.debug("Finished. (%s)", len(self))
+        self.redshift = redshift
 
     def __obtain_particles(self):
         if self.dm_only:
@@ -1239,6 +1258,7 @@
         else:
             f = open(filename, "w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
+        f.write("# REDSHIFT OF OUTPUT = %f\n" % (self.redshift))
 
         if not ellipsoid_data:
             f.write("\t".join(["# Group","Mass","# part","max dens"
@@ -1435,18 +1455,17 @@
         pass
 
 class HOPHaloList(HaloList):
-
+    """
+    Run hop on *data_source* with a given density *threshold*.  If
+    *dm_only* is set, only run it on the dark matter particles, otherwise
+    on all particles.  Returns an iterable collection of *HopGroup* items.
+    """
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
               ["ParticleMassMsun"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
-        """
-        Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
-        """
         self.threshold = threshold
         mylog.info("Initializing HOP")
         HaloList.__init__(self, data_source, dm_only)
@@ -1484,10 +1503,10 @@
     _name = "FOF"
     _halo_class = FOFHalo
 
-    def __init__(self, data_source, link=0.2, dm_only=True):
+    def __init__(self, data_source, link=0.2, dm_only=True, redshift=-1):
         self.link = link
         mylog.info("Initializing FOF")
-        HaloList.__init__(self, data_source, dm_only)
+        HaloList.__init__(self, data_source, dm_only, redshift=redshift)
 
     def _run_finder(self):
         self.tags = \
@@ -1635,6 +1654,11 @@
 
 
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
+    """
+    Run hop on *data_source* with a given density *threshold*.  If
+    *dm_only* is set, only run it on the dark matter particles, otherwise
+    on all particles.  Returns an iterable collection of *HopGroup* items.
+    """
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
@@ -1643,11 +1667,6 @@
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
         tree='F'):
-        """
-        Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is set, only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
-        """
         ParallelAnalysisInterface.__init__(self)
         self.threshold = threshold
         self.num_neighbors = num_neighbors
@@ -1989,6 +2008,10 @@
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
+        # if a path is denoted in filename, ensure the path exists
+        if len(filename.split('/')) > 1:
+            mkdir_rec('/'.join(filename.split('/')[:-1]))
+
         f = self.comm.write_on_root(filename)
         HaloList.write_out(self, f, ellipsoid_data)
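
The same guard is added to write_particle_lists_txt, write_particle_lists, and dump in the hunks below. mkdir_rec itself is defined elsewhere in yt; a rough standalone equivalent of what the guard accomplishes (a sketch, not the actual helper):

    import os

    def ensure_parent_dir(filename):
        # Create the directory portion of filename, if any, so that
        # writes like "path/to/HopAnalysis.out" do not fail.
        dirname = os.path.dirname(filename)
        if dirname and not os.path.isdir(dirname):
            os.makedirs(dirname)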
 
@@ -2008,6 +2031,10 @@
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
         """
+        # if a path is denoted in prefix, ensure the path exists
+        if len(prefix.split('/')) > 1:
+            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+
         f = self.comm.write_on_root("%s.txt" % prefix)
         HaloList.write_particle_lists_txt(self, prefix, fp=f)
 
@@ -2031,6 +2058,10 @@
         --------
         >>> halos.write_particle_lists("halo-parts")
         """
+        # if a path is denoted in prefix, ensure the path exists
+        if len(prefix.split('/')) > 1:
+            mkdir_rec('/'.join(prefix.split('/')[:-1]))
+
         fn = "%s.h5" % self.comm.get_filename(prefix)
         f = h5py.File(fn, "w")
         for halo in self._groups:
@@ -2064,94 +2095,98 @@
         --------
         >>> halos.dump("MyHalos")
         """
+        # if a path is denoted in basename, ensure the path exists
+        if len(basename.split('/')) > 1:
+            mkdir_rec('/'.join(basename.split('/')[:-1]))
+
         self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
 
 
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
+    r"""Parallel HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+    particles to higher.
+    3. Geometrically proximate chains are identified and
+    4. merged into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    This is very similar to HOP, but it does not produce precisely the
+    same halos due to unavoidable numerical differences.
+
+    Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
+    Cosmological Data Sets." arXiv (2010) 1001.3411
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = False.
+    resize : bool
+        Turns load-balancing on or off. Default = True.
+    kdtree : string
+        Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
+        faster, but uses more memory. The Cython one (kdtree = 'C') is
+        slower but is more memory efficient.
+        Default = 'F'
+    rearrange : bool
+        Turns on faster nearest neighbor searches at the cost of increased
+        memory usage.
+        This option only applies when using the Fortran tree.
+        Default = True.
+    fancy_padding : bool
+        True calculates padding independently for each face of each
+        subvolume. Default = True.
+    safety : float
+        Due to variances in inter-particle spacing in the volume, the
+        padding may need to be increased above the raw calculation.
+        This number multiplies the calculated padding, and values
+        >1 increase the padding. Default = 1.5.
+    premerge : bool
+        True merges chains in two steps (rather than one with False), which
+        can speed up halo finding by 25% or more. However, True can result
+        in small (<<1%) variations in the final halo masses when compared
+        to False. Default = True.
+    sample : float
+        The fraction of the full dataset on which load-balancing is
+        performed. Default = 0.03.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding is run on a subvolume, this still corresponds to
+        the mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+    num_particles : integer
+        The total number of particles in the volume, in the same fashion
+        as `total_mass` is calculated. Specifying this turns off
+        fancy_padding.
+        Default = None, which means the number of particles is
+        automatically calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = parallelHF(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
         fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
         total_mass=None, num_particles=None, tree='F'):
-        r"""Parallel HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        This is very similar to HOP, but it does not produce precisely the
-        same halos due to unavoidable numerical differences.
-
-        Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
-        Cosmological Data Sets." arXiv (2010) 1001.3411
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        resize : bool
-            Turns load-balancing on or off. Default = True.
-        kdtree : string
-            Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
-            faster, but uses more memory. The Cython one (kdtree = 'C') is
-            slower but is more memory efficient.
-            Default = 'F'
-        rearrange : bool
-            Turns on faster nearest neighbor searches at the cost of increased
-            memory usage.
-            This option only applies when using the Fortran tree.
-            Default = True.
-        fancy_padding : bool
-            True calculates padding independently for each face of each
-            subvolume. Default = True.
-        safety : float
-            Due to variances in inter-particle spacing in the volume, the
-            padding may need to be increased above the raw calculation.
-            This number is multiplied to the calculated padding, and values
-            >1 increase the padding. Default = 1.5.
-        premerge : bool
-            True merges chains in two steps (rather than one with False), which
-            can speed up halo finding by 25% or more. However, True can result
-            in small (<<1%) variations in the final halo masses when compared
-            to False. Default = True.
-        sample : float
-            The fraction of the full dataset on which load-balancing is
-            performed. Default = 0.03.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-        num_particles : integer
-            The total number of particles in the volume, in the same fashion
-            as `total_mass` is calculated. Specifying this turns off
-            fancy_padding.
-            Default = None, which means the number of particles is
-            automatically calculated.
-
-        Examples
-        -------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = parallelHF(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2398,58 +2433,58 @@
 
 
 class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
+    r"""HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+    particles to higher.
+    3. Geometrically proximate chains are identified and
+    4. merged into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
+    Simulations." ApJ (1998) vol. 498 pp. 137-142
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which HOP will be run, which can be used to run HOP
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = False.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding is run on a subvolume, this still corresponds to
+        the mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = HaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
             padding=0.02, total_mass=None):
-        r"""HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
-        Simulations." ApJ (1998) vol. 498 pp. 137-142
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = HaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
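
As an aside on the API documented above, here is a minimal end-to-end usage sketch. It assumes yt-2.x style imports and a dataset named "RedshiftOutput0000" (the name from the docstring example); `HaloFinder` is the exported alias for HOPHaloFinder, and `dump` is the `GenericHaloFinder.dump` referenced later in this diff:

    from yt.mods import load
    from yt.analysis_modules.halo_finding.api import HaloFinder  # alias for HOPHaloFinder

    pf = load("RedshiftOutput0000")
    # Lower thresholds link more particles into halos, making the largest
    # halos larger and more filamentary; 160.0 is the documented default.
    halos = HaloFinder(pf, threshold=160.0, dm_only=True)
    halos.dump("HopAnalysis")  # re-readable later via LoadHaloes(pf, "HopAnalysis")
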
@@ -2503,53 +2538,54 @@
 
 
 class FOFHaloFinder(GenericHaloFinder, FOFHaloList):
+    r"""Friends-of-friends halo finder.
+
+    Halos are found by linking together all pairs of particles closer than
+    some distance from each other. Particles may have multiple links,
+    and halos are found by recursively linking together all such pairs.
+
+    Larger linking lengths produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Davis et al. "The evolution of large-scale structure in a universe
+    dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which FOF will be run, which can be used to run FOF
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    link : float
+        If positive, the interparticle distance (compared to the overall
+        average) used to build the halos. If negative, this is taken to be
+        the *actual* linking length, and no other calculations will be
+        applied.  Default = 0.2.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = True.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = FOFHaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, link=0.2, dm_only=True,
         padding=0.02):
-        r"""Friends-of-friends halo finder.
-
-        Halos are found by linking together all pairs of particles closer than
-        some distance from each other. Particles may have multiple links,
-        and halos are found by recursively linking together all such pairs.
-
-        Larger linking lengths produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Davis et al. "The evolution of large-scale structure in a universe
-        dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        link : float
-            If positive, the interparticle distance (compared to the overall
-            average) used to build the halos. If negative, this is taken to be
-            the *actual* linking length, and no other calculations will be
-            applied.  Default = 0.2.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = FOFHaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self.pf = pf
         self.hierarchy = pf.h
+        self.redshift = pf.current_redshift
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding)
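
The `link` parameter above is a fraction of the mean interparticle spacing, and the pair-linking it describes can be pictured with a toy, standalone friends-of-friends pass. This is only a sketch using scipy's cKDTree and union-find, not yt's actual implementation:

    import numpy as np
    from scipy.spatial import cKDTree

    def toy_fof(pos, box, link=0.2):
        # Actual linking length: `link` times the mean interparticle spacing.
        n = len(pos)
        ll = link * (box ** 3 / n) ** (1.0 / 3.0)
        pairs = cKDTree(pos, boxsize=box).query_pairs(ll)  # periodic neighbor pairs
        parent = np.arange(n)  # union-find: join every linked pair into one group
        def find(i):
            while parent[i] != i:
                parent[i] = parent[parent[i]]  # path halving
                i = parent[i]
            return i
        for i, j in pairs:
            parent[find(i)] = find(j)
        return np.array([find(i) for i in range(n)])  # group label per particle

    labels = toy_fof(np.random.rand(10000, 3), box=1.0)
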
@@ -2584,7 +2620,8 @@
         #self._reposition_particles((LE, RE))
         # here is where the FOF halo finder is run
         mylog.info("Using a linking length of %0.3e", linking_length)
-        FOFHaloList.__init__(self, self._data_source, linking_length, dm_only)
+        FOFHaloList.__init__(self, self._data_source, linking_length, dm_only,
+                             redshift=self.redshift)
         self._parse_halolist(1.)
         self._join_halolists()
 
@@ -2592,84 +2629,84 @@
 
 
 class LoadHaloes(GenericHaloFinder, LoadedHaloList):
+    r"""Load the full halo data into memory.
+
+    This class takes the output of `GenericHaloFinder.dump` and
+    re-establishes the list of halos in memory. This enables the full set
+    of halo analysis features without running the halo finder again. To
+    be precise, the particle data for each halo is only read in when
+    necessary, so examining a single halo will not require as much memory
+    as is required for halo finding.
+
+    Parameters
+    ----------
+    basename : str
+        The base name of the files that will be read in. This should match
+        what was used when `GenericHaloFinder.dump` was called, e.g.
+        "HopAnalysis".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadHaloes(pf, "HopAnalysis")
+    """
     def __init__(self, pf, basename):
-        r"""Load the full halo data into memory.
-
-        This function takes the output of `GenericHaloFinder.dump` and
-        re-establishes the list of halos in memory. This enables the full set
-        of halo analysis features without running the halo finder again. To
-        be precise, the particle data for each halo is only read in when
-        necessary, so examining a single halo will not require as much memory
-        as is required for halo finding.
-
-        Parameters
-        ----------
-        basename : String
-            The base name of the files that will be read in. This should match
-            what was used when `GenericHaloFinder.dump` was called. Default =
-            "HopAnalysis".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadHaloes(pf, "HopAnalysis")
-        """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
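
The deferred reads described above ("particle data for each halo is only read in when necessary") come down to caching on first access. A schematic sketch of that pattern follows; the HDF5 file layout and group names here are hypothetical, not yt's actual on-disk format:

    import h5py  # assumption: dumped particle data lives in an HDF5 sidecar file

    class LazyHalo(object):
        """Read a halo's particle arrays from disk only on first access."""
        def __init__(self, fname, halo_id):
            self.fname, self.halo_id = fname, halo_id
            self._cache = None
        def __getitem__(self, field):
            if self._cache is None:  # defer all I/O until a field is requested
                with h5py.File(self.fname, "r") as f:
                    grp = f["Halo%08i" % self.halo_id]  # hypothetical group name
                    self._cache = dict((k, grp[k][:]) for k in grp)
            return self._cache[field]
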
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
+    r"""Load a text file of halos.
+    
+    Like LoadHaloes, but when all that is available is a plain
+    text file. This assumes the text file has the 3-positions of halos
+    along with a radius. The halo objects created are spheres.
+
+    Parameters
+    ----------
+    filename : str
+        The name of the text file to read in.
+    
+    columns : dict
+        A dict listing the column name : column number pairs for data
+        in the text file. It is zero-based (like Python).
+        An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
+        Any column name outside of ['x', 'y', 'z', 'r'] will be attached
+        to each halo object in the supplementary dict 'supp'. See
+        example.
+    
+    comment : str
+        If the first character of a line is equal to this, the line is
+        skipped. Default = "#".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadTextHaloes(pf, "list.txt",
+    ...     {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
+    ...     comment=";")
+    >>> halos[0].supp['m']
+    3.28392048e14
+    """
     def __init__(self, pf, filename, columns, comment = "#"):
-        r"""Load a text file of halos.
-        
-        Like LoadHaloes, but when all that is available is a plain
-        text file. This assumes the text file has the 3-positions of halos
-        along with a radius. The halo objects created are spheres.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the text file to read in.
-        
-        columns : dict
-            A dict listing the column name : column number pairs for data
-            in the text file. It is zero-based (like Python).
-            An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
-            Any column name outside of ['x', 'y', 'z', 'r'] will be attached
-            to each halo object in the supplementary dict 'supp'. See
-            example.
-        
-        comment : String
-            If the first character of a line is equal to this, the line is
-            skipped. Default = "#".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadTextHaloes(pf, "list.txt",
-            {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
-            comment = ";")
-        >>> halos[0].supp['m']
-            3.28392048e14
-        """
         TextHaloList.__init__(self, pf, filename, columns, comment)
 
 LoadTextHalos = LoadTextHaloes
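
The `columns` mapping documented above is easy to picture with a standalone parser sketch: positional names ('x', 'y', 'z', 'r') become halo attributes and every other column lands in the 'supp' dict. This mirrors the documented behavior but is not the actual TextHaloList reader:

    def read_text_halos(fname, columns, comment="#"):
        halos = []
        with open(fname) as fh:
            for line in fh:
                if line.startswith(comment) or not line.strip():
                    continue  # skip commented and blank lines
                vals = line.split()
                halo = dict((name, float(vals[col]))
                            for name, col in columns.items()
                            if name in ('x', 'y', 'z', 'r'))
                halo['supp'] = dict((name, float(vals[col]))
                                    for name, col in columns.items()
                                    if name not in ('x', 'y', 'z', 'r'))
                halos.append(halo)
        return halos

    halos = read_text_halos("list.txt", {'x': 0, 'y': 1, 'z': 2, 'r': 3, 'm': 4},
                            comment=";")
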
 
 class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    r"""Load Rockstar halos off disk from Rockstar-output format.
+
+    Parameters
+    ----------
+    filename : str
+        The name of the Rockstar file to read in. Default =
+        "rockstar_halos/out_0.list".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadRockstarHalos(pf, "other_name.out")
+    """
     def __init__(self, pf, filename = None):
-        r"""Load Rockstar halos off disk from Rockstar-output format.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the Rockstar file to read in. Default = 
-            "rockstar_halos/out_0.list'.
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadRockstarHalos(pf, "other_name.out")
-        """
         if filename is None:
             filename = 'rockstar_halos/out_0.list'
         RockstarHaloList.__init__(self, pf, filename)

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -114,80 +114,82 @@
         return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
+    r"""Spawns the Rockstar Halo finder, distributes dark matter
+    particles and finds halos.
+
+    The halo finder requires dark matter particles of a fixed size.
+    Rockstar has three main processes: reader, writer, and the
+    server, which coordinates the reader/writer processes.
+
+    Parameters
+    ----------
+    ts : TimeSeriesData, StaticOutput
+        This is the data source containing the DM particles. Because
+        halo IDs may change from one snapshot to the next, the only
+        way to keep a consistent halo ID across time is to feed
+        Rockstar a set of snapshots, i.e., via TimeSeriesData.
+    num_readers : int
+        The number of readers can be increased from the default
+        of 1 in the event that a single snapshot is split among
+        many files. This can help in cases where performance is
+        IO-limited. Default is 1. If run inline, it is
+        equal to the number of MPI threads.
+    num_writers : int
+        The number of writers determines the number of processing threads
+        as well as the number of threads writing output data.
+        The default is set to comm.size-num_readers-1. If run inline,
+        the default is equal to the number of MPI threads.
+    outbase : str
+        This is where the out*list files that Rockstar makes should be
+        placed. Default is 'rockstar_halos'.
+    dm_type : int
+        In order to exclude stars and other particle types, define
+        the dm_type. Default is 1, as Enzo uses particle type 1 for DM.
+    force_res : float
+        This parameter specifies the force resolution that Rockstar uses
+        in units of Mpc/h.
+        If no value is provided, this parameter is automatically set to
+        the width of the smallest grid element in the simulation from the
+        last data snapshot (i.e. the one where time has evolved the
+        longest) in the time series:
+        ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+    total_particles : int
+        If supplied, this is a pre-calculated total number of dark matter
+        particles present in the simulation. For example, this is useful
+        when analyzing a series of snapshots where the number of dark
+        matter particles should not change and this will save some disk
+        access time. If left unspecified, it will
+        be calculated automatically. Default: ``None``.
+    dm_only : boolean
+        If set to ``True``, it will be assumed that there are only dark
+        matter particles present in the simulation. This can save analysis
+        time if this is indeed the case. Default: ``False``.
+        
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    To use the script below you must run it using MPI:
+    mpirun -np 3 python test_rockstar.py --parallel
+
+    test_rockstar.py:
+
+    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+    from yt.mods import *
+
+    ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
+    rh = RockstarHaloFinder(ts)
+    rh.run()
+    """
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
             force_res=None, total_particles=None, dm_only=False):
-        r"""Spawns the Rockstar Halo finder, distributes dark matter
-        particles and finds halos.
-
-        The halo finder requires dark matter particles of a fixed size.
-        Rockstar has three main processes: reader, writer, and the 
-        server which coordinates reader/writer processes.
-
-        Parameters
-        ----------
-        ts   : TimeSeriesData, StaticOutput
-            This is the data source containing the DM particles. Because 
-            halo IDs may change from one snapshot to the next, the only
-            way to keep a consistent halo ID across time is to feed 
-            Rockstar a set of snapshots, ie, via TimeSeriesData.
-        num_readers: int
-            The number of reader can be increased from the default
-            of 1 in the event that a single snapshot is split among
-            many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
-        num_writers: int
-            The number of writers determines the number of processing threads
-            as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
-        outbase: str
-            This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
-        dm_type: 1
-            In order to exclude stars and other particle types, define
-            the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-        total_particles : int
-            If supplied, this is a pre-calculated total number of dark matter
-            particles present in the simulation. For example, this is useful
-            when analyzing a series of snapshots where the number of dark
-            matter particles should not change and this will save some disk
-            access time. If left unspecified, it will
-            be calculated automatically. Default: ``None``.
-        dm_only : boolean
-            If set to ``True``, it will be assumed that there are only dark
-            matter particles present in the simulation. This can save analysis
-            time if this is indeed the case. Default: ``False``.
-            
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        To use the script below you must run it using MPI:
-        mpirun -np 3 python test_rockstar.py --parallel
-
-        test_rockstar.py:
-
-        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
-        from yt.mods import *
-        import sys
-
-        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
-        pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts)
-        rh.run()
-        """
+        mylog.warning("The citation for the Rockstar halo finder can be found at")
+        mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)
         # Decide how we're working.
         if ytcfg.getboolean("yt", "inline") == True:
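
The reader/writer/server split documented above amounts to partitioning the MPI ranks into three groups before handing them to Rockstar. A schematic of that bookkeeping follows (an assumed layout for illustration; the real code builds ProcessorPool workgroups):

    def split_roles(size, num_readers=1, num_writers=None):
        # Schematic split of `size` MPI ranks into Rockstar's three roles.
        if num_writers is None:
            num_writers = size - num_readers - 1  # documented default
        assert num_readers + num_writers + 1 <= size, "one rank is needed for the server"
        server = [0]
        readers = list(range(1, 1 + num_readers))
        writers = list(range(1 + num_readers, 1 + num_readers + num_writers))
        return server, readers, writers

    # e.g. "mpirun -np 3" gives one server, one reader, and one writer:
    print(split_roles(3))  # ([0], [1], [2])
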

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -33,52 +33,52 @@
     parallel_blocking_call
 
 class HaloMassFcn(ParallelAnalysisInterface):
+    """
+    Initialize a HaloMassFcn object to analyze the distribution of haloes
+    as a function of mass.
+    :param halo_file (str): The filename of the output of the Halo Profiler.
+    Default=None.
+    :param omega_matter0 (float): The fraction of the universe made up of
+    matter (dark and baryonic). Default=None.
+    :param omega_lambda0 (float): The fraction of the universe made up of
+    dark energy. Default=None.
+    :param omega_baryon0 (float): The fraction of the universe made up of
+    ordinary baryonic matter. This should match the value
+    used to create the initial conditions, using 'inits'. This is
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.05.
+    :param hubble0 (float): The expansion rate of the universe in units of
+    100 km/s/Mpc. Default=None.
+    :param sigma8input (float): The amplitude of the linear power
+    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
+    in a top-hat sphere of radius 8 Mpc/h. This should match the value
+    used to create the initial conditions, using 'inits'. This is
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.86.
+    :param primordial_index (float): This is the index of the mass power
+    spectrum before modification by the transfer function. A value of 1
+    corresponds to the scale-free primordial spectrum. This should match
+    the value used to make the initial conditions using 'inits'. This is
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=1.0.
+    :param this_redshift (float): The current redshift. Default=None.
+    :param log_mass_min (float): The log10 of the mass of the minimum of the
+    halo mass range. Default=None.
+    :param log_mass_max (float): The log10 of the mass of the maximum of the
+    halo mass range. Default=None.
+    :param num_sigma_bins (int): The number of bins (points) to use for
+    the calculations and generated fit. Default=360.
+    :param fitting_function (int): Which fitting function to use.
+    1 = Press-Schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren,
+    5 = Tinker.
+    Default=4.
+    :param mass_column (int): The column of halo_file that contains the
+    masses of the haloes. Default=5.
+    """
     def __init__(self, pf, halo_file=None, omega_matter0=None, omega_lambda0=None,
     omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
     this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
     fitting_function=4, mass_column=5):
-        """
-        Initalize a HaloMassFcn object to analyze the distribution of haloes
-        as a function of mass.
-        :param halo_file (str): The filename of the output of the Halo Profiler.
-        Default=None.
-        :param omega_matter0 (float): The fraction of the universe made up of
-        matter (dark and baryonic). Default=None.
-        :param omega_lambda0 (float): The fraction of the universe made up of
-        dark energy. Default=None.
-        :param omega_baryon0 (float): The fraction of the universe made up of
-        ordinary baryonic matter. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.05.
-        :param hubble0 (float): The expansion rate of the universe in units of
-        100 km/s/Mpc. Default=None.
-        :param sigma8input (float): The amplitude of the linear power
-        spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-        in a top-hat sphere of radius 8 Mpc/h. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.86.
-        :param primoridal_index (float): This is the index of the mass power
-        spectrum before modification by the transfer function. A value of 1
-        corresponds to the scale-free primordial spectrum. This should match
-        the value used to make the initial conditions using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=1.0.
-        :param this_redshift (float): The current redshift. Default=None.
-        :param log_mass_min (float): The log10 of the mass of the minimum of the
-        halo mass range. Default=None.
-        :param log_mass_max (float): The log10 of the mass of the maximum of the
-        halo mass range. Default=None.
-        :param num_sigma_bins (float): The number of bins (points) to use for
-        the calculations and generated fit. Default=360.
-        :param fitting_function (int): Which fitting function to use.
-        1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-        5 = Tinker
-        Default=4.
-        :param mass_column (int): The column of halo_file that contains the
-        masses of the haloes. Default=4.
-        """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
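
For orientation on the `fitting_function` choices above, the Press-Schechter and Sheth-Tormen multiplicity functions f(sigma) have the standard textbook forms below. This is a sketch of the shapes involved, not yt's exact normalization or implementation:

    import numpy as np

    DELTA_C = 1.686  # critical linear overdensity for spherical collapse

    def f_press_schechter(sigma):
        # fitting_function=1: Press & Schechter (1974)
        nu = DELTA_C / sigma
        return np.sqrt(2.0 / np.pi) * nu * np.exp(-0.5 * nu ** 2)

    def f_sheth_tormen(sigma, A=0.3222, a=0.707, p=0.3):
        # fitting_function=3: Sheth & Tormen (1999)
        nu = DELTA_C / sigma
        return (A * np.sqrt(2.0 * a / np.pi) * (1.0 + (a * nu ** 2) ** -p)
                * nu * np.exp(-0.5 * a * nu ** 2))
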
@@ -132,7 +132,6 @@
         not stored in enzo datasets, so must be entered by hand.
         sigma8input=%f primordial_index=%f omega_baryon0=%f
         """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        time.sleep(1)
         
         # Do the calculations.
         self.sigmaM()
@@ -544,22 +543,22 @@
 """
 
 class TransferFunction(object):
+    """
+    This routine takes cosmological parameters and a redshift and sets up
+    all the internal scalar quantities needed to compute the transfer
+    function.
+    INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
+                           in units of the critical density.
+           omega_baryon -- Density of baryons, in units of critical.
+           omega_hdm    -- Density of massive neutrinos, in units of critical.
+           degen_hdm    -- (Int) Number of degenerate massive neutrino species.
+           omega_lambda -- Cosmological constant.
+           hubble       -- Hubble constant, in units of 100 km/s/Mpc.
+           redshift     -- The redshift at which to evaluate.
+    OUTPUT: Returns 0 if all is well, 1 if a warning was issued. Otherwise,
+            sets many global variables for use in TFmdm_onek_mpc().
+    """
     def __init__(self, omega_matter, omega_baryon, omega_hdm,
 	    degen_hdm, omega_lambda, hubble, redshift):
-        """
-        /* This routine takes cosmological parameters and a redshift and sets up
-        all the internal scalar quantities needed to compute the transfer function. */
-        /* INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
-                        in units of the critical density. */
-        /* 	  omega_baryon -- Density of baryons, in units of critical. */
-        /* 	  omega_hdm    -- Density of massive neutrinos, in units of critical */
-        /* 	  degen_hdm    -- (Int) Number of degenerate massive neutrino species */
-        /*        omega_lambda -- Cosmological constant */
-        /* 	  hubble       -- Hubble constant, in units of 100 km/s/Mpc */
-        /*        redshift     -- The redshift at which to evaluate */
-        /* OUTPUT: Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
-            sets many global variables for use in TFmdm_onek_mpc() */
-        """
         self.qwarn = 0;
         self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K
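
The "internal scalar quantities" mentioned above begin with simple combinations of the inputs; the first few, following Eisenstein & Hu (1998), look like this (a sketch of the setup using relations from that paper, not the full routine):

    def tf_setup_scalars(omega_matter, hubble):
        theta_cmb = 2.728 / 2.7  # T_cmb in units of 2.7 K
        omhh = omega_matter * hubble * hubble  # Omega_m * h^2
        z_equality = 25000.0 * omhh / theta_cmb ** 4  # matter-radiation equality
        return theta_cmb, omhh, z_equality
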
     

diff -r bea5473873e824088584621372b1f3a6a70dbdc0 -r ea7c50bf715d4156fe1f676e147134d0928a9574 yt/analysis_modules/halo_merger_tree/api.py
--- a/yt/analysis_modules/halo_merger_tree/api.py
+++ b/yt/analysis_modules/halo_merger_tree/api.py
@@ -38,5 +38,7 @@
     MergerTreeTextOutput
 
 from .enzofof_merger_tree import \
+    HaloCatalog, \
     find_halo_relationships, \
-    EnzoFOFMergerTree
+    EnzoFOFMergerTree, \
+    plot_halo_evolution

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
