[yt-svn] commit/yt: 4 new changesets

Bitbucket commits-noreply at bitbucket.org
Mon Dec 17 20:17:52 PST 2012


4 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/53fa42c3dbc9/
changeset:   53fa42c3dbc9
branch:      yt
user:        Andrew Myers
date:        2012-11-13 03:26:23
summary:     changing a few of the orion fields from 'known' to 'fallback'
affected #:  1 file

diff -r d84dc871dfc9657a2b2f461cb7a6e0bad7508386 -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -126,19 +126,19 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
-add_orion_field("ThermalEnergy", function=_ThermalEnergy,
-                units=r"\rm{ergs}/\rm{cm^3}")
+add_field("ThermalEnergy", function=_ThermalEnergy,
+          units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
     """M{(Gamma-1.0)*e, where e is thermal energy density
        NB: this will need to be modified for radiation
     """
     return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
-add_orion_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field,data):
     return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_orion_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
 
 # particle fields
 

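For context, a minimal usage sketch (the plotfile name is hypothetical): after this change the Orion "ThermalEnergy", "Pressure", and "Temperature" fields are registered with the universal add_field rather than the frontend-specific add_orion_field, so they resolve through yt's standard fallback field machinery:

    from yt.mods import *
    pf = load("plt00000")       # hypothetical Orion plotfile
    dd = pf.h.all_data()
    # ThermalEnergy, Pressure, and Temperature are now fallback fields.
    print dd["Temperature"].min(), dd["Temperature"].max()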


https://bitbucket.org/yt_analysis/yt/changeset/d15ab61fb9ef/
changeset:   d15ab61fb9ef
branch:      yt
user:        Andrew Myers
date:        2012-12-18 05:10:00
summary:     merging from tip
affected #:  72 files







diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -43,6 +43,7 @@
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
+INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -398,6 +399,14 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -406,6 +415,13 @@
     exit 1
 fi
 
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
@@ -433,7 +449,7 @@
 echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
 echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
 echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
-
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
@@ -457,6 +473,7 @@
 get_ytproject nose-1.2.1.tar.gz 
 get_ytproject python-hglib-0.2.tar.gz
 get_ytproject sympy-0.7.2.tar.gz
+get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -699,6 +716,23 @@
 do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
+# Now we build Rockstar and set its environment variable.
+if [ $INST_ROCKSTAR -eq 1 ]
+then
+    if [ ! -e Rockstar/done ]
+    then
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        echo "Building Rockstar"
+        cd Rockstar
+        ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        cp librockstar.so ${DEST_DIR}/lib
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
+        touch done
+        cd ..
+    fi
+fi
+
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
 cd $YT_DIR
@@ -727,7 +761,7 @@
 then
     echo "Cloning a copy of Enzo."
     cd ${DEST_DIR}/src/
-    ${HG_EXEC} clone https://enzo.googlecode.com/hg/ ./enzo-hg-stable
+    ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
     cd $MY_PWD
 fi
 




diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@
 import distribute_setup
 distribute_setup.use_setuptools()
 
+from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils import log
 from distutils import version
@@ -110,6 +111,42 @@
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 
+def get_mercurial_changeset_id(target_dir):
+    """adapted from a script by Jason F. Harris, published at
+
+    http://jasonfharris.com/blog/2010/05/versioning-your-application-with-the-mercurial-changeset-hash/
+
+    """
+    import subprocess
+    import re
+    get_changeset = subprocess.Popen('hg identify -b -i',
+                                     stdout=subprocess.PIPE,
+                                     stderr=subprocess.PIPE,
+                                     shell=True)
+        
+    if (get_changeset.stderr.read() != ""):
+        print "Error in obtaining current changeset of the Mercurial repository"
+        return None
+
+    changeset = get_changeset.stdout.read().strip()
+    if (not re.search("^[0-9a-f]{12}", changeset)):
+        print "Current changeset of the Mercurial repository is malformed"
+        changeset = None
+
+    return changeset
+
+class my_build_py(build_py):
+    def run(self):
+        # honor the --dry-run flag
+        if not self.dry_run:
+            target_dir = os.path.join(self.build_lib,'yt')
+            src_dir =  os.getcwd() 
+            changeset = get_mercurial_changeset_id(src_dir)
+            self.mkpath(target_dir)
+            with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
+                fobj.write("hg_version = '%s'\n" % changeset)
+
+            build_py.run(self)
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
@@ -166,6 +203,7 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
+        cmdclass = {'build_py': my_build_py},
         )
     return
 

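A hypothetical usage sketch, assuming yt has been built and installed with this setup.py: the changeset recorded by my_build_py can be read back from the generated module.

    # __hg_version__.py is written into the build tree by my_build_py above.
    from yt.__hg_version__ import hg_version
    print "yt was built from Mercurial changeset", hg_version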

diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -45,4 +45,8 @@
     FOFHaloFinder, \
     HaloFinder, \
     LoadHaloes, \
-    LoadTextHaloes
+    LoadTextHalos, \
+    LoadTextHaloes, \
+    RockstarHalo, \
+    RockstarHaloList, \
+    LoadRockstarHalos


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -34,6 +34,8 @@
 import numpy as np
 import random
 import sys
+import glob
+import os
 import os.path as path
 from collections import defaultdict
 
@@ -60,9 +62,6 @@
 
 TINY = 1.e-40
 
-# Ellipsoid funtions.
-# Rotation Matrixes should already be imported at top
-
 class Halo(object):
     """
     A data source that returns particle information about the members of a
@@ -79,6 +78,7 @@
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
         bulk_vel=None, tasks=None, rms_vel=None, supp=None):
+        self.halo_list = halo_list
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -106,6 +106,29 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
+
+    @property
+    def particle_mask(self):
+        # Dynamically create the masking array for particles, and get
+        # the data using standard yt methods.
+        if self._particle_mask is not None:
+            return self._particle_mask
+        # This is from disk.
+        pid = self.__getitem__('particle_index')
+        # This is from the sphere.
+        if self._name == "RockstarHalo":
+            ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
+        elif self._name == "LoadedHalo":
+            ds = self.pf.h.sphere(self.CoM, self._radjust * self.max_radius)
+        sp_pid = ds['particle_index']
+        self._ds_sort = sp_pid.argsort()
+        sp_pid = sp_pid[self._ds_sort]
+        # This matches them up.
+        self._particle_mask = np.in1d(sp_pid, pid)
+        return self._particle_mask
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -540,45 +563,9 @@
             e0_vector[2], tilt)
 
 class RockstarHalo(Halo):
-    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
-                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
-        """Implement the properties reported by Rockstar: ID, Descendant ID,
-           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
-           Most defaults are removed since we don't read in which halos
-           particles belong to. 
-        """
-        #we can still use get_sphere!
-        self.ID = ID #from rockstar
-        self.id = index #index in the halo list
-        self.pf = halo_list.pf
-
-        self.DescID = DescID
-        self.Mvir = Mvir
-        self.Vmax = Vmax
-        self.Vrms = Vrms
-        self.Rvir = Rvir
-        self.Rs   = Rs
-        self.Np   = Np
-        self.X    = X
-        self.Y    = Y
-        self.Z    = Z
-        self.VX   = VX
-        self.VY   = VY
-        self.VZ   = VZ
-        self.JX   = JX
-        self.JY   = JY
-        self.JZ   = JZ
-        self.Spin = Spin
-
-        #Halo.__init__(self,halo_list,index,
-        self.size=Np 
-        self.CoM=np.array([X,Y,Z])
-        self.max_dens_point=-1
-        self.group_total_mass=-1
-        self.max_radius=Rvir
-        self.bulk_vel=np.array([VX,VY,VZ])*1e5
-        self.rms_vel=-1
-        self.group_total_mass = -1 #not implemented 
+    _name = "RockstarHalo"
+    # See particle_mask
+    _radjust = 4.
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -588,42 +575,57 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
-    def total_mass(self):
-        r"""Not implemented."""
-        return -1
-
-    def get_size(self):
-        r"""Return the number of particles belonging to the halo."""
-        return self.Np
-
     def write_particle_list(self,handle):
         r"""Not implemented."""
         return -1
 
     def virial_mass(self):
         r"""Virial mass in Msun/h"""
-        return self.Mvir
+        return self.supp['m']
 
     def virial_radius(self):
         r"""Virial radius in Mpc/h comoving"""
-        return self.Rvir
+        return self.supp['r']
 
-    def virial_bin(self):
-        r"""Not implemented"""
-        return -1
+    def __getitem__(self, key):
+        # This function will try to get particle data in one of three ways,
+        # in descending preference.
+        # 1. From saved_fields, e.g. we've already got it.
+        # 2. From the halo binary files off disk.
+        # 3. Use the unique particle indexes of the halo to select a missing
+        # field from an AMR Sphere.
+        if key in self._saved_fields:
+            # We've already got it.
+            return self._saved_fields[key]
+        # Gotta go get it from the Rockstar binary file.
+        if key == 'particle_index':
+            IDs = self._get_particle_data(self.supp['id'],
+                self.halo_list.halo_to_fname, self.size, key)
+            IDs = IDs[IDs.argsort()]
+            self._saved_fields[key] = IDs
+            return self._saved_fields[key]
+        # We won't store this field below in saved_fields because
+        # that would mean keeping two copies of it, one in the yt
+        # machinery and one here.
+        ds = self.pf.h.sphere(self.CoM, 4 * self.max_radius)
+        return np.take(ds[key][self._ds_sort], self.particle_mask)
 
-    def virial_density(self):
-        r"""Not implemented """
-        return -1
-
-    def virial_info(self):
-        r"""Not implemented"""
-        return -1 
-
-    def __getitem__(self,key):
-        r"""Not implemented"""
-        return None
-
+    def _get_particle_data(self, halo, fnames, size, field):
+        # Given a list of file names, a halo, its size, and the desired field,
+        # this returns the particle indices for that halo.
+        file = fnames[halo]
+        mylog.info("Getting %d particles from Rockstar binary file %s.", self.supp['num_p'], file)
+        fp = open(file, 'rb')
+        # We need to skip past the header and all the halos.
+        fp.seek(self.halo_list._header_dt.itemsize + \
+            self.halo_list.fname_halos[file] * \
+            self.halo_list._halo_dt.itemsize, os.SEEK_CUR)
+        # Now we skip ahead to where this halo's particles begin.
+        fp.seek(self.supp['p_start'] * 8, os.SEEK_CUR)
+        # And finally, read in the ids.
+        IDs = np.fromfile(fp, dtype=np.int64, count=self.supp['num_p'])
+        fp.close()
+        return IDs
 
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
@@ -802,6 +804,10 @@
 
 
 class LoadedHalo(Halo):
+    _name = "LoadedHalo"
+    # See particle_mask
+    _radjust = 1.05
+
     def __init__(self, pf, id, size=None, CoM=None,
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
@@ -828,9 +834,6 @@
         self.fnames = fnames
         self.bin_count = None
         self.overdensity = None
-        self.saved_fields = {}
-        self.particle_mask = None
-        self.ds_sort = None
         self.indices = np.array([])  # Never used for a LoadedHalo.
         # A supplementary data dict.
         if supp is None:
@@ -845,37 +848,22 @@
         # 2. From the halo h5 files off disk.
         # 3. Use the unique particle indexes of the halo to select a missing
         # field from an AMR Sphere.
-        try:
+        if key in self._saved_fields:
             # We've already got it.
-            return self.saved_fields[key]
-        except KeyError:
-            # Gotta go get it from the halo h5 files.
-            field_data = self._get_particle_data(self.id, self.fnames,
-                self.size, key)
-            #if key == 'particle_position_x': field_data = None
-            if field_data is not None:
-                self.saved_fields[key] = field_data
-                return self.saved_fields[key]
-            else:
-                # Dynamically create the masking array for particles, and get
-                # the data using standard yt methods. The 1.05 is there to
-                # account for possible silliness having to do with whether
-                # the maximum density or center of mass was used to calculate
-                # the maximum radius.
-                ds = self.pf.h.sphere(self.CoM, 1.05 * self.max_radius)
-                if self.particle_mask is None:
-                    pid = self.__getitem__('particle_index')
-                    sp_pid = ds['particle_index']
-                    self.ds_sort = sp_pid.argsort()
-                    sp_pid = sp_pid[self.ds_sort]
-                    # The result of searchsorted is an array with the positions
-                    # of the indexes in pid as they are in sp_pid. This is
-                    # because each element of pid is in sp_pid only once.
-                    self.particle_mask = np.searchsorted(sp_pid, pid)
-                # We won't store this field below in saved_fields because
-                # that would mean keeping two copies of it, one in the yt
-                # machinery and one here.
-                return ds[key][self.ds_sort][self.particle_mask]
+            return self._saved_fields[key]
+        # Gotta go get it from the halo h5 files.
+        field_data = self._get_particle_data(self.id, self.fnames,
+            self.size, key)
+        if field_data is not None:
+            if key == 'particle_index':
+                field_data = field_data[field_data.argsort()]
+            self._saved_fields[key] = field_data
+            return self._saved_fields[key]
+        # We won't store this field below in saved_fields because
+        # that would mean keeping two copies of it, one in the yt
+        # machinery and one here.
+        ds = self.pf.h.sphere(self.CoM, 1.05 * self.max_radius)
+        return np.take(ds[key][self._ds_sort], self.particle_mask)
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1318,21 +1306,51 @@
         f.close()
 
 class RockstarHaloList(HaloList):
-    #because we don't yet no halo-particle affiliations
-    #most of the halo list methods are not implemented
-    #furthermore, Rockstar only accepts DM particles of
-    #a fixed mass, so we don't allow stars at all
-    #Still, we inherit from HaloList because in the future
-    #we might implement halo-particle affiliations
-    def __init__(self,pf,out_list):
+    _name = "Rockstar"
+    _halo_class = RockstarHalo
+    # see io_internal.h in Rockstar.
+    BINARY_HEADER_SIZE=256
+    _header_dt = np.dtype([('magic', np.uint64), ('snap', np.int64),
+        ('chunk', np.int64), ('scale', np.float32), ('Om', np.float32),
+        ('Ol', np.float32), ('h0', np.float32),
+        ('bounds', (np.float32, 6)), ('num_halos', np.int64),
+        ('num_particles', np.int64), ('box_size', np.float32),
+        ('particle_mass', np.float32), ('particle_type', np.int64),
+        ('unused', (np.byte, BINARY_HEADER_SIZE - 4*12 - 8*6))])
+    # see halo.h.
+    _halo_dt = np.dtype([('id', np.int64), ('pos', (np.float32, 6)),
+        ('corevel', (np.float32, 3)), ('bulkvel', (np.float32, 3)),
+        ('m', np.float32), ('r', np.float32), ('child_r', np.float32),
+        ('vmax_r', np.float32), 
+        ('mgrav', np.float32), ('vmax', np.float32),
+        ('rvmax', np.float32), ('rs', np.float32),
+        ('klypin_rs', np.float32), 
+        ('vrms', np.float32), ('J', (np.float32, 3)),
+        ('energy', np.float32), ('spin', np.float32),
+        ('alt_m', (np.float32, 4)), ('Xoff', np.float32),
+        ('Voff', np.float32), ('b_to_a', np.float32),
+        ('c_to_a', np.float32), ('A', (np.float32, 3)),
+        ('bullock_spin', np.float32), ('kin_to_pot', np.float32),
+        ('num_p', np.int64),
+        ('num_child_particles', np.int64), ('p_start', np.int64),
+        ('desc', np.int64), ('flags', np.int64), ('n_core', np.int64),
+        ('min_pos_err', np.float32), ('min_vel_err', np.float32),
+        ('min_bulkvel_err', np.float32), ('padding2', np.float32),])
+    # Above, the padding* entries account for C struct alignment, which pads
+    # between 4- and 8-byte values so that fields do not overlap.
+    _tocleanup = ['padding2']
+
+    def __init__(self, pf, out_list):
+        ParallelAnalysisInterface.__init__(self)
         mylog.info("Initializing Rockstar List")
         self._data_source = None
         self._groups = []
         self._max_dens = -1
         self.pf = pf
         self.out_list = out_list
+        self._data_source = pf.h.all_data()
         mylog.info("Parsing Rockstar halo list")
-        self._parse_output(out_list)
+        self._parse_output()
         mylog.info("Finished %s"%out_list)
 
     def _run_finder(self):
@@ -1344,70 +1362,77 @@
     def _get_dm_indices(self):
         pass
 
-    def _parse_output(self,out_list=None):
+    def _get_halos_binary(self, files):
+        """
+        Parse the binary files to get information about halos in higher
+        precision than the text file.
+        """
+        halos = None
+        self.halo_to_fname = {}
+        self.fname_halos = {}
+        for file in files:
+            fp = open(file, 'rb')
+            # read the header
+            header = np.fromfile(fp, dtype=self._header_dt, count=1)
+            # read the halo information
+            new_halos = np.fromfile(fp, dtype=self._halo_dt,
+                count=header['num_halos'])
+            # Record which binary file holds these halos.
+            for halo in new_halos['id']:
+                self.halo_to_fname[halo] = file
+            # Record how many halos are stored in each binary file.
+            self.fname_halos[file] = header['num_halos']
+            # Add to existing.
+            if halos is not None:
+                halos = np.concatenate((new_halos, halos))
+            else:
+                halos = new_halos.copy()
+            fp.close()
+        # Sort them by mass.
+        halos.sort(order='m')
+        halos = np.flipud(halos)
+        return halos
+
+    def _parse_output(self):
         """
         Read the out_*.list text file produced
         by Rockstar into memory."""
         
         pf = self.pf
-
-        if out_list is None:
-            out_list = self.out_list
-
-        lines = open(out_list).readlines()
-        names = []
-        formats = []
-        
-        #find the variables names from the first defining line
-        names = lines[0].replace('#','').split(' ')
-        for j,line in enumerate(lines):
-            if not line.startswith('#'): break
-
-        #find out the table datatypes but evaluating the first data line
-        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
-        for num in splits:
-            if 'nan' not in num:
-                formats += np.array(eval(num)).dtype,
-            else:
-                formats += np.dtype('float'),
-        assert len(formats) == len(names)
-
+        # In order to read the binary data, we need to figure out which 
+        # binary files belong to this output.
+        basedir = os.path.dirname(self.out_list)
+        s = self.out_list.split('_')[-1]
+        s = s.rstrip('.list')
+        n = int(s)
+        fglob = path.join(basedir, 'halos_%d.*.bin' % n)
+        files = glob.glob(fglob)
+        halos = self._get_halos_binary(files)
         #Jc = 1.98892e33/pf['mpchcm']*1e5
         Jc = 1.0
-        conv = dict(X=1.0/pf['mpchcm'],
-                    Y=1.0/pf['mpchcm'],
-                    Z=1.0/pf['mpchcm'], #to unitary
-                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
-                    Mvir=1.0, #Msun/h
-                    Vmax=1e0,Vrms=1e0,
-                    Rvir=1.0/pf['kpchcm'],
-                    Rs=1.0/pf['kpchcm'],
-                    JX=Jc,JY=Jc,JZ=Jc)
-        dtype = {'names':names,'formats':formats}
-        halo_table = np.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
-        #convert position units  
-        for name in names:
-            halo_table[name]=halo_table[name]*conv.get(name,1)
-        
-        for k,row in enumerate(halo_table):
-            args = tuple([val for val in row])
-            halo = RockstarHalo(self,k,*args)
+        length = 1.0 / pf['mpchcm']
+        conv = dict(pos = np.array([length, length, length,
+                                    1, 1, 1]), # to unitary
+                    r=1.0/pf['kpchcm'], # to unitary
+                    rs=1.0/pf['kpchcm'], # to unitary
+                    )
+        #convert units
+        for name in self._halo_dt.names:
+            halos[name]=halos[name]*conv.get(name,1)
+        # Store the halos in the halo list.
+        for i, row in enumerate(halos):
+            supp = {name:row[name] for name in self._halo_dt.names}
+            # Delete the padding columns. 'supp' below will contain
+            # repeated information, but that's OK.
+            for item in self._tocleanup: del supp[item]
+            halo = RockstarHalo(self, i, size=row['num_p'],
+                CoM=row['pos'][0:3], group_total_mass=row['m'],
+                max_radius=row['r'], bulk_vel=row['bulkvel'],
+                rms_vel=row['vrms'], supp=supp)
             self._groups.append(halo)
-    
 
-    #len is ok
-    #iter is OK
-    #getitem is ok
-    #nn is ok I think
-    #nn2d is ok I think
-
-    def write_out(self):
-        pass
     def write_particle_list(self):
         pass
-    
-
-    
 
 class HOPHaloList(HaloList):
 
@@ -2627,3 +2652,24 @@
             3.28392048e14
         """
         TextHaloList.__init__(self, pf, filename, columns, comment)
+
+LoadTextHalos = LoadTextHaloes
+
+class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    def __init__(self, pf, filename = None):
+        r"""Load Rockstar halos off disk from Rockstar-output format.
+
+        Parameters
+        ----------
+        filename : String
+            The name of the Rockstar file to read in. Default =
+            "rockstar_halos/out_0.list".
+
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadRockstarHalos(pf, "other_name.out")
+        """
+        if filename is None:
+            filename = 'rockstar_halos/out_0.list'
+        RockstarHaloList.__init__(self, pf, filename)
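
A short usage sketch (the dataset name is hypothetical) tying the pieces above together: bulk halo properties come from the binary 'm' and 'r' columns stored in the supp dict, while particle indices are read from the matching halos_*.bin file through __getitem__:

    from yt.mods import *
    from yt.analysis_modules.halo_finding.api import LoadRockstarHalos

    pf = load("data0005")           # hypothetical dataset
    halos = LoadRockstarHalos(pf)   # defaults to rockstar_halos/out_0.list
    h = halos[0]                    # most massive halo; the list is sorted by 'm'
    print h.virial_mass(), h.virial_radius()
    ids = h["particle_index"]       # read from the corresponding halos_*.bin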


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -24,62 +24,252 @@
 """
 
 from yt.mods import *
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, ProcessorPool, Communicator
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
+from yt.config import ytcfg
+
+import rockstar_interface
+
+import socket
+import time
+import threading
+import signal
+import os
 from os import environ
 from os import mkdir
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, ProcessorPool, Communicator
+from os import path
 
-from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
-import rockstar_interface
-import socket
-import time
+class InlineRunner(ParallelAnalysisInterface):
+    def __init__(self):
+        # If this is being run inline, num_readers == comm.size, always.
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        self.num_readers = psize
+        # No choice for you, everyone's a writer too!
+        self.num_writers =  psize
+    
+    def run(self, handler, pool):
+        # If inline, we use forks.
+        server_pid = 0
+        # Start a server on only one machine/fork.
+        if pool.comm.rank == 0:
+            server_pid = os.fork()
+            if server_pid == 0:
+                handler.start_server()
+                os._exit(0)
+        # Start writers on all.
+        writer_pid = 0
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        writer_pid = os.fork()
+        if writer_pid == 0:
+            handler.start_writer()
+            os._exit(0)
+        # Everyone's a reader!
+        time.sleep(0.05 + pool.comm.rank/10.0)
+        handler.start_reader()
+        # Make sure the forks are done, which they should be.
+        if writer_pid != 0:
+            os.waitpid(writer_pid, 0)
+        if server_pid != 0:
+            os.waitpid(server_pid, 0)
 
-class DomainDecomposer(ParallelAnalysisInterface):
-    def __init__(self, pf, comm):
-        ParallelAnalysisInterface.__init__(self, comm=comm)
-        self.pf = pf
-        self.hierarchy = pf.h
-        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
+    def setup_pool(self):
+        pool = ProcessorPool()
+        # Everyone is a reader, and when we're inline, that's all that matters.
+        readers = np.arange(ytcfg.getint("yt", "__global_parallel_size"))
+        pool.add_workgroup(ranks=readers, name="readers")
+        return pool, pool.workgroups[0]
 
-    def decompose(self):
-        dd = self.pf.h.all_data()
-        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
-        return data_source
+class StandardRunner(ParallelAnalysisInterface):
+    def __init__(self, num_readers, num_writers):
+        self.num_readers = num_readers
+        psize = ytcfg.getint("yt", "__global_parallel_size")
+        if num_writers is None:
+            self.num_writers =  psize - num_readers - 1
+        else:
+            self.num_writers = min(num_writers, psize)
+        if self.num_readers + self.num_writers + 1 != psize:
+            mylog.error('%i reader + %i writers != %i mpi',
+                    self.num_readers, self.num_writers, psize)
+            raise RuntimeError
+    
+    def run(self, handler, wg):
+        # Not inline so we just launch them directly from our MPI threads.
+        if wg.name == "server":
+            handler.start_server()
+        if wg.name == "readers":
+            time.sleep(0.05)
+            handler.start_reader()
+        if wg.name == "writers":
+            time.sleep(0.1)
+            handler.start_writer()
+    
+    def setup_pool(self):
+        pool = ProcessorPool()
+        pool, workgroup = ProcessorPool.from_sizes(
+           [ (1, "server"),
+             (self.num_readers, "readers"),
+             (self.num_writers, "writers") ]
+        )
+        return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 1, num_writers = None, 
-            outbase=None,particle_mass=-1.0,overwrite=False,
-            left_edge = None, right_edge = None):
+    def __init__(self, ts, num_readers = 1, num_writers = None,
+            outbase="rockstar_halos", dm_type=1, 
+            force_res=None, total_particles=None, dm_only=False):
+        r"""Spawns the Rockstar Halo finder, distributes dark matter
+        particles and finds halos.
+
+        The halo finder requires dark matter particles of a fixed size.
+        Rockstar has three main processes: reader, writer, and the 
+        server which coordinates reader/writer processes.
+
+        Parameters
+        ----------
+        ts   : TimeSeriesData, StaticOutput
+            This is the data source containing the DM particles. Because 
+            halo IDs may change from one snapshot to the next, the only
+            way to keep a consistent halo ID across time is to feed 
+            Rockstar a set of snapshots, i.e., via TimeSeriesData.
+        num_readers: int
+            The number of readers can be increased from the default
+            of 1 in the event that a single snapshot is split among
+            many files. This can help in cases where performance is
+            IO-limited. Default is 1. If run inline, it is
+            equal to the number of MPI threads.
+        num_writers: int
+            The number of writers determines the number of processing threads
+            as well as the number of threads writing output data.
+            The default is set to comm.size-num_readers-1. If run inline,
+            the default is equal to the number of MPI threads.
+        outbase: str
+            This is where the out*list files that Rockstar makes should be
+            placed. Default is 'rockstar_halos'.
+        dm_type: int
+            In order to exclude stars and other particle types, define
+            the dm_type. Default is 1, as Enzo has the DM particle type=1.
+        force_res: float
+            This parameter specifies the force resolution that Rockstar uses
+            in units of Mpc/h.
+            If no value is provided, this parameter is automatically set to
+            the width of the smallest grid element in the simulation from the
+            last data snapshot (i.e. the one where time has evolved the
+            longest) in the time series:
+            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+        total_particles : int
+            If supplied, this is a pre-calculated total number of dark matter
+            particles present in the simulation. For example, this is useful
+            when analyzing a series of snapshots where the number of dark
+            matter particles should not change and this will save some disk
+            access time. If left unspecified, it will
+            be calculated automatically. Default: ``None``.
+        dm_only : boolean
+            If set to ``True``, it will be assumed that there are only dark
+            matter particles present in the simulation. This can save analysis
+            time if this is indeed the case. Default: ``False``.
+            
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        To use the script below you must run it using MPI:
+        mpirun -np 3 python test_rockstar.py --parallel
+
+        test_rockstar.py:
+
+        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+        from yt.mods import *
+        import sys
+
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
+        pm = 7.81769027e+11
+        rh = RockstarHaloFinder(ts)
+        rh.run()
+        """
         ParallelAnalysisInterface.__init__(self)
-        # No subvolume support
-        self.pf = pf
-        self.hierarchy = pf.h
-        if num_writers is None:
-            num_writers = self.comm.size - num_readers -1
-        self.num_readers = num_readers
-        self.num_writers = num_writers
-        self.particle_mass = particle_mass 
-        self.overwrite = overwrite
-        if left_edge is None:
-            left_edge = pf.domain_left_edge
-        if right_edge is None:
-            right_edge = pf.domain_right_edge
-        self.le = left_edge
-        self.re = right_edge
-        if self.num_readers + self.num_writers + 1 != self.comm.size:
-            print '%i reader + %i writers != %i mpi'%\
-                    (self.num_readers, self.num_writers, self.comm.size)
-            raise RuntimeError
-        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        data_source = self.pf.h.all_data()
-        self.handler = rockstar_interface.RockstarInterface(
-                self.pf, data_source)
-        if outbase is None:
-            outbase = str(self.pf)+'_rockstar'
-        self.outbase = outbase        
+        # Decide how we're working.
+        if ytcfg.getboolean("yt", "inline") == True:
+            self.runner = InlineRunner()
+        else:
+            self.runner = StandardRunner(num_readers, num_writers)
+        self.num_readers = self.runner.num_readers
+        self.num_writers = self.runner.num_writers
+        mylog.info("Rockstar is using %d readers and %d writers",
+            self.num_readers, self.num_writers)
+        # Note that Rockstar does not support subvolumes.
+        # We assume that all of the snapshots in the time series
+        # use the same domain info as the first snapshots.
+        if not isinstance(ts, TimeSeriesData):
+            ts = TimeSeriesData([ts])
+        self.ts = ts
+        self.dm_type = dm_type
+        self.outbase = outbase
+        if force_res is None:
+            tpf = ts[-1] # Cache a reference
+            self.force_res = tpf.h.get_smallest_dx() * tpf['mpch']
+            # We have to delete now to wipe the hierarchy
+            del tpf
+        else:
+            self.force_res = force_res
+        self.total_particles = total_particles
+        self.dm_only = dm_only
+        # Setup pool and workgroups.
+        self.pool, self.workgroup = self.runner.setup_pool()
+        p = self._setup_parameters(ts)
+        params = self.comm.mpi_bcast(p, root = self.pool['readers'].ranks[0])
+        self.__dict__.update(params)
+        self.handler = rockstar_interface.RockstarInterface(self.ts)
+
+    def _setup_parameters(self, ts):
+        if self.workgroup.name != "readers": return None
+        tpf = ts[0]
+        def _particle_count(field, data):
+            if self.dm_only:
+                return np.prod(data["particle_position_x"].shape)
+            try:
+                return (data["particle_type"]==self.dm_type).sum()
+            except KeyError:
+                return np.prod(data["particle_position_x"].shape)
+        add_field("particle_count", function=_particle_count,
+                  not_in_all=True, particle_type=True)
+        dd = tpf.h.all_data()
+        # Get DM particle mass.
+        all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
+        for g in tpf.h._get_objs("grids"):
+            if g.NumberOfParticles == 0: continue
+            if self.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
+                iddm = g["particle_type"] == self.dm_type
+            else:
+                iddm = Ellipsis
+            particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
+            break
+        p = {}
+        if self.total_particles is None:
+            # Get total_particles in parallel.
+            p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+        p['left_edge'] = tpf.domain_left_edge
+        p['right_edge'] = tpf.domain_right_edge
+        p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
+        p['particle_mass'] = particle_mass
+        return p
+
+
+    def __del__(self):
+        try:
+            self.pool.free_all()
+        except AttributeError:
+            # This really only acts to cut down on the misleading
+            # error messages when/if this class is called incorrectly
+            # or some other error happens and self.pool hasn't been created
+            # already.
+            pass
 
     def _get_hosts(self):
-        if self.comm.size == 1 or self.workgroup.name == "server":
+        if self.comm.rank == 0 or self.comm.size == 1:
             server_address = socket.gethostname()
             sock = socket.socket()
             sock.bind(('', 0))
@@ -95,49 +285,44 @@
         """
         
         """
-        if self.comm.size > 1:
-            self.pool = ProcessorPool()
-            mylog.debug("Num Writers = %s Num Readers = %s",
-                        self.num_writers, self.num_readers)
-            self.pool.add_workgroup(1, name = "server")
-            self.pool.add_workgroup(self.num_readers, name = "readers")
-            self.pool.add_workgroup(self.num_writers, name = "writers")
-            for wg in self.pool.workgroups:
-                if self.comm.rank in wg.ranks: self.workgroup = wg
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
-        #because rockstar *always* write to exactly the same
-        #out_0.list filename we make a directory for it
-        #to sit inside so it doesn't get accidentally
-        #overwritten 
-        if self.workgroup.name == "server":
-            if not os.path.exists(self.outbase):
-                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
+                    len(self.ts), self.total_particles, 
+                    self.dm_type,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
+                    force_res = self.force_res,
                     particle_mass = float(self.particle_mass),
+                    dm_only = int(self.dm_only),
                     **kwargs)
+        # Make the directory to store the halo lists in.
+        if self.comm.rank == 0:
+            if not os.path.exists(self.outbase):
+                os.makedirs(self.outbase)
+            # Make a record of which dataset corresponds to which set of
+            # output files because it will be easy to lose this connection.
+            fp = open(self.outbase + '/pfs.txt', 'w')
+            fp.write("# pfname\tindex\n")
+            for i, pf in enumerate(self.ts):
+                pfloc = path.join(path.relpath(pf.fullpath), pf.basename)
+                line = "%s\t%d\n" % (pfloc, i)
+                fp.write(line)
+            fp.close()
+        # This barrier makes sure the directory exists before it might be used.
+        self.comm.barrier()
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
-            self.comm.barrier()
-            if self.workgroup.name == "server":
-                self.handler.start_server()
-            elif self.workgroup.name == "readers":
-                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            elif self.workgroup.name == "writers":
-                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
-                self.handler.start_client()
-            self.pool.free_all()
+            # And run it!
+            self.runner.run(self.handler, self.workgroup)
         self.comm.barrier()
-        #quickly rename the out_0.list 
+        self.pool.free_all()
     
     def halo_list(self,file_name='out_0.list'):
         """

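For completeness, a hedged end-to-end sketch (the snapshot glob is hypothetical; run under MPI as in the docstring example above) showing the finder being driven and the results read back with halo_list():

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

    ts = TimeSeriesData.from_filenames("DD????/DD????")  # hypothetical snapshots
    rh = RockstarHaloFinder(ts, num_readers=1, outbase="rockstar_halos", dm_type=1)
    rh.run()
    halos = rh.halo_list(file_name="out_0.list")          # a RockstarHaloList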

diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -29,6 +29,8 @@
 cimport cython
 from libc.stdlib cimport malloc
 
+from yt.config import ytcfg
+
 cdef import from "particle.h":
     struct particle:
         np.int64_t id
@@ -44,15 +46,17 @@
 cdef import from "config.h":
     void setup_config()
 
-cdef import from "server.h":
+cdef import from "server.h" nogil:
     int server()
+    np.int64_t READER_TYPE
+    np.int64_t WRITER_TYPE
 
-cdef import from "client.h":
-    void client()
+cdef import from "client.h" nogil:
+    void client(np.int64_t in_type)
 
 cdef import from "meta_io.h":
     void read_particles(char *filename)
-    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, 
 			   np.int64_t chunk, float *bounds)
 
 cdef import from "config_vars.h":
@@ -142,127 +146,63 @@
     np.float64_t AVG_PARTICLE_SPACING
     np.int64_t SINGLE_SNAP
 
-def print_rockstar_settings():
-    # We have to do the config
-    print "FILE_FORMAT =", FILE_FORMAT
-    print "PARTICLE_MASS =", PARTICLE_MASS
-
-    print "MASS_DEFINITION =", MASS_DEFINITION
-    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
-    print "FORCE_RES =", FORCE_RES
-
-    print "SCALE_NOW =", SCALE_NOW
-    print "h0 =", h0
-    print "Ol =", Ol
-    print "Om =", Om
-
-    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
-    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
-    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
-    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
-    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
-
-    print "PARALLEL_IO =", PARALLEL_IO
-    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
-    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
-    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
-    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
-    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
-
-    print "INBASE =", INBASE
-    print "FILENAME =", FILENAME
-    print "STARTING_SNAP =", STARTING_SNAP
-    print "NUM_SNAPS =", NUM_SNAPS
-    print "NUM_BLOCKS =", NUM_BLOCKS
-    print "NUM_READERS =", NUM_READERS
-    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
-    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
-    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
-    print "BLOCK_NAMES =", BLOCK_NAMES
-
-    print "OUTBASE =", OUTBASE
-    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
-    print "NUM_WRITERS =", NUM_WRITERS
-    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
-    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
-
-    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
-    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
-    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
-    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
-
-    print "BOUND_PROPS =", BOUND_PROPS
-    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
-    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
-    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
-    print "TRIM_OVERLAP =", TRIM_OVERLAP
-    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
-    print "LIGHTCONE =", LIGHTCONE
-    print "PERIODIC =", PERIODIC
-
-    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
-    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
-    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
-    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
-    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
-    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
-
-    print "LIMIT_CENTER =", LIMIT_CENTER[0]
-    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
-    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
-    print "LIMIT_RADIUS =", LIMIT_RADIUS
-
-    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
-    print "GADGET_VARIANT =", GADGET_VARIANT
-
-    print "FOF_FRACTION =", FOF_FRACTION
-    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
-    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
-    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
-    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
-    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
-    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
-    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
-
-    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
-    print "BOX_SIZE =", BOX_SIZE
-    print "OUTPUT_HMAD =", OUTPUT_HMAD
-    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
-    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
-    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
-    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
-    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
-
-    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
-    print "SINGLE_SNAP =", SINGLE_SNAP
-
+# Forward declare
 cdef class RockstarInterface
 
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
-    print 'reading from particle filename %s'%filename # should print ./inline.0
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
+    global SCALE_NOW
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
+    cdef unsigned long long pi,fi,i
+    pf = rh.tsl.next()
+    print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])
+    n = rh.block_ratio
 
-    # Now we want to grab data from only a subset of the grids.
-    n = rh.block_ratio
-    dd = rh.pf.h.all_data()
-    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
-    tnpart = 0
-    for g in grids:
-        tnpart += dd._get_data_from_grid(g, "particle_index").size
-    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
-    #print "Loading indices: size = ", tnpart
-    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+    # Now we want to grab data from only a subset of the grids for each reader.
+    all_fields = set(pf.h.derived_field_list + pf.h.field_list)
+
+    # First we need to find out how many this reader is going to read in
+    # if the number of readers > 1.
+    if NUM_BLOCKS > 1:
+        local_parts = 0
+        for g in pf.h._get_objs("grids"):
+            if g.NumberOfParticles == 0: continue
+            if rh.dm_only:
+                iddm = Ellipsis
+            elif "particle_type" in all_fields:
+                iddm = g["particle_type"] == rh.dm_type
+            else:
+                iddm = Ellipsis
+            arri = g["particle_index"].astype("int64")
+            arri = arri[iddm] #pick only DM
+            local_parts += arri.size
+    else:
+        local_parts = TOTAL_PARTICLES
+
+    #print "local_parts", local_parts
+
+    p[0] = <particle *> malloc(sizeof(particle) * local_parts)
+
+    conv[0] = conv[1] = conv[2] = pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = pf.domain_left_edge[0]
+    left_edge[1] = pf.domain_left_edge[1]
+    left_edge[2] = pf.domain_left_edge[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
-    for g in grids:
-        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+    for g in pf.h._get_objs("grids"):
+        if g.NumberOfParticles == 0: continue
+        if rh.dm_only:
+            iddm = Ellipsis
+        elif "particle_type" in all_fields:
+            iddm = g["particle_type"] == rh.dm_type
+        else:
+            iddm = Ellipsis
+        arri = g["particle_index"].astype("int64")
+        arri = arri[iddm] #pick only DM
         npart = arri.size
         for i in range(npart):
             p[0][i+pi].id = arri[i]
@@ -271,39 +211,50 @@
                       "particle_position_z",
                       "particle_velocity_x", "particle_velocity_y",
                       "particle_velocity_z"]:
-            arr = dd._get_data_from_grid(g, field).astype("float64")
+            arr = g[field].astype("float64")
+            arr = arr[iddm] #pick DM
             for i in range(npart):
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
-    num_p[0] = tnpart
-    print "Block #%i | Particles %i | Grids %i"%\
-            ( block, pi, len(grids))
+    num_p[0] = local_parts
 
 cdef class RockstarInterface:
 
-    cdef public object pf
     cdef public object data_source
+    cdef public object ts
+    cdef public object tsl
     cdef int rank
     cdef int size
     cdef public int block_ratio
+    cdef public int dm_type
+    cdef public int total_particles
+    cdef public int dm_only
 
-    def __cinit__(self, pf, data_source):
-        self.pf = pf
-        self.data_source = data_source
+    def __cinit__(self, ts):
+        self.ts = ts
+        self.tsl = ts.__iter__() # timeseries generator used by read
 
     def setup_rockstar(self, char *server_address, char *server_port,
-                       np.float64_t particle_mass = -1.0,
+                       int num_snaps, np.int64_t total_particles,
+                       int dm_type,
+                       np.float64_t particle_mass,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, int num_snaps = 1,
-                       int min_halo_size = 25, outbase = "None"):
+                       int periodic = 1, force_res=None,
+                       int min_halo_size = 25, outbase = "None",
+                       int dm_only = 0):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
         global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
+            #print "set force res to ",FORCE_RES
+        OVERLAP_LENGTH = 0.0
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -319,41 +270,46 @@
         OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = num_snaps
         NUM_READERS = num_readers
-        NUM_SNAPS = 1
         NUM_WRITERS = num_writers
         NUM_BLOCKS = num_readers
         MIN_HALO_OUTPUT_SIZE=min_halo_size
+        TOTAL_PARTICLES = total_particles
         self.block_ratio = block_ratio
-
-        h0 = self.pf.hubble_constant
-        Ol = self.pf.omega_lambda
-        Om = self.pf.omega_matter
-        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        self.dm_only = dm_only
+        
+        tpf = self.ts[0]
+        h0 = tpf.hubble_constant
+        Ol = tpf.omega_lambda
+        Om = tpf.omega_matter
+        SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
-            print 'using %s as outbase'%outbase
             OUTBASE = outbase 
 
-        if particle_mass < 0:
-            print "Assuming single-mass particle."
-            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
         PERIODIC = periodic
-        BOX_SIZE = (self.pf.domain_right_edge[0] -
-                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        BOX_SIZE = (tpf.domain_right_edge[0] -
+                    tpf.domain_left_edge[0]) * tpf['mpchcm']
         setup_config()
         rh = self
+        rh.dm_type = dm_type
         cdef LPG func = rh_read_particles
         set_load_particles_generic(func)
 
     def call_rockstar(self):
         read_particles("generic")
         rockstar(NULL, 0)
-        output_and_free_halos(0, 0, 0, NULL)
+        output_halos(0, 0, 0, NULL)
 
     def start_server(self):
-        server()
+        with nogil:
+            server()
 
-    def start_client(self):
-        client()
+    def start_reader(self):
+        cdef np.int64_t in_type = np.int64(READER_TYPE)
+        client(in_type)
+
+    def start_writer(self):
+        cdef np.int64_t in_type = np.int64(WRITER_TYPE)
+        client(in_type)


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -9,7 +9,14 @@
     config = Configuration('rockstar',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
-    rd = os.environ["ROCKSTAR_DIR"]
+    try:
+        rd = open("rockstar.cfg").read().strip()
+    except IOError:
+        print "Reading Rockstar location from rockstar.cfg failed."
+        print "Please place the base directory of your"
+        print "Rockstar install in rockstar.cfg and restart."
+        print "(ex: \"echo '/path/to/Rockstar-0.99' > rockstar.cfg\" )"
+        sys.exit(1)
     config.add_extension("rockstar_interface",
                          "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
                          library_dirs=[rd],

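For anyone building Rockstar by hand rather than through the install script, a hypothetical Python equivalent of the echo suggested in the error message above (the path is an example):

    # Point the yt build at an existing Rockstar checkout.
    open("rockstar.cfg", "w").write("/path/to/Rockstar-0.99\n")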

diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -10,7 +10,7 @@
     config.add_subpackage("fof")
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
-    if "ROCKSTAR_DIR" in os.environ:
+    if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/spectral_integrator/api.py
--- a/yt/analysis_modules/spectral_integrator/api.py
+++ b/yt/analysis_modules/spectral_integrator/api.py
@@ -30,4 +30,8 @@
 
 from .spectral_frequency_integrator import \
     SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+    create_table_from_textfiles, \
+    EmissivityIntegrator, \
+    add_xray_emissivity_field, \
+    add_xray_luminosity_field, \
+    add_xray_photon_emissivity_field


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -4,9 +4,11 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittons at origins.colorado.edu>
+Affiliation: Michigan State University
 Homepage: http://yt-project.org/
 License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2007-2012 Matthew Turk.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -24,16 +26,20 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from exceptions import IOError
+import h5py
 import numpy as np
+import os
 
 from yt.funcs import *
 
 from yt.data_objects.field_info_container import add_field
+from yt.utilities.exceptions import YTException
 from yt.utilities.linear_interpolators import \
-    UnilinearFieldInterpolator, \
-    BilinearFieldInterpolator, \
-    TrilinearFieldInterpolator
+    BilinearFieldInterpolator
 
+xray_data_version = 1
+    
 class SpectralFrequencyIntegrator(object):
     def __init__(self, table, field_names,
                  bounds, ev_bounds):
@@ -63,6 +69,8 @@
             bin_table, self.bounds, self.field_names[:],
             truncate=True)
 
+
+
     def add_frequency_bin_field(self, ev_min, ev_max):
         """
         Add a new field to the FieldInfoContainer, which is an
@@ -73,13 +81,13 @@
         interp = self._get_interpolator(ev_min, ev_max)
         name = "XRay_%s_%s" % (ev_min, ev_max)
         def frequency_bin_field(field, data):
-            dd = {'NumberDensity' : np.log10(data["NumberDensity"]),
+            dd = {'H_NumberDensity' : np.log10(data["H_NumberDensity"]),
                   'Temperature'   : np.log10(data["Temperature"])}
             return 10**interp(dd)
         add_field(name, function=frequency_bin_field,
                         projection_conversion="cm",
-                        units=r"\rm{ergs}\/\rm{cm}^{-3}\/\rm{s}^{-1}",
-                        projected_units=r"\rm{ergs}\/\rm{cm}^{-2}\/\rm{s}^{-1}")
+                        units=r"\rm{ergs}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+                        projected_units=r"\rm{ergs}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
         return name
 
 def create_table_from_textfiles(pattern, rho_spec, e_spec, T_spec):
@@ -98,3 +106,304 @@
         table[ri,:,ei] = [float(l.split()[-1]) for l in open(pattern%(i+1)) if l[0] != "#"]
     return table
 
+class EnergyBoundsException(YTException):
+    def __init__(self, lower, upper):
+        self.lower = lower
+        self.upper = upper
+
+    def __str__(self):
+        return "Energy bounds are %e to %e keV." % \
+          (self.lower, self.upper)
+
+class ObsoleteDataException(YTException):
+    def __str__(self):
+        return "X-ray emissivity data is out of data.\nDownload the latest data from http://yt-project.org/data/xray_emissivity.h5 and move it to %s." % \
+          os.path.join(os.environ["YT_DEST"], "data", "xray_emissivity.h5")
+          
+class EmissivityIntegrator(object):
+    r"""Class for making X-ray emissivity fields with hdf5 data tables 
+    from Cloudy.
+    """
+    def __init__(self, filename=None):
+        r"""Initialize an EmissivityIntegrator object.
+
+        Keyword Parameters
+        ------------------
+        filename: string
+            Path to data file containing emissivity values.  If None,
+            a file called xray_emissivity.h5 is used.  This file contains 
+            emissivity tables for primordial elements and for metals at 
+            solar metallicity for the energy range 0.1 to 100 keV.
+            Default: None.
+            
+        """
+
+        default_filename = False
+        if filename is None:
+            filename = os.path.join(os.environ["YT_DEST"], 
+                                    "data", "xray_emissivity.h5")
+            default_filename = True
+
+        if not os.path.exists(filename):
+            raise IOError("File does not exist: %s." % filename)
+        only_on_root(mylog.info, "Loading emissivity data from %s." % filename)
+        in_file = h5py.File(filename, "r")
+        if "info" in in_file.attrs:
+            only_on_root(mylog.info, in_file.attrs["info"])
+        if default_filename and \
+          in_file.attrs["version"] < xray_data_version:
+            raise ObsoleteDataException()
+        else:
+            only_on_root(mylog.info, "X-ray emissivity data version: %s." % \
+                         in_file.attrs["version"])
+
+        for field in ["emissivity_primordial", "emissivity_metals",
+                      "log_nH", "log_T", "log_E"]:
+            setattr(self, field, in_file[field][:])
+        in_file.close()
+
+        E_diff = np.diff(self.log_E)
+        self.E_bins = \
+                  np.power(10, np.concatenate([self.log_E[:-1] - 0.5 * E_diff,
+                                               [self.log_E[-1] - 0.5 * E_diff[-1],
+                                                self.log_E[-1] + 0.5 * E_diff[-1]]]))
+        self.dnu = 2.41799e17 * np.diff(self.E_bins)
+
+    def _get_interpolator(self, data, e_min, e_max):
+        r"""Create an interpolator for total emissivity in a 
+        given energy range.
+
+        Parameters
+        ----------
+        e_min: float
+            the minimum energy in keV for the energy band.
+        e_max: float
+            the maximum energy in keV for the energy band.
+
+        """
+        if (e_min - self.E_bins[0]) / e_min < -1e-3 or \
+          (e_max - self.E_bins[-1]) / e_max > 1e-3:
+            raise EnergyBoundsException(self.E_bins[0],
+                                        self.E_bins[-1])
+        e_is, e_ie = np.digitize([e_min, e_max], self.E_bins)
+        e_is = np.clip(e_is - 1, 0, self.E_bins.size - 1)
+        e_ie = np.clip(e_ie, 0, self.E_bins.size - 1)
+
+        my_dnu = np.copy(self.dnu[e_is: e_ie])
+        # clip edge bins if the requested range is smaller
+        my_dnu[0] -= e_min - self.E_bins[e_is]
+        my_dnu[-1] -= self.E_bins[e_ie] - e_max
+
+        interp_data = (data[..., e_is:e_ie] * my_dnu).sum(axis=-1)
+        return BilinearFieldInterpolator(np.log10(interp_data),
+                                         [self.log_nH[0], self.log_nH[-1],
+                                          self.log_T[0],  self.log_T[-1]],
+                                         ["log_nH", "log_T"], truncate=True)
+
+def add_xray_emissivity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+
+    em_0 = my_si._get_interpolator(my_si.emissivity_primordial, e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator(my_si.emissivity_metals, e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{erg}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_luminosity_field(e_min, e_max, filename=None,
+                              with_metals=True,
+                              constant_metallicity=None):
+    r"""Create an X-ray luminosity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Luminosity_{e_min}_{e_max}keV".
+    The units of the field are erg s^-1.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_luminosity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> sp = pf.h.sphere('max', (2., 'mpc'))
+    >>> print sp.quantities['TotalQuantity']('Xray_Luminosity_0.5_2keV')
+    
+    """
+
+    em_field = add_xray_emissivity_field(e_min, e_max, filename=filename,
+                                         with_metals=with_metals,
+                                         constant_metallicity=constant_metallicity)
+
+    def _luminosity_field(field, data):
+        return data[em_field] * data["CellVolume"]
+    field_name = "Xray_Luminosity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_luminosity_field,
+              display_name=r"\rm{L}_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{erg}\ \rm{s}^{-1}")
+    return field_name
+
+def add_xray_photon_emissivity_field(e_min, e_max, filename=None,
+                                     with_metals=True,
+                                     constant_metallicity=None):
+    r"""Create an X-ray photon emissivity field for a given energy range.
+
+    Parameters
+    ----------
+    e_min: float
+        the minimum energy in keV for the energy band.
+    e_max: float
+        the maximum energy in keV for the energy band.
+
+    Keyword Parameters
+    ------------------
+    filename: string
+        Path to data file containing emissivity values.  If None,
+        a file called xray_emissivity.h5 is used.  This file contains 
+        emissivity tables for primordial elements and for metals at 
+        solar metallicity for the energy range 0.1 to 100 keV.
+        Default: None.
+    with_metals: bool
+        If True, use the metallicity field to add the contribution from 
+        metals.  If False, only the emission from H/He is considered.
+        Default: True.
+    constant_metallicity: float
+        If specified, assume a constant metallicity for the emission 
+        from metals.  The *with_metals* keyword must be set to False 
+        to use this.
+        Default: None.
+
+    This will create a field named "Xray_Photon_Emissivity_{e_min}_{e_max}keV".
+    The units of the field are photons s^-1 cm^-3.
+
+    Examples
+    --------
+
+    >>> from yt.mods import *
+    >>> from yt.analysis_modules.spectral_integrator.api import *
+    >>> add_xray_photon_emissivity_field(0.5, 2)
+    >>> pf = load(dataset)
+    >>> p = ProjectionPlot(pf, 'x', "Xray_Emissivity_0.5_2keV")
+    >>> p.save()
+
+    """
+
+    my_si = EmissivityIntegrator(filename=filename)
+    energy_erg = np.power(10, my_si.log_E) * 1.60217646e-9
+
+    em_0 = my_si._get_interpolator((my_si.emissivity_primordial[..., :] / energy_erg),
+                                   e_min, e_max)
+    em_Z = None
+    if with_metals or constant_metallicity is not None:
+        em_Z = my_si._get_interpolator((my_si.emissivity_metals[..., :] / energy_erg),
+                                       e_min, e_max)
+
+    def _emissivity_field(field, data):
+        dd = {"log_nH" : np.log10(data["H_NumberDensity"]),
+              "log_T"   : np.log10(data["Temperature"])}
+
+        my_emissivity = np.power(10, em_0(dd))
+        if em_Z is not None:
+            if with_metals:
+                my_Z = data["Metallicity"]
+            elif constant_metallicity is not None:
+                my_Z = constant_metallicity
+            my_emissivity += my_Z * np.power(10, em_Z(dd))
+
+        return data["H_NumberDensity"]**2 * my_emissivity
+
+    field_name = "Xray_Photon_Emissivity_%s_%skeV" % (e_min, e_max)
+    add_field(field_name, function=_emissivity_field,
+              projection_conversion="cm",
+              display_name=r"\epsilon_{X}\ (%s-%s\ keV)" % (e_min, e_max),
+              units=r"\rm{photons}\ \rm{cm}^{-3}\ \rm{s}^{-1}",
+              projected_units=r"\rm{photons}\ \rm{cm}^{-2}\ \rm{s}^{-1}")
+    return field_name
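
The constant_metallicity keyword documented above is not exercised in any of the built-in examples; the following sketch shows how it is meant to be combined with with_metals=False, per the docstrings. The dataset name is a placeholder, and the field names follow the "Xray_..._%s_%skeV" pattern defined in this file.

    # Sketch: 0.5-7 keV emissivity and luminosity fields assuming a fixed
    # metallicity of 0.3 solar instead of a per-cell Metallicity field.
    # The dataset path is a placeholder.
    from yt.mods import *
    from yt.analysis_modules.spectral_integrator.api import \
        add_xray_emissivity_field, add_xray_luminosity_field

    add_xray_emissivity_field(0.5, 7, with_metals=False, constant_metallicity=0.3)
    add_xray_luminosity_field(0.5, 7, with_metals=False, constant_metallicity=0.3)

    pf = load("my_dataset")
    sp = pf.h.sphere("max", (1.0, "mpc"))
    print sp.quantities["TotalQuantity"]("Xray_Luminosity_0.5_7keV")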


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #record the index of the child grid (-1 for a leaf)
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select at least one particle
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,np.sum(idx)
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
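
For reference, a short sketch of calling the updated exporter, based only on the signature and return values visible in this diff (export_to_sunrise now also returns nstars). The dataset name, star particle type, and export region are placeholders.

    # Sketch: the dataset name, particle type, and export region below are
    # placeholders; the call signature follows the diff above.
    from yt.mods import *
    from yt.analysis_modules.sunrise_export.sunrise_exporter import export_to_sunrise

    pf = load("my_dataset")
    fc = [0.5, 0.5, 0.5]       # center of the export region (code units)
    fwidth = [0.1, 0.1, 0.1]   # width of the export region (code units)
    fle, fre, ile, ire, dd, nleaf, nstars = export_to_sunrise(
        pf, "sunrise_input.fits", 2, fc, fwidth)
    print "exported %i leaf cells and %i star particles" % (nleaf, nstars)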


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -59,6 +59,11 @@
     hub_url = 'https://hub.yt-project.org/upload',
     hub_api_key = '',
     ipython_notebook = 'False',
+    answer_testing_tolerance = '3',
+    answer_testing_bitwise = 'False',
+    gold_standard_filename = 'gold004',
+    local_standard_filename = 'local001',
+    sketchfab_api_key = 'None'
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten
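
Of the new configuration options above, sketchfab_api_key is the one used elsewhere in this changeset (see export_sketchfab below); it is normally set in ~/.yt/config, but it can also be overridden for a single session through ytcfg, which is assumed here to behave like a ConfigParser. The key string is a placeholder.

    # Sketch: override the new sketchfab_api_key option for one session.
    # ytcfg.set() is assumed to be available (ConfigParser-style); the key
    # is a placeholder.
    from yt.config import ytcfg
    ytcfg.set("yt", "sketchfab_api_key", "0123456789abcdef")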


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -61,6 +61,12 @@
     valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
             else False for arg in args]
     if not any(valid_file):
+        try:
+            from yt.data_objects.time_series import TimeSeriesData
+            ts = TimeSeriesData.from_filenames(*args, **kwargs)
+            return ts
+        except YTOutputNotIdentified:
+            pass
         mylog.error("None of the arguments provided to load() is a valid file")
         mylog.error("Please check that you have used a correct path")
         raise YTOutputNotIdentified(args, kwargs)
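
The change above makes load() fall back to building a time series when none of its arguments is a single valid file. A sketch of what this enables, assuming TimeSeriesData.from_filenames accepts a glob pattern and that the resulting object is iterable; the pattern is a placeholder.

    # Sketch: a glob over many outputs is now expected to come back from
    # load() as a TimeSeriesData rather than raising YTOutputNotIdentified.
    # The pattern is a placeholder.
    from yt.mods import *
    ts = load("DD????/DD????")
    for pf in ts:
        print pf, pf.current_time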


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -35,8 +35,10 @@
 import exceptions
 import itertools
 import shelve
+import cStringIO
 
 from yt.funcs import *
+from yt.config import ytcfg
 
 from yt.data_objects.derived_quantities import GridChildMaskWrapper
 from yt.data_objects.particle_io import particle_handler_registry
@@ -48,7 +50,7 @@
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface
+    ParallelAnalysisInterface, parallel_root_only
 from yt.utilities.linear_interpolators import \
     UnilinearFieldInterpolator, \
     BilinearFieldInterpolator, \
@@ -867,9 +869,11 @@
         else:
             self.fields = ensure_list(fields)
         from yt.visualization.plot_window import \
-            GetBoundsAndCenter, PWViewerMPL
+            GetWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, self.pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer, 
                          plot_type=plot_type)
         pw.set_axes_unit(axes_unit)
@@ -2777,12 +2781,12 @@
             ma = np.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
         if filename is not None and self.comm.rank == 0:
-            f = open(filename, "w")
+            if hasattr(filename, "write"): f = filename
             for v1 in verts:
                 f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
             for i in range(len(verts)/3):
                 f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
-            f.close()
+            if not hasattr(filename, "write"): f.close()
         if sample_values is not None:
             return verts, samples
         return verts
@@ -3808,7 +3812,9 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
@@ -3979,8 +3985,9 @@
 
     @restore_field_information_state
     def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        g_fields = [grid[field].astype("float64") for field in fields]
+        g_fields = [gf.astype("float64") 
+                    if gf.dtype != "float64"
+                    else gf for gf in (grid[field] for field in fields)]
         c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
@@ -4162,6 +4169,427 @@
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
 
+class AMRSurfaceBase(AMRData, ParallelAnalysisInterface):
+    _type_name = "surface"
+    _con_args = ("data_source", "surface_field", "field_value")
+    vertices = None
+    def __init__(self, data_source, surface_field, field_value):
+        r"""This surface object identifies isocontours on a cell-by-cell basis,
+        with no consideration of global connectedness, and returns the vertices
+        of the Triangles in that isocontour.
+
+        This object simply returns the vertices of all the triangles
+        calculated by the marching cubes algorithm; for more complex
+        operations, such as identifying connected sets of cells above a given
+        threshold, see the extract_connected_sets function.  This is more
+        useful for calculating, for instance, total isocontour area, or
+        visualizing in an external program (such as `MeshLab
+        <http://meshlab.sf.net>`_.)  The object has the properties .vertices
+        and will sample values if a field is requested.  The values are
+        interpolated to the center of a given face.
+        
+        Parameters
+        ----------
+        data_source : AMR3DDataObject
+            This is the object which will be used as a source
+        surface_field : string
+            Any field that can be obtained in a data object.  This is the field
+            which will be isocontoured.
+        field_value : float
+            The value at which the isocontour should be calculated.
+
+        References
+        ----------
+
+        .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+        Examples
+        --------
+        This will create a data object, find a nice value in the center, and
+        output the vertices to "triangles.obj" after rescaling them.
+
+        >>> sp = pf.h.sphere("max", (10, "kpc")
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> print surf["Temperature"]
+        >>> print surf.vertices
+        >>> bounds = [(sp.center[i] - 5.0/pf['kpc'],
+        ...            sp.center[i] + 5.0/pf['kpc']) for i in range(3)]
+        >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
+        """
+        ParallelAnalysisInterface.__init__(self)
+        self.data_source = data_source
+        self.surface_field = surface_field
+        self.field_value = field_value
+        self.vertex_samples = YTFieldData()
+        center = data_source.get_field_parameter("center")
+        AMRData.__init__(self, center = center, fields = None, pf =
+                         data_source.pf)
+        self._grids = self.data_source._grids.copy()
+
+    def get_data(self, fields = None, sample_type = "face"):
+        if isinstance(fields, list) and len(fields) > 1:
+            for field in fields: self.get_data(field)
+            return
+        elif isinstance(fields, list):
+            fields = fields[0]
+        # Now we have a "fields" value that is either a string or None
+        pb = get_pbar("Extracting (sampling: %s)" % fields,
+                      len(list(self._get_grid_objs())))
+        verts = []
+        samples = []
+        for i,g in enumerate(self._get_grid_objs()):
+            pb.update(i)
+            my_verts = self._extract_isocontours_from_grid(
+                            g, self.surface_field, self.field_value,
+                            fields, sample_type)
+            if fields is not None:
+                my_verts, svals = my_verts
+                samples.append(svals)
+            verts.append(my_verts)
+        pb.finish()
+        verts = np.concatenate(verts).transpose()
+        verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
+        self.vertices = verts
+        if fields is not None:
+            samples = np.concatenate(samples)
+            samples = self.comm.par_combine_object(samples, op='cat',
+                                datatype='array')
+            if sample_type == "face":
+                self[fields] = samples
+            elif sample_type == "vertex":
+                self.vertex_samples[fields] = samples
+        
+
+    @restore_grid_state
+    def _extract_isocontours_from_grid(self, grid, field, value,
+                                       sample_values = None,
+                                       sample_type = "face"):
+        mask = self.data_source._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(field, no_ghost = False)
+        if sample_values is not None:
+            svals = grid.get_vertex_centered_data(sample_values)
+        else:
+            svals = None
+        sample_type = {"face":1, "vertex":2}[sample_type]
+        my_verts = march_cubes_grid(value, vals, mask, grid.LeftEdge,
+                                    grid.dds, svals, sample_type)
+        return my_verts
+
+    def calculate_flux(self, field_x, field_y, field_z, fluxing_field = None):
+        r"""This calculates the flux over the surface.
+
+        This function will conduct marching cubes on all the cells in a given
+        data container (grid-by-grid), and then for each identified triangular
+        segment of an isocontour in a given cell, calculate the gradient (i.e.,
+        normal) in the isocontoured field, interpolate the local value of the
+        "fluxing" field, the area of the triangle, and then return:
+
+        area * local_flux_value * (n dot v)
+
+        Where area, local_value, and the vector v are interpolated at the barycenter
+        (weighted by the vertex values) of the triangle.  Note that this
+        specifically allows for the field fluxing across the surface to be
+        *different* from the field being contoured.  If the fluxing_field is
+        not specified, it is assumed to be 1.0 everywhere, and the raw flux
+        with no local-weighting is returned.
+
+        Additionally, the returned flux is defined as flux *into* the surface,
+        not flux *out of* the surface.
+        
+        Parameters
+        ----------
+        field_x : string
+            The x-component field
+        field_y : string
+            The y-component field
+        field_z : string
+            The z-component field
+        fluxing_field : string, optional
+            The field whose passage over the surface is of interest.  If not
+            specified, assumed to be 1.0 everywhere.
+
+        Returns
+        -------
+        flux : float
+            The summed flux.  Note that it is not currently scaled; this is
+            simply the code-unit area times the fields.
+
+        References
+        ----------
+
+        .. [1] Marching Cubes: http://en.wikipedia.org/wiki/Marching_cubes
+
+        Examples
+        --------
+
+        This will create a data object, find a nice value in the center, and
+        calculate the metal flux over it.
+
+        >>> sp = pf.h.sphere("max", (10, "kpc")
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> flux = surf.calculate_flux(
+        ...     "x-velocity", "y-velocity", "z-velocity", "Metal_Density")
+        """
+        flux = 0.0
+        pb = get_pbar("Fluxing %s" % fluxing_field,
+                len(list(self._get_grid_objs())))
+        for i, g in enumerate(self._get_grid_objs()):
+            pb.update(i)
+            flux += self._calculate_flux_in_grid(g,
+                    field_x, field_y, field_z, fluxing_field)
+        pb.finish()
+        flux = self.comm.mpi_allreduce(flux, op="sum")
+        return flux
+
+    @restore_grid_state
+    def _calculate_flux_in_grid(self, grid, 
+                    field_x, field_y, field_z, fluxing_field = None):
+        mask = self.data_source._get_cut_mask(grid) * grid.child_mask
+        vals = grid.get_vertex_centered_data(self.surface_field)
+        if fluxing_field is None:
+            ff = np.ones(vals.shape, dtype="float64")
+        else:
+            ff = grid.get_vertex_centered_data(fluxing_field)
+        xv, yv, zv = [grid.get_vertex_centered_data(f) for f in 
+                     [field_x, field_y, field_z]]
+        return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
+                    ff, mask, grid.LeftEdge, grid.dds)
+
+    def export_ply(self, filename, bounds = None, color_field = None,
+                   color_map = "algae", color_log = True, sample_type = "face"):
+        r"""This exports the surface to the PLY format, suitable for visualization
+        in many different programs (e.g., MeshLab).
+
+        Parameters
+        ----------
+        filename : string
+            The file this will be exported to.  This cannot be a file-like object.
+        bounds : list of tuples
+            The bounds the vertices will be normalized to.  This is of the format:
+            [(xmin, xmax), (ymin, ymax), (zmin, zmax)].  Defaults to the full
+            domain.
+        color_field : string
+            Should a field be sampled and colormapped?
+        color_map : string
+            Which color map should be applied?
+        color_log : bool
+            Should the color field be logged before being mapped?
+
+        Examples
+        --------
+
+        >>> sp = pf.h.sphere("max", (10, "kpc")
+        >>> surf = pf.h.surface(sp, "Density", 5e-27)
+        >>> print surf["Temperature"]
+        >>> print surf.vertices
+        >>> bounds = [(sp.center[i] - 5.0/pf['kpc'],
+        ...            sp.center[i] + 5.0/pf['kpc']) for i in range(3)]
+        >>> surf.export_ply("my_galaxy.ply", bounds = bounds)
+        """
+        if self.vertices is None:
+            self.get_data(color_field, sample_type)
+        elif color_field is not None:
+            if sample_type == "face" and \
+                color_field not in self.field_data:
+                self[color_field]
+            elif sample_type == "vertex" and \
+                color_field not in self.vertex_samples:
+                self.get_data(color_field, sample_type)
+        self._export_ply(filename, bounds, color_field, color_map, color_log,
+                         sample_type)
+
+    def _color_samples(self, cs, color_log, color_map, arr):
+            if color_log: cs = np.log10(cs)
+            mi, ma = cs.min(), cs.max()
+            cs = (cs - mi) / (ma - mi)
+            from yt.visualization.image_writer import map_to_colors
+            cs = map_to_colors(cs, color_map)
+            arr["red"][:] = cs[0,:,0]
+            arr["green"][:] = cs[0,:,1]
+            arr["blue"][:] = cs[0,:,2]
+
+    @parallel_root_only
+    def _export_ply(self, filename, bounds = None, color_field = None,
+                   color_map = "algae", color_log = True, sample_type = "face"):
+        if isinstance(filename, file):
+            f = filename
+        else:
+            f = open(filename, "wb")
+        if bounds is None:
+            DLE = self.pf.domain_left_edge
+            DRE = self.pf.domain_right_edge
+            bounds = [(DLE[i], DRE[i]) for i in range(3)]
+        nv = self.vertices.shape[1]
+        vs = [("x", "<f"), ("y", "<f"), ("z", "<f"),
+              ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
+        fs = [("ni", "uint8"), ("v1", "<i4"), ("v2", "<i4"), ("v3", "<i4"),
+              ("red", "uint8"), ("green", "uint8"), ("blue", "uint8") ]
+        f.write("ply\n")
+        f.write("format binary_little_endian 1.0\n")
+        f.write("element vertex %s\n" % (nv))
+        f.write("property float x\n")
+        f.write("property float y\n")
+        f.write("property float z\n")
+        if color_field is not None and sample_type == "vertex":
+            f.write("property uchar red\n")
+            f.write("property uchar green\n")
+            f.write("property uchar blue\n")
+            v = np.empty(self.vertices.shape[1], dtype=vs)
+            cs = self.vertex_samples[color_field]
+            self._color_samples(cs, color_log, color_map, v)
+        else:
+            v = np.empty(self.vertices.shape[1], dtype=vs[:3])
+        f.write("element face %s\n" % (nv/3))
+        f.write("property list uchar int vertex_indices\n")
+        if color_field is not None and sample_type == "face":
+            f.write("property uchar red\n")
+            f.write("property uchar green\n")
+            f.write("property uchar blue\n")
+            # Now we get our samples
+            cs = self[color_field]
+            arr = np.empty(cs.shape[0], dtype=np.dtype(fs))
+            self._color_samples(cs, color_log, color_map, arr)
+        else:
+            arr = np.empty(nv/3, np.dtype(fs[:-3]))
+        for i, ax in enumerate("xyz"):
+            # Do the bounds first since we cast to f32
+            tmp = self.vertices[i,:]
+            np.subtract(tmp, bounds[i][0], tmp)
+            w = bounds[i][1] - bounds[i][0]
+            np.divide(tmp, w, tmp)
+            np.subtract(tmp, 0.5, tmp) # Center at origin.
+            v[ax][:] = tmp 
+        f.write("end_header\n")
+        v.tofile(f)
+        arr["ni"][:] = 3
+        vi = np.arange(nv, dtype="<i")
+        vi.shape = (nv/3, 3)
+        arr["v1"][:] = vi[:,0]
+        arr["v2"][:] = vi[:,1]
+        arr["v3"][:] = vi[:,2]
+        arr.tofile(f)
+        if filename is not f:
+            f.close()
+
+    def export_sketchfab(self, title, description, api_key = None,
+                            color_field = None, color_map = "algae",
+                            color_log = True, bounds = None):
+        r"""This exports Surfaces to SketchFab.com, where they can be viewed
+        interactively in a web browser.
+
+        SketchFab.com is a proprietary web service that provides WebGL
+        rendering of models.  This routine will use temporary files to
+        construct a compressed binary representation (in .PLY format) of the
+        Surface and any optional fields you specify and upload it to
+        SketchFab.com.  It requires an API key, which can be found on your
+        SketchFab.com dashboard.  You can either supply the API key to this
+        routine directly or you can place it in the variable
+        "sketchfab_api_key" in your ~/.yt/config file.  This function is
+        parallel-safe.
+
+        Parameters
+        ----------
+        title : string
+            The title for the model on the website
+        description : string
+            How you want the model to be described on the website
+        api_key : string
+            Optional; defaults to using the one in the config file
+        color_field : string
+            If specified, the field by which the surface will be colored
+        color_map : string
+            The name of the color map to use to map the color field
+        color_log : bool
+            Should the field be logged before being mapped to RGB?
+        bounds : list of tuples
+            [ (xmin, xmax), (ymin, ymax), (zmin, zmax) ] within which the model
+            will be scaled and centered.  Defaults to the full domain.
+
+        Returns
+        -------
+        URL : string
+            The URL at which your model can be viewed.
+
+        Examples
+        --------
+
+        >>> from yt.mods import *
+        >>> pf = load("redshift0058")
+        >>> dd = pf.h.sphere("max", (200, "kpc"))
+        >>> rho = 5e-27
+
+        >>> bounds = [(dd.center[i] - 100.0/pf['kpc'],
+        ...            dd.center[i] + 100.0/pf['kpc']) for i in range(3)]
+
+        >>> surf = pf.h.surface(dd, "Density", rho)
+
+        >>> rv = surf.export_sketchfab(
+        ...     title = "Testing Upload",
+        ...     description = "A simple test of the uploader",
+        ...     color_field = "Temperature",
+        ...     color_map = "hot",
+        ...     color_log = True,
+        ...     bounds = bounds
+        ... )
+        """
+        api_key = api_key or ytcfg.get("yt","sketchfab_api_key")
+        if api_key in (None, "None"):
+            raise YTNoAPIKey("SketchFab.com", "sketchfab_api_key")
+        import zipfile, json
+        from tempfile import TemporaryFile
+
+        ply_file = TemporaryFile()
+        self.export_ply(ply_file, bounds, color_field, color_map, color_log,
+                        sample_type = "vertex")
+        ply_file.seek(0)
+        # More than ten million vertices: dump the model to a local file
+        # and raise an error instead of uploading.
+        if self.vertices.shape[1] > 1e7:
+            tfi = 0
+            fn = "temp_model_%03i.ply" % tfi
+            while os.path.exists(fn):
+                tfi += 1
+                fn = "temp_model_%03i.ply" % tfi
+            open(fn, "wb").write(ply_file.read())
+            raise YTTooManyVertices(self.vertices.shape[1], fn)
+
+        zfs = TemporaryFile()
+        with zipfile.ZipFile(zfs, "w", zipfile.ZIP_DEFLATED) as zf:
+            zf.writestr("yt_export.ply", ply_file.read())
+        zfs.seek(0)
+
+        data = {
+            'title': title,
+            'token': api_key,
+            'description': description,
+            'fileModel': zfs,
+            'filenameModel': "yt_export.zip",
+        }
+        upload_id = self._upload_to_sketchfab(data)
+        upload_id = self.comm.mpi_bcast(upload_id, root = 0)
+        return upload_id
+
+    @parallel_root_only
+    def _upload_to_sketchfab(self, data):
+        import urllib2, json
+        from yt.utilities.poster.encode import multipart_encode
+        from yt.utilities.poster.streaminghttp import register_openers
+        register_openers()
+        datamulti, headers = multipart_encode(data)
+        request = urllib2.Request("https://api.sketchfab.com/v1/models",
+                        datamulti, headers)
+        rv = urllib2.urlopen(request).read()
+        rv = json.loads(rv)
+        upload_id = rv.get("result", {}).get("id", None)
+        if upload_id:
+            mylog.info("Model uploaded to: https://sketchfab.com/show/%s",
+                       upload_id)
+        else:
+            mylog.error("Problem uploading.")
+        return upload_id
+
+
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
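
A minimal sketch of the config-based API key route described in the export_sketchfab
docstring above, using the same ytcfg interface the new tests use; the key string
below is a placeholder, not a real key:

    from yt.config import ytcfg
    # Equivalent to putting sketchfab_api_key under the [yt] section of
    # ~/.yt/config, but only for the current session.
    ytcfg["yt","sketchfab_api_key"] = "0123456789abcdef"  # placeholder
    # export_sketchfab() will now find the key whenever api_key is not
    # passed explicitly.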


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -280,7 +280,7 @@
             return np.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
-    def get_field_parameter(self, param):
+    def get_field_parameter(self, param, default = None):
         self.requested_parameters.append(param)
         if param in ['bulk_velocity', 'center', 'normal']:
             return np.random.random(3) * 1e-2
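
The default argument added to get_field_parameter() above lets a field definition
ask for a parameter and fall back gracefully when it has not been set. A small
sketch of that pattern; the field and its logic are hypothetical:

    import numpy as np

    def _RadialPosition(field, data):
        # Hypothetical derived field: distance from the 'center' field
        # parameter, defaulting to the domain origin when none was supplied.
        center = data.get_field_parameter("center", default = np.zeros(3))
        return np.sqrt((data["x"] - center[0])**2 +
                       (data["y"] - center[1])**2 +
                       (data["z"] - center[2])**2)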


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -153,7 +153,7 @@
         """
         Returns a single field.  Will add if necessary.
         """
-        if not self.field_data.has_key(key):
+        if key not in self.field_data:
             self.get_data(key)
         return self.field_data[key]
 
@@ -242,6 +242,23 @@
         cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
         return cond
 
+    def is_in_grid(self, x, y, z) :
+        """
+        Generate a mask that shows which points in *x*, *y*, and *z*
+        fall within this grid's boundaries.
+        """
+        xcond = np.logical_and(x >= self.LeftEdge[0],
+                               x < self.RightEdge[0])
+        ycond = np.logical_and(y >= self.LeftEdge[1],
+                               y < self.RightEdge[1])
+        zcond = np.logical_and(z >= self.LeftEdge[2],
+                               z < self.RightEdge[2])
+
+        cond = np.logical_and(xcond, ycond)
+        cond = np.logical_and(zcond, cond)
+
+        return cond
+        
     def __repr__(self):
         return "AMRGridPatch_%04i" % (self.id)
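
A short sketch of how the new is_in_grid() mask might be used, assuming pf is an
already-loaded parameter file and the coordinates are NumPy arrays in code units:

    import numpy as np
    x, y, z = np.random.random((3, 1000))   # sample points; adjust to your domain
    g = pf.h.grids[0]
    inside = g.is_in_grid(x, y, z)          # True where a point lies within g
    x_in, y_in, z_in = x[inside], y[inside], z[inside]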
 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -136,6 +136,12 @@
             mylog.warning("Refine by something other than two: reverting to"
                         + " overlap_proj")
             self.proj = self.overlap_proj
+        if self.pf.dimensionality < 3 and hasattr(self, 'proj') and \
+            hasattr(self, 'overlap_proj'):
+            mylog.warning("Dimensionality less than 3: reverting to"
+                        + " overlap_proj")
+            self.proj = self.overlap_proj
+
         self.object_types.sort()
 
     def _setup_unknown_fields(self):


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -29,8 +29,11 @@
 from yt.utilities.lib import \
     get_box_grids_level, \
     get_box_grids_below_level
+from yt.utilities.lib import \
+    MatchPointsToGrids, \
+    GridTree
 
-class ObjectFindingMixin(object):
+class ObjectFindingMixin(object) :
 
     def find_ray_grids(self, coord, axis):
         """
@@ -110,6 +113,16 @@
         ind = np.where(mask == 1)
         return self.grids[ind], ind
 
+    def find_points(self, x, y, z) :
+        """
+        Returns the (objects, indices) of the leaf grids containing a set of (x, y, z) points
+        """
+        num_points = len(x)
+        grid_tree = self.get_grid_tree()
+        pts = MatchPointsToGrids(grid_tree,num_points,x,y,z)
+        ind = pts.find_points_in_tree() 
+        return self.grids[ind], ind
+    
     def find_field_value_at_point(self, fields, coord):
         r"""Find the value of fields at a point.
         
@@ -239,3 +252,24 @@
                     mask[gi] = True
         return self.grids[mask], np.where(mask)
 
+    def get_grid_tree(self) :
+
+        left_edge = np.zeros((self.num_grids, 3))
+        right_edge = np.zeros((self.num_grids, 3))
+        level = np.zeros((self.num_grids), dtype='int64')
+        parent_ind = np.zeros((self.num_grids), dtype='int64')
+        num_children = np.zeros((self.num_grids), dtype='int64')
+
+        for i, grid in enumerate(self.grids) :
+
+            left_edge[i,:] = grid.LeftEdge
+            right_edge[i,:] = grid.RightEdge
+            level[i] = grid.Level
+            if grid.Parent is None :
+                parent_ind[i] = -1
+            else :
+                parent_ind[i] = grid.Parent.id - grid.Parent._id_offset
+            num_children[i] = np.int64(len(grid.Children))
+
+        return GridTree(self.num_grids, left_edge, right_edge, parent_ind,
+                        level, num_children)
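
Along the same lines, a minimal sketch of the new hierarchy-level find_points()
entry point, again assuming pf is a loaded parameter file and the points lie
inside the domain in code units:

    import numpy as np
    x, y, z = np.random.random((3, 100))     # 100 sample points
    grids, indices = pf.h.find_points(x, y, z)
    # grids[i] is the leaf grid containing (x[i], y[i], z[i]); indices can be
    # used to index pf.h.grids directly.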


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/tests/test_fluxes.py
--- /dev/null
+++ b/yt/data_objects/tests/test_fluxes.py
@@ -0,0 +1,21 @@
+from yt.testing import *
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def test_flux_calculation():
+    pf = fake_random_pf(64, nprocs = 4)
+    dd = pf.h.all_data()
+    surf = pf.h.surface(dd, "x", 0.51)
+    yield assert_equal, surf["x"], 0.51
+    flux = surf.calculate_flux("Ones", "Zeros", "Zeros", "Ones")
+    yield assert_almost_equal, flux, 1.0, 12
+
+def test_sampling():
+    pf = fake_random_pf(64, nprocs = 4)
+    dd = pf.h.all_data()
+    for i, ax in enumerate('xyz'):
+        surf = pf.h.surface(dd, ax, 0.51)
+        surf.get_data(ax, "vertex")
+        yield assert_equal, surf.vertex_samples[ax], surf.vertices[i,:]


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_projection():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -22,6 +27,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 proj = pf.h.proj(ax, ["Ones", "Density"], weight_field = wf)
                 yield assert_equal, proj["Ones"].sum(), proj["Ones"].size
                 yield assert_equal, proj["Ones"].min(), 1.0
@@ -30,6 +36,8 @@
                 yield assert_equal, np.unique(proj["py"]), uc[yax]
                 yield assert_equal, np.unique(proj["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(proj["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = proj.to_pw()
+                fns += pw.save()
                 frb = proj.to_frb((1.0,'unitary'), 64)
                 for proj_field in ['Ones', 'Density']:
                     yield assert_equal, frb[proj_field].info['data_source'], \
@@ -50,6 +58,7 @@
                             proj.center
                     yield assert_equal, frb[proj_field].info['weight_field'], \
                             wf
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
             v1 = proj["Density"].sum()


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -1,9 +1,14 @@
 from yt.testing import *
+import os
 
 def setup():
     from yt.config import ytcfg
     ytcfg["yt","__withintesting"] = "True"
 
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
 def test_slice():
     for nprocs in [8, 1]:
         # We want to test both 1 proc and 8 procs, to make sure that
@@ -21,6 +26,7 @@
             xax = x_dict[ax]
             yax = y_dict[ax]
             for wf in ["Density", None]:
+                fns = []
                 slc = pf.h.slice(ax, slc_pos, ["Ones", "Density"])
                 yield assert_equal, slc["Ones"].sum(), slc["Ones"].size
                 yield assert_equal, slc["Ones"].min(), 1.0
@@ -29,6 +35,8 @@
                 yield assert_equal, np.unique(slc["py"]), uc[yax]
                 yield assert_equal, np.unique(slc["pdx"]), 1.0/(dims[xax]*2.0)
                 yield assert_equal, np.unique(slc["pdy"]), 1.0/(dims[yax]*2.0)
+                pw = slc.to_pw()
+                fns += pw.save()
                 frb = slc.to_frb((1.0,'unitary'), 64)
                 for slc_field in ['Ones', 'Density']:
                     yield assert_equal, frb[slc_field].info['data_source'], \
@@ -49,7 +57,7 @@
                             slc.center
                     yield assert_equal, frb[slc_field].info['coord'], \
                             slc_pos
+                teardown_func(fns)
             # wf == None
             yield assert_equal, wf, None
 
-


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -78,7 +78,7 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs, parallel = True):
+    def __init__(self, outputs, parallel = True, **kwargs):
         r"""The TimeSeriesData object is a container of multiple datasets,
         allowing easy iteration and computation on them.
 
@@ -107,12 +107,13 @@
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
         self.parallel = parallel
+        self.kwargs = kwargs
 
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, types.StringTypes):
-                yield load(o)
+                yield load(o,**self.kwargs)
             else:
                 yield o
 
@@ -124,7 +125,7 @@
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, types.StringTypes):
-            o = load(o)
+            o = load(o,**self.kwargs)
         return o
 
     def __len__(self):
@@ -172,12 +173,12 @@
         This demonstrates how one might store results:
 
         >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
-        >>> storage = {}
-        >>> for sto, pf in ts.piter():
+        >>> my_storage = {}
+        >>> for sto, pf in ts.piter(storage=my_storage):
         ...     v, c = pf.h.find_max("Density")
         ...     sto.result = (v, c)
         ...
-        >>> for i, (v, c) in sorted(storage.items()):
+        >>> for i, (v, c) in sorted(my_storage.items()):
         ...     print "% 4i  %0.3e" % (i, v)
         ...
 
@@ -223,7 +224,7 @@
         return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filenames, parallel = True):
+    def from_filenames(cls, filenames, parallel = True, **kwargs):
         r"""Create a time series from either a filename pattern or a list of
         filenames.
 
@@ -257,13 +258,13 @@
         ...     SlicePlot(pf, "x", "Density").save()
 
         """
+        
         if isinstance(filenames, types.StringTypes):
-            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
-            if len(filenames) == 0:
-                raise YTNoFilenamesMatchPattern(pattern)
-        obj = cls(filenames[:], parallel = parallel)
+        if len(filenames) == 0:
+            raise YTOutputNotIdentified(filenames, {})
+        obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 
     @classmethod


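The **kwargs plumbing added to TimeSeriesData above forwards any extra keyword
arguments to load() for every output in the series. A sketch of the intent; the
keyword shown is hypothetical and stands in for whatever argument a given
frontend's load() actually accepts:

    from yt.mods import *
    ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy",
                                       some_frontend_option = True)  # hypothetical kwarg
    for pf in ts:
        # Each pf was constructed as load(fn, some_frontend_option = True).
        v, c = pf.h.find_max("Density")
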
diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -131,6 +131,13 @@
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
 
+def _Zeros(field, data):
+    return np.zeros(data.ActiveDimensions, dtype='float64')
+add_field("Zeros", function=_Zeros,
+          validators=[ValidateSpatial(0)],
+          projection_conversion="unitary",
+          display_field = False)
+
 def _Ones(field, data):
     return np.ones(data.ActiveDimensions, dtype='float64')
 add_field("Ones", function=_Ones,
@@ -499,11 +506,11 @@
 def _CellVolume(field, data):
     if data['dx'].size == 1:
         try:
-            return data['dx']*data['dy']*data['dx']*\
+            return data['dx'] * data['dy'] * data['dz'] * \
                 np.ones(data.ActiveDimensions, dtype='float64')
         except AttributeError:
-            return data['dx']*data['dy']*data['dx']
-    return data["dx"]*data["dy"]*data["dz"]
+            return data['dx'] * data['dy'] * data['dz']
+    return data["dx"] * data["dy"] * data["dz"]
 def _ConvertCellVolumeMpc(data):
     return data.convert("mpc")**3.0
 def _ConvertCellVolumeCGS(data):


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -18,17 +20,16 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -42,64 +43,65 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index =start_index 
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +111,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -124,381 +127,272 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
-        
+
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches maybe mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
-            self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
-            self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_position,self.pf.particle_velocity= \
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float64')
+            self.pf.particle_star_index = len(wspecies)-1
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
+                if i == self.pf.particle_star_index:
+                    assert m==0.0
+                    sa,sb = a,b
                 else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                    assert m>0.0
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m #mass in code units
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if nstars_rs != np.sum(inconsistent):
+                    mylog.info('WARNING: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        if getattr(self.pf,psf,None) is None:
+                            setattr(self.pf,psf,
+                                    np.zeros(nparticles,dtype='float64'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,53 +401,58 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
+                g.particle_index = np.arange(g.NumberOfParticles)
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
@@ -580,97 +479,67 @@
     _hierarchy_class = ARTHierarchy
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
-    _handle = None
     
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 use_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
         self.storage_filename = storage_filename
-        
-        
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical units based 
+        on the parameters from the header
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
         aexpn = self["aexpn"]
@@ -679,269 +548,130 @@
             self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        cf = defaultdict(lambda: 1.0)
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        v0 = 50.0*r0*np.sqrt(Om0)
+        t0 = r0/v0
+        #rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        rho0 = 2.776e11 * hubble**2.0 * Om0
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        cf['r0']=r0
+        cf['P0']=P0
+        cf['T_0']=T_0
+        cf['S_0']=S_0
+        cf['v0']=v0
+        cf['t0']=t0
+        cf['rho0']=rho0
+        cf['tr']=tr
+        cf['aM0']=aM0
+
+        #factors to multiply the native code units to CGS
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
         
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
         for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
+            self.conversion_factors["%s-velocity" % ax] = 1.0
+            self.conversion_factors["particle_velocity_%s"%ax] = cf['Velocity']
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
+        self.conversion_factors['particle_mass'] = cf['Mass']
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
 
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
     def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
-        header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = np.diff(lspecies)[:n-1]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *ls_nonzero)
+            for k,v in particle_header_vals.items():
+                if k in self.parameters.keys():
+                    if not self.parameters[k] == v:
+                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
+                                   self.parameters[k])
+                else:
+                    self.parameters[k]=v
+            self.parameters_particles = particle_header_vals
+    
+        #set up the standard simulation parameters yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         """
-        Defined for Daniel Ceverino's file naming scheme.
+        Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
         fn = ("%s" % (os.path.basename(args[0])))
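
The root grid dimensions in _parse_parameter_file above are inferred from the total cell
count by taking a cube root. A minimal standalone sketch of that estimate, with a made-up
ncell value, looks like:

    import numpy as np

    # Hypothetical cell count read from the AMR header of a 128^3 root grid.
    ncell = 128 ** 3

    # np.rint guards against floating-point error in the cube root
    # (e.g. 127.99999...) before the integer cast.
    est = int(np.rint(ncell ** (1.0 / 3.0)))
    domain_dimensions = np.ones(3, dtype='int64') * est   # -> [128 128 128]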


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody at ucsc.ed>
+Author: Christopher E. Moody <cemoody at ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,19 +25,128 @@
 
 """
 
-art_particle_field_names = [
-'particle_age',
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type']
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
+]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
+filename_pattern = {
+    'amr':'10MpcBox_csf512_%s.d',
+    'particle_header':'PMcrd%s.DAT',
+    'particle_data':'PMcrs0%s.DAT',
+    'particle_stars':'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137
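
The (format, name) pairs in amr_header_struct and particle_header_struct are meant to be
read one field at a time with struct.unpack, as the old in-line parser in
data_structures.py did. A minimal standalone reader for such a list -- a sketch of what the
frontend's _read_struct helper is assumed to do, with the file object already positioned at
the header -- is:

    import struct

    def read_struct(f, struct_list):
        """Read consecutive big-endian fields described by (format, name) pairs."""
        vals = {}
        for fmt, name in struct_list:
            size = struct.calcsize(fmt)
            # Each format unpacks to a single value, so index the tuple at 0.
            vals[name] = struct.unpack(fmt, f.read(size))[0]
        return vals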


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\mathrm{M_{sun}}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contaiend mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
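
The rewritten _temperature field works in code units: it divides the converted GasEnergy
and Density by their conversion factors, applies the temperature factor tr, and zeroes the
NaNs produced by zero-density (ghost) cells. A standalone numeric sketch of that sequence,
with made-up conversion factors, is:

    import numpy as np

    # Hypothetical conversion factors and CGS field values.
    cf_gas_energy, cf_density, tr_factor = 2.5e-12, 1.7e-24, 3.0e5
    gas_energy = np.array([5.0e-12, 0.0])
    density    = np.array([3.4e-24, 0.0])      # second cell is a zero-density ghost cell

    dg = gas_energy / cf_gas_energy            # back to code units
    dd = density / cf_density
    temperature = dg / dd * tr_factor          # Kelvin
    temperature[np.isnan(temperature)] = 0.0   # 0/0 in the ghost cell gives NaN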


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -121,45 +121,19 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -198,9 +172,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +206,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +257,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +278,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +292,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +305,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -346,7 +325,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refinement info is available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk
@@ -476,3 +455,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd
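
spread_ages assumes the input ages are sorted in decreasing order (np.diff(ages) <= 0) and
replaces each lump of identical ages with values spread linearly over a window of width
`spread` (1e7 yr by default). A quick usage sketch with a fabricated age array, assuming
the yt from this changeset is importable:

    import numpy as np
    from yt.frontends.art.io import spread_ages

    # Three lumps of stars that share the exact same recorded formation time.
    ages = np.array([5.0e16] * 4 + [4.0e16] * 3 + [3.0e16] * 5)

    smoothed = spread_ages(ages)
    assert smoothed.shape == ages.shape
    # The lumped ages are redistributed linearly instead of piling up as
    # delta functions in the star formation history.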


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -30,6 +30,7 @@
 import h5py
 import numpy as np
 import weakref
+import glob #ST 9/12
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
            AMRGridPatch
@@ -112,6 +113,8 @@
         grid['read_field'] = field
         grid['read_type'] = 'vector'
 
+
+
 class AthenaHierarchy(AMRHierarchy):
 
     grid = AthenaGrid
@@ -215,34 +218,94 @@
                   (np.prod(grid['dimensions']), grid['ncells']))
             raise TypeError
 
+        # Need to determine how many grids: self.num_grids
+        dname = self.hierarchy_filename
+        gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
+        gridlistread.insert(0,self.hierarchy_filename)
+        self.num_grids = len(gridlistread)
         dxs=[]
         self.grids = np.empty(self.num_grids, dtype='object')
         levels = np.zeros(self.num_grids, dtype='int32')
-        single_grid_width = grid['dds']*grid['dimensions']
-        grids_per_dim = (self.parameter_file.domain_width/single_grid_width).astype('int32')
-        glis = np.empty((self.num_grids,3), dtype='int64')
-        for i in range(self.num_grids):
-            procz = i/(grids_per_dim[0]*grids_per_dim[1])
-            procy = (i - procz*(grids_per_dim[0]*grids_per_dim[1]))/grids_per_dim[0]
-            procx = i - procz*(grids_per_dim[0]*grids_per_dim[1]) - procy*grids_per_dim[0]
-            glis[i, 0] = procx*grid['dimensions'][0]
-            glis[i, 1] = procy*grid['dimensions'][1]
-            glis[i, 2] = procz*grid['dimensions'][2]
+        glis = np.empty((self.num_grids,3), dtype='float64')
+        gdds = np.empty((self.num_grids,3), dtype='float64')
         gdims = np.ones_like(glis)
-        gdims[:] = grid['dimensions']
+        j = 0
+        while j < (self.num_grids):
+            f = open(gridlistread[j],'rb')
+            f.close()
+            if j == 0:
+                f = open(dname,'rb')
+            if j != 0:
+                f = open('id%i/%s-id%i%s' % (j, dname[4:-9],j, dname[-9:]),'rb')
+            gridread = {}
+            gridread['read_field'] = None
+            gridread['read_type'] = None
+            table_read=False
+            line = f.readline()
+            while gridread['read_field'] is None:
+                parse_line(line, gridread)
+                if "SCALAR" in line.strip().split():
+                    break
+                if "VECTOR" in line.strip().split():
+                    break 
+                if 'TABLE' in line.strip().split():
+                    break
+                if len(line) == 0: break
+                line = f.readline()
+            f.close()
+            glis[j,0] = gridread['left_edge'][0]
+            glis[j,1] = gridread['left_edge'][1]
+            glis[j,2] = gridread['left_edge'][2]
+            # It seems some datasets have a mismatch between ncells and 
+            # the actual grid dimensions.
+            if np.prod(gridread['dimensions']) != gridread['ncells']:
+                gridread['dimensions'] -= 1
+                gridread['dimensions'][gridread['dimensions']==0]=1
+            if np.prod(gridread['dimensions']) != gridread['ncells']:
+                mylog.error('product of dimensions %i not equal to number of cells %i' % 
+                      (np.prod(gridread['dimensions']), gridread['ncells']))
+                raise TypeError
+            gdims[j,0] = gridread['dimensions'][0]
+            gdims[j,1] = gridread['dimensions'][1]
+            gdims[j,2] = gridread['dimensions'][2]
+            # Setting dds=1 for non-active dimensions in 1D/2D datasets
+            gridread['dds'][gridread['dimensions']==1] = 1.
+            gdds[j,:] = gridread['dds']
+            
+            j=j+1
+
+        gres = glis + gdims*gdds
+        # Now we convert the glis, which were left edges (floats), to indices 
+        # from the domain left edge.  Then we do a bunch of fixing now that we
+        # know the extent of all the grids. 
+        glis = np.round((glis - self.parameter_file.domain_left_edge)/gdds).astype('int')
+        new_dre = np.max(gres,axis=0)
+        self.parameter_file.domain_right_edge = np.round(new_dre, decimals=6)
+        self.parameter_file.domain_width = \
+                (self.parameter_file.domain_right_edge - 
+                 self.parameter_file.domain_left_edge)
+        self.parameter_file.domain_center = \
+                0.5*(self.parameter_file.domain_left_edge + 
+                     self.parameter_file.domain_right_edge)
+        self.parameter_file.domain_dimensions = \
+                np.round(self.parameter_file.domain_width/gdds[0]).astype('int')
+        if self.parameter_file.dimensionality <= 2 :
+            self.parameter_file.domain_dimensions[2] = np.int(1)
+        if self.parameter_file.dimensionality == 1 :
+            self.parameter_file.domain_dimensions[1] = np.int(1)
         for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i, self, levels[i],
+            self.grids[i] = self.grid(i,self,levels[i],
                                       glis[i],
                                       gdims[i])
-
             dx = (self.parameter_file.domain_right_edge-
                   self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
             dx = dx/self.parameter_file.refine_by**(levels[i])
-            dxs.append(grid['dds'])
+            dxs.append(dx)
+        
         dx = np.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
         self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
         self.grid_particle_count = np.zeros([self.num_grids, 1], dtype='int64')
 
     def _populate_grid_objects(self):
@@ -256,9 +319,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-#     def _setup_derived_fields(self):
-#         self.derived_field_list = []
-
     def _get_grid_children(self, grid):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
@@ -277,6 +337,11 @@
         StaticOutput.__init__(self, filename, data_style)
         self.filename = filename
         self.storage_filename = filename[4:-4]
+        
+        # Unfortunately we now have to mandate that the hierarchy gets 
+        # instantiated so that we can make sure we have the correct left 
+        # and right domain edges.
+        self.h
 
     def _set_units(self):
         """
@@ -315,18 +380,22 @@
             line = self._handle.readline()
 
         self.domain_left_edge = grid['left_edge']
-        if 'domain_right_edge' in self.specified_parameters:
-            self.domain_right_edge = np.array(self.specified_parameters['domain_right_edge'])
-        else:
-            mylog.info("Please set 'domain_right_edge' in parameters dictionary argument " +
-                    "if it is not equal to -domain_left_edge.")
-            self.domain_right_edge = -self.domain_left_edge
+        mylog.info("Temporarily setting domain_right_edge = -domain_left_edge."+
+                  " This will be corrected automatically if it is not the case.")
+        self.domain_right_edge = -self.domain_left_edge
         self.domain_width = self.domain_right_edge-self.domain_left_edge
-        self.domain_dimensions = self.domain_width/grid['dds']
+        self.domain_dimensions = np.round(self.domain_width/grid['dds']).astype('int32')
         refine_by = None
         if refine_by is None: refine_by = 2
         self.refine_by = refine_by
-        self.dimensionality = 3
+        dimensionality = 3
+        if grid['dimensions'][2] == 1 :
+            dimensionality = 2
+        if grid['dimensions'][1] == 1 :
+            dimensionality = 1
+        if dimensionality <= 2 : self.domain_dimensions[2] = np.int32(1)
+        if dimensionality == 1 : self.domain_dimensions[1] = np.int32(1)
+        self.dimensionality = dimensionality
         self.current_time = grid["time"]
         self.unique_identifier = self._handle.__hash__()
         self.cosmological_simulation = False
@@ -334,7 +403,9 @@
         self.field_ordering = 'fortran'
         self.boundary_conditions = [1]*6
 
-        self.nvtk = int(np.product(self.domain_dimensions/(grid['dimensions']-1)))
+        dname = self.parameter_filename
+        gridlistread = glob.glob('id*/%s-id*%s' % (dname[4:-9],dname[-9:] ))
+        self.nvtk = len(gridlistread)+1 
 
         self.current_redshift = self.omega_lambda = self.omega_matter = \
             self.hubble_constant = self.cosmological_simulation = 0.0
@@ -342,6 +413,7 @@
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
         self._handle.close()
 
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
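
In the athena hierarchy rewrite above, each grid's left edge is read as a physical
coordinate and converted to an integer index relative to the domain left edge by dividing
by that grid's cell spacing and rounding. A standalone sketch of the conversion, with
made-up numbers:

    import numpy as np

    domain_left_edge = np.array([-0.5, -0.5, -0.5])
    glis = np.array([[-0.5, -0.5, -0.5],              # grid left edges (physical, floats)
                     [ 0.0, -0.5, -0.5]])
    gdds = np.array([[1.0 / 64, 1.0 / 64, 1.0 / 64],  # per-grid cell spacings
                     [1.0 / 64, 1.0 / 64, 1.0 / 64]])

    # Rounding absorbs floating-point noise before the integer cast.
    indices = np.round((glis - domain_left_edge) / gdds).astype('int')
    # -> [[ 0  0  0]
    #     [32  0  0]]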


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -26,6 +26,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -36,6 +37,8 @@
     ValidateGridType, \
     NullFunc, \
     TranslationFunc
+from yt.utilities.physical_constants import \
+    kboltz,mh
 import yt.data_objects.universal_fields
 
 log_translation_dict = {}
@@ -44,10 +47,7 @@
                     "Pressure": "pressure",
                     "x-velocity": "velocity_x",
                     "y-velocity": "velocity_y",
-                    "z-velocity": "velocity_z",
-                    "mag_field_x": "cell_centered_B_x ",
-                    "mag_field_y": "cell_centered_B_y ",
-                    "mag_field_z": "cell_centered_B_z "}
+                    "z-velocity": "velocity_z"}
 
 AthenaFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = AthenaFieldInfo.add_field
@@ -72,13 +72,13 @@
           units=r"")
 
 add_athena_field("cell_centered_B_x", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_x}$")
 
 add_athena_field("cell_centered_B_y", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_y}$")
 
 add_athena_field("cell_centered_B_z", function=NullFunc, take_log=False,
-          units=r"")
+          units=r"", display_name=r"$\rm{cell\ centered\ B_z}$")
 
 for f,v in log_translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=True)
@@ -86,3 +86,30 @@
 for f,v in translation_dict.items():
     add_field(f, TranslationFunc(v), take_log=False)
 
+def _Temperature(fields, data):
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return mu*mh*data["Pressure"]/data["Density"]/kboltz
+add_field("Temperature", function=_Temperature, take_log=False,
+          units=r"\rm{K}")
+
+def _Bx(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_x']*factor
+add_field("Bx", function=_Bx, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_x")
+
+def _By(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_y']*factor
+add_field("By", function=_By, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_y")
+
+def _Bz(fields, data):
+    factor = np.sqrt(4.*np.pi)
+    return data['cell_centered_B_z']*factor
+add_field("Bz", function=_Bz, take_log=False,
+          units=r"\rm{Gauss}", display_name=r"B_z")
+
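
The new Bx/By/Bz fields convert Athena's code-unit magnetic field to Gauss by multiplying
by sqrt(4*pi), which is consistent with the code magnetic pressure B**2/2 mapping onto the
Gaussian B**2/(8*pi). A quick numeric check with made-up values:

    import numpy as np

    b_code = np.array([1.0e-3, 2.5e-3])          # hypothetical cell_centered_B_x values
    b_gauss = b_code * np.sqrt(4.0 * np.pi)      # what the Bx field returns
    # Magnetic pressure agrees between the two conventions:
    assert np.allclose(b_code**2 / 2.0, b_gauss**2 / (8.0 * np.pi))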


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -45,10 +45,15 @@
 
     def _read_data_set(self,grid,field):
         f = file(grid.filename, 'rb')
-        dtype, offset = grid.hierarchy._field_map[field]
+        dtype, offsetr = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
         grid_dims = grid.ActiveDimensions
+        grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
         read_table_offset = get_read_table_offset(f)
+        if grid_ncells != grid0_ncells:
+            offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+        if grid_ncells == grid0_ncells:
+            offset = offsetr
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
             data = np.fromfile(f, dtype='>f4',
@@ -74,10 +79,15 @@
             sl.reverse()
 
         f = file(grid.filename, 'rb')
-        dtype, offset = grid.hierarchy._field_map[field]
+        dtype, offsetr = grid.hierarchy._field_map[field]
         grid_ncells = np.prod(grid.ActiveDimensions)
-
+        grid_dims = grid.ActiveDimensions
+        grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
         read_table_offset = get_read_table_offset(f)
+        if grid_ncells != grid0_ncells:
+            offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+        if grid_ncells == grid0_ncells:
+            offset = offsetr
         f.seek(read_table_offset+offset)
         if dtype == 'scalar':
             data = np.fromfile(f, dtype='>f4', 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -35,7 +35,8 @@
      GridValuesTest, \
      ProjectionValuesTest, \
      ParentageRelationshipsTest, \
-     temp_cwd
+     temp_cwd, \
+     AssertWrapper
 
 def requires_outputlog(path = ".", prefix = ""):
     def ffalse(func):
@@ -59,19 +60,20 @@
 def standard_small_simulation(pf_fn, fields):
     if not can_run_pf(pf_fn): return
     dso = [None]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
+    tolerance = ytcfg.getint("yt", "answer_testing_tolerance")
+    bitwise = ytcfg.getboolean("yt", "answer_testing_bitwise")
     for field in fields:
-        yield GridValuesTest(pf_fn, field)
+        if bitwise:
+            yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
-        for axis in [0, 1, 2]:
-            for ds in dso:
+        for ds in dso:
+            for axis in [0, 1, 2]:
                 for weight_field in [None, "Density"]:
                     yield ProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
-                        ds)
-                yield FieldValuesTest(
-                        pf_fn, field, ds)
+                        ds, decimals=tolerance)
+            yield FieldValuesTest(
+                    pf_fn, field, ds, decimals=tolerance)
                     
 class ShockTubeTest(object):
     def __init__(self, data_file, solution_file, fields, 
@@ -96,9 +98,10 @@
             for xmin, xmax in zip(self.left_edges, self.right_edges):
                 mask = (position >= xmin)*(position <= xmax)
                 exact_field = np.interp(position[mask], exact['pos'], exact[k]) 
+                myname = "ShockTubeTest_%s" % k
                 # yield test vs analytical solution 
-                yield assert_allclose, field[mask], exact_field, \
-                    self.rtol, self.atol
+                yield AssertWrapper(myname, assert_allclose, field[mask], 
+                                    exact_field, self.rtol, self.atol)
 
     def get_analytical_solution(self):
         # Reads in from file 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -221,6 +221,27 @@
           function=_NumberDensity,
           convert_function=_ConvertNumberDensity)
 
+def _H_NumberDensity(field, data):
+    field_data = np.zeros(data["Density"].shape,
+                          dtype=data["Density"].dtype)
+    if data.pf.parameters["MultiSpecies"] == 0:
+        field_data += data["Density"] * \
+          data.pf.parameters["HydrogenFractionByMass"]
+    if data.pf.parameters["MultiSpecies"] > 0:
+        field_data += data["HI_Density"]
+        field_data += data["HII_Density"]
+    if data.pf.parameters["MultiSpecies"] > 1:
+        field_data += data["HM_Density"]
+        field_data += data["H2I_Density"]
+        field_data += data["H2II_Density"]
+    if data.pf.parameters["MultiSpecies"] > 2:
+        field_data += data["HDI_Density"] / 2.0
+    return field_data
+add_field("H_NumberDensity", units=r"\rm{cm}^{-3}",
+          function=_H_NumberDensity,
+          convert_function=_ConvertNumberDensity)
+
+
 # Now we add all the fields that we want to control, but we give a null function
 # This is every Enzo field we can think of.  This will be installation-dependent,
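
The new H_NumberDensity field accumulates hydrogen-bearing species densities according to
the MultiSpecies level before _ConvertNumberDensity turns the summed mass density into a
number density. A sketch of the sum for a hypothetical MultiSpecies == 2 run, with
fabricated uniform densities:

    import numpy as np

    def h_mass_density_multispecies2(data):
        # MultiSpecies > 0 atomic species plus the MultiSpecies > 1 molecular species;
        # MultiSpecies > 2 would also add data["HDI_Density"] / 2.0.
        return (data["HI_Density"] + data["HII_Density"]
                + data["HM_Density"] + data["H2I_Density"] + data["H2II_Density"])

    data = {name: np.full((4, 4, 4), val) for name, val in [
        ("HI_Density", 1.0e-25), ("HII_Density", 2.0e-26), ("HM_Density", 1.0e-30),
        ("H2I_Density", 5.0e-27), ("H2II_Density", 1.0e-31)]}
    rho_H = h_mass_density_multispecies2(data)   # g/cm^3; conversion to cm^-3 happens later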
 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -181,8 +181,10 @@
         mylog.debug("Finished read of %s", sets)
 
     def _read_data_set(self, grid, field):
-        return self.modify(hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field)))
+        tr = hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field))
+        if tr.dtype == "float32": tr = tr.astype("float64")
+        return self.modify(tr)
 
     def _read_data_slice(self, grid, field, axis, coord):
         axis = _axis_ids[axis]


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/enzo/simulation_handling.py
--- a/yt/frontends/enzo/simulation_handling.py
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -37,7 +37,7 @@
 from yt.utilities.definitions import \
     sec_conversion
 from yt.utilities.exceptions import \
-    AmbiguousOutputs, \
+    InvalidSimulationTimeSeries, \
     MissingParameter, \
     NoStoppingCondition
 
@@ -183,8 +183,7 @@
         if (initial_redshift is not None or \
             final_redshift is not None) and \
             not self.cosmological_simulation:
-            mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
-            return
+            raise InvalidSimulationTimeSeries('An initial or final redshift has been given for a noncosmological simulation.')
 
         if time_data and redshift_data:
             my_all_outputs = self.all_outputs
@@ -193,7 +192,11 @@
         elif redshift_data:
             my_all_outputs = self.all_redshift_outputs
         else:
-            mylog.error('Both time_data and redshift_data are False.')
+            raise InvalidSimulationTimeSeries('Both time_data and redshift_data are False.')
+
+        if not my_all_outputs:
+            TimeSeriesData.__init__(self, outputs=[], parallel=parallel)
+            mylog.info("%d outputs loaded into time series." % 0)
             return
 
         # Apply selection criteria to the set.
@@ -215,6 +218,7 @@
                 final_cycle = self.parameters['StopCycle']
             else:
                 final_cycle = min(final_cycle, self.parameters['StopCycle'])
+
             my_outputs = my_all_outputs[int(ceil(float(initial_cycle) /
                                                  self.parameters['CycleSkipDataDump'])):
                                         (final_cycle /  self.parameters['CycleSkipDataDump'])+1]
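
A short sketch of the new error behavior, assuming the simulation() convenience wrapper and a hypothetical parameter file:

    from yt.mods import simulation
    from yt.utilities.exceptions import InvalidSimulationTimeSeries

    es = simulation("my_simulation.par", "Enzo")   # hypothetical parameter file
    try:
        # Redshift bounds on a non-cosmological run now raise an exception
        # instead of silently returning nothing.
        es.get_time_series(initial_redshift=10.0, final_redshift=0.0)
    except InvalidSimulationTimeSeries as e:
        print e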


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -69,7 +69,8 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._handle = pf._handle
-
+        self._particle_handle = pf._particle_handle
+        
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
@@ -79,9 +80,9 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        if ("/particle names" in self._handle) :
+        if ("/particle names" in self._particle_handle) :
             self.field_list += ["particle_" + s[0].strip() for s
-                                in self._handle["/particle names"][:]]
+                                in self._particle_handle["/particle names"][:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -98,6 +99,7 @@
     def _parse_hierarchy(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
+        f_part = self._particle_handle # shortcut
         
         # Initialize to the domain left / domain right
         ND = self.parameter_file.dimensionality
@@ -120,12 +122,15 @@
                               for ax in 'xyz']
         self.grid_dimensions[:] *= (nxb, nyb, nzb)
         try:
-            self.grid_particle_count[:] = f["/localnp"][:][:,None]
+            self.grid_particle_count[:] = f_part["/localnp"][:][:,None]
         except KeyError:
             self.grid_particle_count[:] = 0.0
         self._particle_indices = np.zeros(self.num_grids + 1, dtype='int64')
-        np.add.accumulate(self.grid_particle_count.squeeze(),
-                          out=self._particle_indices[1:])
+        if self.num_grids > 1 :
+            np.add.accumulate(self.grid_particle_count.squeeze(),
+                              out=self._particle_indices[1:])
+        else :
+            self._particle_indices[1] = self.grid_particle_count.squeeze()
         # This will become redundant, as _prepare_grid will reset it to its
         # current value.  Note that FLASH uses 1-based indexing for refinement
         # levels, but we do not, so we reduce the level by 1.
@@ -209,6 +214,7 @@
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
+                 particle_filename = None, 
                  conversion_override = None):
 
         if self._handle is not None: return
@@ -216,6 +222,16 @@
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
+        self.particle_filename = particle_filename
+
+        if self.particle_filename is None :
+            self._particle_handle = self._handle
+        else :
+            try :
+                self._particle_handle = h5py.File(self.particle_filename, "r")
+            except :
+                raise IOError(self.particle_filename)
+                                                                
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
@@ -274,6 +290,7 @@
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            self.units[unit+"cm"] = self.units[unit]
             self.units[unit] /= (1.0+self.current_redshift)
             
     def _setup_cgs_units(self):
@@ -396,9 +413,19 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
-        nblockx = self.parameters["nblockx"]
-        nblocky = self.parameters["nblocky"]
-        nblockz = self.parameters["nblockz"]
+        if 'lrefine_min' in self.parameters.keys() : # PARAMESH
+            nblockx = self.parameters["nblockx"]
+            nblocky = self.parameters["nblocky"]
+            nblockz = self.parameters["nblockz"]
+        else : # Uniform Grid
+            nblockx = self.parameters["iprocs"]
+            nblocky = self.parameters["jprocs"]
+            nblockz = self.parameters["kprocs"]
+
+        # In case the user wasn't careful
+        if dimensionality <= 2 : nblockz = 1
+        if dimensionality == 1 : nblocky = 1
+
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
@@ -423,6 +450,8 @@
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
     def __del__(self):
+        if self._handle is not self._particle_handle:
+            self._particle_handle.close()
         self._handle.close()
 
     @classmethod
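
A minimal sketch of the new particle_filename keyword; the file names are hypothetical, and if the keyword is omitted particles are read from the plot file itself:

    from yt.mods import load

    pf = load("sloshing_hdf5_plt_cnt_0100",
              particle_filename="sloshing_hdf5_part_0100")
    print pf.h.grid_particle_count.sum()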


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -36,7 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    kboltz
+    kboltz, mh
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -266,12 +266,12 @@
 def GetMagRescalingFactor(pf):
     if pf['unitsystem'].lower() == "cgs":
          factor = 1
-    if pf['unitsystem'].lower() == "si":
+    elif pf['unitsystem'].lower() == "si":
          factor = np.sqrt(4*np.pi/1e7)
-    if pf['unitsystem'].lower() == "none":
+    elif pf['unitsystem'].lower() == "none":
          factor = np.sqrt(4*np.pi)
     else:
-        raise RuntimeError("Runtime parameter unitsystem with"
+        raise RuntimeError("Runtime parameter unitsystem with "
                            "value %s is unrecognized" % pf['unitsystem'])
     return factor
 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -39,9 +39,11 @@
         # Now we cache the particle fields
         self.pf = pf
         self._handle = pf._handle
+        self._particle_handle = pf._particle_handle
+        
         try :
             particle_fields = [s[0].strip() for s in
-                               self._handle["/particle names"][:]]
+                               self._particle_handle["/particle names"][:]]
             self._particle_fields = dict([("particle_" + s, i) for i, s in
                                           enumerate(particle_fields)])
         except KeyError:
@@ -53,12 +55,13 @@
 
     def _read_data_set(self, grid, field):
         f = self._handle
+        f_part = self._particle_handle
         if field in self._particle_fields:
             if grid.NumberOfParticles == 0: return np.array([], dtype='float64')
             start = self.pf.h._particle_indices[grid.id - grid._id_offset]
             end = self.pf.h._particle_indices[grid.id - grid._id_offset + 1]
             fi = self._particle_fields[field]
-            tr = f["/tracer particles"][start:end, fi]
+            tr = f_part["/tracer particles"][start:end, fi]
         else:
             tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
         return tr.astype("float64")


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -248,6 +248,8 @@
             fileh = h5py.File(args[0],'r')
             if "gridded_data_format" in fileh:
                 return True
+                fileh.close()
+            fileh.close()
         except:
             pass
         return False


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -127,7 +127,7 @@
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
 add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
+                units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
     """M{(Gamma-1.0)*e, where e is thermal energy density


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -105,7 +105,7 @@
 class StreamHandler(object):
     def __init__(self, left_edges, right_edges, dimensions,
                  levels, parent_ids, particle_count, processor_ids,
-                 fields, io = None):
+                 fields, io = None, particle_types = {}):
         self.left_edges = left_edges
         self.right_edges = right_edges
         self.dimensions = dimensions
@@ -116,10 +116,18 @@
         self.num_grids = self.levels.size
         self.fields = fields
         self.io = io
-
+        self.particle_types = particle_types
+            
     def get_fields(self):
         return self.fields.all_fields
 
+    def get_particle_type(self, field) :
+
+        if self.particle_types.has_key(field) :
+            return self.particle_types[field]
+        else :
+            return False
+        
 class StreamHierarchy(AMRHierarchy):
 
     grid = StreamGrid
@@ -150,11 +158,12 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
+            ptype = self.stream_handler.get_particle_type(field)
             # Note that we call add_field on the field_info directly.  This
             # will allow the same field detection mechanism to work for 1D, 2D
             # and 3D fields.
             self.pf.field_info.add_field(
-                    field, lambda a, b: None,
+                    field, lambda a, b: None, particle_type = ptype,
                     convert_function=cf, take_log=False)
 
     def _parse_hierarchy(self):
@@ -249,6 +258,34 @@
         else:
             self.io = io_registry[self.data_style](self.stream_handler)
 
+    def update_data(self, data) :
+
+        """
+        Update the stream data with a new data dict. If fields already exist,
+        they will be replaced, but if they do not, they will be added. Fields
+        already in the stream but not part of the data dict will be left
+        alone. 
+        """
+        
+        particle_types = set_particle_types(data[0])
+
+        for key in data[0].keys() :
+            if key == "number_of_particles": continue
+            self.stream_handler.particle_types[key] = particle_types[key]
+            if key not in self.field_list:
+                self.field_list.append(key)
+                
+        self._setup_unknown_fields()
+
+        for i, grid in enumerate(self.grids) :
+            if data[i].has_key("number_of_particles") :
+                grid.NumberOfParticles = data[i].pop("number_of_particles")
+            for key in data[i].keys() :
+                if key in grid.keys() : grid.field_data.pop(key, None)
+                self.stream_handler.fields[grid.id][key] = data[i][key]
+            
+        self._detect_fields()
+                
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_fallback = StreamFieldInfo
@@ -299,8 +336,68 @@
     @property
     def all_fields(self): return self[0].keys()
 
+def set_particle_types(data) :
+
+    particle_types = {}
+    
+    for key in data.keys() :
+
+        if key == "number_of_particles": continue
+        
+        if len(data[key].shape) == 1:
+            particle_types[key] = True
+        else :
+            particle_types[key] = False
+    
+    return particle_types
+
+def assign_particle_data(pf, pdata) :
+
+    """
+    Assign particle data to the grids using find_points. This
+    will overwrite any existing particle data, so be careful!
+    """
+    
+    if pf.h.num_grids > 1 :
+
+        try :
+            x = pdata["particle_position_x"]
+            y = pdata["particle_position_y"]
+            z = pdata["particle_position_z"]
+        except:
+            raise KeyError("Cannot decompose particle data without position fields!")
+        
+        particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        particle_grid_count = np.bincount(particle_grid_inds,
+                                          minlength=pf.h.num_grids)
+        particle_indices = np.zeros(pf.h.num_grids + 1, dtype='int64')
+        if pf.h.num_grids > 1 :
+            np.add.accumulate(particle_grid_count.squeeze(),
+                              out=particle_indices[1:])
+        else :
+            particle_indices[1] = particle_grid_count.squeeze()
+    
+        pdata.pop("number_of_particles")    
+        grid_pdata = []
+        
+        for i, pcount in enumerate(particle_grid_count) :
+            grid = {}
+            grid["number_of_particles"] = pcount
+            start = particle_indices[i]
+            end = particle_indices[i+1]
+            for key in pdata.keys() :
+                grid[key] = pdata[key][idxs][start:end]
+            grid_pdata.append(grid)
+
+    else :
+
+        grid_pdata = [pdata]
+        
+    pf.h.update_data(grid_pdata)
+                                        
 def load_uniform_grid(data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                      nprocs=1, sim_time=0.0, number_of_particles=0):
+                      nprocs=1, sim_time=0.0):
     r"""Load a uniform grid of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -312,6 +409,9 @@
           disappointing or non-existent in most cases.
         * Particles may be difficult to integrate.
 
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
     Parameters
     ----------
     data : dict
@@ -326,8 +426,6 @@
         If greater than 1, will create this number of subarrays out of data
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -347,14 +445,29 @@
     grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
 
     sfh = StreamDictFieldHandler()
-
+    
+    if data.has_key("number_of_particles") :
+        number_of_particles = data.pop("number_of_particles")
+    else :
+        number_of_particles = int(0)
+    
+    if number_of_particles > 0 :
+        particle_types = set_particle_types(data)
+        pdata = {}
+        pdata["number_of_particles"] = number_of_particles
+        for key in data.keys() :
+            if len(data[key].shape) == 1 :
+                pdata[key] = data.pop(key)
+    else :
+        particle_types = {}
+    
     if nprocs > 1:
         temp = {}
         new_data = {}
         for key in data.keys():
             psize = get_psize(np.array(data[key].shape), nprocs)
             grid_left_edges, grid_right_edges, temp[key] = \
-                decompose_array(data[key], psize, bbox)
+                             decompose_array(data[key], psize, bbox)
             grid_dimensions = np.array([grid.shape for grid in temp[key]],
                                        dtype="int32")
         for gid in range(nprocs):
@@ -375,9 +488,10 @@
         grid_dimensions,
         grid_levels,
         -np.ones(nprocs, dtype='int64'),
-        number_of_particles*np.ones(nprocs, dtype='int64').reshape(nprocs,1),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
         np.zeros(nprocs).reshape((nprocs,1)),
         sfh,
+        particle_types=particle_types
     )
 
     handler.name = "UniformGridData"
@@ -396,10 +510,16 @@
     box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
     for unit in mpc_conversion.keys():
         spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    # Now figure out where the particles go
+
+    if number_of_particles > 0 :
+        assign_particle_data(spf, pdata)
+    
     return spf
 
 def load_amr_grids(grid_data, domain_dimensions, sim_unit_to_cm, bbox=None,
-                   sim_time=0.0, number_of_particles=0):
+                   sim_time=0.0):
     r"""Load a set of grids of data into yt as a
     :class:`~yt.frontends.stream.data_structures.StreamHandler`.
 
@@ -418,8 +538,9 @@
     grid_data : list of dicts
         This is a list of dicts.  Each dict must have entries "left_edge",
         "right_edge", "dimensions", "level", and then any remaining entries are
-        assumed to be fields.  This will be modified in place and can't be
-        assumed to be static..
+        assumed to be fields.  They also may include a particle count, otherwise
+        assumed to be zero. This will be modified in place and can't be
+        assumed to be static.
     domain_dimensions : array_like
         This is the domain dimensions of the grid
     sim_unit_to_cm : float
@@ -428,8 +549,6 @@
         Size of computational domain in units sim_unit_to_cm
     sim_time : float, optional
         The simulation time in seconds
-    number_of_particles : int, optional
-        If particle fields are included, set this to the number of particles
 
     Examples
     --------
@@ -438,11 +557,13 @@
     ...     dict(left_edge = [0.0, 0.0, 0.0],
     ...          right_edge = [1.0, 1.0, 1.],
     ...          level = 0,
-    ...          dimensions = [32, 32, 32]),
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0),
     ...     dict(left_edge = [0.25, 0.25, 0.25],
     ...          right_edge = [0.75, 0.75, 0.75],
     ...          level = 1,
-    ...          dimensions = [32, 32, 32])
+    ...          dimensions = [32, 32, 32],
+    ...          number_of_particles = 0)
     ... ]
     ... 
     >>> for g in grid_data:
@@ -461,23 +582,27 @@
     grid_left_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_right_edges = np.zeros((ngrids, 3), dtype="float32")
     grid_dimensions = np.zeros((ngrids, 3), dtype="int32")
+    number_of_particles = np.zeros((ngrids,1), dtype='int64')
     sfh = StreamDictFieldHandler()
     for i, g in enumerate(grid_data):
         grid_left_edges[i,:] = g.pop("left_edge")
         grid_right_edges[i,:] = g.pop("right_edge")
         grid_dimensions[i,:] = g.pop("dimensions")
         grid_levels[i,:] = g.pop("level")
+        if g.has_key("number_of_particles") :
+            number_of_particles[i,:] = g.pop("number_of_particles")  
         sfh[i] = g
-
+            
     handler = StreamHandler(
         grid_left_edges,
         grid_right_edges,
         grid_dimensions,
         grid_levels,
         None, # parent_ids is none
-        number_of_particles*np.ones(ngrids, dtype='int64').reshape(ngrids,1),
+        number_of_particles,
         np.zeros(ngrids).reshape((ngrids,1)),
         sfh,
+        particle_types=set_particle_types(grid_data[0])
     )
 
     handler.name = "AMRGridData"
@@ -529,6 +654,20 @@
     >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
     >>> pf = refine_amr(ug, rc, fo, 5)
     """
+
+    # If we have particle data, set it aside for now
+
+    number_of_particles = np.sum([grid.NumberOfParticles
+                                  for grid in base_pf.h.grids])
+
+    if number_of_particles > 0 :
+        pdata = {}
+        for field in base_pf.h.field_list :
+            if base_pf.field_info[field].particle_type :
+                pdata[field] = np.concatenate([grid[field]
+                                               for grid in base_pf.h.grids])
+        pdata["number_of_particles"] = number_of_particles
+        
     last_gc = base_pf.h.num_grids
     cur_gc = -1
     pf = base_pf    
@@ -545,7 +684,8 @@
                        level = g.Level,
                        dimensions = g.ActiveDimensions )
             for field in pf.h.field_list:
-                gd[field] = g[field]
+                if not pf.field_info[field].particle_type :
+                    gd[field] = g[field]
             grid_data.append(gd)
             if g.Level < pf.h.max_level: continue
             fg = FlaggingGrid(g, refinement_criteria)
@@ -557,8 +697,14 @@
                 gd = dict(left_edge = LE, right_edge = grid.right_edge,
                           level = g.Level + 1, dimensions = dims)
                 for field in pf.h.field_list:
-                    gd[field] = grid[field]
+                    if not pf.field_info[field].particle_type :
+                        gd[field] = grid[field]
                 grid_data.append(gd)
         pf = load_amr_grids(grid_data, pf.domain_dimensions, 1.0)
         cur_gc = pf.h.num_grids
+
+    # Now reassign particle data to grids
+
+    if number_of_particles > 0 : assign_particle_data(pf, pdata)
+    
     return pf
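
A minimal sketch, using random data, of the new particle handling in load_uniform_grid: one-dimensional entries plus a "number_of_particles" key replace the old number_of_particles argument:

    import numpy as np
    from yt.mods import load_uniform_grid

    npart = 10000
    data = {"Density": np.random.random((64, 64, 64)),
            "particle_position_x": np.random.uniform(size=npart),
            "particle_position_y": np.random.uniform(size=npart),
            "particle_position_z": np.random.uniform(size=npart),
            "particle_mass": np.ones(npart),
            "number_of_particles": npart}
    # One-dimensional entries are detected as particle fields; the particles
    # are then distributed onto the decomposed grids with find_points.
    pf = load_uniform_grid(data, (64, 64, 64), 1.0, nprocs=4)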


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -60,8 +60,8 @@
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
-        sl = tuple(reversed(sl))
-        tr = self.fields[grid.id][field][sl].swapaxes(0,2)
+        sl = tuple(sl)
+        tr = self.fields[grid.id][field][sl]
         # In-place unit conversion requires we return a copy
         return tr.copy()
 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/stream/tests/test_stream_particles.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -0,0 +1,130 @@
+import numpy as np
+from yt.testing import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr, load_amr_grids
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+
+def setup() :
+    pass
+
+# Field information
+
+def test_stream_particles() :
+    
+    num_particles = 100000
+    domain_dims = (64, 64, 64)
+    dens = np.random.random(domain_dims) 
+    x = np.random.uniform(size=num_particles)
+    y = np.random.uniform(size=num_particles)
+    z = np.random.uniform(size=num_particles)
+    m = np.ones((num_particles))
+
+    # Field operators and cell flagging methods
+
+    fo = []
+    fo.append(ic.TopHatSphere(0.1, [0.2,0.3,0.4],{"Density": 2.0}))
+    fo.append(ic.TopHatSphere(0.05, [0.7,0.4,0.75],{"Density": 20.0}))
+    rc = [fm.flagging_method_registry["overdensity"](1.0)]
+    
+    # Check that all of this runs ok without particles
+    
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
+    ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
+    amr0 = refine_amr(ug0, rc, fo, 3)
+
+    grid_data = []
+    
+    for grid in amr0.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+    
+        for field in amr0.h.field_list :
+            
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+
+    amr0 = load_amr_grids(grid_data, domain_dims, 1.0)
+                        
+    # Now add particles
+
+    fields1 = {"Density": dens,
+               "particle_position_x": x,
+               "particle_position_y": y,
+               "particle_position_z": z,
+               "particle_mass": m,
+               "number_of_particles": num_particles}
+
+    fields2 = fields1.copy()
+
+    ug1 = load_uniform_grid(fields1, domain_dims, 1.0)
+    ug2 = load_uniform_grid(fields2, domain_dims, 1.0, nprocs=8)
+
+    # Check to make sure the number of particles is the same
+
+    number_of_particles1 = np.sum([grid.NumberOfParticles for grid in ug1.h.grids])
+    number_of_particles2 = np.sum([grid.NumberOfParticles for grid in ug2.h.grids])
+    
+    assert number_of_particles1 == num_particles
+    assert number_of_particles1 == number_of_particles2
+
+    # Check to make sure the fields have been defined correctly
+    
+    assert ug1.field_info["particle_position_x"].particle_type
+    assert ug1.field_info["particle_position_y"].particle_type
+    assert ug1.field_info["particle_position_z"].particle_type
+    assert ug1.field_info["particle_mass"].particle_type
+    assert not ug1.field_info["Density"].particle_type
+
+    assert ug2.field_info["particle_position_x"].particle_type
+    assert ug2.field_info["particle_position_y"].particle_type
+    assert ug2.field_info["particle_position_z"].particle_type
+    assert ug2.field_info["particle_mass"].particle_type
+    assert not ug2.field_info["Density"].particle_type
+    
+    # Now refine this
+
+    amr1 = refine_amr(ug1, rc, fo, 3)
+    
+    grid_data = []
+    
+    for grid in amr1.h.grids :
+        
+        data = dict(left_edge = grid.LeftEdge,
+                    right_edge = grid.RightEdge,
+                    level = grid.Level,
+                    dimensions = grid.ActiveDimensions,
+                    number_of_particles = grid.NumberOfParticles)
+
+        for field in amr1.h.field_list :
+
+            data[field] = grid[field]
+            
+        grid_data.append(data)
+    
+    amr2 = load_amr_grids(grid_data, domain_dims, 1.0)
+
+    # Check everything again
+
+    number_of_particles1 = [grid.NumberOfParticles for grid in amr1.h.grids]
+    number_of_particles2 = [grid.NumberOfParticles for grid in amr2.h.grids]
+    
+    assert np.sum(number_of_particles1) == num_particles
+    assert_equal(number_of_particles1, number_of_particles2)
+    
+    assert amr1.field_info["particle_position_x"].particle_type
+    assert amr1.field_info["particle_position_y"].particle_type
+    assert amr1.field_info["particle_position_z"].particle_type
+    assert amr1.field_info["particle_mass"].particle_type
+    assert not amr1.field_info["Density"].particle_type
+    
+    assert amr2.field_info["particle_position_x"].particle_type
+    assert amr2.field_info["particle_position_y"].particle_type
+    assert amr2.field_info["particle_position_z"].particle_type
+    assert amr2.field_info["particle_mass"].particle_type
+    assert not amr2.field_info["Density"].particle_type
+


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/frontends/stream/tests/test_update_data.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_update_data.py
@@ -0,0 +1,23 @@
+from yt.testing import *
+from yt.data_objects.profiles import BinnedProfile1D
+from numpy.random import uniform
+
+def setup():
+    global pf
+    pf = fake_random_pf(64, nprocs=8)
+    pf.h
+    
+def test_update_data() :
+    dims = (32,32,32)
+    grid_data = [{"Temperature":uniform(size=dims)}
+                 for i in xrange(pf.h.num_grids)]
+    pf.h.update_data(grid_data)
+    prj = pf.h.proj(2, "Temperature")
+    prj["Temperature"]
+    dd = pf.h.all_data()
+    profile = BinnedProfile1D(dd, 10, "Density",
+                              dd["Density"].min(),
+                              dd["Density"].max())
+    profile.add_fields(["Temperature"])
+    profile["Temperature"]
+                              


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -466,6 +466,11 @@
     return u.popbuffer()
 
 def get_yt_version():
+    try:
+        from yt.__hg_version__ import hg_version
+        return hg_version
+    except ImportError:
+        pass
     import pkg_resources
     yt_provider = pkg_resources.get_provider("yt")
     path = os.path.dirname(yt_provider.module_path)
@@ -567,3 +572,7 @@
         
 def fix_axis(axis):
     return inv_axis_names.get(axis, axis)
+
+def get_image_suffix(name):
+    suffix = os.path.splitext(name)[1].lstrip('.')
+    return suffix if suffix in ['png', 'eps', 'ps', 'pdf'] else ''


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -105,6 +105,10 @@
 #from yt.frontends.maestro.api import \
 #    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
 
+from yt.frontends.stream.api import \
+    StreamStaticOutput, StreamFieldInfo, add_stream_field, \
+    StreamHandler, load_uniform_grid, load_amr_grids
+
 from yt.analysis_modules.list_modules import \
     get_available_modules, amods
 available_analysis_modules = get_available_modules()


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -29,7 +29,7 @@
     assert_array_less, assert_string_equal, assert_array_almost_equal_nulp,\
     assert_allclose
 
-def assert_rel_equal(a1, a2, decimals, err_msg=''):
+def assert_rel_equal(a1, a2, decimals, err_msg='', verbose=True):
     # We have nan checks in here because occasionally we have fields that get
     # weighted without non-zero weights.  I'm looking at you, particle fields!
     if isinstance(a1, np.ndarray):
@@ -39,7 +39,8 @@
         a2[np.isnan(a2)] = 1.0
     elif np.isnan(a1) and np.isnan(a2):
         return True
-    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg)
+    return assert_almost_equal(a1/a2, 1.0, decimals, err_msg=err_msg,
+                               verbose=verbose)
 
 def amrspace(extent, levels=7, cells=8):
     """Creates two numpy arrays representing the left and right bounds of 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -29,6 +29,7 @@
 import contextlib
 import urllib2
 import cPickle
+import sys
 
 from nose.plugins import Plugin
 from yt.testing import *
@@ -44,62 +45,103 @@
 mylog = logging.getLogger('nose.plugins.answer-testing')
 run_big_data = False
 
-_latest = "gold001"
+# Set the latest gold and local standard filenames
+_latest = ytcfg.get("yt", "gold_standard_filename")
+_latest_local = ytcfg.get("yt", "local_standard_filename")
 _url_path = "http://yt-answer-tests.s3-website-us-east-1.amazonaws.com/%s_%s"
 
 class AnswerTesting(Plugin):
     name = "answer-testing"
+    _my_version = None
 
     def options(self, parser, env=os.environ):
         super(AnswerTesting, self).options(parser, env=env)
-        parser.add_option("--answer-compare", dest="compare_name",
-            default=_latest, help="The name against which we will compare")
+        parser.add_option("--answer-name", dest="answer_name", metavar='str',
+            default=None, help="The name of the standard to store/compare against")
+        parser.add_option("--answer-store", dest="store_results", metavar='bool',
+            default=False, action="store_true",
+            help="Should we store this result instead of comparing?")
+        parser.add_option("--local", dest="local_results",
+            default=False, action="store_true", help="Store/load reference results locally?")
         parser.add_option("--answer-big-data", dest="big_data",
             default=False, help="Should we run against big data, too?",
             action="store_true")
-        parser.add_option("--answer-name", dest="this_name",
-            default=None,
-            help="The name we'll call this set of tests")
-        parser.add_option("--answer-store", dest="store_results",
-            default=False, action="store_true")
-        parser.add_option("--local-store", dest="store_local_results",
-            default=False, action="store_true", help="Store/Load local results?")
+
+    @property
+    def my_version(self, version=None):
+        if self._my_version is not None:
+            return self._my_version
+        if version is None:
+            try:
+                version = get_yt_version()
+            except:
+                version = "UNKNOWN%s" % (time.time())
+        self._my_version = version
+        return self._my_version
 
     def configure(self, options, conf):
         super(AnswerTesting, self).configure(options, conf)
         if not self.enabled:
             return
         disable_stream_logging()
-        try:
-            my_hash = get_yt_version()
-        except:
-            my_hash = "UNKNOWN%s" % (time.time())
-        if options.this_name is None: options.this_name = my_hash
-        from yt.config import ytcfg
+
+        # Parse through the storage flags to make sense of them
+        # and use reasonable defaults
+        # If we're storing the data, default storage name is local
+        # latest version
+        if options.store_results:
+            if options.answer_name is None:
+                self.store_name = _latest_local
+            else:
+                self.store_name = options.answer_name
+            self.compare_name = None
+        # if we're not storing, then we're comparing, and we want default
+        # comparison name to be the latest gold standard 
+        # either on network or local
+        else:
+            if options.answer_name is None:
+                if options.local_results:
+                    self.compare_name = _latest_local
+                else:
+                    self.compare_name = _latest
+            else:
+                self.compare_name = options.answer_name
+            self.store_name = self.my_version
+
+        self.store_results = options.store_results
+
         ytcfg["yt","__withintesting"] = "True"
         AnswerTestingTest.result_storage = \
             self.result_storage = defaultdict(dict)
-        if options.compare_name == "SKIP":
-            options.compare_name = None
-        elif options.compare_name == "latest":
-            options.compare_name = _latest
+        if self.compare_name == "SKIP":
+            self.compare_name = None
+        elif self.compare_name == "latest":
+            self.compare_name = _latest
+            
+        # Local/Cloud storage 
+        if options.local_results:
+            storage_class = AnswerTestLocalStorage
+            # Fix up filename for local storage 
+            if self.compare_name is not None:
+                self.compare_name = "%s/%s/%s" % \
+                    (os.path.realpath(options.output_dir), self.compare_name, 
+                     self.compare_name)
+            if self.store_name is not None:
+                name_dir_path = "%s/%s" % \
+                    (os.path.realpath(options.output_dir), 
+                    self.store_name)
+                if not os.path.isdir(name_dir_path):
+                    os.makedirs(name_dir_path)
+                self.store_name= "%s/%s" % \
+                        (name_dir_path, self.store_name)
+        else:
+            storage_class = AnswerTestCloudStorage
 
-        # We only either store or test.
-        if options.store_local_results:
-            if options.compare_name is not None:
-                options.compare_name = "%s/%s" % \
-                        (os.path.realpath(options.output_dir), 
-                         options.compare_name)
-            AnswerTestingTest.reference_storage = \
-                self.storage = \
-                    AnswerTestLocalStorage(options.compare_name, 
-                                           not options.store_results)
-        else:
-            AnswerTestingTest.reference_storage = \
-                self.storage = AnswerTestCloudStorage(options.compare_name, not options.store_results)
+        # Initialize answer/reference storage
+        AnswerTestingTest.reference_storage = self.storage = \
+                storage_class(self.compare_name, self.store_name)
 
-        self.store_results = options.store_results
-        self.store_local_results = options.store_local_results
+        self.local_results = options.local_results
         global run_big_data
         run_big_data = options.big_data
 
@@ -108,10 +150,10 @@
         self.storage.dump(self.result_storage)        
 
 class AnswerTestStorage(object):
-    def __init__(self, reference_name, read=True):
+    def __init__(self, reference_name=None, answer_name=None):
         self.reference_name = reference_name
+        self.answer_name = answer_name
         self.cache = {}
-        self.read = read
     def dump(self, result_storage, result):
         raise NotImplementedError 
     def get(self, pf_name, default=None):
@@ -119,23 +161,32 @@
 
 class AnswerTestCloudStorage(AnswerTestStorage):
     def get(self, pf_name, default = None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         if pf_name in self.cache: return self.cache[pf_name]
         url = _url_path % (self.reference_name, pf_name)
         try:
             resp = urllib2.urlopen(url)
-            # This is dangerous, but we have a controlled S3 environment
-            data = resp.read()
-            rv = cPickle.loads(data)
         except urllib2.HTTPError as ex:
             raise YTNoOldAnswer(url)
-            mylog.warning("Missing %s (%s)", url, ex)
-            rv = default
+        else:
+            for this_try in range(3):
+                try:
+                    data = resp.read()
+                except:
+                    time.sleep(0.01)
+                else:
+                    # We were successful
+                    break
+            else:
+                # Raise error if all tries were unsuccessful
+                raise YTCloudError(url)
+            # This is dangerous, but we have a controlled S3 environment
+            rv = cPickle.loads(data)
         self.cache[pf_name] = rv
         return rv
 
     def dump(self, result_storage):
-        if self.read: return
+        if self.answer_name is None: return
         # This is where we dump our result storage up to Amazon, if we are able
         # to.
         import boto
@@ -144,18 +195,18 @@
         bucket = c.get_bucket("yt-answer-tests")
         for pf_name in result_storage:
             rs = cPickle.dumps(result_storage[pf_name])
-            tk = bucket.get_key("%s_%s" % (self.reference_name, pf_name)) 
+            tk = bucket.get_key("%s_%s" % (self.answer_name, pf_name)) 
             if tk is not None: tk.delete()
             k = Key(bucket)
-            k.key = "%s_%s" % (self.reference_name, pf_name)
+            k.key = "%s_%s" % (self.answer_name, pf_name)
             k.set_contents_from_string(rs)
             k.set_acl("public-read")
 
 class AnswerTestLocalStorage(AnswerTestStorage):
     def dump(self, result_storage):
-        if self.read: return 
+        if self.answer_name is None: return
         # Store data using shelve
-        ds = shelve.open(self.reference_name, protocol=-1)
+        ds = shelve.open(self.answer_name, protocol=-1)
         for pf_name in result_storage:
             answer_name = "%s" % pf_name
             if name in ds:
@@ -164,7 +215,7 @@
         ds.close()
 
     def get(self, pf_name, default=None):
-        if not self.read: return default
+        if self.reference_name is None: return default
         # Read data using shelve
         answer_name = "%s" % pf_name
         ds = shelve.open(self.reference_name, protocol=-1)
@@ -183,11 +234,11 @@
     os.chdir(oldcwd)
 
 def can_run_pf(pf_fn):
+    if isinstance(pf_fn, StaticOutput):
+        return AnswerTestingTest.result_storage is not None
     path = ytcfg.get("yt", "test_data_dir")
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput):
-        return AnswerTestingTest.result_storage is not None
     with temp_cwd(path):
         try:
             load(pf_fn)
@@ -197,9 +248,9 @@
 
 def data_dir_load(pf_fn):
     path = ytcfg.get("yt", "test_data_dir")
+    if isinstance(pf_fn, StaticOutput): return pf_fn
     if not os.path.isdir(path):
         return False
-    if isinstance(pf_fn, StaticOutput): return pf_fn
     with temp_cwd(path):
         pf = load(pf_fn)
         pf.h
@@ -224,10 +275,10 @@
 
     def __call__(self):
         nv = self.run()
-        if self.reference_storage.read and \
-           self.reference_storage.reference_name is not None:
+        if self.reference_storage.reference_name is not None:
             dd = self.reference_storage.get(self.storage_name)
-            if dd is None: raise YTNoOldAnswer(self.storage_name)
+            if dd is None or self.description not in dd: 
+                raise YTNoOldAnswer("%s : %s" % (self.storage_name , self.description))
             ov = dd[self.description]
             self.compare(nv, ov)
         else:
@@ -309,8 +360,8 @@
             assert_equal(new_result, old_result, 
                          err_msg=err_msg, verbose=True)
         else:
-            assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg)
+            assert_allclose(new_result, old_result, 10.**(-self.decimals),
+                             err_msg=err_msg, verbose=True)
 
 class AllFieldValuesTest(AnswerTestingTest):
     _type_name = "AllFieldValues"
@@ -334,7 +385,7 @@
                          err_msg=err_msg, verbose=True)
         else:
             assert_rel_equal(new_result, old_result, self.decimals,
-                             err_msg=err_msg)
+                             err_msg=err_msg, verbose=True)
             
 class ProjectionValuesTest(AnswerTestingTest):
     _type_name = "ProjectionValues"
@@ -345,7 +396,7 @@
         super(ProjectionValuesTest, self).__init__(pf_fn)
         self.axis = axis
         self.field = field
-        self.weight_field = field
+        self.weight_field = weight_field
         self.obj_type = obj_type
         self.decimals = decimals
 
@@ -354,24 +405,29 @@
             obj = self.create_obj(self.pf, self.obj_type)
         else:
             obj = None
+        if self.pf.domain_dimensions[self.axis] == 1: return None
         proj = self.pf.h.proj(self.axis, self.field,
                               weight_field=self.weight_field,
                               data_source = obj)
         return proj.field_data
 
     def compare(self, new_result, old_result):
+        if new_result is None:
+            return
         assert(len(new_result) == len(old_result))
         for k in new_result:
             assert (k in old_result)
         for k in new_result:
             err_msg = "%s values of %s (%s weighted) projection (axis %s) not equal." % \
               (k, self.field, self.weight_field, self.axis)
+            if k == 'weight_field' and self.weight_field is None:
+                continue
             if self.decimals is None:
                 assert_equal(new_result[k], old_result[k],
                              err_msg=err_msg)
             else:
-                assert_rel_equal(new_result[k], old_result[k], 
-                                 self.decimals, err_msg=err_msg)
+                assert_allclose(new_result[k], old_result[k], 
+                                 10.**-(self.decimals), err_msg=err_msg)
 
 class PixelizedProjectionValuesTest(AnswerTestingTest):
     _type_name = "PixelizedProjectionValues"
@@ -532,3 +588,18 @@
                     yield PixelizedProjectionValuesTest(
                         pf_fn, axis, field, weight_field,
                         ds)
+
+class AssertWrapper(object):
+    """
+    Used to wrap a numpy testing assertion, in order to provide a useful name
+    for a given assertion test.
+    """
+    def __init__(self, description, *args):
+        # The key here is to add a description attribute, which nose will pick
+        # up.
+        self.args = args
+        self.description = description
+
+    def __call__(self):
+        self.args[0](*self.args[1:])
+


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -339,9 +339,15 @@
     return u.popbuffer()
 
 def get_yt_version():
+    try:
+        from yt.__hg_version__ import hg_version
+        return hg_version
+    except ImportError:
+        pass
     import pkg_resources
     yt_provider = pkg_resources.get_provider("yt")
     path = os.path.dirname(yt_provider.module_path)
+    if not os.path.isdir(os.path.join(path, ".hg")): return None
     version = _get_hg_version(path)[:12]
     return version
 
@@ -585,19 +591,6 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
-        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
-        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
-            print "Let's also add bitbucket.org to the known hosts, so hg"
-            print "doesn't warn us about bitbucket."
-            print "We will add this:"
-            print
-            print "   [hostfingerprints]"
-            print "   bitbucket.org = %s" % (bb_fp)
-            print
-            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
-            cedit.config.setoption(uu, hgrc_path,
-                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
-
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
@@ -1037,9 +1030,8 @@
                 print "The supplemental repositories are located at:"
                 print "    %s" % (spath)
                 update_supp = True
-        vstring = None
-        if "site-packages" not in path:
-            vstring = get_hg_version(path)
+        vstring = get_yt_version()
+        if vstring is not None:
             print
             print "The current version of the code is:"
             print
@@ -1047,10 +1039,11 @@
             print vstring.strip()
             print "---"
             print
-            print "This installation CAN be automatically updated."
-            if opts.update_source:  
-                update_hg(path)
-            print "Updated successfully."
+            if "site-packages" not in path:
+                print "This installation CAN be automatically updated."
+                if opts.update_source:  
+                    update_hg(path)
+                print "Updated successfully."
         elif opts.update_source:
             print
             print "YT site-packages not in path, so you must"


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -101,14 +101,13 @@
                 "contains the following LaTeX parser errors:\n" ) \
             % (self.unit_name, self.field_name) + self.mathtext_error
 
-class AmbiguousOutputs(YTException):
-    def __init__(self, pf):
-        YTException.__init__(self, pf)
+class InvalidSimulationTimeSeries(YTException):
+    def __init__(self, message):
+        self.message = message
 
     def __str__(self):
-        return "Simulation %s has both dtDataDump and CycleSkipDataDump set.  Unable to calculate datasets." % \
-            self.pf
-
+        return self.message
+            
 class MissingParameter(YTException):
     def __init__(self, pf, parameter):
         YTException.__init__(self, pf)
@@ -164,6 +163,14 @@
         return "There is no old answer available.\n" + \
                str(self.path)
 
+class YTCloudError(YTException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "Failed to retrieve cloud data. Connection may be broken.\n" + \
+               str(self.path)
+
 class YTEllipsoidOrdering(YTException):
     def __init__(self, pf, A, B, C):
         YTException.__init__(self, pf)
@@ -182,3 +189,22 @@
     def __str__(self):
         return "Enzo test output file (OutputLog) not generated for: " + \
             "'%s'" % (self.testname) + ".\nTest did not complete."
+
+class YTNoAPIKey(YTException):
+    def __init__(self, service, config_name):
+        self.service = service
+        self.config_name = config_name
+
+    def __str__(self):
+        return "You need to set an API key for %s in ~/.yt/config as %s" % (
+            self.service, self.config_name)
+
+class YTTooManyVertices(YTException):
+    def __init__(self, nv, fn):
+        self.nv = nv
+        self.fn = fn
+
+    def __str__(self):
+        s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv)
+        s += "Your model has been saved as %s .  You should upload manually." % (self.fn)
+        return s


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -67,6 +67,47 @@
             val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
             grid[field][ind] = val + inner_val
 
+class BetaModelSphere(FluidOperator):
+    def __init__(self, beta, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+        self.beta = beta
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)            
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select            
+        for field, core_val in self.fields.iteritems() :
+            val = core_val*(1.+r[ind]/cr2)**(-1.5*self.beta)
+            grid[field][ind] = val
+
+class NFWModelSphere(FluidOperator):
+    def __init__(self, scale_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.scale_radius = scale_radius
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r,r)
+        ind = (r <= self.radius)
+        r /= self.scale_radius
+        if sub_select is not None:
+            ind &= sub_select
+        for field, scale_val in self.fields.iteritems() :
+            val = scale_val/(r[ind]*(1.+r[ind])**2)
+            grid[field][ind] = val
+            
 class RandomFluctuation(FluidOperator):
     def __init__(self, fields):
         self.fields = fields
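
A minimal sketch of the new BetaModelSphere operator used together with refine_amr; all numbers are illustrative:

    import numpy as np
    import yt.utilities.initial_conditions as ic
    import yt.utilities.flagging_methods as fm
    from yt.frontends.stream.api import load_uniform_grid, refine_amr

    domain_dims = (64, 64, 64)
    ug = load_uniform_grid({"Density": np.ones(domain_dims)}, domain_dims, 1.0)
    # Overlay a beta-model density profile centered on the domain and refine
    # wherever the density exceeds the overdensity threshold.
    fo = [ic.BetaModelSphere(beta=2.0/3.0, core_radius=0.05, radius=0.4,
                             center=[0.5, 0.5, 0.5], fields={"Density": 50.0})]
    rc = [fm.flagging_method_registry["overdensity"](2.0)]
    pf = refine_amr(ug, rc, fo, 3)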


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -117,6 +117,63 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def CICSample_3(np.ndarray[np.float64_t, ndim=1] posx,
+                np.ndarray[np.float64_t, ndim=1] posy,
+                np.ndarray[np.float64_t, ndim=1] posz,
+                np.ndarray[np.float64_t, ndim=1] sample,
+                np.int64_t npositions,
+                np.ndarray[np.float64_t, ndim=3] field,
+                np.ndarray[np.float64_t, ndim=1] leftEdge,
+                np.ndarray[np.int32_t, ndim=1] gridDimension,
+                np.float64_t cellSize):
+    
+    cdef int i1, j1, k1, n
+    cdef np.float64_t xpos, ypos, zpos
+    cdef np.float64_t fact, edge0, edge1, edge2
+    cdef np.float64_t le0, le1, le2
+    cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
+    
+    edge0 = (<np.float64_t> gridDimension[0]) - 0.5001
+    edge1 = (<np.float64_t> gridDimension[1]) - 0.5001
+    edge2 = (<np.float64_t> gridDimension[2]) - 0.5001
+    fact = 1.0 / cellSize
+    
+    le0 = leftEdge[0] 
+    le1 = leftEdge[1] 
+    le2 = leftEdge[2] 
+                                                    
+    for n in range(npositions):
+
+        # Compute the position of the central cell
+        xpos = fclip((posx[n] - le0)*fact, 0.5001, edge0)
+        ypos = fclip((posy[n] - le1)*fact, 0.5001, edge1)
+        zpos = fclip((posz[n] - le2)*fact, 0.5001, edge2)
+
+        i1  = <int> (xpos + 0.5)
+        j1  = <int> (ypos + 0.5)
+        k1  = <int> (zpos + 0.5)
+        
+        # Compute the weights
+        dx = (<float> i1) + 0.5 - xpos
+        dy = (<float> j1) + 0.5 - ypos
+        dz = (<float> k1) + 0.5 - zpos
+        dx2 =  1.0 - dx
+        dy2 =  1.0 - dy
+        dz2 =  1.0 - dz
+
+        # Interpolate from field onto the particle
+        sample[n] = (field[i1-1,j1-1,k1-1] * dx  * dy  * dz +
+                     field[i1  ,j1-1,k1-1] * dx2 * dy  * dz +
+                     field[i1-1,j1  ,k1-1] * dx  * dy2 * dz +
+                     field[i1  ,j1  ,k1-1] * dx2 * dy2 * dz +
+                     field[i1-1,j1-1,k1  ] * dx  * dy  * dz2 +
+                     field[i1  ,j1-1,k1  ] * dx2 * dy  * dz2 +
+                     field[i1-1,j1  ,k1  ] * dx  * dy2 * dz2 +
+                     field[i1  ,j1  ,k1  ] * dx2 * dy2 * dz2)
+                                                                                                                        
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
                               np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
                               np.ndarray[np.float32_t, ndim=2] right_edges,
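
A minimal sketch of calling the new CICSample_3 routine directly; the arrays are random and purely illustrative:

    import numpy as np
    from yt.utilities.lib.CICDeposit import CICSample_3

    field = np.random.random((16, 16, 16))           # one grid's data
    le = np.zeros(3, dtype="float64")                # grid left edge
    dims = np.array([16, 16, 16], dtype="int32")
    dx = 1.0 / 16.0                                  # cell size
    px = np.random.uniform(size=100)
    py = np.random.uniform(size=100)
    pz = np.random.uniform(size=100)
    sample = np.zeros(100, dtype="float64")
    # Cloud-in-cell interpolation of the grid field onto the particle
    # positions; the result is written into sample in place.
    CICSample_3(px, py, pz, sample, 100, field, le, dims, dx)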


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/GridTree.pyx
--- /dev/null
+++ b/yt/utilities/lib/GridTree.pyx
@@ -0,0 +1,273 @@
+"""
+Matching points on the grid to specific grids
+
+Author: John ZuHone <jzuhone@gmail.com>
+Affiliation: NASA/Goddard Space Flight Center
+Homepage: http://yt-project.org/
+License:
+Copyright (C) 2012 John ZuHone.  All Rights Reserved.
+
+This file is part of yt.
+
+yt is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from libc.stdlib cimport malloc, free
+
+cdef struct GridTreeNode :
+    int num_children
+    int level
+    int index
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    GridTreeNode **children
+                
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef GridTreeNode Grid_initialize(np.ndarray[np.float64_t, ndim=1] le,
+                                  np.ndarray[np.float64_t, ndim=1] re,
+                                  int num_children, int level, int index) :
+
+    cdef GridTreeNode node
+    cdef int i
+
+    node.index = index
+    node.level = level
+    for i in range(3) :
+        node.left_edge[i] = le[i]
+        node.right_edge[i] = re[i]
+    node.num_children = num_children
+    
+    if num_children > 0:
+        node.children = <GridTreeNode **> malloc(sizeof(GridTreeNode *) *
+                                                 num_children)
+        for i in range(num_children) :
+            node.children[i] = NULL
+    else :
+        node.children = NULL
+
+    return node
+
+cdef class GridTree :
+
+    cdef GridTreeNode *grids
+    cdef GridTreeNode *root_grids
+    cdef int num_grids
+    cdef int num_root_grids
+    cdef int num_leaf_grids
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, int num_grids, 
+                  np.ndarray[np.float64_t, ndim=2] left_edge,
+                  np.ndarray[np.float64_t, ndim=2] right_edge,
+                  np.ndarray[np.int64_t, ndim=1] parent_ind,
+                  np.ndarray[np.int64_t, ndim=1] level,
+                  np.ndarray[np.int64_t, ndim=1] num_children) :
+
+        cdef int i, j, k
+        cdef np.ndarray[np.int64_t, ndim=1] child_ptr
+
+        child_ptr = np.zeros(num_grids, dtype='int64')
+
+        self.num_grids = num_grids
+        self.num_root_grids = 0
+        self.num_leaf_grids = 0
+        
+        self.grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
+                                             num_grids)
+                
+        for i in range(num_grids) :
+
+            self.grids[i] = Grid_initialize(left_edge[i,:],
+                                            right_edge[i,:],
+                                            num_children[i],
+                                            level[i], i)
+            if level[i] == 0 :
+                self.num_root_grids += 1
+
+            if num_children[i] == 0 : self.num_leaf_grids += 1
+
+        self.root_grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
+                                                  self.num_root_grids)
+                
+        k = 0
+        
+        for i in range(num_grids) :
+
+            j = parent_ind[i]
+            
+            if j >= 0:
+                
+                self.grids[j].children[child_ptr[j]] = &self.grids[i]
+
+                child_ptr[j] += 1
+
+            else :
+
+                self.root_grids[k] = self.grids[i] 
+                
+                k = k + 1
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def return_tree_info(self) :
+
+        cdef int i, j
+        
+        levels = []
+        indices = []
+        nchild = []
+        children = []
+        
+        for i in range(self.num_grids) : 
+
+            childs = []
+            
+            levels.append(self.grids[i].level)
+            indices.append(self.grids[i].index)
+            nchild.append(self.grids[i].num_children)
+            for j in range(self.grids[i].num_children) :
+                childs.append(self.grids[i].children[j].index)
+            children.append(childs)
+
+        return indices, levels, nchild, children
+    
+cdef class MatchPointsToGrids :
+
+    cdef int num_points
+    cdef np.float64_t * xp
+    cdef np.float64_t * yp
+    cdef np.float64_t * zp
+    cdef GridTree tree
+    cdef np.int64_t * point_grids
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, GridTree tree,
+                  int num_points, 
+                  np.ndarray[np.float64_t, ndim=1] x,
+                  np.ndarray[np.float64_t, ndim=1] y,
+                  np.ndarray[np.float64_t, ndim=1] z) :
+
+        cdef int i
+        
+        self.num_points = num_points
+
+        self.xp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.yp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.zp = <np.float64_t *> malloc(sizeof(np.float64_t) *
+                                          num_points)
+        self.point_grids = <np.int64_t *> malloc(sizeof(np.int64_t) *
+                                              num_points)
+        
+        for i in range(num_points) :
+            self.xp[i] = x[i]
+            self.yp[i] = y[i]
+            self.zp[i] = z[i]
+            self.point_grids[i] = -1
+            
+        self.tree = tree
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def find_points_in_tree(self) :
+
+        cdef np.ndarray[np.int64_t, ndim=1] pt_grids
+        cdef int i, j
+        cdef np.uint8_t in_grid
+        
+        pt_grids = np.zeros(self.num_points, dtype='int64')
+
+        for i in range(self.num_points) :
+
+            in_grid = 0
+            
+            for j in range(self.tree.num_root_grids) :
+
+                if not in_grid : 
+                    in_grid = self.check_position(i, self.xp[i], self.yp[i], self.zp[i],
+                                                  &self.tree.root_grids[j])
+
+        for i in range(self.num_points) :
+            pt_grids[i] = self.point_grids[i]
+        
+        return pt_grids
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef np.uint8_t check_position(self,
+                                   np.int64_t pt_index, 
+                                   np.float64_t x,
+                                   np.float64_t y,
+                                   np.float64_t z,
+                                   GridTreeNode * grid) :
+
+        cdef int i
+        cdef np.uint8_t in_grid
+	
+        in_grid = self.is_in_grid(x, y, z, grid)
+
+        if in_grid :
+
+            if grid.num_children > 0 :
+
+                in_grid = 0
+                
+                for i in range(grid.num_children) :
+
+                    if not in_grid :
+
+                        in_grid = self.check_position(pt_index, x, y, z, grid.children[i])
+
+                if not in_grid :
+                    self.point_grids[pt_index] = grid.index
+                    in_grid = 1
+                    
+            else :
+
+                self.point_grids[pt_index] = grid.index
+                in_grid = 1
+                
+        return in_grid
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.uint8_t is_in_grid(self,
+			 np.float64_t x,
+			 np.float64_t y,
+			 np.float64_t z,
+			 GridTreeNode * grid) :
+
+        cdef np.uint8_t xcond, ycond, zcond, cond
+            
+        xcond = x >= grid.left_edge[0] and x < grid.right_edge[0]
+        ycond = y >= grid.left_edge[1] and y < grid.right_edge[1]
+        zcond = z >= grid.left_edge[2] and z < grid.right_edge[2]
+	
+        cond = xcond and ycond
+        cond = cond and zcond
+
+        return cond
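
GridTree.pyx builds a C-level tree of GridTreeNode structs from the flat grid arrays, and MatchPointsToGrids.check_position walks it recursively: a point is assigned to a grid only if it lies inside that grid and inside none of the grid's children, so every point ends up on the finest grid that contains it. A minimal pure-Python sketch of that recursion, assuming a toy grid object with left_edge, right_edge, children and index attributes (not the actual yt classes):

    def match_point(x, y, z, grid, point_grids, pt_index):
        # Reject immediately if the point lies outside this grid.
        inside = (grid.left_edge[0] <= x < grid.right_edge[0] and
                  grid.left_edge[1] <= y < grid.right_edge[1] and
                  grid.left_edge[2] <= z < grid.right_edge[2])
        if not inside:
            return False
        # If any child claims the point, the finer grid wins.
        for child in grid.children:
            if match_point(x, y, z, child, point_grids, pt_index):
                return True
        # No child contains it, so this is the finest grid covering the point.
        point_grids[pt_index] = grid.index
        return True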


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -38,3 +38,4 @@
 from .RayIntegrators import *
 from .grid_traversal import *
 from .marching_cubes import *
+from .GridTree import *


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -989,9 +989,9 @@
             tmax[i] = 1e60
     # We have to jumpstart our calculation
     for i in range(3):
-        if cur_ind[i] == vc.dims[i] and step[i] == 1:
+        if cur_ind[i] == vc.dims[i] and step[i] >= 0:
             return 0
-        if cur_ind[i] == -1 and step[i] == -1:
+        if cur_ind[i] == -1 and step[i] <= -1:
             return 0
     enter_t = intersect_t
     hit = 0


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/marching_cubes.pyx
--- a/yt/utilities/lib/marching_cubes.pyx
+++ b/yt/utilities/lib/marching_cubes.pyx
@@ -33,7 +33,7 @@
 cdef struct Triangle:
     Triangle *next
     np.float64_t p[3][3]
-    np.float64_t val
+    np.float64_t val[3] # Usually only one value is used
 
 cdef struct TriangleCollection:
     int count
@@ -64,12 +64,14 @@
     return count
 
 cdef void FillTriangleValues(np.ndarray[np.float64_t, ndim=1] values,
-                             Triangle *first):
+                             Triangle *first, int nskip = 1):
     cdef Triangle *this = first
     cdef Triangle *last
     cdef int i = 0
+    cdef int j
     while this != NULL:
-        values[i] = this.val
+        for j in range(nskip):
+            values[i*nskip + j] = this.val[j]
         i += 1
         last = this
         this = this.next
@@ -463,7 +465,7 @@
                      np.ndarray[np.int32_t, ndim=3] mask,
                      np.ndarray[np.float64_t, ndim=1] left_edge,
                      np.ndarray[np.float64_t, ndim=1] dxs,
-                     obj_sample = None):
+                     obj_sample = None, int sample_type = 1):
     cdef int dims[3]
     cdef int i, j, k, n, m, nt
     cdef int offset
@@ -478,7 +480,7 @@
     if obj_sample is not None:
         sample = obj_sample
         sdata = <np.float64_t *> sample.data
-        do_sample = 1
+        do_sample = sample_type # 1 for face, 2 for vertex
     else:
         do_sample = 0
     for i in range(3):
@@ -502,13 +504,16 @@
                     offset_fill(dims, intdata, gv)
                     nt = march_cubes(gv, isovalue, dds, pos[0], pos[1], pos[2],
                                 &triangles)
-                    if do_sample == 1 and nt > 0:
+                    if nt == 0 or do_sample == 0:
+                        pos[2] += dds[2]
+                        continue
+                    if last == NULL and triangles.first != NULL:
+                        current = triangles.first
+                        last = NULL
+                    elif last != NULL:
+                        current = last.next
+                    if do_sample == 1:
                         # At each triangle's center, sample our secondary field
-                        if last == NULL and triangles.first != NULL:
-                            current = triangles.first
-                            last = NULL
-                        elif last != NULL:
-                            current = last.next
                         while current != NULL:
                             for n in range(3):
                                 point[n] = 0.0
@@ -517,24 +522,38 @@
                                     point[m] += (current.p[n][m]-pos[m])*idds[m]
                             for n in range(3):
                                 point[n] /= 3.0
-                            current.val = offset_interpolate(dims, point,
+                            current.val[0] = offset_interpolate(dims, point,
                                                              sdata + offset)
                             last = current
                             if current.next == NULL: break
                             current = current.next
+                    elif do_sample == 2:
+                        while current != NULL:
+                            for n in range(3):
+                                for m in range(3):
+                                    point[m] = (current.p[n][m]-pos[m])*idds[m]
+                                current.val[n] = offset_interpolate(dims,
+                                                    point, sdata + offset)
+                            last = current
+                            if current.next == NULL: break
+                            current = current.next
                 pos[2] += dds[2]
             pos[1] += dds[1]
         pos[0] += dds[0]
     # Hallo, we are all done.
     cdef np.ndarray[np.float64_t, ndim=2] vertices 
     vertices = np.zeros((triangles.count*3,3), dtype='float64')
+    if do_sample == 0:
+        FillAndWipeTriangles(vertices, triangles.first)
+    cdef int nskip
     if do_sample == 1:
-        sampled = np.zeros(triangles.count, dtype='float64')
-        FillTriangleValues(sampled, triangles.first)
-        FillAndWipeTriangles(vertices, triangles.first)
-        return vertices, sampled
+        nskip = 1
+    elif do_sample == 2:
+        nskip = 3
+    sampled = np.zeros(triangles.count * nskip, dtype='float64')
+    FillTriangleValues(sampled, triangles.first, nskip)
     FillAndWipeTriangles(vertices, triangles.first)
-    return vertices
+    return vertices, sampled
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
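
The marching_cubes change lets a Triangle carry up to three sampled values so that the secondary field can be evaluated either once per triangle at its centroid (do_sample == 1) or once per vertex (do_sample == 2); FillTriangleValues then packs nskip values per triangle into the flat output array, and march_cubes_grid now always returns (vertices, sampled). A small sketch of how a caller might unpack the two layouts (the helper name is illustrative):

    import numpy as np

    def per_triangle_values(vertices, sampled):
        # vertices has shape (3 * Ntri, 3); sampled has Ntri entries in
        # face-sampling mode or 3 * Ntri entries in vertex-sampling mode.
        ntri = vertices.shape[0] // 3
        if sampled.size == ntri:
            return sampled                                 # one value per face
        return sampled.reshape(ntri, 3).mean(axis=1)       # average the vertices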


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -204,6 +204,10 @@
                           "yt/utilities/lib/field_interpolation_tables.pxd",
                           ]
           )
+    config.add_extension("GridTree", 
+    ["yt/utilities/lib/GridTree.pyx"],
+        libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]
         idir = os.path.join(gpd, "include")


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/tests/test_grid_tree.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_grid_tree.py
@@ -0,0 +1,75 @@
+import numpy as np
+
+from yt.testing import *
+from yt.frontends.stream.api import load_amr_grids
+
+def setup():
+
+    global pf
+    
+    grid_data = [
+        dict(left_edge = [0.0, 0.0, 0.0], right_edge = [1.0, 1.0, 1.],
+             level = 0, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.25, 0.25, 0.25], right_edge = [0.75, 0.75, 0.75],
+             level = 1, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.25, 0.25, 0.375], right_edge = [0.5, 0.5, 0.625],
+             level = 2, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.5, 0.5, 0.375], right_edge = [0.75, 0.75, 0.625],
+             level = 2, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.3125, 0.3125, 0.4375], right_edge = [0.4375, 0.4375, 0.5625],
+             level = 3, dimensions = [16, 16, 16]),
+        dict(left_edge = [0.5625, 0.5625, 0.4375], right_edge = [0.6875, 0.6875, 0.5625],
+             level = 3, dimensions = [16, 16, 16])
+        ]
+
+    for g in grid_data: g["Density"] = np.random.random(g["dimensions"]) * 2**g["level"]
+    pf = load_amr_grids(grid_data, [16, 16, 16], 1.0)
+
+def test_grid_tree() :
+
+    grid_tree = pf.h.get_grid_tree()
+    indices, levels, nchild, children = grid_tree.return_tree_info()
+
+    grid_levels = [grid.Level for grid in pf.h.grids]
+    grid_indices = [grid.id-grid._id_offset for grid in pf.h.grids]
+    grid_nchild = [len(grid.Children) for grid in pf.h.grids]
+
+    print levels, grid_levels
+    assert_equal(levels, grid_levels)
+    assert_equal(indices, grid_indices)
+    assert_equal(nchild, grid_nchild)
+
+    for i, grid in enumerate(pf.h.grids) :
+        if grid_nchild[i] > 0:
+            grid_children = np.array([child.id-child._id_offset
+                                      for child in grid.Children])
+            assert_equal(grid_children, children[i])
+
+def test_find_points() :
+    
+    num_points = 100
+
+    x = np.random.uniform(low=pf.domain_left_edge[0],
+                          high=pf.domain_right_edge[0], size=num_points)
+    y = np.random.uniform(low=pf.domain_left_edge[1],
+                          high=pf.domain_right_edge[1], size=num_points)
+    z = np.random.uniform(low=pf.domain_left_edge[2],
+                          high=pf.domain_right_edge[2], size=num_points)
+
+    point_grids, point_grid_inds = pf.h.find_points(x,y,z)
+
+    grid_inds = np.zeros((num_points), dtype='int64')
+
+    for i, xx, yy, zz in zip(range(num_points), x, y, z) :
+
+        pt_level = -1
+        
+        for grid in pf.h.grids:
+
+            if grid.is_in_grid(xx, yy, zz) :
+            
+                if grid.Level > pt_level :
+                    pt_level = grid.Level
+                    grid_inds[i] = grid.id-grid._id_offset
+                    
+    assert_equal(point_grid_inds, grid_inds)


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/lib/tests/test_sample.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_sample.py
@@ -0,0 +1,42 @@
+import numpy as np
+
+from yt.testing import *
+from yt.utilities.lib import CICSample_3
+
+def setup():
+    pass
+
+def test_sample() :
+
+    grid = {}
+
+    dims = np.array([64,64,64], dtype='int32')
+    
+    inds = np.indices(dims)
+    grid["x"] = inds[0] + 0.5
+    grid["y"] = inds[1] + 0.5
+    grid["z"] = inds[2] + 0.5
+
+    num_particles = np.int64(1000)
+    
+    xp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+    yp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+    zp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+
+    xfield = np.zeros((num_particles))
+    yfield = np.zeros((num_particles))
+    zfield = np.zeros((num_particles))
+
+    dx = 1.
+    le = np.zeros((3))
+    
+    CICSample_3(xp,yp,zp,xfield,num_particles,grid["x"],
+                le,dims,dx)
+    CICSample_3(xp,yp,zp,yfield,num_particles,grid["y"],
+                le,dims,dx)
+    CICSample_3(xp,yp,zp,zfield,num_particles,grid["z"],
+                le,dims,dx)
+
+    assert_allclose(xp,xfield)
+    assert_allclose(yp,yfield)
+    assert_allclose(zp,zfield)


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -252,9 +252,10 @@
     @wraps(func)
     def root_only(*args, **kwargs):
         comm = _get_comm(args)
+        rv = None
         if comm.rank == 0:
             try:
-                func(*args, **kwargs)
+                rv = func(*args, **kwargs)
                 all_clear = 1
             except:
                 traceback.print_last()
@@ -263,6 +264,7 @@
             all_clear = None
         all_clear = comm.mpi_bcast(all_clear)
         if not all_clear: raise RuntimeError
+        return rv
     if parallel_capable: return root_only
     return func
 
@@ -290,13 +292,14 @@
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
-            print 'Not enough resources available', size, self.available_ranks
+            mylog.error('Not enough resources available, asked for %d have %d',
+                size, self.available_ranks)
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
         if name is None: 
-            name = string(len(self.workgroups))
+            name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
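
The parallel_analysis_interface change makes the root_only decorator hand the wrapped function's return value back to the caller on the root rank instead of silently discarding it (non-root ranks still receive None). A minimal serial sketch of the corrected pattern, with a stand-in communicator in place of the real MPI object:

    from functools import wraps

    class FakeComm(object):            # stand-in for the MPI communicator
        rank = 0
        def mpi_bcast(self, value):
            return value

    comm = FakeComm()

    def root_only(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            rv = None
            if comm.rank == 0:
                rv = func(*args, **kwargs)
                all_clear = 1
            else:
                all_clear = None
            all_clear = comm.mpi_bcast(all_clear)
            if not all_clear:
                raise RuntimeError("root task failed")
            return rv                  # previously this value was dropped
        return wrapper

    @root_only
    def answer():
        return 42

    assert answer() == 42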


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/particle_generator.py
--- /dev/null
+++ b/yt/utilities/particle_generator.py
@@ -0,0 +1,380 @@
+import numpy as np
+import h5py
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+class ParticleGenerator(object) :
+
+    default_fields = ["particle_position_x",
+                      "particle_position_y",
+                      "particle_position_z",
+                      "particle_index"]
+
+    def __init__(self, pf, num_particles, field_list) :
+        """
+        Base class for generating particle fields which may be applied to
+        streams. Normally this would not be called directly, since it doesn't
+        really do anything except allocate memory. Takes a *pf* to serve as the
+        basis for determining grids, the number of particles *num_particles*,
+        and a list of fields, *field_list*.
+        """
+        self.pf = pf
+        self.num_particles = num_particles
+        self.field_list = field_list
+            
+        try :
+            self.posx_index = self.field_list.index(self.default_fields[0])
+            self.posy_index = self.field_list.index(self.default_fields[1])
+            self.posz_index = self.field_list.index(self.default_fields[2])
+            self.index_index = self.field_list.index(self.default_fields[3])
+        except :
+            raise KeyError("Field list must contain the following fields: " +
+                           "\'particle_position_x\', \'particle_position_y\'" +
+                           ", \'particle_position_z\', \'particle_index\' ")
+
+        self.num_grids = self.pf.h.num_grids
+        self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
+        self.ParticleIndices = np.zeros(self.num_grids + 1, dtype='int64')
+        
+        self.num_fields = len(self.field_list)
+        
+        self.particles = np.zeros((self.num_particles, self.num_fields),
+                                  dtype='float64')
+
+    def has_key(self, key) :
+        """
+        Check to see if *key* is in the particle field list.
+        """
+        return (key in self.field_list)
+            
+    def keys(self) :
+        """
+        Return the list of particle fields.
+        """
+        return self.field_list
+    
+    def __getitem__(self, key) :
+        """
+        Get the field associated with key.
+        """
+        return self.particles[:,self.field_list.index(key)]
+    
+    def __setitem__(self, key, val) :
+        """
+        Sets a field to be some other value. Note that we assume
+        that the particles have been sorted by grid already, so
+        make sure the setting of the field is consistent with this.
+        """
+        self.particles[:,self.field_list.index(key)] = val[:]
+                
+    def __len__(self) :
+        """
+        The number of particles
+        """
+        return self.num_particles
+
+    def get_for_grid(self, grid) :
+        """
+        Return a dict containing all of the particle fields in the specified grid.
+        """
+        ind = grid.id-grid._id_offset
+        start = self.ParticleIndices[ind]
+        end = self.ParticleIndices[ind+1]
+        return dict([(field, self.particles[start:end,self.field_list.index(field)])
+                     for field in self.field_list])
+    
+    def _setup_particles(self,x,y,z,setup_fields=None) :
+        """
+        Assigns grids to particles and sets up particle positions. *setup_fields* is
+        a dict of fields other than the particle positions to set up. 
+        """
+        particle_grids, particle_grid_inds = self.pf.h.find_points(x,y,z)
+        idxs = np.argsort(particle_grid_inds)
+        self.particles[:,self.posx_index] = x[idxs]
+        self.particles[:,self.posy_index] = y[idxs]
+        self.particles[:,self.posz_index] = z[idxs]
+        self.NumberOfParticles = np.bincount(particle_grid_inds,
+                                             minlength=self.num_grids)
+        if self.num_grids > 1 :
+            np.add.accumulate(self.NumberOfParticles.squeeze(),
+                              out=self.ParticleIndices[1:])
+        else :
+            self.ParticleIndices[1] = self.NumberOfParticles.squeeze()
+        if setup_fields is not None:
+            for key, value in setup_fields.items():
+                if key not in self.default_fields:
+                    self.particles[:,self.field_list.index(key)] = value[idxs]
+    
+    def assign_indices(self, function=None, **kwargs) :
+        """
+        Assign unique indices to the particles. The default is to just use
+        numpy.arange, but any function may be supplied with keyword arguments.
+        """
+        if function is None :
+            self.particles[:,self.index_index] = np.arange((self.num_particles))
+        else :
+            self.particles[:,self.index_index] = function(**kwargs)
+            
+    def map_grid_fields_to_particles(self, mapping_dict) :
+        r"""
+        For the fields in  *mapping_dict*, map grid fields to the particles
+        using CIC sampling.
+
+        Examples
+        --------
+        >>> field_map = {'Density':'particle_density',
+        >>>              'Temperature':'particle_temperature'}
+        >>> particles.map_grid_fields_to_particles(field_map)
+        """
+        pbar = get_pbar("Mapping fields to particles", self.num_grids)
+        for i, grid in enumerate(self.pf.h.grids) :
+            pbar.update(i)
+            if self.NumberOfParticles[i] > 0:
+                start = self.ParticleIndices[i]
+                end = self.ParticleIndices[i+1]
+                # Note we add one ghost zone to the grid!
+                cube = grid.retrieve_ghost_zones(1, mapping_dict.keys())
+                le = np.array(grid.LeftEdge).astype(np.float64)
+                dims = np.array(grid.ActiveDimensions).astype(np.int32)
+                for gfield, pfield in mapping_dict.items() :
+                    field_index = self.field_list.index(pfield)
+                    CICSample_3(self.particles[start:end,self.posx_index],
+                                self.particles[start:end,self.posy_index],
+                                self.particles[start:end,self.posz_index],
+                                self.particles[start:end,field_index],
+                                np.int64(self.NumberOfParticles[i]),
+                                cube[gfield], le, dims,
+                                np.float64(grid['dx']))
+        pbar.finish()
+
+    def apply_to_stream(self, clobber=False) :
+        """
+        Apply the particles to a stream parameter file. If particles already exist
+        and clobber=False, do not overwrite them; instead, add the new ones to them.
+        """
+        grid_data = []
+        for i,g in enumerate(self.pf.h.grids) :
+            data = {}
+            if clobber :
+                data["number_of_particles"] = self.NumberOfParticles[i]
+            else :
+                data["number_of_particles"] = self.NumberOfParticles[i] + \
+                                              g.NumberOfParticles
+            grid_particles = self.get_for_grid(g)
+            for field in self.field_list :
+                if data["number_of_particles"] > 0 :
+                    # We have particles in this grid
+                    if g.NumberOfParticles > 0 and not clobber:
+                        # Particles already exist
+                        if field in self.pf.h.field_list :
+                            # This field already exists
+                            prev_particles = g[field]
+                        else :
+                            # This one doesn't, set the previous particles' field
+                            # values to zero
+                            prev_particles = np.zeros((g.NumberOfParticles))
+                        data[field] = np.concatenate((prev_particles,
+                                                      grid_particles[field]))
+                    else :
+                        # Particles do not already exist or we're clobbering
+                        data[field] = grid_particles[field]
+                else :
+                    # We don't have particles in this grid
+                    data[field] = np.array([], dtype='float64')
+            grid_data.append(data)
+        self.pf.h.update_data(grid_data)
+
+class FromListParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, num_particles, data) :
+        r"""
+        Generate particle fields from array-like lists contained in a dict.
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        num_particles : int
+            The number of particles in the dict.
+        data : dict of NumPy arrays
+            The particle fields themselves.
+
+        Examples
+        --------
+        >>> num_p = 100000
+        >>> posx = np.random.random((num_p))
+        >>> posy = np.random.random((num_p))
+        >>> posz = np.random.random((num_p))
+        >>> mass = np.ones((num_p))
+        >>> data = {'particle_position_x': posx, 'particle_position_y': posy,
+        >>>         'particle_position_z': posz, 'particle_mass': mass}
+        >>> particles = FromListParticleGenerator(pf, num_p, data)
+        """
+
+        field_list = data.keys()
+        x = data.pop("particle_position_x")
+        y = data.pop("particle_position_y")
+        z = data.pop("particle_position_z")
+
+        xcond = np.logical_or(x < pf.domain_left_edge[0],
+                              x >= pf.domain_right_edge[0])
+        ycond = np.logical_or(y < pf.domain_left_edge[1],
+                              y >= pf.domain_right_edge[1])
+        zcond = np.logical_or(z < pf.domain_left_edge[2],
+                              z >= pf.domain_right_edge[2])
+        cond = np.logical_or(xcond, ycond)
+        cond = np.logical_or(zcond, cond)
+
+        if np.any(cond) :
+            raise ValueError("Some particles are outside of the domain!!!")
+
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+        self._setup_particles(x,y,z,setup_fields=data)
+        
+class LatticeParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, particles_dims, particles_left_edge,
+                 particles_right_edge, field_list) :
+        r"""
+        Generate particles in a lattice arrangement. 
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        particles_dims : int, array-like 
+            The number of particles along each dimension
+        particles_left_edge : float, array-like
+            The 'left-most' starting positions of the lattice.
+        particles_right_edge : float, array-like
+             The 'right-most' ending positions of the lattice.
+        field_list : list of strings
+             A list of particle fields
+             
+        Examples
+        --------
+        >>> dims = (128,128,128)
+        >>> le = np.array([0.25,0.25,0.25])
+        >>> re = np.array([0.75,0.75,0.75])
+        >>> fields = ["particle_position_x","particle_position_y",
+        >>>           "particle_position_z","particle_index",
+        >>>           "particle_density","particle_temperature"]
+        >>> particles = LatticeParticleGenerator(pf, dims, le, re, fields)
+        """
+
+        num_x = particles_dims[0]
+        num_y = particles_dims[1]
+        num_z = particles_dims[2]
+        xmin = particles_left_edge[0]
+        ymin = particles_left_edge[1]
+        zmin = particles_left_edge[2]
+        xmax = particles_right_edge[0]
+        ymax = particles_right_edge[1]
+        zmax = particles_right_edge[2]
+
+        xcond = (xmin < pf.domain_left_edge[0]) or \
+                (xmax >= pf.domain_right_edge[0])
+        ycond = (ymin < pf.domain_left_edge[1]) or \
+                (ymax >= pf.domain_right_edge[1])
+        zcond = (zmin < pf.domain_left_edge[2]) or \
+                (zmax >= pf.domain_right_edge[2])
+        cond = xcond or ycond or zcond
+
+        if cond :
+            raise ValueError("Proposed bounds for particles are outside domain!!!")
+
+        ParticleGenerator.__init__(self, pf, num_x*num_y*num_z, field_list)
+
+        dx = (xmax-xmin)/(num_x-1)
+        dy = (ymax-ymin)/(num_y-1)
+        dz = (zmax-zmin)/(num_z-1)
+        inds = np.indices((num_x,num_y,num_z))
+        xpos = inds[0]*dx + xmin
+        ypos = inds[1]*dy + ymin
+        zpos = inds[2]*dz + zmin
+        
+        self._setup_particles(xpos.flat[:], ypos.flat[:], zpos.flat[:])
+        
+class WithDensityParticleGenerator(ParticleGenerator) :
+
+    def __init__(self, pf, data_source, num_particles, field_list,
+                 density_field="Density") :
+        r"""
+        Generate particles based on a density field.
+
+        Parameters
+        ----------
+        pf : `StaticOutput`
+            The parameter file which will serve as the base for these particles.
+        data_source : `yt.data_objects.api.AMRData`
+            The data source containing the density field.
+        num_particles : int
+            The number of particles to be generated
+        field_list : list of strings
+            A list of particle fields
+        density_field : string, optional
+            A density field which will serve as the distribution function for the
+            particle positions. Theoretically, this could be any 'per-volume' field. 
+            
+        Examples
+        --------
+        >>> sphere = pf.h.sphere(pf.domain_center, 0.5)
+        >>> num_p = 100000
+        >>> fields = ["particle_position_x","particle_position_y",
+        >>>           "particle_position_z","particle_index",
+        >>>           "particle_density","particle_temperature"]
+        >>> particles = WithDensityParticleGenerator(pf, sphere, num_p,
+        >>>                                          fields, density_field='Dark_Matter_Density')
+        """
+
+        ParticleGenerator.__init__(self, pf, num_particles, field_list)
+
+        num_cells = len(data_source["x"].flat)
+        max_mass = (data_source[density_field]*
+                    data_source["CellVolume"]).max()
+        num_particles_left = num_particles
+        all_x = []
+        all_y = []
+        all_z = []
+        
+        pbar = get_pbar("Generating Particles", num_particles)
+        tot_num_accepted = int(0)
+        
+        while num_particles_left > 0:
+
+            m = np.random.uniform(high=1.01*max_mass,
+                                  size=num_particles_left)
+            idxs = np.random.random_integers(low=0, high=num_cells-1,
+                                             size=num_particles_left)
+            m_true = (data_source[density_field]*
+                      data_source["CellVolume"]).flat[idxs]
+            accept = m <= m_true
+            num_accepted = accept.sum()
+            accepted_idxs = idxs[accept]
+            
+            xpos = data_source["x"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dx"].flat[accepted_idxs]
+            ypos = data_source["y"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dy"].flat[accepted_idxs]
+            zpos = data_source["z"].flat[accepted_idxs] + \
+                   np.random.uniform(low=-0.5, high=0.5, size=num_accepted) * \
+                   data_source["dz"].flat[accepted_idxs]
+
+            all_x.append(xpos)
+            all_y.append(ypos)
+            all_z.append(zpos)
+
+            num_particles_left -= num_accepted
+            tot_num_accepted += num_accepted
+            pbar.update(tot_num_accepted)
+
+        pbar.finish()
+
+        x = np.concatenate(all_x)
+        y = np.concatenate(all_y)
+        z = np.concatenate(all_z)
+
+        self._setup_particles(x,y,z)
+        


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -1,25 +1,31 @@
 #
 # Physical Constants and Units Conversion Factors
 #
+# Values for these constants are drawn from IAU and IUPAC data 
+# unless otherwise noted:
+# http://maia.usno.navy.mil/NSFA/IAU2009_consts.html
+# http://goldbook.iupac.org/list_goldbook_phys_constants_defs.html
 
 # Masses
-mass_hydrogen_cgs = 1.67e-24  # g
-mass_electron_cgs = 9.11e-28  # g
-amu_cgs           = 1.66053886e-24  # g
-mass_sun_cgs = 1.9891e33  # g
+mass_hydrogen_cgs = 1.674534e-24  # g
+mass_electron_cgs = 9.1093898e-28  # g
+amu_cgs           = 1.6605402e-24  # g
+mass_sun_cgs = 1.98841586e33  # g
 # Velocities
 speed_of_light_cgs = 2.99792458e10  # cm/s, exact
 
 # Cross Sections
-cross_section_thompson_cgs = 6.65e-25  # cm^2
+# 8*pi/3 (alpha*hbar*c/(2*pi))**2
+cross_section_thompson_cgs = 6.65245854533e-25  # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-10  # esu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.8032056e-10  # esu = 1.602176487e-19  Coulombs
 
 # Physical Constants
 boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
+stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
 rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
@@ -31,16 +37,16 @@
 mpc_per_mpc   = 1e0
 mpc_per_kpc   = 1e-3
 mpc_per_pc    = 1e-6
-mpc_per_au    = 4.847e-12
-mpc_per_rsun  = 2.253e-14
-mpc_per_miles = 5.216e-20
-mpc_per_cm    = 3.24e-25
+mpc_per_au    = 4.84813682e-12
+mpc_per_rsun  = 2.253962e-14
+mpc_per_miles = 5.21552871e-20
+mpc_per_cm    = 3.24077929e-25
 km_per_pc     = 1.3806504e13
 km_per_m      = 1e-3
 km_per_cm     = 1e-5
-pc_per_cm     = 3.24e-19
+pc_per_cm     = 3.24077929e-19
 
-m_per_fpc     = 0.0324077649
+m_per_fpc     = 0.0324077929
 
 kpc_per_mpc   = 1.0 / mpc_per_kpc
 pc_per_mpc    = 1.0 / mpc_per_pc


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/utilities/tests/test_particle_generator.py
--- /dev/null
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -0,0 +1,105 @@
+import numpy as np
+from yt.testing import *
+from yt.utilities.particle_generator import *
+from yt.frontends.stream.api import load_uniform_grid, refine_amr
+import yt.utilities.initial_conditions as ic
+import yt.utilities.flagging_methods as fm
+from IPython import embed
+
+def setup() :
+    pass
+
+def test_particle_generator() :
+    
+    # First generate our pf
+    domain_dims = (128, 128, 128)
+    dens = np.zeros(domain_dims) + 0.1
+    temp = 4.*np.ones(domain_dims)
+    fields = {"Density": dens, "Temperature": temp}
+    ug = load_uniform_grid(fields, domain_dims, 1.0)
+    fo = [ic.BetaModelSphere(1.0,0.1,0.5,[0.5,0.5,0.5],{"Density":(10.0)})]
+    rc = [fm.flagging_method_registry["overdensity"](4.0)]
+    pf = refine_amr(ug, rc, fo, 3)
+
+    # Now generate particles from density
+
+    field_list = ["particle_position_x","particle_position_y",
+                  "particle_position_z","particle_index",
+                  "particle_gas_density"]
+    num_particles = 1000000
+    field_dict = {"Density": "particle_gas_density"}
+    sphere = pf.h.sphere(pf.domain_center, 0.45)
+
+    particles1 = WithDensityParticleGenerator(pf, sphere, num_particles, field_list)
+    particles1.assign_indices()
+    particles1.map_grid_fields_to_particles(field_dict)
+    
+    # Test to make sure we ended up with the right number of particles per grid
+    particles1.apply_to_stream()
+    particles_per_grid1 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
+    particles_per_grid1 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid1, particles1.NumberOfParticles)
+
+    # Set up a lattice of particles
+    pdims = np.array([64,64,64])
+    def new_indices() :
+        # We just add new indices onto the existing ones
+        return np.arange((np.product(pdims)))+num_particles
+    le = np.array([0.25,0.25,0.25])
+    re = np.array([0.75,0.75,0.75])
+    new_field_list = field_list + ["particle_gas_temperature"]
+    new_field_dict = {"Density": "particle_gas_density",
+                      "Temperature": "particle_gas_temperature"}
+
+    particles2 = LatticeParticleGenerator(pf, pdims, le, re, new_field_list)
+    particles2.assign_indices(function=new_indices)
+    particles2.map_grid_fields_to_particles(new_field_dict)
+
+    #Test lattice positions
+    xpos = np.unique(particles2["particle_position_x"])
+    ypos = np.unique(particles2["particle_position_y"])
+    zpos = np.unique(particles2["particle_position_z"])
+
+    xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
+    ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
+    zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
+
+    assert_almost_equal(xpos, xpred)
+    assert_almost_equal(ypos, ypred)
+    assert_almost_equal(zpos, zpred)
+
+    #Test the number of particles again
+    particles2.apply_to_stream()
+    particles_per_grid2 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    particles_per_grid2 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles)
+
+    #Test the uniqueness of tags
+    tags = np.concatenate([grid["particle_index"] for grid in pf.h.grids])
+    tags.sort()
+    assert_equal(tags, np.arange((np.product(pdims)+num_particles)))
+
+    # Test that the old particles have zero for the new field
+    old_particle_temps = [grid["particle_gas_temperature"][:particles_per_grid1[i]]
+                          for i, grid in enumerate(pf.h.grids)]
+    test_zeros = [np.zeros((particles_per_grid1[i])) 
+                  for i, grid in enumerate(pf.h.grids)]
+    assert_equal(old_particle_temps, test_zeros)
+
+    #Now dump all of these particle fields out into a dict
+    pdata = {}
+    dd = pf.h.all_data()
+    for field in new_field_list :
+        pdata[field] = dd[field]
+
+    #Test the "from-list" generator and particle field clobber
+    particles3 = FromListParticleGenerator(pf, num_particles+np.product(pdims), pdata)
+    particles3.apply_to_stream(clobber=True)
+    
+    #Test the number of particles again
+    particles_per_grid3 = [grid.NumberOfParticles for grid in pf.h.grids]
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)
+    particles_per_grid2 = [len(grid["particle_position_z"]) for grid in pf.h.grids]
+    assert_equal(particles_per_grid3, particles1.NumberOfParticles+particles2.NumberOfParticles)


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -420,7 +420,7 @@
     else:
         dpi = None
 
-    suffix = os.path.splitext(filename)[1]
+    suffix = get_image_suffix(filename)
 
     if suffix == '':
         suffix = '.png'


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -307,14 +307,17 @@
 
 class GridBoundaryCallback(PlotCallback):
     _type_name = "grids"
-    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True):
+    def __init__(self, alpha=1.0, min_pix=1, min_pix_ids=20, draw_ids=False, periodic=True, 
+                 min_level=None, max_level=None):
         """
         annotate_grids(alpha=1.0, min_pix=1, draw_ids=False, periodic=True)
 
         Adds grid boundaries to a plot, optionally with *alpha*-blending.
         Cuttoff for display is at *min_pix* wide.
         *draw_ids* puts the grid id in the corner of the grid.  (Not so great in projections...)
-        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.
+        Grids must be wider than *min_pix_ids* otherwise the ID will not be drawn.  If *min_level* 
+        is specified, only draw grids at or above min_level.  If *max_level* is specified, only 
+        draw grids at or below max_level.
         """
         PlotCallback.__init__(self)
         self.alpha = alpha
@@ -322,6 +325,8 @@
         self.min_pix_ids = min_pix_ids
         self.draw_ids = draw_ids # put grid numbers in the corner.
         self.periodic = periodic
+        self.min_level = min_level
+        self.max_level = max_level
 
     def __call__(self, plot):
         x0, x1 = plot.xlim
@@ -341,6 +346,14 @@
             pxs, pys = np.mgrid[0:0:1j,0:0:1j]
         GLE = plot.data.grid_left_edge
         GRE = plot.data.grid_right_edge
+        grid_levels = plot.data.grid_levels
+        min_level = self.min_level
+        max_level = self.max_level
+        if min_level is None:
+            min_level = 0
+        if max_level is None:
+            max_level = plot.data.pf.h.max_level
+
         for px_off, py_off in zip(pxs.ravel(), pys.ravel()):
             pxo = px_off * dom[px_index]
             pyo = py_off * dom[py_index]
@@ -349,7 +362,9 @@
             right_edge_x = (GRE[:,px_index]+pxo-x0)*dx + xx0
             right_edge_y = (GRE[:,py_index]+pyo-y0)*dy + yy0
             visible =  ( xpix * (right_edge_x - left_edge_x) / (xx1 - xx0) > self.min_pix ) & \
-                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix )
+                       ( ypix * (right_edge_y - left_edge_y) / (yy1 - yy0) > self.min_pix ) & \
+                       ( grid_levels >= min_level) & \
+                       ( grid_levels <= max_level)
             if visible.nonzero()[0].size == 0: continue
             verts = np.array(
                 [(left_edge_x, left_edge_x, right_edge_x, right_edge_x),
@@ -643,24 +658,25 @@
 
         xf = axis_names[px_index]
         yf = axis_names[py_index]
+        dxf = "d%s" % xf
+        dyf = "d%s" % yf
 
         DomainRight = plot.data.pf.domain_right_edge
         DomainLeft = plot.data.pf.domain_left_edge
         DomainWidth = DomainRight - DomainLeft
-        
+
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
             mylog.debug("Pixelizing contour %s", i)
 
-
             xf_copy = clump[xf].copy()
             yf_copy = clump[yf].copy()
-            
-            temp = _MPL.Pixelize(xf_copy, yf_copy, 
-                                 clump['dx']/2.0,
-                                 clump['dy']/2.0,
-                                 clump['dx']*0.0+i+1, # inits inside Pixelize
+
+            temp = _MPL.Pixelize(xf_copy, yf_copy,
+                                 clump[dxf]/2.0,
+                                 clump[dyf]/2.0,
+                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
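
The plot_modifications change adds min_level and max_level keywords to the grid-boundary callback so that only grids within a chosen refinement range are outlined. A hedged usage example, assuming a loaded parameter file pf and the SlicePlot interface (field name and level values are placeholders):

    from yt.mods import SlicePlot

    p = SlicePlot(pf, "x", "Density")
    # Outline only the grids between refinement levels 2 and 4.
    p.annotate_grids(alpha=0.5, min_level=2, max_level=4)
    p.save("grids_levels_2_to_4.png")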


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -146,53 +146,35 @@
 log_transform = FieldTransform('log10', np.log10, LogLocator())
 linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
 
-def GetBoundsAndCenter(axis, center, width, pf, unit='1'):
-    if width == None:
-        width = (pf.domain_width[x_dict[axis]],
-                 pf.domain_width[y_dict[axis]])
+def StandardWidth(axis, width, depth, pf):
+    if width is None:
+        # Default to code units
+        if not iterable(axis):
+            width = ((pf.domain_width[x_dict[axis]], '1'),
+                     (pf.domain_width[y_dict[axis]], '1'))
+        else:
+            # axis is actually the normal vector
+            # for an off-axis data object.
+            width = ((pf.domain_width.min(), '1'),
+                     (pf.domain_width.min(), '1'))
     elif iterable(width): 
-        if isinstance(width[1],str):
-            w,unit = width
-            width = (w, w)
-        elif isinstance(width[1],tuple):
-            wx,unitx = width[0]
-            wy,unity = width[1]
-            width = (wx/pf[unitx],wy/pf[unity])
+        if isinstance(width[1], str):
+            width = (width, width)
+        elif isinstance(width[1], tuple):
+            pass
     else:
-        width = (width, width)
-    Wx, Wy = width
-    width = (Wx/pf[unit], Wy/pf[unit])
-    if isinstance(center,str):
-        if center.lower() == 'm' or center.lower() == 'max':
-            v, center = pf.h.find_max("Density")
-        elif center.lower() == "center" or center.lower() == "c":
-            center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
-        else:
-            raise RuntimeError('center keyword \"%s\" not recognized'%center)
-    bounds = [center[x_dict[axis]]-width[0]/2,
-              center[x_dict[axis]]+width[0]/2,
-              center[y_dict[axis]]-width[1]/2,
-              center[y_dict[axis]]+width[1]/2]
-    return (bounds,center)
-
-def GetOffAxisBoundsAndCenter(normal, center, width, pf, unit='1',depth=None):
-    if width == None:
-        width = (pf.domain_width.min(),
-                 pf.domain_width.min())
-    elif iterable(width) and isinstance(width[1],str):
-        w,unit = width
-        width = w
-    if not iterable(width):
-        width = (width, width)
-    Wx, Wy = width
-    width = np.array((Wx/pf[unit], Wy/pf[unit]))
-    if depth != None:
-        if iterable(depth) and isinstance(depth[1],str):
-            d,unit = depth
-            depth = d/pf[unit]
+        width = ((width, '1'), (width, '1'))
+    if depth is not None:
+        if iterable(depth) and isinstance(depth[1], str):
+            depth = (depth,)
         elif iterable(depth):
             raise RuntimeError("Depth must be a float or a (width,\"unit\") tuple")
-        width = np.append(width,depth)
+        else:
+            depth = ((depth, '1'),)
+        width += depth
+    return width
+
+def StandardCenter(center, pf):
     if isinstance(center,str):
         if center.lower() == 'm' or center.lower() == 'max':
             v, center = pf.h.find_max("Density")
@@ -200,21 +182,39 @@
             center = (pf.domain_left_edge + pf.domain_right_edge) / 2
         else:
             raise RuntimeError('center keyword \"%s\" not recognized'%center)
+    return center
 
-    if width.shape == (2,):
+def GetWindowParameters(axis, center, width, pf):
+    width = StandardWidth(axis, width, None, pf)
+    center = StandardCenter(center, pf)
+    units = (width[0][1], width[1][1])
+    bounds = (center[x_dict[axis]]-width[0][0]/pf[units[0]]/2,  
+              center[x_dict[axis]]+width[0][0]/pf[units[0]]/2, 
+              center[y_dict[axis]]-width[1][0]/pf[units[1]]/2, 
+              center[y_dict[axis]]+width[1][0]/pf[units[1]]/2)
+    return (bounds, center, units)
+
+def GetObliqueWindowParameters(normal, center, width, pf, depth=None):
+    width = StandardWidth(normal, width, depth, pf)
+    center = StandardCenter(center, pf)
+
+    if len(width) == 2:
         # Transforming to the cutting plane coordinate system
         center = np.array(center)
         center = (center - pf.domain_left_edge)/pf.domain_width - 0.5
         (normal,perp1,perp2) = ortho_find(normal)
         mat = np.transpose(np.column_stack((perp1,perp2,normal)))
         center = np.dot(mat,center)
-        width = width
     
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2]
+        units = (width[0][1], width[1][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2)
     else:
-        bounds = [-width[0]/2, width[0]/2, -width[1]/2, width[1]/2, -width[2]/2, width[2]/2]
-
-    return (bounds,center)
+        units = (width[0][1], width[1][1], width[2][1])
+        bounds = (-width[0][0]/pf[units[0]]/2, width[0][0]/pf[units[0]]/2, 
+                  -width[1][0]/pf[units[1]]/2, width[1][0]/pf[units[1]]/2, 
+                  -width[2][0]/pf[units[2]]/2, width[2][0]/pf[units[2]]/2)
+    return (bounds, center, units)
 
 class PlotWindow(object):
     _plot_valid = False
@@ -264,7 +264,7 @@
         self.oblique = oblique
         self.data_source = data_source
         self.buff_size = buff_size
-        self.antialias = True
+        self.antialias = antialias
         self.set_window(bounds) # this automatically updates the data and plot
         self.origin = origin
         self.fontsize = fontsize
@@ -435,42 +435,43 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  the width of the image.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will  
+             use the supplied units.
         unit : str
-            the unit the width has been specified in.
-            defaults to code units.  If width is a tuple this 
-            argument is ignored
+             the unit the width has been specified in.
+             defaults to code units.  If width is a tuple this 
+             argument is ignored
 
         """
-        if iterable(width): 
-            if isinstance(width[1],str):
-                w, unit = width
-                width = (w, w)
-            elif isinstance(width[1], tuple):
-                wx,unitx = width[0]
-                wy,unity = width[1]
-                width = (wx/self.pf[unitx],wy/self.pf[unity])
+        if width is not None:
+            set_axes_unit = True
         else:
-            width = (width, width)
-        Wx, Wy = width
-        width = (Wx,Wy)
-        width = [w / self.pf[unit] for w in width]
+            set_axes_unit = False
+
+        width = StandardWidth(self._frb.axis, width, None, self.pf)
 
         centerx = (self.xlim[1] + self.xlim[0])/2.
         centery = (self.ylim[1] + self.ylim[0])/2. 
         
-        self.xlim = (centerx - width[0]/2.,
-                     centerx + width[0]/2.)
-        self.ylim = (centery - width[1]/2.,
-                     centery + width[1]/2.)
+        units = (width[0][1], width[1][1])
+
+        if set_axes_unit:
+            self._axes_unit_names = units
+        else:
+            self._axes_unit_names = None
+
+        self.xlim = (centerx - width[0][0]/self.pf[units[0]]/2.,
+                     centerx + width[0][0]/self.pf[units[0]]/2.)
+        self.ylim = (centery - width[1][0]/self.pf[units[1]]/2.,
+                     centery + width[1][0]/self.pf[units[1]]/2.)
         
         if hasattr(self,'zlim'):
             centerz = (self.zlim[1] + self.zlim[0])/2.
-            mw = max(width)
+            mw = max([width[0][0], width[1][0]])
             self.zlim = (centerz - mw/2.,
                          centerz + mw/2.)
-        
+
     @invalidate_data
     def set_center(self, new_center, unit = '1'):
         """Sets a new center for the plot window
@@ -527,7 +528,7 @@
     def __init__(self, *args,**kwargs):
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
-        self._unit = None
+        self._axes_unit_names = None
         self._callbacks = []
         self._field_transform = {}
         self._colormaps = defaultdict(lambda: 'algae')
@@ -654,12 +655,14 @@
 
         Parameters
         ----------
-        unit_name : string
+        unit_name : string or two element tuple of strings
             A unit, available for conversion in the parameter file, that the
             image extents will be displayed in.  If set to None, any previous
             units will be reset.  If the unit is None, the default is chosen.
             If unit_name is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+            units, and only show the axes name. If unit_name is a tuple, the first
+            element is assumed to be the unit for the x axis and the second element
+            the unit for the y axis.
 
         Raises
         ------
@@ -677,12 +680,13 @@
         >>> p.show()
         """
         # blind except because it could be in conversion_factors or units
-        try:
-            self.pf[unit_name]
-        except KeyError: 
-            if unit_name is not None:
-                raise YTUnitNotRecognized(unit_name)
-        self._unit = unit_name
+        if unit_name is not None:
+            for un in unit_name:
+                try:
+                    self.pf[un]
+                except KeyError: 
+                    raise YTUnitNotRecognized(un)
+        self._axes_unit_names = unit_name
 
     def get_metadata(self, field, strip_mathml = True, return_string = True):
         fval = self._frb[field]
@@ -690,10 +694,11 @@
         ma = fval.max()
         x_width = self.xlim[1] - self.xlim[0]
         y_width = self.ylim[1] - self.ylim[0]
-        if self._unit is None:
+        if self._axes_unit_names is None:
             unit = get_smallest_appropriate_unit(x_width, self.pf)
+            unit = (unit, unit)
         else:
-            unit = self._unit
+            unit = self._axes_unit_names
         units = self.get_field_units(field, strip_mathml)
         center = getattr(self._frb.data_source, "center", None)
         if center is None or self._frb.axis == 4:
@@ -707,16 +712,16 @@
         if return_string:
             md = _metadata_template % dict(
                 pf = self.pf,
-                x_width = x_width*self.pf[unit],
-                y_width = y_width*self.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma,
-                xc = xc, yc = yc, zc = zc)
+                x_width = x_width*self.pf[unit[0]],
+                y_width = y_width*self.pf[unit[1]],
+                axes_unit_names = unit[0], colorbar_unit = units, 
+                mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         else:
             md = dict(pf = self.pf,
-                      x_width = x_width*self.pf[unit],
-                      y_width = y_width*self.pf[unit],
-                      unit = unit, units = units, mi = mi, ma = ma,
-                      xc = xc, yc = yc, zc = zc)
+                      x_width = x_width*self.pf[unit[0]],
+                      y_width = y_width*self.pf[unit[1]],
+                      axes_unit_names = unit, colorbar_unit = units, 
+                      mi = mi, ma = ma, xc = xc, yc = yc, zc = zc)
         return md
 
     def get_field_units(self, field, strip_mathml = True):
@@ -745,9 +750,9 @@
     _plot_type = None
 
     def __init__(self, *args, **kwargs):
-        if self._frb_generator == None:
+        if self._frb_generator is None:
             self._frb_generator = kwargs.pop("frb_generator")
-        if self._plot_type == None:
+        if self._plot_type is None:
             self._plot_type = kwargs.pop("plot_type")
         PWViewer.__init__(self, *args, **kwargs)
 
@@ -776,42 +781,49 @@
                 raise RuntimeError(
                     'origin keyword: \"%(k)s\" not recognized' % {'k': self.origin})
 
-            extent = [self.xlim[i] - xc for i in (0,1)]
-            extent.extend([self.ylim[i] - yc for i in (0,1)])
-            extent = [el*self.pf[md['unit']] for el in extent]
+            (unit_x, unit_y) = md['axes_unit_names']
+
+            extentx = [(self.xlim[i] - xc) * self.pf[unit_x] for i in (0,1)]
+            extenty = [(self.ylim[i] - yc) * self.pf[unit_y] for i in (0,1)]
+
+            extent = extentx + extenty
 
             if f in self.plots.keys():
-                zlim = (self.plots[f].zmin,self.plots[f].zmax)
+                zlim = (self.plots[f].zmin, self.plots[f].zmax)
             else:
-                zlim = (None,None)
+                zlim = (None, None)
 
-            aspect = (self.xlim[1] - self.xlim[0])/(self.ylim[1]-self.ylim[0])
-
+            plot_aspect = (self.xlim[1] - self.xlim[0]) / (self.ylim[1] - self.ylim[0])
+            
             # This sets the size of the figure, and defaults to making one of the dimensions smaller.
             # This should protect against giant images in the case of a very large aspect ratio.
             norm_size = 10.0
             cbar_frac = 0.0
-            if aspect > 1.0:
-                size = (norm_size*(1.+cbar_frac), norm_size/aspect)
+            if plot_aspect > 1.0:
+                size = (norm_size*(1.+cbar_frac), norm_size/plot_aspect)
             else:
-                size = (aspect*norm_size*(1.+cbar_frac), norm_size)
+                size = (plot_aspect*norm_size*(1.+cbar_frac), norm_size)
 
-            self.plots[f] = WindowPlotMPL(self._frb[f], extent, self._field_transform[f], 
+            # Correct the aspect ratio in case unit_x and unit_y are different
+            aspect = self.pf[unit_x]/self.pf[unit_y]
+            
+            self.plots[f] = WindowPlotMPL(self._frb[f], extent, aspect, self._field_transform[f], 
                                           self._colormaps[f], size, zlim)
+
             self.plots[f].cb = self.plots[f].figure.colorbar(
                 self.plots[f].image, cax = self.plots[f].cax)
 
-            if not md['unit'] in ['1', 'u', 'unitary']:
-                axes_unit_label = '\/\/('+md['unit']+')'
+            axes_unit_labels = ['', '']
+            for i, un in enumerate((unit_x, unit_y)):
+                if un not in ['1', 'u', 'unitary']:
+                    axes_unit_labels[i] = '\/\/('+un+')'
+                    
+            if self.oblique:
+                labels = [r'$\rm{Image\/x'+axes_unit_labels[0]+'}$',
+                          r'$\rm{Image\/y'+axes_unit_labels[1]+'}$']
             else:
-                axes_unit_label = ''
-
-            if self.oblique == False:
                 labels = [r'$\rm{'+axis_labels[axis_index][i]+
-                        axes_unit_label + r'}$' for i in (0,1)]
-            else:
-                labels = [r'$\rm{Image\/x'+axes_unit_label+'}$',
-                          r'$\rm{Image\/y'+axes_unit_label+'}$']
+                          axes_unit_labels[i] + r'}$' for i in (0,1)]
 
             self.plots[f].axes.set_xlabel(labels[0],fontsize=self.fontsize)
             self.plots[f].axes.set_ylabel(labels[1],fontsize=self.fontsize)
@@ -831,14 +843,14 @@
             except ParseFatalException, err:
                 raise YTCannotParseFieldDisplayName(f,field_name,str(err))
 
-            if md['units'] == None or md['units'] == '':
+            if md['colorbar_unit'] is None or md['colorbar_unit'] == '':
                 label = field_name
             else:
                 try:
-                    parser.parse(r'$'+md['units']+r'$')
+                    parser.parse(r'$'+md['colorbar_unit']+r'$')
                 except ParseFatalException, err:
-                    raise YTCannotParseUnitDisplayName(f, md['units'],str(err))
-                label = field_name+r'$\/\/('+md['units']+r')$'
+                    raise YTCannotParseUnitDisplayName(f, md['colorbar_unit'],str(err))
+                label = field_name+r'$\/\/('+md['colorbar_unit']+r')$'
 
             self.plots[f].cb.set_label(label,fontsize=self.fontsize)
 
@@ -905,9 +917,9 @@
         """
         names = []
         if mpl_kwargs is None: mpl_kwargs = {}
-        if name == None:
+        if name is None:
             name = str(self.pf)
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         if suffix != '':
             for k, v in self.plots.iteritems():
                 names.append(v.save(name,mpl_kwargs))
@@ -1012,13 +1024,14 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.  
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : string
              The location of the origin of the plot coordinate system.
              Currently, can be set to three options: 'left-domain', corresponding
@@ -1038,13 +1051,15 @@
         >>> p.save('sliceplot')
         
         """
-        # tHis will handle time series data and controllers
+        # this will handle time series data and controllers
         ts = self._initialize_dataset(pf) 
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
-        slc = pf.h.slice(axis, center[axis], fields=fields)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        slc = pf.h.slice(axis, center[axis], center=center, fields=fields)
         PWViewerMPL.__init__(self, slc, bounds, origin=origin)
         self.set_axes_unit(axes_unit)
 
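With this change a width given in physical units also determines the axis tick labels:
GetWindowParameters now returns the width units, and they are handed to set_axes_unit
whenever axes_unit is not given explicitly. A minimal sketch of the resulting call, with
a hypothetical dataset path:

    from yt.mods import load, SlicePlot
    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    # 10 kpc along x and 15 kpc along y; tick labels will read in kpc
    p = SlicePlot(pf, 'x', 'Density', width=((10, 'kpc'), (15, 'kpc')))
    p.save()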
@@ -1096,23 +1111,24 @@
              wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window 
              that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along 
              the y axis.  In the other two examples, code units are assumed, for example
-             (0.2, 0.3) requests a plot that has and x width of 0.2 and a y width of 0.3 
-             in code units.
+             (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 
+             in code units.  If units are provided the resulting plot axis labels will 
+             use the supplied units.
         axes_unit : A string
-            The name of the unit for the tick labels on the x and y axes.  
-            Defaults to None, which automatically picks an appropriate unit.
-            If axes_unit is '1', 'u', or 'unitary', it will not display the 
-            units, and only show the axes name.
+             The name of the unit for the tick labels on the x and y axes.  
+             Defaults to None, which automatically picks an appropriate unit.
+             If axes_unit is '1', 'u', or 'unitary', it will not display the 
+             units, and only show the axes name.
         origin : A string
-            The location of the origin of the plot coordinate system.
-            Currently, can be set to three options: 'left-domain', corresponding
-            to the bottom-left hand corner of the simulation domain, 'center-domain',
-            corresponding the center of the simulation domain, or 'center-window' for 
-            the center of the plot window.
+             The location of the origin of the plot coordinate system.
+             Currently, can be set to three options: 'left-domain', corresponding
+             to the bottom-left hand corner of the simulation domain, 'center-domain',
+             corresponding to the center of the simulation domain, or 'center-window' for 
+             the center of the plot window.
         weight_field : string
-            The name of the weighting field.  Set to None for no weight.
+             The name of the weighting field.  Set to None for no weight.
         max_level: int
-            The maximum level to project to.
+             The maximum level to project to.
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         
@@ -1130,7 +1146,9 @@
         self.ts = ts
         pf = self.pf = ts[0]
         axis = fix_axis(axis)
-        (bounds, center) = GetBoundsAndCenter(axis, center, width, pf)
+        (bounds, center, units) = GetWindowParameters(axis, center, width, pf)
+        if axes_unit is None  and units != ('1', '1'):
+            axes_unit = units
         proj = pf.h.proj(axis,fields,weight_field=weight_field,max_level=max_level,center=center)
         PWViewerMPL.__init__(self,proj,bounds,origin=origin)
         self.set_axes_unit(axes_unit)
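
ProjectionPlot gets the same width-to-axes_unit handling. A sketch of a weighted
projection using the keywords documented above (dataset path again hypothetical):

    from yt.mods import load, ProjectionPlot
    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    # Density-weighted Temperature projection, 0.2 x 0.3 in code units
    p = ProjectionPlot(pf, 'z', 'Temperature', width=(0.2, 0.3),
                       weight_field='Density')
    p.save()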
@@ -1139,7 +1157,7 @@
     _plot_type = 'OffAxisSlice'
     _frb_generator = ObliqueFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
                  axes_unit=None, north_vector=None, fontsize=15):
         r"""Creates an off axis slice plot from a parameter file
 
@@ -1180,8 +1198,10 @@
         fontsize : integer
              The size of the fonts for the axis, colorbar, and tick labels.
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf)
-        cutting = pf.h.cutting(normal,center,fields=fields,north_vector=north_vector)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
+        cutting = pf.h.cutting(normal, center, fields=fields, north_vector=north_vector)
         # Hard-coding the origin keyword since the other two options
         # aren't well-defined for off-axis data objects
         PWViewerMPL.__init__(self,cutting,bounds,origin='center-window',periodic=False,oblique=True)
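
The off-axis plots now default to width=None and take their window parameters from
GetObliqueWindowParameters in the same way. A sketch mirroring the usage in the new
test module below, with an arbitrary cutting-plane normal:

    from yt.mods import load, OffAxisSlicePlot
    pf = load("DD0010/moving7_0010")   # hypothetical dataset
    L = [1, 1, 0]                      # normal vector of the cutting plane
    p = OffAxisSlicePlot(pf, L, 'Density', width=(1, 'unitary'))
    p.save()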
@@ -1214,8 +1234,8 @@
     _plot_type = 'OffAxisProjection'
     _frb_generator = OffAxisProjectionFixedResolutionBuffer
 
-    def __init__(self, pf, normal, fields, center='c', width=(1,'unitary'), 
-                 depth=(1,'unitary'), axes_unit=None, weight_field=None, 
+    def __init__(self, pf, normal, fields, center='c', width=None, 
+                 depth=(1, '1'), axes_unit=None, weight_field=None, 
                  max_level=None, north_vector=None, volume=None, no_ghost=False, 
                  le=None, re=None, interpolated=False, fontsize=15):
         r"""Creates an off axis projection plot from a parameter file
@@ -1264,8 +1284,9 @@
             set, an arbitrary grid-aligned north-vector is chosen.
 
         """
-        (bounds,center_rot) = GetOffAxisBoundsAndCenter(normal,center,width,pf,depth=depth)
-        # Hard-coding the resolution for now
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal,center,width,pf,depth=depth)
+        if axes_unit is None and units != ('1', '1', '1'):
+            axes_unit = units[:2]
         fields = ensure_list(fields)[:]
         width = np.array((bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]))
         OffAxisProj = OffAxisProjectionDummyDataSource(center_rot, pf, normal, width, fields, interpolated,
@@ -1279,9 +1300,9 @@
 _metadata_template = """
 %(pf)s<br><br>
-Field of View:  %(x_width)0.3f %(unit)s<br>
-Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s<br>
+Field of View:  %(x_width)0.3f %(axes_unit_names)s<br>
+Minimum Value:  %(mi)0.3e %(colorbar_unit)s<br>
+Maximum Value:  %(ma)0.3e %(colorbar_unit)s<br>
 Central Point:  (data coords)<br>
    %(xc)0.14f<br>
    %(yc)0.14f<br>
@@ -1473,7 +1494,7 @@
             self.cax = self.figure.add_axes(caxrect)
             
     def save(self, name, mpl_kwargs, canvas = None):
-        suffix = os.path.splitext(name)[1]
+        suffix = get_image_suffix(name)
         
         if suffix == '':
             suffix = '.png'
@@ -1539,17 +1560,17 @@
         return f.read()
 
 class WindowPlotMPL(PlotMPL):
-    def __init__(self, data, extent, field_transform, cmap, size, zlim):
+    def __init__(self, data, extent, aspect, field_transform, cmap, size, zlim):
         self.zmin, self.zmax = zlim
         PlotMPL.__init__(self, data, size)
-        self.__init_image(data, extent, field_transform, cmap)
+        self.__init_image(data, extent, aspect, field_transform, cmap)
 
-    def __init_image(self, data, extent, field_transform, cmap):
+    def __init_image(self, data, extent, aspect, field_transform, cmap):
         if (field_transform.name == 'log10'):
             norm = matplotlib.colors.LogNorm()
         elif (field_transform.name == 'linear'):
             norm = matplotlib.colors.Normalize()
-        self.image = self.axes.imshow(data, origin='lower', extent = extent,
-                                      norm = norm, vmin = self.zmin, 
-                                      vmax = self.zmax, cmap = cmap)
-        self.image.axes.ticklabel_format(scilimits=(-4,3))
+        self.image = self.axes.imshow(data, origin='lower', extent=extent,
+                                      norm=norm, vmin=self.zmin, aspect=aspect, 
+                                      vmax=self.zmax, cmap=cmap)
+        self.image.axes.ticklabel_format(scilimits=(-2,3))

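Taken together, the plot_window.py changes let both the window width and the axis-label
units be set per axis on an existing plot. A minimal sketch of the new interface,
reusing a plot object p from the examples above; note that set_axes_unit validates each
element of its argument against the parameter file, so the two-element tuple form is
what is exercised here:

    # resize the window: 10 kpc along x, 15 kpc along y
    p.set_width(((10, 'kpc'), (15, 'kpc')))
    # label the x axis in kpc and the y axis in pc
    p.set_axes_unit(('kpc', 'pc'))
    # None resets to the automatically chosen unit
    p.set_axes_unit(None)
    p.save()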

diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -36,7 +36,6 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, \
     BinnedProfile2D
-from .plot_types import ProfilePlot, PhasePlot
 from .tick_locators import LogLocator, LinearLocator
 from yt.utilities.logger import ytLogger as mylog
 


diff -r 53fa42c3dbc9c0228203900a1b1a169e732e9a8c -r d15ab61fb9efddea5e4cd117e590dd16303e5393 yt/visualization/tests/test_plotwindow.py
--- /dev/null
+++ b/yt/visualization/tests/test_plotwindow.py
@@ -0,0 +1,28 @@
+from yt.testing import *
+from yt.mods import SlicePlot, ProjectionPlot, \
+    OffAxisSlicePlot, OffAxisProjectionPlot
+import os
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
+def test_plotwindow():
+    pf = fake_random_pf(64)
+    fns = []
+    for dim in [0,1,2]:
+        slc = SlicePlot(pf, dim, 'Density')
+        fns.append(slc.save()[0])
+        prj = ProjectionPlot(pf, dim, 'Density')
+        fns.append(prj.save()[0])
+    normal = [1,1,1]
+    oaslc = OffAxisSlicePlot(pf, normal, 'Density')
+    fns.append(oaslc.save()[0])
+    oaprj = OffAxisProjectionPlot(pf, normal, 'Density')
+    fns.append(oaprj.save()[0])
+    teardown_func(fns)
+    

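The new test module follows the nose-style layout (a module-level setup() plus test
functions), so it can also be exercised directly from an interpreter; a small sketch:

    from yt.visualization.tests.test_plotwindow import setup, test_plotwindow
    setup()              # flags __withintesting in the yt config
    test_plotwindow()    # builds a fake 64^3 dataset and saves plots from each class

fake_random_pf(64) keeps the test self-contained, and teardown_func removes the image
files it writes.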




https://bitbucket.org/yt_analysis/yt/changeset/50879b511cd7/
changeset:   50879b511cd7
branch:      yt
user:        Andrew Myers
date:        2012-12-18 05:11:41
summary:     fixing a bug that broke writing to .yt files with chombo
affected #:  1 file

diff -r d15ab61fb9efddea5e4cd117e590dd16303e5393 -r 50879b511cd7f11b922adee215f85556bbf42b6f yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -106,7 +106,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
-        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.directory = pf.fullpath
         self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name

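The likely failure mode here: when a Chombo output is loaded by a bare relative
filename, os.path.dirname returns an empty string, so paths built from self.directory
(such as the serialized .yt file) come out malformed; pf.fullpath is assumed to always
be the absolute directory of the parameter file. A small illustration of the
difference, with a hypothetical filename:

    import os
    os.path.dirname("plt0010.hdf5")                    # -> ''
    os.path.dirname(os.path.abspath("plt0010.hdf5"))   # -> absolute path of the cwd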


https://bitbucket.org/yt_analysis/yt/changeset/8078b0962799/
changeset:   8078b0962799
branch:      yt
user:        MatthewTurk
date:        2012-12-18 05:17:48
summary:     Merged in atmyers/yt (pull request #378: Writing to .yt files with Chombo)
affected #:  2 files

diff -r ccd2b8a816a109272d884e8cfbee0a5dfd1761ab -r 8078b096279969875b4e4fd2fc0e263dbc9bdf77 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -106,7 +106,7 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
-        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.directory = pf.fullpath
         self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


