[yt-svn] commit/yt: 14 new changesets

Bitbucket commits-noreply at bitbucket.org
Sat Jun 23 05:29:32 PDT 2012


14 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/45444e96642a/
changeset:   45444e96642a
branch:      yt
user:        gsiisg
date:        2011-12-04 21:35:58
summary:     applied patch from worksforme.diff
affected #:  2 files

diff -r 01838e47eca2e941b3ab2eece57babea2e3e1715 -r 45444e96642a4d6a5483382413ec70a55708ee5f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -5,6 +5,8 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Stephen Skory <s at skory.us>
 Affiliation: UCSD Physics/CASS
+Author: Geoffrey So <gsiisg at gmail.com> (Ellipsoidal functions)
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -56,6 +58,22 @@
 
 TINY = 1.e-40
 
+# Ellipsoid functions.
+
+# define the rotation matrix needed later
+def RX(ax):
+    rot_matrix = na.array([[1, 0, 0], [0, na.cos(ax), na.sin(ax)],
+        [0, -na.sin(ax), na.cos(ax)]])
+    return rot_matrix
+def RY(ay):
+    rot_matrix = na.array([[na.cos(ay), 0, -na.sin(ay)], [0, 1, 0],
+        [na.sin(ay), 0, na.cos(ay)]])
+    return rot_matrix
+def RZ(az):
+    rot_matrix = na.array([[na.cos(az), na.sin(az), 0],
+        [-na.sin(az), na.cos(az), 0], [0, 0, 1]])
+    return rot_matrix
+
 class Halo(object):
     """
     A data source that returns particle information about the members of a
@@ -76,6 +94,8 @@
         self.id = id
         self.data = halo_list._data_source
         self.pf = self.data.pf
+        self.gridsize = (self.pf.domain_right_edge - \
+                 self.pf.domain_left_edge)
         if indices is not None:
             self.indices = halo_list._base_indices[indices]
         else:
@@ -404,7 +424,142 @@
         (4./3. * math.pi * rho_crit * \
         (self.radial_bins * cm)**3.0)
         
+    def _get_ellipsoid_parameters_basic(self):
+        na.seterr(all='ignore')
+        # Calculate the parameters that describe the ellipsoid of
+        # the particles that constitute the halo. This function returns
+        # all the parameters except for the center of mass.
+        com = self.center_of_mass()
+        position = [self["particle_position_x"],
+		    self["particle_position_y"],
+		    self["particle_position_z"]]
+        # Locate the furthest particle from com, its vector length and index
+	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	position = [position[0] - com[0],
+		    position[1] - com[1],
+		    position[2] - com[2]]
+	# different cases of particles being on other side of boundary
+	for axis in range(na.size(DW)):
+	    cases = na.array([position[axis],
+	  		      position[axis] + DW[axis],
+			      position[axis] - DW[axis]])        
+            # pick out the smallest absolute distance from com
+            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+	# find the furthest particle's index
+	r = na.sqrt(position[0]**2 +
+		    position[1]**2 +
+		    position[2]**2)
+        A_index = r.argmax()
+        mag_A = r.max()
+        # designate the A vector
+	A_vector = (position[0][A_index],
+		    position[1][A_index],
+		    position[2][A_index])
+        # designate the e0 unit vector
+        e0_vector = A_vector / mag_A
+        # locate the tB particle position by finding the max B
+	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+        for i in range(3):
+            e0_vector_copy[:, i] = e0_vector[i]
+        rr = na.array([position[0],
+		       position[1],
+		       position[2]]).T # Similar to tB_vector in old code.
+        tC_vector = na.cross(e0_vector_copy, rr)
+        te2 = tC_vector.copy()
+        for dim in range(3):
+            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = na.cross(te2, e0_vector_copy)
+        length = na.abs(-na.sum(rr * te1, axis = 1) * \
+            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            mag_A**-2.)**(-0.5))
+        # It apparently happens sometimes that the NaNs are turned into infs,
+        # which messes up the nanargmax below.
+        length[length == na.inf] = 0.
+        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        mag_B = length[tB_index]
+        e1_vector = te1[tB_index]
+        e2_vector = te2[tB_index]
+        temp_e0 = rr.copy()
+        temp_e1 = rr.copy()
+        temp_e2 = rr.copy()
+        for dim in range(3):
+            temp_e0[:,dim] = e0_vector[dim]
+            temp_e1[:,dim] = e1_vector[dim]
+            temp_e2[:,dim] = e2_vector[dim]
+        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
+            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == na.inf] = 0.
+        tC_index = na.nanargmax(length)
+        mag_C = length[tC_index]
+        # tilt is calculated from the rotation about x axis
+        # needed to align e1 vector with the y axis
+        # after e0 is aligned with x axis
+        # find the t1 angle needed to rotate about z axis to align e0 to x
+        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        r1 = (e0_vector * RZ(t1).transpose()).sum(axis = 1)
+        # find the t2 angle needed to rotate about y axis to align e0 to x
+        t2 = na.arctan(-r1[2] / r1[0])
+        r2 = na.dot(RY(t2), na.dot(RZ(t1), e1_vector))
+        tilt = na.arctan(r2[2]/r2[1])
+        return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
+            e0_vector[2], tilt)
 
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+        
+        Parameters
+        ----------
+        None
+        
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e0_vector as an array.
+              #. tilt as a float.
+        
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+        """
+        basic_parameters = self._get_ellipsoid_parameters_basic()
+        toreturn = [self.center_of_mass()]
+        updated = [basic_parameters[0], basic_parameters[1],
+            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
+        toreturn.extend(updated)
+        return tuple(toreturn)
+    
+    def get_ellipsoid(self):
+        r"""Returns an ellipsoidal data object.
+        
+        This will generate a new, empty ellipsoidal data object for this
+        halo.
+        
+        Parameters
+        ----------
+        None.
+        
+        Returns
+        -------
+        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+            The ellipsoidal data object.
+        
+        Examples
+        --------
+        >>> ell = halos[0].get_ellipsoid()
+        """
+        ep = self.get_ellipsoid_parameters()
+        ell = self.data.hierarchy.ellipsoid(ep[0], ep[1], ep[2], ep[3],
+            ep[4], ep[5])
+        return ell
+    
 class HOPHalo(Halo):
     pass
 
@@ -758,6 +913,39 @@
         (4./3. * math.pi * rho_crit * \
         (self.radial_bins * self.data.pf["cm"])**3.0)
 
+    def _get_ellipsoid_parameters_basic(self):
+        mylog.error("Ellipsoid calculation does not work for parallelHF halos." + \
+        " Please save the halos using .dump(), and reload them using" + \
+        " LoadHaloes() to use this function.")
+        return None
+
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+        
+        Parameters
+        ----------
+        None
+        
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e1_vector as an array.
+              #. tilt as a float.
+        
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+        """
+        mylog.error("get_ellipsoid_parameters does not work for parallelHF halos." + \
+        " Please save the halos using .dump(), and reload them using" + \
+        " LoadHaloes() to use this function.")
+        return None
 
 class FOFHalo(Halo):
 
@@ -793,8 +981,11 @@
 class LoadedHalo(Halo):
     def __init__(self, pf, id, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
-        rms_vel=None, fnames=None):
+        rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
+        e1_vec=None, tilt=None):
         self.pf = pf
+        self.gridsize = (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
         self.id = id
         self.size = size
         self.CoM = CoM
@@ -803,6 +994,11 @@
         self.max_radius = max_radius
         self.bulk_vel = bulk_vel
         self.rms_vel = rms_vel
+        self.mag_A = mag_A
+        self.mag_B = mag_B
+        self.mag_C = mag_C
+        self.e1_vec = e1_vec
+        self.tilt = tilt
         # locs=the names of the h5 files that have particle data for this halo
         self.fnames = fnames
         self.bin_count = None
@@ -975,6 +1171,68 @@
         """
         return self.max_radius
 
+    def _get_ellipsoid_parameters_basic_loadedhalo(self):
+        if self.mag_A is not None:
+            return (self.mag_A, self.mag_B, self.mag_C, self.e1_vec[0],
+                self.e1_vec[1], self.e1_vec[2], self.tilt)
+        else:
+            return self._get_ellipsoid_parameters_basic()
+
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e1_vector as an array.
+              #. tilt as a float.
+
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+	"""
+
+        basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
+        toreturn = [self.center_of_mass()]
+        updated = [basic_parameters[0], basic_parameters[1],
+            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
+        toreturn.extend(updated)
+        return tuple(toreturn)
+    
+    def get_ellipsoid(self):
+        r"""Returns an ellipsoidal data object.        
+        This will generate a new, empty ellipsoidal data object for this
+        halo.
+        
+        Parameters
+        ----------
+        None.
+        
+        Returns
+        -------
+        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+            The ellipsoidal data object.
+        
+        Examples
+        --------
+        >>> ell = halos[0].get_ellipsoid()
+        """
+        ep = self.get_ellipsoid_parameters()
+        ell = self.pf.hierarchy.ellipsoid(ep[0], ep[1], ep[2], ep[3],
+            ep[4], ep[5])
+        return ell
+
     def get_sphere(self):
         r"""Returns a sphere source.
 
@@ -1176,13 +1434,16 @@
             n_points.append([math.sqrt(n[0]),n[1].haloID])
         return n_points
 
-    def write_out(self, filename):
+    def write_out(self, filename, ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
         
         Parameters
         ----------
         filename : String
             The name of the file to write to.
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
         
         Examples
         --------
@@ -1193,10 +1454,18 @@
         else:
             f = open(filename,"w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
-        f.write("\t".join(["# Group","Mass","# part","max dens"
-                           "x","y","z", "center-of-mass",
-                           "x","y","z",
-                           "vx","vy","vz","max_r","rms_v","\n"]))
+        if not ellipsoid_data:
+            f.write("\t".join(["# Group","Mass","# part","max dens"
+                               "x","y","z", "center-of-mass",
+                               "x","y","z",
+                               "vx","vy","vz","max_r","rms_v","\n"]))
+        else:
+            f.write("\t".join(["# Group","Mass","# part","max dens"
+                               "x","y","z", "center-of-mass",
+                               "x","y","z",
+                               "vx","vy","vz","max_r","rms_v",
+                               "mag_A", "mag_B", "mag_C", "e1_vec0",
+                               "e1_vec1", "e1_vec2", "tilt", "\n"]))
         for group in self:
             f.write("%10i\t" % group.id)
             f.write("%0.9e\t" % group.total_mass())
@@ -1210,6 +1479,8 @@
             f.write("\t")
             f.write("%0.9e\t" % group.maximum_radius())
             f.write("%0.9e\t" % group.rms_velocity())
+            if ellipsoid_data:
+                f.write("\t".join(["%0.9e" % v for v in group._get_ellipsoid_parameters_basic()]))
             f.write("\n")
             f.flush()
         f.close()
@@ -1272,19 +1543,22 @@
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
-    def write_out(self, filename="HopAnalysis.out"):
+    def write_out(self, filename="HopAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
         
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "HopAnalysis.out".
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
         
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 class FOFHaloList(HaloList):
     _name = "FOF"
@@ -1305,19 +1579,22 @@
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
-    def write_out(self, filename="FOFAnalysis.out"):
+    def write_out(self, filename="FOFAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
         
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "FOFAnalysis.out".
-        
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
+
         Examples
         --------
         >>> halos.write_out("FOFAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 class LoadedHaloList(HaloList):
     _name = "Loaded"
@@ -1336,6 +1613,7 @@
         locations = self._collect_halo_data_locations()
         halo = 0
         for line in lines:
+            orig = line
             # Skip the comment lines at top.
             if line[0] == "#": continue
             line = line.split()
@@ -1351,9 +1629,32 @@
             bulk_vel = na.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
-            self._groups.append(LoadedHalo(self.pf, halo, size, CoM,
-                max_dens_point, group_total_mass, max_radius, bulk_vel,
-                rms_vel, fnames))
+            if len(line) == 15:
+                # No ellipsoid information
+                self._groups.append(LoadedHalo(self.pf, halo, size = size,
+                    CoM = CoM,
+                    max_dens_point = max_dens_point,
+                    group_total_mass = group_total_mass, max_radius = max_radius,
+                    bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames))
+            elif len(line) == 22:
+                # Ellipsoid information
+                mag_A = float(line[15])
+                mag_B = float(line[16])
+                mag_C = float(line[17])
+                e1_vec0 = float(line[18])
+                e1_vec1 = float(line[19])
+                e1_vec2 = float(line[20])
+                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                tilt = float(line[21])
+                self._groups.append(LoadedHalo(self.pf, halo, size = size,
+                    CoM = CoM,
+                    max_dens_point = max_dens_point,
+                    group_total_mass = group_total_mass, max_radius = max_radius,
+                    bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames,
+                    mag_A = mag_A, mag_B = mag_B, mag_C = mag_C, e1_vec = e1_vec,
+                    tilt = tilt))
+            else:
+                mylog.error("I am unable to parse this line. Too many or too few items. %s" % orig)
             halo += 1
     
     def _collect_halo_data_locations(self):
@@ -1593,7 +1894,7 @@
     def __len__(self):
         return self.group_count
 
-    def write_out(self, filename="parallelHopAnalysis.out"):
+    def write_out(self, filename="parallelHopAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
         
         Parameters
@@ -1606,7 +1907,7 @@
         --------
         >>> halos.write_out("parallelHopAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
@@ -1694,20 +1995,23 @@
             arr[arr < LE[i]-self.padding] += dw[i]
             arr[arr > RE[i]+self.padding] -= dw[i]
 
-    def write_out(self, filename):
+    def write_out(self, filename, ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
         
         Parameters
         ----------
         filename : String
             The name of the file to write to.
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
         
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
         f = self.comm.write_on_root(filename)
-        HaloList.write_out(self, f)
+        HaloList.write_out(self, f, ellipsoid_data)
 
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
@@ -1755,7 +2059,7 @@
             halo.write_particle_list(f)
         f.close()
 
-    def dump(self, basename="HopAnalysis"):
+    def dump(self, basename="HopAnalysis", ellipsoid_data=False):
         r"""Save the full halo data to disk.
         
         This function will save the halo data in such a manner that it can be
@@ -1772,12 +2076,15 @@
         basename : String
             The base name for the files the data will be written to. Default = 
             "HopAnalysis".
+        ellipsoid_data : bool.
+            Whether to save the ellipsoidal information to the files.
+            Default = False.
         
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        self.write_out("%s.out" % basename)
+        self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
 


diff -r 01838e47eca2e941b3ab2eece57babea2e3e1715 -r 45444e96642a4d6a5483382413ec70a55708ee5f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,6 +5,8 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
+Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
@@ -3205,6 +3207,154 @@
             self._cut_masks[grid.id] = cm
         return cm
 
+class AMREllipsoidBase(AMR3DData):
+    """
+    We can define an ellipsoid to act as a data object.
+    """
+    _type_name = "ellipsoid"
+    _con_args = ('center', '_A', '_B', '_C', '_e1', '_tilt')
+    def __init__(self, center, A, B, C, e1, tilt, fields=None,
+                 pf=None, **kwargs):
+        """
+        By providing a *center*,*A*,*B*,*C*,*e1*,*tilt* we
+        can define a ellipsoid of any proportion.  Only cells whose centers are
+        within the ellipsoid will be selected.
+        """
+        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        # make sure the smallest side is not smaller than dx
+        if C < self.hierarchy.get_smallest_dx():
+            raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
+        self._A = A
+        self._B = B
+        self._C = C
+        self._e1 = e1
+        self._tilt = tilt
+        
+        # define the rotation matrix needed later
+        def RX(ax):
+            rot_matrix = na.array([[1, 0, 0], \
+                                   [0, na.cos(ax), na.sin(ax)], \
+                                   [0,-na.sin(ax), na.cos(ax)]])
+            return rot_matrix
+        def RY(ay):
+            rot_matrix = na.array([[na.cos(ay), 0,-na.sin(ay)], \
+                                   [0, 1, 0], \
+                                   [na.sin(ay), 0, na.cos(ay)]])
+            return rot_matrix
+        def RZ(az):
+            rot_matrix = na.array([[na.cos(az), na.sin(az), 0], \
+                                   [-na.sin(az), na.cos(az), 0], \
+                                   [0, 0, 1]])
+            return rot_matrix
+
+        # find the t1 angle needed to rotate about z axis to align e1 to x
+        t1 = na.arctan(e1[1] / e1[0])
+        # rotate e1 by -t1
+        r1 = (e1 * RZ(-t1).transpose()).sum(axis = 1)
+        # find the t2 angle needed to rotate about y axis to align e1 to x
+        t2 = na.arctan(-r1[2] / r1[0])
+        """
+        calculate the original e2
+        given the tilt about the x axis when e1 was aligned 
+        to x after t1, t2 rotations about z, y
+        """
+        e2 = ((0, 1, 0) * RX(tilt).transpose()).sum(axis = 1)
+        e2 = (e2 * RY(t2).transpose()).sum(axis = 1)
+        e2 = (e2 * RZ(t1).transpose()).sum(axis = 1)
+        e3 = na.cross(e1, e2)
+
+	self._e2 = e2
+	self._e3 = e3
+
+        self.set_field_parameter('A', A)
+        self.set_field_parameter('B', B)
+        self.set_field_parameter('C', C)
+        self.set_field_parameter('e1', e1)
+        self.set_field_parameter('e2', e2)
+        self.set_field_parameter('e3', e3)
+        self.DW = self.pf.domain_right_edge - self.pf.domain_left_edge
+        self._refresh_data()
+
+        """
+        Having another function find_ellipsoid_grids is too much work, 
+        can just use the sphere one and forget about checking orientation
+        but feed in the A parameter for radius
+        """
+    def _get_list_of_grids(self, field = None):
+        """
+        This returns the grids that are possibly within the ellipsoid
+        """
+        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        # Now we sort by level
+        grids = grids.tolist()
+        grids.sort(key=lambda x: (x.Level, \
+                                  x.LeftEdge[0], \
+                                  x.LeftEdge[1], \
+                                  x.LeftEdge[2]))
+        self._grids = na.array(grids, dtype = 'object')
+
+    def _is_fully_enclosed(self, grid):
+        """
+        check if all grid corners are inside the ellipsoid
+        """
+        # vector from corner to center
+        vr = (grid._corners - self.center)
+        # 3 possible cases of locations taking periodic BC into account
+        # just listing the components, find smallest later
+        dotarr=na.array([vr, vr + self.DW, vr - self.DW])
+        # the vrdote#_2 terms take the product of the vr components with e#,
+        # square the results,
+        # pick the smallest of the periodic cases,
+        # and sum over the components
+        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        vrdote3_2 = (na.multiply(dotarr, self._e3)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        return na.all(vrdote1_2 / self._A**2 + \
+                      vrdote2_2 / self._B**2 + \
+                      vrdote3_2 / self._C**2 <=1.0)
+
+    @restore_grid_state # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field = None):
+        """
+        This checks if each cell is inside the ellipsoid
+        """
+        # We have the *property* center, which is not necessarily
+        # the same as the field_parameter
+        if self._is_fully_enclosed(grid):
+            return True # We do not want child masking here
+        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
+           and grid.id in self._cut_masks:
+            return self._cut_masks[grid.id]
+        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        dim = grid["x"].shape
+        # need this to take into account non-cube root grid tiles
+        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        for i, ax in enumerate('xyz'):
+            # distance to center
+            ar  = grid[ax]-self.center[i]
+            # cases to take into account periodic BC
+            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            # find which of the 3 cases is smallest in magnitude
+            index = na.abs(case).argmin(axis = 0)
+            # restrict distance to only the smallest cases
+            vec = na.choose(index, case)
+            # sum up to get the dot product with e_vectors
+            dot_evec += na.array([vec * self._e1[i], \
+                                  vec * self._e2[i], \
+                                  vec * self._e3[i]])
+        # Calculate the eqn of ellipsoid, if it is inside
+        # then result should be <= 1.0
+        Inside = dot_evec[0]**2 / self._A**2 + \
+                 dot_evec[1]**2 / self._B**2 + \
+                 dot_evec[2]**2 / self._C**2
+        cm = ((Inside <= 1.0) & grid.child_mask)
+        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
+            self._cut_masks[grid.id] = cm
+        return cm
+
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
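
The cut mask in AMREllipsoidBase boils down to the standard ellipsoid inequality
in the (_e1, _e2, _e3) basis: a cell center r, measured from the ellipsoid
center, is inside when (r.e1)^2/A^2 + (r.e2)^2/B^2 + (r.e3)^2/C^2 <= 1. A
minimal single-point sketch of that test, leaving out the periodic-image
handling and child masking done in the patch (names are illustrative):

import numpy as na

def point_in_ellipsoid(point, center, A, B, C, e1, e2, e3):
    # Project the offset onto each principal axis; A, B, C are the semi-axis
    # lengths along the orthonormal vectors e1, e2, e3.
    r = na.asarray(point) - na.asarray(center)
    return (na.dot(r, e1) ** 2 / A ** 2 +
            na.dot(r, e2) ** 2 / B ** 2 +
            na.dot(r, e3) ** 2 / C ** 2) <= 1.0

inside = point_in_ellipsoid([0.52, 0.5, 0.5], [0.5, 0.5, 0.5],
                            0.1, 0.05, 0.02,
                            na.array([1.0, 0.0, 0.0]),
                            na.array([0.0, 1.0, 0.0]),
                            na.array([0.0, 0.0, 1.0]))
# inside -> True (the point lies 0.02 along e1, well within A = 0.1)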



https://bitbucket.org/yt_analysis/yt/changeset/9b82c124da9d/
changeset:   9b82c124da9d
branch:      yt
user:        gsiisg
date:        2011-12-07 22:29:07
summary:     merging with Stephen's LoadHalo fix
affected #:  2 files

diff -r 45444e96642a4d6a5483382413ec70a55708ee5f -r 9b82c124da9d28ce931ff51cb2accb73fb44a06b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -34,6 +34,7 @@
 import numpy as na
 import random
 import sys
+import os.path as path
 from collections import defaultdict
 
 from yt.funcs import *
@@ -1661,12 +1662,16 @@
         # The halos are listed in order in the file.
         lines = file("%s.txt" % self.basename)
         locations = []
+        realpath = path.realpath("%s.txt" % self.basename)
         for line in lines:
             line = line.split()
             # Prepend the hdf5 file names with the full path.
             temp = []
             for item in line[1:]:
-                temp.append(self.pf.fullpath + '/' + item)
+                # This assumes that the .txt is in the same place as
+                # the h5 files, which is a reasonable assumption.
+                item = item.split("/")
+                temp.append(path.join(path.dirname(realpath), item[-1]))
             locations.append(temp)
         lines.close()
         return locations
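
The fix above stops trusting the absolute paths stored in the .txt file and
instead rebuilds each HDF5 filename next to the .txt itself, so dumped halos
can be reloaded after the files have been moved. A minimal sketch of the
rewrite (the paths below are made up for illustration):

import os.path as path

realpath = path.realpath("HopAnalysis.txt")      # resolves to wherever the .txt now lives
stored = "/old/scratch/run/HopAnalysis_0000.h5"  # stale absolute path read from the .txt
item = stored.split("/")
rebuilt = path.join(path.dirname(realpath), item[-1])
# rebuilt points at HopAnalysis_0000.h5 in the same directory as the .txt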


diff -r 45444e96642a4d6a5483382413ec70a55708ee5f -r 9b82c124da9d28ce931ff51cb2accb73fb44a06b yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -265,6 +265,7 @@
     def _find_parameter(self, ptype, pname, scalar = False):
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
+        if nn not in self._handle: raise KeyError(nn)
         for tpname, pval in self._handle[nn][:]:
             if tpname.strip() == pname:
                 return pval
@@ -285,20 +286,26 @@
             [self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
         self.domain_right_edge = na.array(
             [self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
-        self.dimensionality = self._find_parameter("integer", "dimensionality",
-                                scalar = True)
 
         # Determine domain dimensions
         try:
             nxb = self._find_parameter("integer", "nxb", scalar = True)
             nyb = self._find_parameter("integer", "nyb", scalar = True)
             nzb = self._find_parameter("integer", "nzb", scalar = True)
+            dimensionality = self._find_parameter("integer", "dimensionality",
+                                    scalar = True)
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
+            dimensionality = 3
+            if nzb == 1: dimensionality = 2
+            if nyb == 1: dimensionality = 1
+            if dimensionality < 3:
+                mylog.warning("Guessing dimensionality as %s", dimensionality)
         nblockx = self._find_parameter("integer", "nblockx")
         nblocky = self._find_parameter("integer", "nblockx")
         nblockz = self._find_parameter("integer", "nblockx")
+        self.dimensionality = dimensionality
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
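
For older FLASH files that lack the "dimensionality" scalar, the patch falls
back to guessing it from the per-block dimensions. The fallback logic amounts
to the following (a standalone restatement, not the actual frontend code):

def guess_dimensionality(nxb, nyb, nzb):
    # A block that is one cell thick in z (or in both y and z) indicates a
    # 2D (or 1D) simulation; otherwise assume 3D.
    dimensionality = 3
    if nzb == 1:
        dimensionality = 2
    if nyb == 1:
        dimensionality = 1
    return dimensionality

# guess_dimensionality(8, 8, 1) -> 2, guess_dimensionality(8, 1, 1) -> 1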
 



https://bitbucket.org/yt_analysis/yt/changeset/1fee5cbd4bc9/
changeset:   1fee5cbd4bc9
branch:      yt
user:        gsiisg
date:        2012-03-19 23:48:06
summary:     merge
affected #:  192 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/15de07629ceb/
changeset:   15de07629ceb
branch:      yt
user:        gsiisg
date:        2012-03-20 00:08:45
summary:     fix halo_objects missing rotation functions
affected #:  1 file

diff -r 1fee5cbd4bc9f7c94bc1edc78dd8fba4ece3b14d -r 15de07629ceb11e66068220fd2bd35bd4407cb14 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -60,6 +60,21 @@
 
 TINY = 1.e-40
 
+# Ellipsoid functions.
+# define the rotation matrix needed later
+def RX(ax):
+    rot_matrix = na.array([[1, 0, 0], [0, na.cos(ax), na.sin(ax)],
+        [0, -na.sin(ax), na.cos(ax)]])
+    return rot_matrix
+def RY(ay):
+    rot_matrix = na.array([[na.cos(ay), 0, -na.sin(ay)], [0, 1, 0],
+        [na.sin(ay), 0, na.cos(ay)]])
+    return rot_matrix
+def RZ(az):
+    rot_matrix = na.array([[na.cos(az), na.sin(az), 0],
+        [-na.sin(az), na.cos(az), 0], [0, 0, 1]])
+    return rot_matrix
+
 class Halo(object):
     """
     A data source that returns particle information about the members of a



https://bitbucket.org/yt_analysis/yt/changeset/7fca13bb64d8/
changeset:   7fca13bb64d8
branch:      yt
user:        gsiisg
date:        2012-03-20 00:32:26
summary:     changed the principal vector e to start with e0 instead of e1 in data_containers.py to be consistent with halo_objects.py
affected #:  1 file

diff -r 15de07629ceb11e66068220fd2bd35bd4407cb14 -r 7fca13bb64d80a19282828382d46664613ede6b2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3337,11 +3337,11 @@
     We can define an ellipsoid to act as a data object.
     """
     _type_name = "ellipsoid"
-    _con_args = ('center', '_A', '_B', '_C', '_e1', '_tilt')
-    def __init__(self, center, A, B, C, e1, tilt, fields=None,
+    _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')
+    def __init__(self, center, A, B, C, e0, tilt, fields=None,
                  pf=None, **kwargs):
         """
-        By providing a *center*,*A*,*B*,*C*,*e1*,*tilt* we
+        By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we
         can define an ellipsoid of any proportion.  Only cells whose centers are
         within the ellipsoid will be selected.
         """
@@ -3352,7 +3352,7 @@
         self._A = A
         self._B = B
         self._C = C
-        self._e1 = e1
+        self._e0 = e0
         self._tilt = tilt
         
         # define the rotation matrix needed later
@@ -3372,31 +3372,31 @@
                                    [0, 0, 1]])
             return rot_matrix
 
-        # find the t1 angle needed to rotate about z axis to align e1 to x
-        t1 = na.arctan(e1[1] / e1[0])
-        # rotate e1 by -t1
-        r1 = (e1 * RZ(-t1).transpose()).sum(axis = 1)
-        # find the t2 angle needed to rotate about y axis to align e1 to x
+        # find the t1 angle needed to rotate about z axis to align e0 to x
+        t1 = na.arctan(e0[1] / e0[0])
+        # rotate e0 by -t1
+        r1 = (e0 * RZ(-t1).transpose()).sum(axis = 1)
+        # find the t2 angle needed to rotate about y axis to align e0 to x
         t2 = na.arctan(-r1[2] / r1[0])
         """
-        calculate the original e2
-        given the tilt about the x axis when e1 was aligned 
+        calculate the original e1
+        given the tilt about the x axis when e0 was aligned 
         to x after t1, t2 rotations about z, y
         """
-        e2 = ((0, 1, 0) * RX(tilt).transpose()).sum(axis = 1)
-        e2 = (e2 * RY(t2).transpose()).sum(axis = 1)
-        e2 = (e2 * RZ(t1).transpose()).sum(axis = 1)
-        e3 = na.cross(e1, e2)
-
+        e1 = ((0, 1, 0) * RX(tilt).transpose()).sum(axis = 1)
+        e1 = (e1 * RY(t2).transpose()).sum(axis = 1)
+        e1 = (e1 * RZ(t1).transpose()).sum(axis = 1)
+        e2 = na.cross(e0, e1)
+
+	self._e1 = e1
 	self._e2 = e2
-	self._e3 = e3
 
         self.set_field_parameter('A', A)
         self.set_field_parameter('B', B)
         self.set_field_parameter('C', C)
+        self.set_field_parameter('e0', e0)
         self.set_field_parameter('e1', e1)
         self.set_field_parameter('e2', e2)
-        self.set_field_parameter('e3', e3)
         self.DW = self.pf.domain_right_edge - self.pf.domain_left_edge
         self._refresh_data()
 
@@ -3431,15 +3431,15 @@
         # square the results
         # find the smallest
         # sums it
+        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+                                                           = 0).sum(axis = 1)
         vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
                                                            = 0).sum(axis = 1)
         vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
                                                            = 0).sum(axis = 1)
-        vrdote3_2 = (na.multiply(dotarr, self._e3)**2).min(axis \
-                                                           = 0).sum(axis = 1)
-        return na.all(vrdote1_2 / self._A**2 + \
-                      vrdote2_2 / self._B**2 + \
-                      vrdote3_2 / self._C**2 <=1.0)
+        return na.all(vrdote0_2 / self._A**2 + \
+                      vrdote1_2 / self._B**2 + \
+                      vrdote2_2 / self._C**2 <=1.0)
 
     @restore_grid_state # Pains me not to decorate with cache_mask here
     def _get_cut_mask(self, grid, field = None):
@@ -3467,9 +3467,9 @@
             # restrict distance to only the smallest cases
             vec = na.choose(index, case)
             # sum up to get the dot product with e_vectors
-            dot_evec += na.array([vec * self._e1[i], \
-                                  vec * self._e2[i], \
-                                  vec * self._e3[i]])
+            dot_evec += na.array([vec * self._e0[i], \
+                                  vec * self._e1[i], \
+                                  vec * self._e2[i]])
         # Calculate the eqn of ellipsoid, if it is inside
         # then result should be <= 1.0
         Inside = dot_evec[0]**2 / self._A**2 + \
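
With this renaming, an ellipsoid is still specified compactly by e0 plus the
scalar tilt; e1 and e2 are reconstructed by tilting the y axis and undoing the
two rotations that would align e0 with x. A rough standalone sketch of that
reconstruction, following the sign conventions of the rotation matrices defined
in this patch (function and variable names are illustrative):

import numpy as na

def RX(a):
    return na.array([[1, 0, 0],
                     [0, na.cos(a), na.sin(a)],
                     [0, -na.sin(a), na.cos(a)]])

def RY(a):
    return na.array([[na.cos(a), 0, -na.sin(a)],
                     [0, 1, 0],
                     [na.sin(a), 0, na.cos(a)]])

def RZ(a):
    return na.array([[na.cos(a), na.sin(a), 0],
                     [-na.sin(a), na.cos(a), 0],
                     [0, 0, 1]])

def basis_from_e0_and_tilt(e0, tilt):
    # Angles of the rotations (about z, then y) that would align e0 with x.
    t1 = na.arctan(e0[1] / e0[0])
    r1 = na.dot(RZ(t1), e0)
    t2 = na.arctan(-r1[2] / r1[0])
    # Start from y-hat, apply the tilt rotation about x, then undo the two
    # alignment rotations to bring it back into the original frame.
    e1 = na.dot(RZ(-t1), na.dot(RY(-t2), na.dot(RX(-tilt), na.array([0.0, 1.0, 0.0]))))
    e2 = na.cross(e0, e1)
    return e1, e2

# e.g. basis_from_e0_and_tilt(na.array([1.0, 0.0, 0.0]), 0.0) gives y-hat and z-hat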



https://bitbucket.org/yt_analysis/yt/changeset/4b18e35630ad/
changeset:   4b18e35630ad
branch:      yt
user:        gsiisg
date:        2012-03-20 22:21:17
summary:     added return of zeros when the ellipsoid calculation encounters a halo with fewer than 4 particles
affected #:  1 file

diff -r 7fca13bb64d80a19282828382d46664613ede6b2 -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -458,6 +458,13 @@
         
     def _get_ellipsoid_parameters_basic(self):
         na.seterr(all='ignore')
+        # check that there are at least 4 particles to form an ellipsoid;
+        # we neglect to check whether the 4 particles lie in the same plane,
+        # which almost certainly never occurs,
+        # and will deal with it later if it ever comes up
+        if na.size(self["particle_position_x"]) < 4:
+            print "not enough particles to form ellipsoid returning zeros"
+            return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
         # the particles that constitute the halo. This function returns
         # all the parameters except for the center of mass.



https://bitbucket.org/yt_analysis/yt/changeset/8b97e3dfb122/
changeset:   8b97e3dfb122
branch:      yt
user:        gsiisg
date:        2012-04-19 00:37:49
summary:     merge
affected #:  32 files

diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -42,6 +42,7 @@
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
+INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -180,6 +181,17 @@
         echo "$ export CXX=g++-4.2"
         echo
     fi
+    if [ ! -z "${CFLAGS}" ]
+    then
+        echo "******************************************"
+        echo "******************************************"
+        echo "**                                      **"
+        echo "**    Your CFLAGS is not empty.         **"
+        echo "**    This can break h5py compilation.  **"
+        echo "**                                      **"
+        echo "******************************************"
+        echo "******************************************"
+    fi
 }
 
 
@@ -227,6 +239,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
+get_willwont ${INST_0MQ}
+echo "be installing ZeroMQ"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -257,7 +273,15 @@
 
 function do_exit
 {
-    echo "Failure.  Check ${LOG_FILE}."
+    echo "********************************************"
+    echo "        FAILURE REPORT:"
+    echo "********************************************"
+    echo
+    tail -n 10 ${LOG_FILE}
+    echo
+    echo "********************************************"
+    echo "********************************************"
+    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
     exit 1
 }
 
@@ -339,32 +363,33 @@
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
 echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
 echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
-echo '2c883d64886e5d595775dde497f101ff2ecec0786eabcdc69861c20e7d081e67b5e97551194236933b78f1ff7b119fcba0a9ce3aa4851440fc58f84d2094177b  ipython-0.10.tar.gz' > ipython-0.10.tar.gz.sha512
+echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c  ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
 echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
 echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
 echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
+echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
+echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
 
 # Individual processes
-if [ -z "$HDF5_DIR" ]
-then
-    echo "Downloading HDF5"
-    get_enzotools hdf5-1.8.7.tar.gz
-fi
-
+[ -z "$HDF5_DIR" ] && get_enzotools hdf5-1.8.7.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_enzotools zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_enzotools bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
 [ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
 [ $INST_PYX -eq 1 ] && get_enzotools PyX-0.11.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools zeromq-2.2.0.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools pyzmq-2.1.11.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools tornado-2.2.tar.gz
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
 get_enzotools mercurial-2.0.tar.gz
-get_enzotools ipython-0.10.tar.gz
+get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.15.1.tar.gz
 get_enzotools ext-3.3.2.zip
@@ -585,7 +610,26 @@
 [ -n "${OLD_LDFLAGS}" ] && export LDFLAGS=${OLD_LDFLAGS}
 [ -n "${OLD_CXXFLAGS}" ] && export CXXFLAGS=${OLD_CXXFLAGS}
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
-do_setup_py ipython-0.10
+
+# Now we do our IPython installation, which has two optional dependencies.
+if [ $INST_0MQ -eq 1 ]
+then
+    if [ ! -e zeromq-2.2.0/done ]
+    then
+        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        echo "Installing ZeroMQ"
+        cd zeromq-2.2.0
+        ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
+    do_setup_py tornado-2.2
+fi
+
+do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -24,7 +24,8 @@
 
 if IPython.__version__.startswith("0.10"):
     api_version = '0.10'
-elif IPython.__version__.startswith("0.11"):
+elif IPython.__version__.startswith("0.11") or \
+     IPython.__version__.startswith("0.12"):
     api_version = '0.11'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1901,7 +1901,7 @@
             if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
             self.bucket_bounds = \
-                self.comm.mpi_bcast_pickled(self.bucket_bounds)
+                self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
             self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -76,7 +76,7 @@
             del sock
         else:
             server_address, port = None, None
-        self.server_address, self.port = self.comm.mpi_bcast_pickled(
+        self.server_address, self.port = self.comm.mpi_bcast(
             (server_address, port))
         self.port = str(self.port)
 


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -86,9 +86,28 @@
 "ChildHaloID3", "ChildHaloFrac3",
 "ChildHaloID4", "ChildHaloFrac4"]
 
+# Below we make the SQL command that creates the table "Halos" in the
+# database. This table is where all the data is stored.
+# Each column of data is named and its datatype is specified.
+# The GlobalHaloID is given the PRIMARY KEY property, which means that
+# the SQLite machinery assigns a consecutive and unique integer value
+# to that field automatically as each new entry is entered (that is,
+# if GlobalHaloID isn't specified already).
+create_db_line = "CREATE TABLE Halos ("
+for i, col in enumerate(columns):
+    if i == 0:
+        create_db_line += "%s %s PRIMARY KEY," % (col, column_types[col])
+    else:
+        create_db_line += " %s %s," % (col, column_types[col])
+# Clean of trailing comma, and closing stuff.
+create_db_line = create_db_line[:-1] + ");"
+
 NumNeighbors = 15
 NumDB = 5
 
+def minus_one():
+    return -1
+
 class DatabaseFunctions(object):
     # Common database functions so it doesn't have to be repeated.
     def _open_database(self):
@@ -109,7 +128,7 @@
 class MergerTree(DatabaseFunctions, ParallelAnalysisInterface):
     def __init__(self, restart_files=[], database='halos.db',
             halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
-            FOF_link_length=0.2, dm_only=False, refresh=False, sleep=1,
+            FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         r"""Build a merger tree of halos over a time-ordered set of snapshots.
         This will run a halo finder to find the halos first if it hasn't already
@@ -140,12 +159,6 @@
         refresh : Boolean
             True forces the halo finder to run even if the halo data has been
             detected on disk. Default = False.
-        sleep : Float
-            Due to the nature of the SQLite database and network file systems,
-            it is crucial that all tasks see the database in the same state at
-            all times. This parameter specifies how long in seconds the merger
-            tree waits between checks to ensure the database is synched across
-            all tasks. Default = 1.
         index : Boolean
             SQLite databases can have added to them an index which greatly
             speeds up future queries of the database,
@@ -168,29 +181,32 @@
         self.FOF_link_length= FOF_link_length # For FOF
         self.dm_only = dm_only
         self.refresh = refresh
-        self.sleep = sleep # How long to wait between db sync checks.
-        if self.sleep <= 0.:
-            self.sleep = 5
+        self.index = index
+        self.zs = {}
         # MPI stuff
-        self.mine = self.comm.rank
-        if self.mine is None:
-            self.mine = 0
-        self.size = self.comm.size
-        if self.size is None:
-            self.size = 1
+        if self.comm.rank is None:
+            self.comm.rank = 0
+        if self.comm.size is None:
+            self.comm.size = 1
         # Get to work.
-        if self.refresh and self.mine == 0:
+        if self.refresh and self.comm.rank == 0:
             try:
                 os.unlink(self.database)
             except:
                 pass
-        self.comm.barrier()
-        self._open_create_database()
-        self._create_halo_table()
+        if self.comm.rank == 0:
+            self._open_create_database()
+            self._create_halo_table()
         self._run_halo_finder_add_to_db()
         # Find the h5 file names for all the halos.
         for snap in self.restart_files:
             self._build_h5_refs(snap)
+        # Find out how much work is already stored in the database.
+        if self.comm.rank == 0:
+            z_progress = self._find_progress()
+        else:
+            z_progress = None
+        z_progress = self.comm.mpi_bcast(z_progress)
         # Loop over the pairs of snapshots to locate likely neighbors, and
         # then use those likely neighbors to compute fractional contributions.
         last = None
@@ -199,14 +215,22 @@
         for snap, pair in enumerate(zip(self.restart_files[:-1], self.restart_files[1:])):
             if not self.with_halos[snap] or not self.with_halos[snap+1]:
                 continue
+            if self.zs[pair[0]] > z_progress:
+                continue
             self._find_likely_children(pair[0], pair[1])
             # last is the data for the parent dataset, which can be supplied
             # as the child from the previous round for all but the first loop.
             last = self._compute_child_fraction(pair[0], pair[1], last)
+            if self.comm.rank == 0:
+                mylog.info("Updating database with parent-child relationships.")
+                self._copy_and_update_db()
+                # This has to happen because we delete the old database above.
+                self._open_create_database()
         del last
-        # Now update the database with all the writes.
-        mylog.info("Updating database with parent-child relationships.")
-        self._copy_and_update_db()
+        if self.comm.rank == 0:
+            if self.index:
+                self._write_index()
+            self._close_database()
         self.comm.barrier()
         mylog.info("Done!")
         
@@ -220,6 +244,7 @@
         for cycle, file in enumerate(self.restart_files):
             gc.collect()
             pf = load(file)
+            self.zs[file] = pf.current_redshift
             self.period = pf.domain_right_edge - pf.domain_left_edge
             # If the halos are already found, skip this data step, unless
             # refresh is True.
@@ -247,12 +272,17 @@
                 del halos
             # Now add halo data to the db if it isn't already there by
             # checking the first halo.
-            currt = pf.unique_identifier
-            line = "SELECT GlobalHaloID from Halos where SnapHaloID=0\
-            and SnapCurrentTimeIdentifier=%d;" % currt
-            self.cursor.execute(line)
-            result = self.cursor.fetchone()
-            if result != None:
+            continue_check = False
+            if self.comm.rank == 0:
+                currt = pf.unique_identifier
+                line = "SELECT GlobalHaloID from Halos where SnapHaloID=0\
+                and SnapCurrentTimeIdentifier=%d;" % currt
+                self.cursor.execute(line)
+                result = self.cursor.fetchone()
+                if result != None:
+                    continue_check = True
+            continue_check = self.comm.mpi_bcast(continue_check)
+            if continue_check:
                 continue
             red = pf.current_redshift
             # Read the halos off the disk using the Halo Profiler tools.
@@ -261,9 +291,10 @@
             if len(hp.all_halos) == 0:
                 mylog.info("Dataset %s has no halos." % file)
                 self.with_halos[cycle] = False
+                del hp
                 continue
             mylog.info("Entering halos into database for z=%f" % red)
-            if self.mine == 0:
+            if self.comm.rank == 0:
                 for ID,halo in enumerate(hp.all_halos):
                     numpart = int(halo['numpart'])
                     values = (None, currt, red, ID, halo['mass'], numpart,
@@ -284,134 +315,100 @@
     
     def _open_create_database(self):
         # open the database. This creates the database file on disk if it
-        # doesn't already exist. Open it first on root, and then on the others.
-        if self.mine == 0:
-            self.conn = sql.connect(self.database)
-        self.comm.barrier()
-        self._ensure_db_sync()
-        if self.mine != 0:
-            self.conn = sql.connect(self.database)
+        # doesn't already exist. Open it on root only.
+        self.conn = sql.connect(self.database)
         self.cursor = self.conn.cursor()
 
-    def _ensure_db_sync(self):
-        # If the database becomes out of sync for each task, ostensibly due to
-        # parallel file system funniness, things will go bad very quickly.
-        # Therefore, just to be very, very careful, we will ensure that the
-        # md5 hash of the file is identical across all tasks before proceeding.
-        self.comm.barrier()
-        for i in range(5):
-            try:
-                file = open(self.database)
-            except IOError:
-                # This is to give a little bit of time for the database creation
-                # to replicate across the file system.
-                time.sleep(self.sleep)
-                file = open(self.database)
-            hash = md5.md5(file.read()).hexdigest()
-            file.close()
-            ignore, hashes = self.comm.mpi_info_dict(hash)
-            hashes = set(hashes.values())
-            if len(hashes) == 1:
-                break
-            else:
-                # Wait a little bit for the file system to (hopefully) sync up.
-                time.sleep(self.sleep)
-        if len(hashes) == 1:
-            return
-        else:
-            mylog.error("The file system is not properly synchronizing the database.")
-            raise RunTimeError("Fatal error. Exiting.")
-
     def _create_halo_table(self):
-        if self.mine == 0:
-            # Handle the error if it already exists.
-            try:
-                # Create the table that will store the halo data.
-                line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
-                    SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
-                    HaloMass FLOAT,\
-                    NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
-                    CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
-                    MaxRad FLOAT,\
-                    ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
-                    ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
-                    ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
-                    ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
-                    ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
-                self.cursor.execute(line)
-                self.conn.commit()
-            except sql.OperationalError:
-                pass
-        self.comm.barrier()
+        # Handle the error if the table already exists by doing nothing.
+        try:
+            self.cursor.execute(create_db_line)
+            self.conn.commit()
+        except sql.OperationalError:
+            pass
     
     def _find_likely_children(self, parentfile, childfile):
         # For each halo in the parent list, identify likely children in the 
         # list of children.
-        
+
         # First, read in the locations of the child halos.
         child_pf = load(childfile)
         child_t = child_pf.unique_identifier
-        line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
-        Halos WHERE SnapCurrentTimeIdentifier = %d" % child_t
-        self.cursor.execute(line)
-        
-        mylog.info("Finding likely parents for z=%1.5f child halos." % \
-            child_pf.current_redshift)
-        
-        # Build the kdtree for the children by looping over the fetched rows.
-        # Normalize the points for use only within the kdtree.
-        child_points = []
-        for row in self.cursor:
-            child_points.append([row[1] / self.period[0],
-            row[2] / self.period[1],
-            row[3] / self.period[2]])
-        # Turn it into fortran.
-        child_points = na.array(child_points)
-        fKD.pos = na.asfortranarray(child_points.T)
-        fKD.qv = na.empty(3, dtype='float64')
-        fKD.dist = na.empty(NumNeighbors, dtype='float64')
-        fKD.tags = na.empty(NumNeighbors, dtype='int64')
-        fKD.nn = NumNeighbors
-        fKD.sort = True
-        fKD.rearrange = True
-        create_tree(0)
-
+        if self.comm.rank == 0:
+            line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
+            Halos WHERE SnapCurrentTimeIdentifier = %d" % child_t
+            self.cursor.execute(line)
+            
+            mylog.info("Finding likely parents for z=%1.5f child halos." % \
+                child_pf.current_redshift)
+            
+            # Build the kdtree for the children by looping over the fetched rows.
+            # Normalize the points for use only within the kdtree.
+            child_points = []
+            for row in self.cursor:
+                child_points.append([row[1] / self.period[0],
+                row[2] / self.period[1],
+                row[3] / self.period[2]])
+            # Turn it into fortran.
+            child_points = na.array(child_points)
+            fKD.pos = na.asfortranarray(child_points.T)
+            fKD.qv = na.empty(3, dtype='float64')
+            fKD.dist = na.empty(NumNeighbors, dtype='float64')
+            fKD.tags = na.empty(NumNeighbors, dtype='int64')
+            fKD.nn = NumNeighbors
+            fKD.sort = True
+            fKD.rearrange = True
+            create_tree(0)
+    
         # Find the parent points from the database.
         parent_pf = load(parentfile)
         parent_t = parent_pf.unique_identifier
-        line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
-        Halos WHERE SnapCurrentTimeIdentifier = %d" % parent_t
-        self.cursor.execute(line)
+        if self.comm.rank == 0:
+            line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
+            Halos WHERE SnapCurrentTimeIdentifier = %d" % parent_t
+            self.cursor.execute(line)
+    
+            # Loop over the returned rows, and find the likely neighbors for the
+            # parents.
+            candidates = {}
+            for row in self.cursor:
+                # Normalize positions for use within the kdtree.
+                fKD.qv = na.array([row[1] / self.period[0],
+                row[2] / self.period[1],
+                row[3] / self.period[2]])
+                find_nn_nearest_neighbors()
+                NNtags = fKD.tags[:] - 1
+                nIDs = []
+                for n in NNtags:
+                    nIDs.append(n)
+                # We need to fill in fake halos if there aren't enough halos,
+                # which can happen at high redshifts.
+                while len(nIDs) < NumNeighbors:
+                    nIDs.append(-1)
+                candidates[row[0]] = nIDs
+            
+            del fKD.pos, fKD.tags, fKD.dist
+            free_tree(0) # Frees the kdtree object.
+        else:
+            candidates = None
 
-        # Loop over the returned rows, and find the likely neighbors for the
-        # parents.
-        candidates = {}
-        for row in self.cursor:
-            # Normalize positions for use within the kdtree.
-            fKD.qv = na.array([row[1] / self.period[0],
-            row[2] / self.period[1],
-            row[3] / self.period[2]])
-            find_nn_nearest_neighbors()
-            NNtags = fKD.tags[:] - 1
-            nIDs = []
-            for n in NNtags:
-                nIDs.append(n)
-            # We need to fill in fake halos if there aren't enough halos,
-            # which can happen at high redshifts.
-            while len(nIDs) < NumNeighbors:
-                nIDs.append(-1)
-            candidates[row[0]] = nIDs
-        
-        del fKD.pos, fKD.tags, fKD.dist
-        free_tree(0) # Frees the kdtree object.
-        
+        # Sync across tasks.
+        candidates = self.comm.mpi_bcast(candidates)
         self.candidates = candidates
         
         # This stores the masses contributed to each child candidate.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
+        # The +1 is an extra element in the array that collects garbage
+        # values. This allows us to eliminate a try/except later.
+        # This extra array element will be cut off eventually.
+        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+            dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
+        # Fill it out with sub-nested default dicts that point to the
+        # garbage slot, and then fill it with correct values for (possibly)
+        # related parent/child halo pairs.
         for i,halo in enumerate(sorted(candidates)):
+            self.child_mass_loc[halo] = defaultdict(minus_one)
             for j, child in enumerate(candidates[halo]):
                 self.child_mass_loc[halo][child] = i*NumNeighbors + j
 
@@ -457,7 +454,7 @@
             parent_masses = na.array([], dtype='float64')
             parent_halos = na.array([], dtype='int32')
             for i,pname in enumerate(parent_names):
-                if i>=self.mine and i%self.size==self.mine:
+                if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
                     for group in h5fp:
                         gID = int(group[4:])
@@ -489,7 +486,7 @@
         child_masses = na.array([], dtype='float64')
         child_halos = na.array([], dtype='int32')
         for i,cname in enumerate(child_names):
-            if i>=self.mine and i%self.size==self.mine:
+            if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
                 for group in h5fp:
                     gID = int(group[4:])
@@ -510,39 +507,9 @@
         child_send = na.ones(child_IDs.size, dtype='bool')
         del sort
         
-        # Parent IDs on the left, child IDs on the right. We skip down both
-        # columns matching IDs. If they are out of synch, the index(es) is/are
-        # advanced until they match up again.
-        left = 0
-        right = 0
-        while left < parent_IDs.size and right < child_IDs.size:
-            if parent_IDs[left] == child_IDs[right]:
-                # They match up, add this relationship.
-                try:
-                    loc = self.child_mass_loc[parent_halos[left]][child_halos[right]]
-                except KeyError:
-                    # This happens when a child halo contains a particle from
-                    # a parent halo, but the child is not identified as a 
-                    # candidate child halo. So we do nothing and move on with
-                    # our lives.
-                    left += 1
-                    right += 1
-                    continue
-                self.child_mass_arr[loc] += parent_masses[left]
-                # Mark this pair so we don't send them later.
-                parent_send[left] = False
-                child_send[right] = False
-                left += 1
-                right += 1
-                continue
-            if parent_IDs[left] < child_IDs[right]:
-                # The left is too small, so we need to increase it.
-                left += 1
-                continue
-            if parent_IDs[left] > child_IDs[right]:
-                # Right too small.
-                right += 1
-                continue
+        # Match particles in halos.
+        self._match(parent_IDs, child_IDs, parent_halos, child_halos,
+            parent_masses, parent_send, child_send)
 
         # Now we send all the un-matched particles to the root task for one more
         # pass. This depends on the assumption that most of the particles do
@@ -576,61 +543,42 @@
         child_halos_tosend = child_halos_tosend[Csort]
         del Psort, Csort
 
-        # Now Again.
-        if self.mine == 0:
-            matched = 0
-            left = 0
-            right = 0
-            while left < parent_IDs_tosend.size and right < child_IDs_tosend.size:
-                if parent_IDs_tosend[left] == child_IDs_tosend[right]:
-                    # They match up, add this relationship.
-                    try:
-                        loc = self.child_mass_loc[parent_halos_tosend[left]][child_halos_tosend[right]]
-                    except KeyError:
-                        # This happens when a child halo contains a particle from
-                        # a parent halo, but the child is not identified as a 
-                        # candidate child halo. So we do nothing and move on with
-                        # our lives.
-                        left += 1
-                        right += 1
-                        continue
-                    self.child_mass_arr[loc] += parent_masses_tosend[left]
-                    matched += 1
-                    left += 1
-                    right += 1
-                    continue
-                if parent_IDs_tosend[left] < child_IDs_tosend[right]:
-                    # The left is too small, so we need to increase it.
-                    left += 1
-                    continue
-                if parent_IDs_tosend[left] > child_IDs_tosend[right]:
-                    # Right too small.
-                    right += 1
-                    continue
-            mylog.info("Clean-up round matched %d of %d parents and %d children." % \
-            (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
+        # Now again, but only on the root task.
+        if self.comm.rank == 0:
+            self._match(parent_IDs_tosend, child_IDs_tosend,
+            parent_halos_tosend, child_halos_tosend, parent_masses_tosend)
 
         # Now we sum up the contributions globally.
         self.child_mass_arr = self.comm.mpi_allreduce(self.child_mass_arr)
         
-        # Turn these Msol masses into percentages of the parent.
-        line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
-        ORDER BY SnapHaloID ASC;" % parent_currt
-        self.cursor.execute(line)
-        mark = 0
-        result = self.cursor.fetchone()
-        while result:
-            mass = result[0]
-            self.child_mass_arr[mark:mark+NumNeighbors] /= mass
-            mark += NumNeighbors
+        # Trim off the garbage element at the end.
+        self.child_mass_arr = self.child_mass_arr[:-1]
+        
+        if self.comm.rank == 0:
+            # Turn these Msol masses into percentages of the parent.
+            line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
+            ORDER BY SnapHaloID ASC;" % parent_currt
+            self.cursor.execute(line)
+            mark = 0
             result = self.cursor.fetchone()
+            while result:
+                mass = result[0]
+                self.child_mass_arr[mark:mark+NumNeighbors] /= mass
+                mark += NumNeighbors
+                result = self.cursor.fetchone()
+            
+            # Get the global ID for SnapHaloID=0 from the child; this will
+            # be used to prevent unnecessary SQL reads.
+            line = "SELECT GlobalHaloID FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
+            AND SnapHaloID=0;" % child_currt
+            self.cursor.execute(line)
+            baseChildID = self.cursor.fetchone()[0]
+        else:
+            baseChildID = None
         
-        # Get the global ID for the SnapHaloID=0 from the child, this will
-        # be used to prevent unnecessary SQL reads.
-        line = "SELECT GlobalHaloID FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
-        AND SnapHaloID=0;" % child_currt
-        self.cursor.execute(line)
-        baseChildID = self.cursor.fetchone()[0]
+        # Sync up data on all tasks.
+        self.child_mass_arr = self.comm.mpi_bcast(self.child_mass_arr)
+        baseChildID = self.comm.mpi_bcast(baseChildID)
         
         # Now we prepare a big list of writes to put in the database.
         for i,parent_halo in enumerate(sorted(self.candidates)):
@@ -663,76 +611,117 @@
         del parent_IDs, parent_masses, parent_halos
         del parent_IDs_tosend, parent_masses_tosend
         del parent_halos_tosend, child_IDs_tosend, child_halos_tosend
+        gc.collect()
         
         return (child_IDs, child_masses, child_halos)
 
+    def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
+            parent_masses, parent_send = None, child_send = None):
+        # Pick out IDs that are in both arrays.
+        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        # Pare down the arrays to just matched particle IDs.
+        parent_halos_cut = parent_halos[parent_in_child]
+        child_halos_cut = child_halos[child_in_parent]
+        parent_masses_cut = parent_masses[parent_in_child]
+        # Mark the IDs that have matches so they're not sent later.
+        if parent_send is not None:
+            parent_send[parent_in_child] = False
+            child_send[child_in_parent] = False
+        # For matching pairs of particles, add the contribution of the mass.
+        # Occasionally, there are matches of particle IDs where the parent
+        # and child halos have not been identified as likely relations,
+        # and in that case loc will be returned as -1, which is the 'garbage'
+        # position in child_mass_arr. This will be trimmed off later.
+        for i,pair in enumerate(zip(parent_halos_cut, child_halos_cut)):
+            loc = self.child_mass_loc[pair[0]][pair[1]]
+            self.child_mass_arr[loc] += parent_masses_cut[i]
+        if parent_send is None:
+            mylog.info("Clean-up round matched %d of %d parents and %d children." % \
+            (parent_in_child.sum(), parent_IDs.size, child_IDs.size))
+
     def _copy_and_update_db(self):
         """
         Because doing an UPDATE of a SQLite database is really slow, what we'll
         do here is basically read in lines from the database, and then insert
         the parent-child relationships, writing to a new DB.
         """
+        # All of this happens only on the root task!
         temp_name = self.database + '-tmp'
-        if self.mine == 0:
-            to_write = []
-            # Open the temporary database.
+        to_write = []
+        # Open the temporary database.
+        try:
+            os.remove(temp_name)
+        except OSError:
+            pass
+        temp_conn = sql.connect(temp_name)
+        temp_cursor = temp_conn.cursor()
+        line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
+                SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
+                HaloMass FLOAT,\
+                NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
+                CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
+                MaxRad FLOAT,\
+                ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
+                ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
+                ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
+                ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
+                ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
+        temp_cursor.execute(line)
+        temp_conn.commit()
+        # Get all the data!
+        self.cursor.execute("SELECT * FROM Halos;")
+        results = self.cursor.fetchone()
+        while results:
+            results = list(results)
+            currt = results[1]
+            hid = results[3]
+            # If for some reason this halo doesn't have relationships,
+            # we'll just keep the old results the same.
             try:
-                os.remove(temp_name)
-            except OSError:
-                pass
-            temp_conn = sql.connect(temp_name)
-            temp_cursor = temp_conn.cursor()
-            line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
-                    SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
-                    HaloMass FLOAT,\
-                    NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
-                    CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
-                    MaxRad FLOAT,\
-                    ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
-                    ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
-                    ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
-                    ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
-                    ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
-            temp_cursor.execute(line)
-            temp_conn.commit()
-            # Get all the data!
-            self.cursor.execute("SELECT * FROM Halos;")
+                lookup = self.write_values_dict[currt][hid]
+                new = tuple(results[:-10] + lookup)
+            except KeyError:
+                new = tuple(results)
+            to_write.append(new)
             results = self.cursor.fetchone()
-            while results:
-                results = list(results)
-                currt = results[1]
-                hid = results[3]
-                # If for some reason this halo doesn't have relationships,
-                # we'll just keep the old results the same.
-                try:
-                    lookup = self.write_values_dict[currt][hid]
-                    new = tuple(results[:-10] + lookup)
-                except KeyError:
-                    new = tuple(results)
-                to_write.append(new)
-                results = self.cursor.fetchone()
-            # Now write to the temp database.
-            # 23 question marks for 23 data columns.
-            line = ''
-            for i in range(23):
-                line += '?,'
-            # Pull off the last comma.
-            line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
-            for insert in to_write:
-                temp_cursor.execute(line, insert)
-            temp_conn.commit()
-            mylog.info("Creating database index.")
-            line = "CREATE INDEX IF NOT EXISTS HalosIndex ON Halos ("
-            for name in columns:
-                line += name +","
-            line = line[:-1] + ");"
-            temp_cursor.execute(line)
-            temp_cursor.close()
-            temp_conn.close()
+        # Now write to the temp database.
+        # 23 question marks for 23 data columns.
+        line = ''
+        for i in range(23):
+            line += '?,'
+        # Pull off the last comma.
+        line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
+        for insert in to_write:
+            temp_cursor.execute(line, insert)
+        temp_conn.commit()
+        temp_cursor.close()
+        temp_conn.close()
         self._close_database()
-        self.comm.barrier()
-        if self.mine == 0:
-            os.rename(temp_name, self.database)
+        os.rename(temp_name, self.database)
+
+    def _write_index(self):
+        mylog.info("Creating database index.")
+        line = "CREATE INDEX IF NOT EXISTS HalosIndex ON Halos ("
+        for name in columns:
+            line += name +","
+        line = line[:-1] + ");"
+        self.cursor.execute(line)
+
+    def _find_progress(self):
+        # This queries the database to see how far the work of identifying
+        # parent->child relationships has already progressed.
+        line = """SELECT ChildHaloID0, SnapZ from halos WHERE SnapHaloID = 0
+        ORDER BY SnapZ DESC;"""
+        self.cursor.execute(line)
+        results = self.cursor.fetchone()
+        while results:
+            results = list(results)
+            if results[0] == -1:
+                # We've hit a dump that does not have relationships. Save this.
+                return results[1] # the SnapZ.
+            results = self.cursor.fetchone()
+        return 0.
 
 class MergerTreeConnect(DatabaseFunctions):
     def __init__(self, database='halos.db'):


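The pattern running through this changeset is that only the root task touches the SQLite database, and anything the other tasks need from it is broadcast afterwards. A minimal standalone sketch of that pattern, written against mpi4py rather than yt's communicator wrapper (the database name and query are placeholders, not the code above):

    import sqlite3 as sql
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    def root_query(database, line):
        # Only rank 0 opens the database and runs the query...
        result = None
        if comm.rank == 0:
            conn = sql.connect(database)
            cursor = conn.cursor()
            cursor.execute(line)
            result = cursor.fetchall()
            conn.close()
        # ...and the rows are broadcast so every task sees the same answer.
        return comm.bcast(result, root=0)

    rows = root_query("halos.db", "SELECT GlobalHaloID FROM Halos;")

Keeping every read and write on rank 0 is what makes the md5-based _ensure_db_sync check removable in this commit.
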
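The rewritten _match replaces the old two-pointer walk over sorted particle IDs with a vectorized set intersection, plus a trailing 'garbage' element that absorbs contributions from matches whose parent/child halo pair was never registered as a likely relation. A rough illustration of the same idea on invented toy arrays:

    import numpy as na
    from collections import defaultdict

    def minus_one():
        # Unregistered parent/child pairs all map to the last (garbage) slot.
        return -1

    parent_IDs  = na.array([1, 3, 5, 9])     # sorted particle IDs in parents
    child_IDs   = na.array([3, 4, 5, 9])     # sorted particle IDs in children
    parent_mass = na.array([1., 2., 3., 4.])
    parent_halo = na.array([0, 0, 1, 1])
    child_halo  = na.array([0, 1, 1, 1])

    # Only the (parent 0, child 1) pair was flagged as a likely relation.
    loc = defaultdict(lambda: defaultdict(minus_one))
    loc[0][1] = 0

    # One real accumulator plus one garbage slot at the end.
    mass_arr = na.zeros(2)

    in_child  = na.in1d(parent_IDs, child_IDs, assume_unique=True)
    in_parent = na.in1d(child_IDs, parent_IDs, assume_unique=True)
    # Both ID arrays are sorted, so the matched subsets line up pairwise.
    for ph, ch, m in zip(parent_halo[in_child], child_halo[in_parent],
                         parent_mass[in_child]):
        mass_arr[loc[ph][ch]] += m

    mass_arr = mass_arr[:-1]    # trim off the garbage element, as above
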
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -294,17 +294,17 @@
     o_length = na.sum(levels_finest.values())
     r_length = na.sum(levels_all.values())
     output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int32') - 1 # init to -1
+    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
     corners = na.zeros((r_length, 3), dtype='float64')
     position = na.add.accumulate(
                 na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int32'))
+                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
     pp = position.copy()
     amr_utils.RecurseOctreeByLevels(0, 0, 0,
                ogl[0].dimensions[0],
                ogl[0].dimensions[1],
                ogl[0].dimensions[2],
-               position.astype('int32'), 1,
+               position.astype('int64'), 1,
                output, genealogy, corners, ogl)
     return output, genealogy, levels_all, levels_finest, pp, corners
 


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
 from yt.mods import *
 #from yt.utilities.math_utils import *
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -403,7 +404,7 @@
             status = 0
         # Broadcast the status from root - we stop only if root thinks we should
         # stop.
-        status = self.comm.mpi_bcast_pickled(status)
+        status = self.comm.mpi_bcast(status)
         if status == 0: return True
         if self.comm_cycle_count < status:
             return True


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     pasteboard_repo = '',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
-    hub_url = 'https://127.0.0.1:5000/',
+    hub_url = 'https://data.yt-project.org/upload',
     hub_api_key = '',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -86,6 +86,12 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
+    valid_file = [os.path.isfile(arg) if isinstance(arg, types.StringTypes) 
+            else False for arg in args]
+    if not any(valid_file):
+        mylog.error("None of the arguments provided to load() is a valid file")
+        mylog.error("Please check that you have used a correct path")
+        return None
     for n, c in output_type_registry.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)


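With the valid_file check above, load() now logs an error and returns None when none of its arguments points at an existing file, instead of failing somewhere inside a frontend. A small usage sketch; the dataset path is a placeholder:

    from yt.convenience import load

    pf = load("/data/run42/DD0042/DD0042")   # placeholder path
    if pf is None:
        # load() already logged why it failed; bail out cleanly.
        raise SystemExit("Could not load the requested dataset.")
    print pf.current_time
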
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -56,7 +56,7 @@
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
 from yt.utilities.minimal_representation import \
-    MinimalProjectionData
+    MinimalProjectionData, MinimalSliceData
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -851,7 +851,7 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-    def to_frb(self, width, resolution, center = None):
+    def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -866,6 +866,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB; by default it is equal to the width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
         center : array-like of floats, optional
@@ -892,13 +894,18 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
-        bounds = (center[xax] - width/2.0, center[xax] + width/2.0,
-                  center[yax] - width/2.0, center[yax] + width/2.0)
+        bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
+                  center[yax] - height*0.5, center[yax] + height*0.5)
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
@@ -1142,6 +1149,10 @@
     __quantities = None
     quantities = property(__get_quantities)
 
+    @property
+    def _mrep(self):
+        return MinimalSliceData(self)
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1309,7 +1320,7 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
-    def to_frb(self, width, resolution):
+    def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
 
@@ -1327,6 +1338,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB; by default it is equal to the width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
 
@@ -1348,10 +1361,15 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
         frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
         return frb
 
@@ -3653,62 +3671,16 @@
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
-        self.global_endindex = None
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        # Check for ill-behaved AMR schemes (Enzo) where we may have
-        # root-tile-boundary issues.  This is specific to the root tiles not
-        # allowing grids to cross them and also allowing > 1 level of
-        # difference between neighboring areas.
-        nz = 0
-        buf = 0.0
-        self.min_level = 0
-        dl = ((self.global_startindex.astype("float64") + 1)
-           / (self.pf.refine_by**self.level))
-        dr = ((self.global_startindex.astype("float64")
-              + self.ActiveDimensions - 1)
-           / (self.pf.refine_by**self.level))
-        if na.any(dl == na.rint(dl)) or na.any(dr == na.rint(dr)):
-            nz = 2 * self.pf.refine_by**self.level
-            buf = self._base_dx
-        if nz <= self.pf.refine_by**3: # delta level of 3
-            last_buf = [None,None,None]
-            count = 0
-            # Repeat until no more grids are covered (up to a delta level of 3)
-            while na.any(buf != last_buf) or count == 3:
-                cg = self.pf.h.covering_grid(self.level,
-                     self.left_edge - buf, self.ActiveDimensions + nz)
-                cg._use_pbar = False
-                count = cg.ActiveDimensions.prod()
-                for g in cg._grids:
-                    count -= cg._get_data_from_grid(g, [])
-                    if count <= 0:
-                        self.min_level = g.Level
-                        break
-                last_buf = buf
-                # Increase box by 2 cell widths at the min covering level
-                buf = 2*self._base_dx / self.pf.refine_by**self.min_level
-                nz += 4 * self.pf.refine_by**(self.level-self.min_level)
-                count += 1
-        else:
-            nz = buf = 0
-            self.min_level = 0
-        # This should not cost substantial additional time.
-        BLE = self.left_edge - buf
-        BRE = self.right_edge + buf
-        if na.any(BLE < self.pf.domain_left_edge) or \
-           na.any(BRE > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
-                            BLE, BRE, self.level, self.min_level)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
-                BLE, BRE, self.level,
-                min(self.level, self.min_level))
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
-        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)]
+        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
+                 / self.pf.domain_dimensions).max()
+        AMRCoveringGridBase._get_list_of_grids(self, buffer)
+        # We reverse the order to ensure that coarse grids are first
+        self._grids = self._grids[::-1]
 
     def get_data(self, field=None):
         self._get_list_of_grids()
@@ -3724,11 +3696,11 @@
         # We jump-start our task here
         mylog.debug("Getting fields %s from %s possible grids",
                    fields_to_get, len(self._grids))
-        self._update_level_state(self.min_level, fields_to_get, initialize=True)
+        self._update_level_state(0, fields_to_get)
         if self._use_pbar: pbar = \
                 get_pbar('Searching grids for values ', len(self._grids))
         # The grids are assumed to be pre-sorted
-        last_level = self.min_level
+        last_level = 0
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
@@ -3746,31 +3718,27 @@
                     raise KeyError(n_bad)
         if self._use_pbar: pbar.finish()
 
-    def _update_level_state(self, level, fields = None, initialize=False):
+    def _update_level_state(self, level, fields = None):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
-        RL = self.right_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self._old_global_endindex = self.global_endindex
-        # We use one grid cell at LEAST, plus one buffer on all sides
-        self.global_startindex = na.floor(LL / dx).astype('int64') - 1
-        self.global_endindex = na.ceil(RL / dx).astype('int64') + 1
+        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
         self.domain_width = na.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
-        if (level == 0 or initialize) and self.level > 0:
-            idims = self.global_endindex - self.global_startindex
+        if level == 0 and self.level > 0:
+            # We use one grid cell at LEAST, plus one buffer on all sides
+            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
-        elif (level == 0 or initialize) and self.level == 0:
+        elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            #idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
@@ -3779,16 +3747,15 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf
-        input_right = (self._old_global_endindex - 0.5) * rf
-        output_left = self.global_startindex + 0.5
-        output_right = self.global_endindex - 0.5
-        output_dims = (output_right - output_left + 1).astype('int32')
+        input_left = (self._old_global_startindex + 0.5) * rf 
+        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
+        output_dims = na.rint((self.right_edge-self.left_edge)/dx+0.5).astype('int32') + 2
 
         self._cur_dims = output_dims
 
         for field in fields:
             output_field = na.zeros(output_dims, dtype="float64")
+            output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
             self.field_data[field] = output_field


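The height keyword added to to_frb makes rectangular fixed resolution buffers possible; width and height each accept a float in code units or a (value, unit) tuple, and height falls back to width when omitted. A hedged usage sketch, with placeholder dataset and field names:

    from yt.convenience import load

    pf = load("galaxy0030")          # placeholder dataset
    sl = pf.h.slice(2, 0.5)          # slice along z at 0.5 in code units
    frb = sl.to_frb((400.0, 'kpc'), 512, height=(200.0, 'kpc'))
    image = frb["Density"]           # 512x512 pixels spanning 400 by 200 kpc
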
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -326,9 +326,9 @@
             return None
 
         full_name = "%s/%s" % (node, name)
-        try:
+        if len(self._data_file[full_name].shape) > 0:
             return self._data_file[full_name][:]
-        except TypeError:
+        else:
             return self._data_file[full_name]
 
     def _close_data_file(self):
@@ -337,18 +337,6 @@
             del self._data_file
             self._data_file = None
 
-    def _deserialize_hierarchy(self, harray):
-        # THIS IS BROKEN AND NEEDS TO BE FIXED
-        mylog.debug("Cached entry found.")
-        self.gridDimensions[:] = harray[:,0:3]
-        self.gridStartIndices[:] = harray[:,3:6]
-        self.gridEndIndices[:] = harray[:,6:9]
-        self.gridLeftEdge[:] = harray[:,9:12]
-        self.gridRightEdge[:] = harray[:,12:15]
-        self.gridLevels[:] = harray[:,15:16]
-        self.gridTimes[:] = harray[:,16:17]
-        self.gridNumberOfParticles[:] = harray[:,17:18]
-
     def get_smallest_dx(self):
         """
         Returns (in code units) the smallest cell size in the simulation.


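The hierarchy change above swaps a try/except TypeError for an explicit shape check, because a zero-dimensional (scalar) HDF5 dataset cannot be sliced with [:]. A tiny illustration of the distinction, with made-up node names; here the scalar case is read with [()] rather than returned as a dataset object:

    import h5py
    import numpy as na

    f = h5py.File("scratch.h5", "w")
    f["array_node"]  = na.arange(5)    # one-dimensional dataset
    f["scalar_node"] = 3.14            # zero-dimensional (scalar) dataset
    f.close()

    f = h5py.File("scratch.h5", "r")
    for name in ("array_node", "scalar_node"):
        dset = f[name]
        if len(dset.shape) > 0:
            value = dset[:]            # arrays support slicing
        else:
            value = dset[()]           # scalars have to be read whole
        print name, value
    f.close()
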
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -212,7 +212,7 @@
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
-                            self.grid_levels, mask, min_level)
+                            self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
         return self.grids[mask], na.where(mask)
 


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -205,23 +205,16 @@
                 v = getattr(self, a)
                 mylog.info("Parameters: %-25s = %s", a, v)
 
-    _field_info = None
     def create_field_info(self):
-        if getattr(self, "_field_info", None) is None:
+        if getattr(self, "field_info", None) is None:
             # The setting up of fields occurs in the hierarchy, which is only
             # instantiated once.  So we have to double check to make sure that,
             # in the event of double-loads of a parameter file, we do not blow
             # away the existing field_info.
-            self._field_info = FieldInfoContainer.create_with_fallback(
+            self.field_info = FieldInfoContainer.create_with_fallback(
                                 self._fieldinfo_fallback)
 
-    _get_hierarchy = True
-    @property
-    def field_info(self):
-        if self._get_hierarchy:
-            self._get_hierarchy=False
-            self.hierarchy
-        return self._field_info
+        
 
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -29,9 +29,11 @@
 from yt.convenience import load
 from .data_containers import data_object_registry
 from .analyzer_objects import create_quantity_proxy, \
-    analysis_task_registry
+    analysis_task_registry, AnalysisTask
 from .derived_quantities import quantity_info
 from yt.utilities.exceptions import YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface \
+    import parallel_objects
 
 class AnalysisTaskProxy(object):
     def __init__(self, time_series):
@@ -51,14 +53,38 @@
     def __contains__(self, key):
         return key in analysis_task_registry
 
+def get_pf_prop(propname):
+    def _eval(params, pf):
+        return getattr(pf, propname)
+    cls = type(propname, (AnalysisTask,),
+                dict(eval = _eval, _params = tuple()))
+    return cls
+
+attrs = ("refine_by", "dimensionality", "current_time",
+         "domain_dimensions", "domain_left_edge",
+         "domain_right_edge", "unique_identifier",
+         "current_redshift", "cosmological_simulation",
+         "omega_matter", "omega_lambda", "hubble_constant")
+
+class TimeSeriesParametersContainer(object):
+    def __init__(self, data_object):
+        self.data_object = data_object
+
+    def __getattr__(self, attr):
+        if attr in attrs:
+            return self.data_object.eval(get_pf_prop(attr)())
+        raise AttributeError(attr)
+
 class TimeSeriesData(object):
-    def __init__(self, outputs = None):
+    def __init__(self, outputs = None, parallel = True):
         if outputs is None: outputs = []
         self.outputs = outputs
         self.tasks = AnalysisTaskProxy(self)
+        self.params = TimeSeriesParametersContainer(self)
         for type_name in data_object_registry:
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
+        self.parallel = parallel
 
     def __iter__(self):
         # We can make this fancier, but this works
@@ -77,17 +103,23 @@
         self.outputs.append(pf)
         
     def eval(self, tasks, obj=None):
-        if obj == None: obj = TimeSeriesDataObject(self, "all_data")
         tasks = ensure_list(tasks)
-        return_values = []
-        for pf in self:
-            return_values.append([])
+        return_values = {}
+        if self.parallel == False:
+            njobs = 1
+        else:
+            if self.parallel == True: njobs = -1
+            else: njobs = self.parallel
+        for store, pf in parallel_objects(self.outputs, njobs, return_values):
+            store.result = []
             for task in tasks:
                 try:
                     style = inspect.getargspec(task.eval)[0][1]
                     if style == 'pf':
                         arg = pf
                     elif style == 'data_object':
+                        if obj == None:
+                            obj = TimeSeriesDataObject(self, "all_data")
                         arg = obj.get(pf)
                     rv = task.eval(arg)
                 # We catch and store YT-originating exceptions
@@ -95,27 +127,28 @@
                 # small.
                 except YTException as rv:
                     pass
-                return_values[-1].append(rv)
-        return return_values
+                store.result.append(rv)
+        return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filename_list):
+    def from_filenames(cls, filename_list, parallel = True):
         outputs = []
         for fn in filename_list:
             outputs.append(load(fn))
-        obj = cls(outputs)
+        obj = cls(outputs, parallel = parallel)
         return obj
 
     @classmethod
     def from_output_log(cls, output_log,
-                        line_prefix = "DATASET WRITTEN"):
+                        line_prefix = "DATASET WRITTEN",
+                        parallel = True):
         outputs = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
             cut_line = line[len(line_prefix):].strip()
             fn = cut_line.split()[0]
             outputs.append(load(fn))
-        obj = cls(outputs)
+        obj = cls(outputs, parallel = parallel)
         return obj
 
 class TimeSeriesQuantitiesContainer(object):


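With the new parallel keyword, TimeSeriesData.eval hands its outputs to parallel_objects: parallel=True maps to njobs = -1 and lets the decomposition be chosen automatically, an integer requests that many jobs, and parallel=False keeps everything serial. A hedged usage sketch with placeholder output names:

    from yt.data_objects.time_series import TimeSeriesData

    fns = ["DD%04d/DD%04d" % (i, i) for i in range(10)]   # placeholder outputs
    ts = TimeSeriesData.from_filenames(fns, parallel=True)

    # The new params container gathers simple dataset attributes from every
    # output; results come back ordered by output.
    times = ts.params.current_time
    redshifts = ts.params.current_redshift
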
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -354,6 +354,11 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays")
         except:
             return False
+        hash = f["/"].attrs.get("hash", None)
+        if hash != self.parameter_file._hash():
+            mylog.info("Binary hierarchy does not match: recreating")
+            f.close()
+            return False
         self.grid_dimensions[:] = f["/ActiveDimensions"][:]
         self.grid_left_edge[:] = f["/LeftEdges"][:]
         self.grid_right_edge[:] = f["/RightEdges"][:]
@@ -390,6 +395,7 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays", "w")
         except IOError:
             return
+        f["/"].attrs["hash"] = self.parameter_file._hash()
         f.create_dataset("/LeftEdges", data=self.grid_left_edge)
         f.create_dataset("/RightEdges", data=self.grid_right_edge)
         parents, procs, levels = [], [], []
@@ -462,7 +468,7 @@
                     field_list = field_list.union(gf)
         else:
             field_list = None
-        field_list = self.comm.mpi_bcast_pickled(field_list)
+        field_list = self.comm.mpi_bcast(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 
@@ -481,7 +487,7 @@
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids)-1,1)].astype("int32")
+            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):


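The binary hierarchy cache is now stamped with a hash of the parameter file, so a cache written for a different or since-modified dataset is rebuilt instead of silently reused. The same idea in miniature, with made-up file and dataset names and a plain string standing in for parameter_file._hash():

    import h5py

    def save_cache(fn, left_edges, current_hash):
        f = h5py.File(fn, "w")
        f["/"].attrs["hash"] = current_hash
        f.create_dataset("/LeftEdges", data=left_edges)
        f.close()

    def load_cache(fn, current_hash):
        f = h5py.File(fn, "r")
        if f["/"].attrs.get("hash", None) != current_hash:
            # Stale or unstamped cache; the caller should regenerate it.
            f.close()
            return None
        left_edges = f["/LeftEdges"][:]
        f.close()
        return left_edges
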
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -79,13 +79,6 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        facevars = [s for s in self._handle
-                    if s.startswith(("fcx","fcy","fcz")) and s[-1].isdigit()]
-        nfacevars = len(facevars)
-        if (nfacevars > 0) :
-            ncomp += nfacevars
-            for facevar in facevars :
-                self.field_list.append(facevar)
         if ("/particle names" in self._handle) :
             self.field_list += ["particle_" + s[0].strip() for s
                                 in self._handle["/particle names"][:]]
@@ -106,14 +99,22 @@
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
         
-        self.grid_left_edge[:] = f["/bounding box"][:,:,0]
-        self.grid_right_edge[:] = f["/bounding box"][:,:,1]
+        # Initialize to the domain left / domain right
+        ND = self.parameter_file.dimensionality
+        DLE = self.parameter_file.domain_left_edge
+        DRE = self.parameter_file.domain_right_edge
+        for i in range(3):
+            self.grid_left_edge[:,i] = DLE[i]
+            self.grid_right_edge[:,i] = DRE[i]
+        # We only go up to ND for 2D datasets
+        self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
+        self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
         
         # Move this to the parameter file
         try:
-            nxb = pf._find_parameter("integer", "nxb", True)
-            nyb = pf._find_parameter("integer", "nyb", True)
-            nzb = pf._find_parameter("integer", "nzb", True)
+            nxb = pf.parameters['nxb']
+            nyb = pf.parameters['nyb']
+            nzb = pf.parameters['nzb']
         except KeyError:
             nxb, nyb, nzb = [int(f["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
@@ -230,6 +231,9 @@
             self.parameters["EOSType"] = -1
         if self.cosmological_simulation == 1:
             self._setup_comoving_units()
+        if "pc_unitsbase" in self.parameters:
+            if self.parameters["pc_unitsbase"] == "CGS":
+                self._setup_cgs_units()
         else:
             self._setup_nounits_units()
         self.time_units['1'] = 1
@@ -265,6 +269,22 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
+    def _setup_cgs_units(self):
+        self.conversion_factors['dens'] = 1.0
+        self.conversion_factors['pres'] = 1.0
+        self.conversion_factors['eint'] = 1.0
+        self.conversion_factors['ener'] = 1.0
+        self.conversion_factors['temp'] = 1.0
+        self.conversion_factors['velx'] = 1.0
+        self.conversion_factors['vely'] = 1.0
+        self.conversion_factors['velz'] = 1.0
+        self.conversion_factors['particle_velx'] = 1.0
+        self.conversion_factors['particle_vely'] = 1.0
+        self.conversion_factors['particle_velz'] = 1.0
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
     def _setup_nounits_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -277,7 +297,6 @@
         self.conversion_factors['particle_velx'] = 1.0
         self.conversion_factors['particle_vely'] = 1.0
         self.conversion_factors['particle_velz'] = 1.0
-        z = 0
         mylog.warning("Setting 1.0 in code units to be 1.0 cm")
         if not self.has_key("TimeUnits"):
             mylog.warning("No time units.  Setting 1.0 = 1 second.")
@@ -309,24 +328,60 @@
                 self._handle["sim info"][:]["file format version"])
         else:
             raise RuntimeError("Can't figure out FLASH file version.")
+        # First we load all of the parameters
+        hns = ["simulation parameters"]
+        # note the ordering here is important: runtime parameters should
+        # overwrite scalars with the same name.
+        for ptype in ['scalars', 'runtime parameters']:
+            for vtype in ['integer', 'real', 'logical', 'string']:
+                hns.append("%s %s" % (vtype, ptype))
+        if self._flash_version > 7:
+            for hn in hns:
+                if hn not in self._handle:
+                    continue
+                for varname, val in zip(self._handle[hn][:,'name'],
+                                        self._handle[hn][:,'value']):
+                    vn = varname.strip()
+                    if hn.startswith("string") :
+                        pval = val.strip()
+                    else :
+                        pval = val
+                    if vn in self.parameters and self.parameters[vn] != pval:
+                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn)) 
+                    self.parameters[vn] = pval
+        if self._flash_version == 7:
+            for hn in hns:
+                if hn not in self._handle:
+                    continue
+                if hn is 'simulation parameters':
+                    zipover = zip(self._handle[hn].dtype.names,self._handle[hn][0])
+                else:
+                    zipover = zip(self._handle[hn][:,'name'],self._handle[hn][:,'value'])
+                for varname, val in zipover:
+                    vn = varname.strip()
+                    if hn.startswith("string") :
+                        pval = val.strip()
+                    else :
+                        pval = val
+                    if vn in self.parameters and self.parameters[vn] != pval:
+                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+                    self.parameters[vn] = pval
         self.domain_left_edge = na.array(
-            [self._find_parameter("real", "%smin" % ax) for ax in 'xyz']).astype("float64")
+            [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
         self.domain_right_edge = na.array(
-            [self._find_parameter("real", "%smax" % ax) for ax in 'xyz']).astype("float64")
-        self.min_level = self._find_parameter(
-            "integer", "lrefine_min", scalar = False) - 1
+            [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
+        self.min_level = self.parameters["lrefine_min"] -1
 
         # Determine domain dimensions
         try:
-            nxb = self._find_parameter("integer", "nxb", scalar = True)
-            nyb = self._find_parameter("integer", "nyb", scalar = True)
-            nzb = self._find_parameter("integer", "nzb", scalar = True)
+            nxb = self.parameters["nxb"]
+            nyb = self.parameters["nyb"]
+            nzb = self.parameters["nzb"]
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz'] # FLASH2 only!
         try:
-            dimensionality = self._find_parameter("integer", "dimensionality",
-                                                  scalar = True)
+            dimensionality = self.parameters["dimensionality"]
         except KeyError:
             dimensionality = 3
             if nzb == 1: dimensionality = 2
@@ -334,45 +389,28 @@
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
 
-        nblockx = self._find_parameter("integer", "nblockx")
-        nblocky = self._find_parameter("integer", "nblocky")
-        nblockz = self._find_parameter("integer", "nblockz")
+        nblockx = self.parameters["nblockx"]
+        nblocky = self.parameters["nblocky"]
+        nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
-
         try:
-            self.parameters['Gamma'] = self._find_parameter("real", "gamma")
-        except KeyError:
+            self.parameters["Gamma"] = self.parameters["gamma"]
+        except:
+            mylog.warning("Cannot find Gamma")
             pass
 
-        if self._flash_version == 7:
-            self.current_time = float(
-                self._handle["simulation parameters"][:]["time"])
-        else:
-            self.current_time = \
-                float(self._find_parameter("real", "time", scalar=True))
+        self.current_time = self.parameters["time"]
 
-        if self._flash_version == 7:
-            self.parameters['timestep'] = float(
-                self._handle["simulation parameters"]["timestep"])
-        else:
-            self.parameters['timestep'] = \
-                float(self._find_parameter("real", "dt", scalar=True))
-
-        try:
-            use_cosmo = self._find_parameter("logical", "usecosmology") 
+        try: 
+            self.parameters["usecosmology"]
+            self.cosmological_simulation = 1
+            self.current_redshift = self.parameters['redshift']
+            self.omega_lambda = self.parameters['cosmologicalconstant']
+            self.omega_matter = self.parameters['omegamatter']
+            self.hubble_constant = self.parameters['hubbleconstant']
         except:
-            use_cosmo = 0
-
-        if use_cosmo == 1:
-            self.cosmological_simulation = 1
-            self.current_redshift = self._find_parameter("real", "redshift",
-                                        scalar = True)
-            self.omega_lambda = self._find_parameter("real", "cosmologicalconstant")
-            self.omega_matter = self._find_parameter("real", "omegamatter")
-            self.hubble_constant = self._find_parameter("real", "hubbleconstant")
-        else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 


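The FLASH frontend now flattens every scalar and runtime parameter into the single self.parameters dict, reading runtime parameters after scalars so they win on name collisions, and all later lookups become plain dictionary accesses. A condensed sketch of that flattening for FLASH3-style files, assuming the (name, value) record layout read above; the file name is a placeholder:

    import h5py

    parameters = {}
    f = h5py.File("flash_hdf5_plt_cnt_0000", "r")        # placeholder file
    hns = []
    for ptype in ['scalars', 'runtime parameters']:       # later entries win
        for vtype in ['integer', 'real', 'logical', 'string']:
            hns.append("%s %s" % (vtype, ptype))
    for hn in hns:
        if hn not in f:
            continue
        for name, val in zip(f[hn][:, 'name'], f[hn][:, 'value']):
            vn = name.strip()
            parameters[vn] = val.strip() if hn.startswith("string") else val
    f.close()

    # Later lookups then read straight from the dict, e.g.:
    # xmin, xmax = parameters['xmin'], parameters['xmax']
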
diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -34,7 +34,8 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-
+from yt.utilities.physical_constants import \
+    kboltz
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -62,9 +63,8 @@
                     "y-velocity": "vely",
                     "z-velocity": "velz",
                     "Density": "dens",
-                    "TotalEnergy": "ener",
-                    "GasEnergy": "eint",
                     "Temperature": "temp",
+                    "Pressure" : "pres", 
                     "particle_position_x" : "particle_posx",
                     "particle_position_y" : "particle_posy",
                     "particle_position_z" : "particle_posz",
@@ -193,17 +193,16 @@
         add_flash_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)],
                   particle_type = pfield)
-    else:
-        if f.endswith("_Fraction") :
-            dname = "%s\/Fraction" % f.split("_")[0]
-        else :
-            dname = f                    
-        ff = KnownFLASHFields[v]
-        pfield = f.startswith("particle")
-        add_field(f, TranslationFunc(v),
-                  take_log=KnownFLASHFields[v].take_log,
-                  units = ff._units, display_name=dname,
-                  particle_type = pfield)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownFLASHFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownFLASHFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
 
 def _convertParticleMassMsun(data):
     return 1.0/1.989e33
@@ -213,3 +212,44 @@
           function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
           particle_type=True, convert_function=_convertParticleMassMsun,
           particle_convert_function=_ParticleMassMsun)
+
+def _ThermalEnergy(fields, data) :
+    try:
+        return data["eint"]
+    except:
+        pass
+    try:
+        return data["Pressure"] / (data.pf["Gamma"] - 1.0) / data["Density"]
+    except:
+        pass
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return kboltz*data["Density"]*data["Temperature"]/(mu*mh) / (data.pf["Gamma"] - 1.0)
+    
+add_field("ThermalEnergy", function=_ThermalEnergy,
+          units=r"\rm{ergs}/\rm{g}")
+
+def _TotalEnergy(fields, data) :
+    try:
+        etot = data["ener"]
+    except:
+        etot = data["ThermalEnergy"] + 0.5 * (
+            data["x-velocity"]**2.0 +
+            data["y-velocity"]**2.0 +
+            data["z-velocity"]**2.0)
+    try:
+        etot += data['magp']/data["Density"]
+    except:
+        pass
+    return etot
+
+add_field("TotalEnergy", function=_TotalEnergy,
+          units=r"\rm{ergs}/\rm{g}")
+
+def _GasEnergy(fields, data) :
+    return data["ThermalEnergy"]
+
+add_field("GasEnergy", function=_GasEnergy, 
+          units=r"\rm{ergs}/\rm{g}")
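
A usage sketch for the new fields (the FLASH plotfile name below is made up):
the derived "ThermalEnergy" falls back from the "eint" variable to
Pressure/(Gamma - 1)/Density, and finally to an ideal-gas estimate from
Temperature, depending on what the file actually provides.

    from yt.mods import load

    pf = load("flash_hdf5_plt_cnt_0000")   # hypothetical FLASH plotfile
    dd = pf.h.all_data()
    # Works whether the file stores "eint", "pres", or only "temp".
    print dd["ThermalEnergy"].max()
    print dd["TotalEnergy"].max()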


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -151,7 +151,7 @@
         if not os.path.exists(fn): return
         with open(fn, 'r') as f:
             lines = f.readlines()
-            self.num_stars = int(lines[0].strip())
+            self.num_stars = int(lines[0].strip()[0])
             for line in lines[1:]:
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -29,6 +29,8 @@
     mh, kboltz
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
     FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
@@ -44,24 +46,24 @@
 OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = OrionFieldInfo.add_field
 
-add_orion_field("density", function=lambda a,b: None, take_log=True,
+add_orion_field("density", function=NullFunc, take_log=True,
                 validators = [ValidateDataField("density")],
                 units=r"\rm{g}/\rm{cm}^3")
 KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_orion_field("eden", function=lambda a,b: None, take_log=True,
+add_orion_field("eden", function=NullFunc, take_log=True,
                 validators = [ValidateDataField("eden")],
                 units=r"\rm{erg}/\rm{cm}^3")
 
-add_orion_field("xmom", function=lambda a,b: None, take_log=False,
+add_orion_field("xmom", function=NullFunc, take_log=False,
                 validators = [ValidateDataField("xmom")],
                 units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_orion_field("ymom", function=lambda a,b: None, take_log=False,
+add_orion_field("ymom", function=NullFunc, take_log=False,
                 validators = [ValidateDataField("ymom")],
                 units=r"\rm{gm}/\rm{cm^2\ s}")
 
-add_orion_field("zmom", function=lambda a,b: None, take_log=False,
+add_orion_field("zmom", function=NullFunc, take_log=False,
                 validators = [ValidateDataField("zmom")],
                 units=r"\rm{g}/\rm{cm^2\ s}")
 
@@ -76,15 +78,14 @@
                     "z-momentum": "zmom"
                    }
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
-
 for f,v in translation_dict.items():
-    if v not in OrionFieldInfo:
-        add_field(v, function=lambda a,b: None, take_log=False,
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units = ff._units, display_name=f)
 
 def _xVelocity(field, data):
     """generate x-velocity from x-momentum and density


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -27,6 +27,21 @@
 cimport numpy as np
 cimport cython
 
+cdef extern from "math.h":
+    double exp(double x)
+    float expf(float x)
+    long double expl(long double x)
+    double floor(double x)
+    double ceil(double x)
+    double fmod(double x, double y)
+    double log2(double x)
+    long int lrint(double x)
+    double fabs(double x)
+    double cos(double x)
+    double sin(double x)
+    double asin(double x)
+    double acos(double x)
+
 cdef class position:
     cdef public int output_pos, refined_pos
     def __cinit__(self):
@@ -107,16 +122,15 @@
                                         curpos, ci - grid.offset, output, refined, grids)
     return s
 
- at cython.boundscheck(False)
 def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
                           int i_f, int j_f, int k_f,
-                          np.ndarray[np.int32_t, ndim=1] curpos,
+                          np.ndarray[np.int64_t, ndim=1] curpos,
                           int gi, 
                           np.ndarray[np.float64_t, ndim=2] output,
-                          np.ndarray[np.int32_t, ndim=2] genealogy,
+                          np.ndarray[np.int64_t, ndim=2] genealogy,
                           np.ndarray[np.float64_t, ndim=2] corners,
                           OctreeGridList grids):
-    cdef np.int32_t i, i_off, j, j_off, k, k_off, ci, fi
+    cdef np.int64_t i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
     cdef OctreeGrid grid = grids[gi-1]
@@ -129,7 +143,7 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
-    cdef int cp
+    cdef np.int64_t cp
     cdef int s = 0
     for i_off in range(i_f):
         i = i_off + i_i
@@ -153,9 +167,9 @@
                     child_grid = grids[ci-1]
                     child_dx = child_grid.dx[0]
                     child_leftedges = child_grid.left_edges
-                    child_i = int((cx-child_leftedges[0])/child_dx)
-                    child_j = int((cy-child_leftedges[1])/child_dx)
-                    child_k = int((cz-child_leftedges[2])/child_dx)
+                    child_i = lrint((cx-child_leftedges[0])/child_dx)
+                    child_j = lrint((cy-child_leftedges[1])/child_dx)
+                    child_k = lrint((cz-child_leftedges[2])/child_dx)
                     # set current child id to id of next cell to examine
                     genealogy[cp, 0] = curpos[level+1] 
                     # set next parent id to id of current cell
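
The change from int() truncation to lrint() matters because the division can
land just below an integer in floating point; a quick plain-Python
illustration of the failure mode (values chosen to trigger it):

    cx, left_edge, dx = 0.3, 0.0, 0.1
    ratio = (cx - left_edge) / dx
    print repr(ratio)         # 2.9999999999999996, just shy of the true 3
    print int(ratio)          # 2 -- truncation picks the wrong child index
    print int(round(ratio))   # 3 -- round-to-nearest, which is what lrint does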


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -42,8 +42,7 @@
                   np.ndarray[np.float64_t, ndim=1] mresult,
                   np.ndarray[np.float64_t, ndim=1] qresult,
                   np.ndarray[np.float64_t, ndim=1] used):
-    cdef int n
-    cdef np.int64_t bin
+    cdef int n, bin
     cdef np.float64_t wval, bval
     for n in range(bins_x.shape[0]):
         bin = bins_x[n]
@@ -55,6 +54,7 @@
         bresult[bin] += wval*bval
         mresult[bin] += wval * (bval - mresult[bin]) / wresult[bin]
         used[bin] = 1
+    return
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -68,7 +68,7 @@
                   np.ndarray[np.float64_t, ndim=2] mresult,
                   np.ndarray[np.float64_t, ndim=2] qresult,
                   np.ndarray[np.float64_t, ndim=2] used):
-    cdef int n
+    cdef int n, bini, binj
     cdef np.int64_t bin
     cdef np.float64_t wval, bval
     for n in range(bins_x.shape[0]):
@@ -82,6 +82,7 @@
         bresult[bini, binj] += wval*bval
         mresult[bini, binj] += wval * (bval - mresult[bini, binj]) / wresult[bini, binj]
         used[bini, binj] = 1
+    return
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -96,7 +97,7 @@
                   np.ndarray[np.float64_t, ndim=3] mresult,
                   np.ndarray[np.float64_t, ndim=3] qresult,
                   np.ndarray[np.float64_t, ndim=3] used):
-    cdef int n
+    cdef int n, bini, binj, bink
     cdef np.int64_t bin
     cdef np.float64_t wval, bval
     for n in range(bins_x.shape[0]):
@@ -111,6 +112,7 @@
         bresult[bini, binj, bink] += wval*bval
         mresult[bini, binj, bink] += wval * (bval - mresult[bini, binj, bink]) / wresult[bini, binj, bink]
         used[bini, binj, bink] = 1
+    return
 
 @cython.boundscheck(False)
 @cython.wraparound(False)


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -995,7 +995,8 @@
         import IPython
         if IPython.__version__.startswith("0.10"):
             api_version = '0.10'
-        elif IPython.__version__.startswith("0.11"):
+        elif IPython.__version__.startswith("0.11") or \
+             IPython.__version__.startswith("0.12"):
             api_version = '0.11'
 
         local_ns = yt.mods.__dict__.copy()
@@ -1010,11 +1011,7 @@
         else:
             from IPython.config.loader import Config
             cfg = Config()
-            cfg.InteractiveShellEmbed.local_ns = local_ns
-            IPython.embed(config=cfg)
-            from IPython.frontend.terminal.embed import InteractiveShellEmbed
-            ipshell = InteractiveShellEmbed(config=cfg)
-
+            IPython.embed(config=cfg,user_ns=local_ns)
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
@@ -1329,25 +1326,25 @@
                     port=int(args.port), repl=hr)
 
 class YTStatsCmd(YTCommand):
-    args = ('outputfn','bn','skip','pf', 'field',
-            dict(long="--max", action="store_true", default=False,
-                 dest='max', help="Display maximum of requested field."),
-            dict(long="--min", action="store_true", default=False,
-                 dest='min', help="Display minimum of requested field."))
+    args = ('outputfn','bn','skip','pf','field',
+            dict(long="--max", action='store_true', default=False,
+                 dest='max', help="Display maximum of field requested through -f option."),
+            dict(long="--min", action='store_true', default=False,
+                 dest='min', help="Display minimum of field requested through -f option."))
     name = "stats"
     description = \
         """
         Print stats and max/min value of a given field (if requested),
         for one or more datasets
 
-        (default field is density)
+        (default field is Density)
 
         """
 
     def __call__(self, args):
         pf = args.pf
         pf.h.print_stats()
-        if args.field in pf.h.field_list:
+        if args.field in pf.h.derived_field_list:
             if args.max == True:
                 v, c = pf.h.find_max(args.field)
                 print "Maximum %s: %0.5e at %s" % (args.field, v, c)


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -31,18 +31,18 @@
 from yt.config import ytcfg
 from yt.funcs import *
 
-try:
-    from poster.streaminghttp import register_openers
-    from poster.encode import multipart_encode
-    register_openers()
-except ImportError:
-    pass
+from .poster.streaminghttp import register_openers
+from .poster.encode import multipart_encode
+register_openers()
 
 class UploaderBar(object):
     pbar = None
+    def __init__(self, my_name = ""):
+        self.my_name = my_name
+
     def __call__(self, name, prog, total):
         if self.pbar is None:
-            self.pbar = get_pbar("Uploading %s" % name, total)
+            self.pbar = get_pbar("Uploading %s " % self.my_name, total)
         self.pbar.update(prog)
         if prog == total:
             self.pbar.finish()
@@ -113,12 +113,12 @@
         rv = urllib2.urlopen(request).read()
         uploader_info = json.loads(rv)
         new_url = url + "/handler/%s" % uploader_info['handler_uuid']
-        for cn, cv in chunks:
+        for i, (cn, cv) in enumerate(chunks):
             remaining = cv.size * cv.itemsize
             f = TemporaryFile()
             na.save(f, cv)
             f.seek(0)
-            pbar = UploaderBar()
+            pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
             datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
             request = urllib2.Request(new_url, datagen, headers)
             rv = urllib2.urlopen(request).read()
@@ -152,8 +152,8 @@
 
 class MinimalMappableData(MinimalRepresentation):
 
-    weight = "None"
-    _attr_list = ("field_data", "field", "weight", "axis", "output_hash")
+    _attr_list = ("field_data", "field", "weight_field", "axis", "output_hash",
+                  "vm_type")
 
     def _generate_post(self):
         nobj = self._return_filtered_object(("field_data",))
@@ -163,3 +163,20 @@
 
 class MinimalProjectionData(MinimalMappableData):
     type = 'proj'
+    vm_type = "Projection"
+
+class MinimalSliceData(MinimalMappableData):
+    type = 'slice'
+    vm_type = "Slice"
+    weight_field = "None"
+
+class MinimalImageCollectionData(MinimalRepresentation):
+    type = "image_collection"
+    _attr_list = ("name", "output_hash", "images", "image_metadata")
+
+    def _generate_post(self):
+        nobj = self._return_filtered_object(("images",))
+        metadata = nobj._attrs
+        chunks = [(fn, d) for fn, d in self.images]
+        return (metadata, ('images', chunks))
+


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -259,7 +259,7 @@
                 all_clear = 0
         else:
             all_clear = None
-        all_clear = comm.mpi_bcast_pickled(all_clear)
+        all_clear = comm.mpi_bcast(all_clear)
         if not all_clear: raise RuntimeError
     if parallel_capable: return root_only
     return func
@@ -503,9 +503,25 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def mpi_bcast_pickled(self, data):
-        data = self.comm.bcast(data, root=0)
-        return data
+    def mpi_bcast(self, data):
+        # The second check below makes sure that we know how to communicate
+        # this type of array. Otherwise, we'll pickle it.
+        if isinstance(data, na.ndarray) and \
+                get_mpi_type(data.dtype) is not None:
+            if self.comm.rank == 0:
+                info = (data.shape, data.dtype)
+            else:
+                info = ()
+            info = self.comm.bcast(info, root=0)
+            if self.comm.rank != 0:
+                data = na.empty(info[0], dtype=info[1])
+            mpi_type = get_mpi_type(info[1])
+            self.comm.Bcast([data, mpi_type], root = 0)
+            return data
+        else:
+            # Use pickled methods.
+            data = self.comm.bcast(data, root = 0)
+            return data
 
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
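
Outside of yt, the same broadcast pattern written against bare mpi4py looks
roughly like the sketch below (not the yt wrapper itself): small metadata goes
through the pickled bcast, the array payload through the buffer-based Bcast.

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        data = np.linspace(0.0, 1.0, 8)
        info = (data.shape, data.dtype)
    else:
        data = None
        info = None
    info = comm.bcast(info, root=0)          # pickled, tiny
    if comm.rank != 0:
        data = np.empty(info[0], dtype=info[1])
    comm.Bcast(data, root=0)                 # raw buffer, no pickling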


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/poster/README
--- /dev/null
+++ b/yt/utilities/poster/README
@@ -0,0 +1,4 @@
+Poster is a module by Chris AtLee, licensed under the MIT License, included
+here.  For more information, see the poster home page:
+
+http://atlee.ca/software/poster


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/poster/__init__.py
--- /dev/null
+++ b/yt/utilities/poster/__init__.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2011 Chris AtLee
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+"""poster module
+
+Support for streaming HTTP uploads, and multipart/form-data encoding
+
+```poster.version``` is a 3-tuple of integers representing the version number.
+New releases of poster will always have a version number that compares greater
+than an older version of poster.
+New in version 0.6."""
+
+import streaminghttp
+import encode
+
+version = (0, 8, 1) # Thanks JP!


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/poster/encode.py
--- /dev/null
+++ b/yt/utilities/poster/encode.py
@@ -0,0 +1,414 @@
+"""multipart/form-data encoding module
+
+This module provides functions that facilitate encoding name/value pairs
+as multipart/form-data suitable for an HTTP POST or PUT request.
+
+multipart/form-data is the standard way to upload files over HTTP"""
+
+__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
+        'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
+        'multipart_encode']
+
+try:
+    import uuid
+    def gen_boundary():
+        """Returns a random string to use as the boundary for a message"""
+        return uuid.uuid4().hex
+except ImportError:
+    import random, sha
+    def gen_boundary():
+        """Returns a random string to use as the boundary for a message"""
+        bits = random.getrandbits(160)
+        return sha.new(str(bits)).hexdigest()
+
+import urllib, re, os, mimetypes
+try:
+    from email.header import Header
+except ImportError:
+    # Python 2.4
+    from email.Header import Header
+
+def encode_and_quote(data):
+    """If ``data`` is unicode, return urllib.quote_plus(data.encode("utf-8"))
+    otherwise return urllib.quote_plus(data)"""
+    if data is None:
+        return None
+
+    if isinstance(data, unicode):
+        data = data.encode("utf-8")
+    return urllib.quote_plus(data)
+
+def _strify(s):
+    """If s is a unicode string, encode it to UTF-8 and return the results,
+    otherwise return str(s), or None if s is None"""
+    if s is None:
+        return None
+    if isinstance(s, unicode):
+        return s.encode("utf-8")
+    return str(s)
+
+class MultipartParam(object):
+    """Represents a single parameter in a multipart/form-data request
+
+    ``name`` is the name of this parameter.
+
+    If ``value`` is set, it must be a string or unicode object to use as the
+    data for this parameter.
+
+    If ``filename`` is set, it is what to say that this parameter's filename
+    is.  Note that this does not have to be the actual filename of any local file.
+
+    If ``filetype`` is set, it is used as the Content-Type for this parameter.
+    If unset it defaults to "text/plain; charset=utf8"
+
+    If ``filesize`` is set, it specifies the length of the file ``fileobj``
+
+    If ``fileobj`` is set, it must be a file-like object that supports
+    .read().
+
+    ``value`` and ``fileobj`` must not both be set; doing so will
+    raise a ValueError.
+
+    If ``fileobj`` is set, and ``filesize`` is not specified, then
+    the file's size will be determined first by stat'ing ``fileobj``'s
+    file descriptor, and if that fails, by seeking to the end of the file,
+    recording the current position as the size, and then by seeking back to the
+    beginning of the file.
+
+    ``cb`` is a callable which will be called from iter_encode with (self,
+    current, total), representing the current parameter, current amount
+    transferred, and the total size.
+    """
+    def __init__(self, name, value=None, filename=None, filetype=None,
+                        filesize=None, fileobj=None, cb=None):
+        self.name = Header(name).encode()
+        self.value = _strify(value)
+        if filename is None:
+            self.filename = None
+        else:
+            if isinstance(filename, unicode):
+                # Encode with XML entities
+                self.filename = filename.encode("ascii", "xmlcharrefreplace")
+            else:
+                self.filename = str(filename)
+            self.filename = self.filename.encode("string_escape").\
+                    replace('"', '\\"')
+        self.filetype = _strify(filetype)
+
+        self.filesize = filesize
+        self.fileobj = fileobj
+        self.cb = cb
+
+        if self.value is not None and self.fileobj is not None:
+            raise ValueError("Only one of value or fileobj may be specified")
+
+        if fileobj is not None and filesize is None:
+            # Try and determine the file size
+            try:
+                self.filesize = os.fstat(fileobj.fileno()).st_size
+            except (OSError, AttributeError):
+                try:
+                    fileobj.seek(0, 2)
+                    self.filesize = fileobj.tell()
+                    fileobj.seek(0)
+                except:
+                    raise ValueError("Could not determine filesize")
+
+    def __cmp__(self, other):
+        attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
+        myattrs = [getattr(self, a) for a in attrs]
+        oattrs = [getattr(other, a) for a in attrs]
+        return cmp(myattrs, oattrs)
+
+    def reset(self):
+        if self.fileobj is not None:
+            self.fileobj.seek(0)
+        elif self.value is None:
+            raise ValueError("Don't know how to reset this parameter")
+
+    @classmethod
+    def from_file(cls, paramname, filename):
+        """Returns a new MultipartParam object constructed from the local
+        file at ``filename``.
+
+        ``filesize`` is determined by os.path.getsize(``filename``)
+
+        ``filetype`` is determined by mimetypes.guess_type(``filename``)[0]
+
+        ``filename`` is set to os.path.basename(``filename``)
+        """
+
+        return cls(paramname, filename=os.path.basename(filename),
+                filetype=mimetypes.guess_type(filename)[0],
+                filesize=os.path.getsize(filename),
+                fileobj=open(filename, "rb"))
+
+    @classmethod
+    def from_params(cls, params):
+        """Returns a list of MultipartParam objects from a sequence of
+        name, value pairs, MultipartParam instances,
+        or from a mapping of names to values
+
+        The values may be strings or file objects, or MultipartParam objects.
+        MultipartParam object names must match the given names in the
+        name,value pairs or mapping, if applicable."""
+        if hasattr(params, 'items'):
+            params = params.items()
+
+        retval = []
+        for item in params:
+            if isinstance(item, cls):
+                retval.append(item)
+                continue
+            name, value = item
+            if isinstance(value, cls):
+                assert value.name == name
+                retval.append(value)
+                continue
+            if hasattr(value, 'read'):
+                # Looks like a file object
+                filename = getattr(value, 'name', None)
+                if filename is not None:
+                    filetype = mimetypes.guess_type(filename)[0]
+                else:
+                    filetype = None
+
+                retval.append(cls(name=name, filename=filename,
+                    filetype=filetype, fileobj=value))
+            else:
+                retval.append(cls(name, value))
+        return retval
+
+    def encode_hdr(self, boundary):
+        """Returns the header of the encoding of this parameter"""
+        boundary = encode_and_quote(boundary)
+
+        headers = ["--%s" % boundary]
+
+        if self.filename:
+            disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
+                    self.filename)
+        else:
+            disposition = 'form-data; name="%s"' % self.name
+
+        headers.append("Content-Disposition: %s" % disposition)
+
+        if self.filetype:
+            filetype = self.filetype
+        else:
+            filetype = "text/plain; charset=utf-8"
+
+        headers.append("Content-Type: %s" % filetype)
+
+        headers.append("")
+        headers.append("")
+
+        return "\r\n".join(headers)
+
+    def encode(self, boundary):
+        """Returns the string encoding of this parameter"""
+        if self.value is None:
+            value = self.fileobj.read()
+        else:
+            value = self.value
+
+        if re.search("^--%s$" % re.escape(boundary), value, re.M):
+            raise ValueError("boundary found in encoded string")
+
+        return "%s%s\r\n" % (self.encode_hdr(boundary), value)
+
+    def iter_encode(self, boundary, blocksize=4096):
+        """Yields the encoding of this parameter
+        If self.fileobj is set, then blocks of ``blocksize`` bytes are read and
+        yielded."""
+        total = self.get_size(boundary)
+        current = 0
+        if self.value is not None:
+            block = self.encode(boundary)
+            current += len(block)
+            yield block
+            if self.cb:
+                self.cb(self, current, total)
+        else:
+            block = self.encode_hdr(boundary)
+            current += len(block)
+            yield block
+            if self.cb:
+                self.cb(self, current, total)
+            last_block = ""
+            encoded_boundary = "--%s" % encode_and_quote(boundary)
+            boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary),
+                    re.M)
+            while True:
+                block = self.fileobj.read(blocksize)
+                if not block:
+                    current += 2
+                    yield "\r\n"
+                    if self.cb:
+                        self.cb(self, current, total)
+                    break
+                last_block += block
+                if boundary_exp.search(last_block):
+                    raise ValueError("boundary found in file data")
+                last_block = last_block[-len(encoded_boundary)-2:]
+                current += len(block)
+                yield block
+                if self.cb:
+                    self.cb(self, current, total)
+
+    def get_size(self, boundary):
+        """Returns the size in bytes that this param will be when encoded
+        with the given boundary."""
+        if self.filesize is not None:
+            valuesize = self.filesize
+        else:
+            valuesize = len(self.value)
+
+        return len(self.encode_hdr(boundary)) + 2 + valuesize
+
+def encode_string(boundary, name, value):
+    """Returns ``name`` and ``value`` encoded as a multipart/form-data
+    variable.  ``boundary`` is the boundary string used throughout
+    a single request to separate variables."""
+
+    return MultipartParam(name, value).encode(boundary)
+
+def encode_file_header(boundary, paramname, filesize, filename=None,
+        filetype=None):
+    """Returns the leading data for a multipart/form-data field that contains
+    file data.
+
+    ``boundary`` is the boundary string used throughout a single request to
+    separate variables.
+
+    ``paramname`` is the name of the variable in this request.
+
+    ``filesize`` is the size of the file data.
+
+    ``filename`` if specified is the filename to give to this field.  This
+    field is only useful to the server for determining the original filename.
+
+    ``filetype`` if specified is the MIME type of this file.
+
+    The actual file data should be sent after this header has been sent.
+    """
+
+    return MultipartParam(paramname, filesize=filesize, filename=filename,
+            filetype=filetype).encode_hdr(boundary)
+
+def get_body_size(params, boundary):
+    """Returns the number of bytes that the multipart/form-data encoding
+    of ``params`` will be."""
+    size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
+    return size + len(boundary) + 6
+
+def get_headers(params, boundary):
+    """Returns a dictionary with Content-Type and Content-Length headers
+    for the multipart/form-data encoding of ``params``."""
+    headers = {}
+    boundary = urllib.quote_plus(boundary)
+    headers['Content-Type'] = "multipart/form-data; boundary=%s" % boundary
+    headers['Content-Length'] = str(get_body_size(params, boundary))
+    return headers
+
+class multipart_yielder:
+    def __init__(self, params, boundary, cb):
+        self.params = params
+        self.boundary = boundary
+        self.cb = cb
+
+        self.i = 0
+        self.p = None
+        self.param_iter = None
+        self.current = 0
+        self.total = get_body_size(params, boundary)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        """generator function to yield multipart/form-data representation
+        of parameters"""
+        if self.param_iter is not None:
+            try:
+                block = self.param_iter.next()
+                self.current += len(block)
+                if self.cb:
+                    self.cb(self.p, self.current, self.total)
+                return block
+            except StopIteration:
+                self.p = None
+                self.param_iter = None
+
+        if self.i is None:
+            raise StopIteration
+        elif self.i >= len(self.params):
+            self.param_iter = None
+            self.p = None
+            self.i = None
+            block = "--%s--\r\n" % self.boundary
+            self.current += len(block)
+            if self.cb:
+                self.cb(self.p, self.current, self.total)
+            return block
+
+        self.p = self.params[self.i]
+        self.param_iter = self.p.iter_encode(self.boundary)
+        self.i += 1
+        return self.next()
+
+    def reset(self):
+        self.i = 0
+        self.current = 0
+        for param in self.params:
+            param.reset()
+
+def multipart_encode(params, boundary=None, cb=None):
+    """Encode ``params`` as multipart/form-data.
+
+    ``params`` should be a sequence of (name, value) pairs or MultipartParam
+    objects, or a mapping of names to values.
+    Values are either string parameter values, or file-like objects to use as
+    the parameter value.  The file-like objects must support .read() and either
+    .fileno() or both .seek() and .tell().
+
+    If ``boundary`` is set, then it is used as the MIME boundary.  Otherwise
+    a randomly generated boundary will be used.  In either case, if the
+    boundary string appears in the parameter values a ValueError will be
+    raised.
+
+    If ``cb`` is set, it should be a callback which will get called as blocks
+    of data are encoded.  It will be called with (param, current, total),
+    indicating the current parameter being encoded, the current amount encoded,
+    and the total amount to encode.
+
+    Returns a tuple of `datagen`, `headers`, where `datagen` is a
+    generator that will yield blocks of data that make up the encoded
+    parameters, and `headers` is a dictionary with the associated
+    Content-Type and Content-Length headers.
+
+    Examples:
+
+    >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
+    >>> s = "".join(datagen)
+    >>> assert "value2" in s and "value1" in s
+
+    >>> p = MultipartParam("key", "value2")
+    >>> datagen, headers = multipart_encode( [("key", "value1"), p] )
+    >>> s = "".join(datagen)
+    >>> assert "value2" in s and "value1" in s
+
+    >>> datagen, headers = multipart_encode( {"key": "value1"} )
+    >>> s = "".join(datagen)
+    >>> assert "value2" not in s and "value1" in s
+
+    """
+    if boundary is None:
+        boundary = gen_boundary()
+    else:
+        boundary = urllib.quote_plus(boundary)
+
+    headers = get_headers(params, boundary)
+    params = MultipartParam.from_params(params)
+
+    return multipart_yielder(params, boundary, cb), headers
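
The way minimal_representation.py drives this module is essentially the
following (the URL and payload are made up):

    import urllib2
    import numpy as na
    from tempfile import TemporaryFile

    from yt.utilities.poster.streaminghttp import register_openers
    from yt.utilities.poster.encode import multipart_encode

    register_openers()                  # install the streaming HTTP handlers

    f = TemporaryFile()
    na.save(f, na.arange(16))           # one chunk of array data
    f.seek(0)

    datagen, headers = multipart_encode({'chunk_data': f})
    request = urllib2.Request("http://hub.example.org/handler/1234",
                              datagen, headers)
    reply = urllib2.urlopen(request).read()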


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/poster/streaminghttp.py
--- /dev/null
+++ b/yt/utilities/poster/streaminghttp.py
@@ -0,0 +1,199 @@
+"""Streaming HTTP uploads module.
+
+This module extends the standard httplib and urllib2 objects so that
+iterable objects can be used in the body of HTTP requests.
+
+In most cases all one should have to do is call :func:`register_openers()`
+to register the new streaming http handlers which will take priority over
+the default handlers, and then you can use iterable objects in the body
+of HTTP requests.
+
+**N.B.** You must specify a Content-Length header if using an iterable object
+since there is no way to determine in advance the total size that will be
+yielded, and there is no way to reset an iterator.
+
+Example usage:
+
+>>> from StringIO import StringIO
+>>> import urllib2, poster.streaminghttp
+
+>>> opener = poster.streaminghttp.register_openers()
+
+>>> s = "Test file data"
+>>> f = StringIO(s)
+
+>>> req = urllib2.Request("http://localhost:5000", f,
+...                       {'Content-Length': str(len(s))})
+"""
+
+import httplib, urllib2, socket
+from httplib import NotConnected
+
+__all__ = ['StreamingHTTPConnection', 'StreamingHTTPRedirectHandler',
+        'StreamingHTTPHandler', 'register_openers']
+
+if hasattr(httplib, 'HTTPS'):
+    __all__.extend(['StreamingHTTPSHandler', 'StreamingHTTPSConnection'])
+
+class _StreamingHTTPMixin:
+    """Mixin class for HTTP and HTTPS connections that implements a streaming
+    send method."""
+    def send(self, value):
+        """Send ``value`` to the server.
+
+        ``value`` can be a string object, a file-like object that supports
+        a .read() method, or an iterable object that supports a .next()
+        method.
+        """
+        # Based on python 2.6's httplib.HTTPConnection.send()
+        if self.sock is None:
+            if self.auto_open:
+                self.connect()
+            else:
+                raise NotConnected()
+
+        # send the data to the server. if we get a broken pipe, then close
+        # the socket. we want to reconnect when somebody tries to send again.
+        #
+        # NOTE: we DO propagate the error, though, because we cannot simply
+        #       ignore the error... the caller will know if they can retry.
+        if self.debuglevel > 0:
+            print "send:", repr(value)
+        try:
+            blocksize = 8192
+            if hasattr(value, 'read') :
+                if hasattr(value, 'seek'):
+                    value.seek(0)
+                if self.debuglevel > 0:
+                    print "sendIng a read()able"
+                data = value.read(blocksize)
+                while data:
+                    self.sock.sendall(data)
+                    data = value.read(blocksize)
+            elif hasattr(value, 'next'):
+                if hasattr(value, 'reset'):
+                    value.reset()
+                if self.debuglevel > 0:
+                    print "sendIng an iterable"
+                for data in value:
+                    self.sock.sendall(data)
+            else:
+                self.sock.sendall(value)
+        except socket.error, v:
+            if v[0] == 32:      # Broken pipe
+                self.close()
+            raise
+
+class StreamingHTTPConnection(_StreamingHTTPMixin, httplib.HTTPConnection):
+    """Subclass of `httplib.HTTPConnection` that overrides the `send()` method
+    to support iterable body objects"""
+
+class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
+    """Subclass of `urllib2.HTTPRedirectHandler` that overrides the
+    `redirect_request` method to properly handle redirected POST requests
+
+    This class is required because python 2.5's HTTPRedirectHandler does
+    not remove the Content-Type or Content-Length headers when requesting
+    the new resource, even though the body of the original request is not preserved.
+    """
+
+    handler_order = urllib2.HTTPRedirectHandler.handler_order - 1
+
+    # From python2.6 urllib2's HTTPRedirectHandler
+    def redirect_request(self, req, fp, code, msg, headers, newurl):
+        """Return a Request or None in response to a redirect.
+
+        This is called by the http_error_30x methods when a
+        redirection response is received.  If a redirection should
+        take place, return a new Request to allow http_error_30x to
+        perform the redirect.  Otherwise, raise HTTPError if no-one
+        else should try to handle this url.  Return None if you can't
+        but another Handler might.
+        """
+        m = req.get_method()
+        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
+            or code in (301, 302, 303) and m == "POST"):
+            # Strictly (according to RFC 2616), 301 or 302 in response
+            # to a POST MUST NOT cause a redirection without confirmation
+            # from the user (of urllib2, in this case).  In practice,
+            # essentially all clients do redirect in this case, so we
+            # do the same.
+            # be conciliant with URIs containing a space
+            newurl = newurl.replace(' ', '%20')
+            newheaders = dict((k, v) for k, v in req.headers.items()
+                              if k.lower() not in (
+                                  "content-length", "content-type")
+                             )
+            return urllib2.Request(newurl,
+                           headers=newheaders,
+                           origin_req_host=req.get_origin_req_host(),
+                           unverifiable=True)
+        else:
+            raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+class StreamingHTTPHandler(urllib2.HTTPHandler):
+    """Subclass of `urllib2.HTTPHandler` that uses
+    StreamingHTTPConnection as its http connection class."""
+
+    handler_order = urllib2.HTTPHandler.handler_order - 1
+
+    def http_open(self, req):
+        """Open a StreamingHTTPConnection for the given request"""
+        return self.do_open(StreamingHTTPConnection, req)
+
+    def http_request(self, req):
+        """Handle a HTTP request.  Make sure that Content-Length is specified
+        if we're using an interable value"""
+        # Make sure that if we're using an iterable object as the request
+        # body, that we've also specified Content-Length
+        if req.has_data():
+            data = req.get_data()
+            if hasattr(data, 'read') or hasattr(data, 'next'):
+                if not req.has_header('Content-length'):
+                    raise ValueError(
+                            "No Content-Length specified for iterable body")
+        return urllib2.HTTPHandler.do_request_(self, req)
+
+if hasattr(httplib, 'HTTPS'):
+    class StreamingHTTPSConnection(_StreamingHTTPMixin,
+            httplib.HTTPSConnection):
+        """Subclass of `httplib.HTTSConnection` that overrides the `send()`
+        method to support iterable body objects"""
+
+    class StreamingHTTPSHandler(urllib2.HTTPSHandler):
+        """Subclass of `urllib2.HTTPSHandler` that uses
+        StreamingHTTPSConnection as its http connection class."""
+
+        handler_order = urllib2.HTTPSHandler.handler_order - 1
+
+        def https_open(self, req):
+            return self.do_open(StreamingHTTPSConnection, req)
+
+        def https_request(self, req):
+            # Make sure that if we're using an iterable object as the request
+            # body, that we've also specified Content-Length
+            if req.has_data():
+                data = req.get_data()
+                if hasattr(data, 'read') or hasattr(data, 'next'):
+                    if not req.has_header('Content-length'):
+                        raise ValueError(
+                                "No Content-Length specified for iterable body")
+            return urllib2.HTTPSHandler.do_request_(self, req)
+
+
+def get_handlers():
+    handlers = [StreamingHTTPHandler, StreamingHTTPRedirectHandler]
+    if hasattr(httplib, "HTTPS"):
+        handlers.append(StreamingHTTPSHandler)
+    return handlers
+    
+def register_openers():
+    """Register the streaming http handlers in the global urllib2 default
+    opener object.
+
+    Returns the created OpenerDirector object."""
+    opener = urllib2.build_opener(*get_handlers())
+
+    urllib2.install_opener(opener)
+
+    return opener


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -162,6 +162,7 @@
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
     # support for setjmp.
     config.add_subpackage("amr_kdtree")
+    config.add_subpackage("poster")
     config.add_subpackage("answer_testing")
     config.add_subpackage("delaunay")  # From SciPy, written by Robert Kern
     config.add_subpackage("kdtree")


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -475,7 +475,7 @@
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)
         if orientation == "top" or orientation == "bottom":
-            imorigin = (imorigin[0] - shift[0], imorigin[1] - shift[1])
+            imorigin = (imorigin[0] - shift[0], imorigin[1] + shift[1])
             self.canvas.insert(pyx.bitmap.bitmap(imorigin[0], imorigin[1], cmap_im,
                                                  width=-size[0], height=size[1]))
         else:


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -680,7 +680,7 @@
         self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def snapshot(self, fn = None):
+    def snapshot(self, fn = None, clim = None):
         nv = 12*self.nside**2
         image = na.zeros((nv,1,3), dtype='float64', order='C')
         vs = arr_pix2vec_nest(self.nside, na.arange(nv))
@@ -718,6 +718,7 @@
             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
             cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+            if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())
             canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
@@ -1269,7 +1270,8 @@
         return R
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None, no_ghost = True):
+                        field, weight = None, volume = None, no_ghost = True,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1336,7 +1338,8 @@
     cam = pf.h.camera(center, normal_vector, width, resolution, tf,
                       fields = fields,
                       log_fields = [False] * len(fields),
-                      volume = volume, no_ghost = no_ghost)
+                      volume = volume, no_ghost = no_ghost,
+                      north_vector = north_vector)
     vals = cam.snapshot()
     image = vals[:,:,0]
     if weight is None:
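
With the new keyword an off-axis projection can be pinned to a fixed "up"
direction; a minimal sketch (the dataset name is hypothetical, width is in
code units):

    from yt.mods import load
    from yt.visualization.volume_rendering.camera import off_axis_projection

    pf = load("galaxy0030/galaxy0030")
    c = [0.5, 0.5, 0.5]                 # center
    L = [1.0, 0.5, 0.2]                 # line of sight
    N = [0.0, 0.0, 1.0]                 # keep the image "up" along z
    image = off_axis_projection(pf, c, L, 0.25, 512, "Density",
                                north_vector = N)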


diff -r 4b18e35630ad9ee88f77574bbbc541b123c5dd70 -r 8b97e3dfb122b2d702f89dbc0865272221542b74 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -26,7 +26,6 @@
 try: import pyfits
 except: pass
 import numpy as na
-import matplotlib; from matplotlib import pylab
 
 from yt.funcs import *
 
@@ -82,6 +81,8 @@
     elements.  Optionally, *label*, *label_color* and *label_size* may be
     specified.
     """
+    import matplotlib
+    import pylab
     Nvec = image.shape[0]
     image[na.isnan(image)] = 0.0
     ma = image[image>0.0].max()
@@ -110,6 +111,7 @@
     with "_rgb.png."  *label*, *label_color* and *label_size* may also be
     specified.
     """
+    import pylab
     Nvec = image.shape[0]
     image[na.isnan(image)] = 0.0
     if image.shape[2] >= 4:



https://bitbucket.org/yt_analysis/yt/changeset/2bbd3f126279/
changeset:   2bbd3f126279
branch:      yt
user:        gsiisg
date:        2012-05-15 20:46:17
summary:     testing push
affected #:  1 file

diff -r 8b97e3dfb122b2d702f89dbc0865272221542b74 -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -459,7 +459,7 @@
     def _get_ellipsoid_parameters_basic(self):
         na.seterr(all='ignore')
         # check if there are 4 particles to form an ellipsoid
-        # neglecting to check if 4 particles in the same plane,
+        # neglecting to check if the 4 particles are in the same plane,
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
         if na.size(self["particle_position_x"]) < 4:



https://bitbucket.org/yt_analysis/yt/changeset/65db3370fd9b/
changeset:   65db3370fd9b
branch:      yt
user:        gsiisg
date:        2012-06-14 03:19:53
summary:     merge with yt
affected #:  81 files

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include distribute_setup.py
-recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
-recursive-include yt/ *.pyx *.pxd *.hh *.h README* 
+recursive-include yt/gui/reason/html *.html *.png *.ico *.js
+recursive-include yt *.pyx *.pxd *.hh *.h README* 


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -42,7 +42,7 @@
 INST_SQLITE3=1  # Install a local version of SQLite3?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
-INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
+INST_0MQ=0      # Install 0mq (for IPython) and affiliated bindings?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -353,7 +353,7 @@
 
 # Now we dump all our SHA512 files out.
 
-echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
 echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
 echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
@@ -366,7 +366,7 @@
 echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c  ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
 echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
 echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
+echo '702f67c48e4dbe191dbe5ca0df6b5a84fa4f5c424cf1fae60b5053dfe6532531330738c7aa3012d900d49efdd743cd1ebc238bb15f354f67228e2a2c95b98a89  mercurial-2.2.tar.gz' > mercurial-2.2.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -388,10 +388,10 @@
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
-get_enzotools mercurial-2.0.tar.gz
+get_enzotools mercurial-2.2.tar.gz
 get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
-get_enzotools Cython-0.15.1.tar.gz
+get_enzotools Cython-0.16.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -531,7 +531,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.0
+    do_setup_py mercurial-2.2
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -631,7 +631,7 @@
 
 do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
-do_setup_py Cython-0.15.1
+do_setup_py Cython-0.16
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,5 +1,6 @@
 #!python
 import os, re
+from distutils import version
 from yt.mods import *
 from yt.data_objects.data_containers import AMRData
 namespace = locals().copy()
@@ -22,6 +23,11 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
+if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+    api_version = '0.10'
+else:
+    api_version = '0.11'
+
 if IPython.__version__.startswith("0.10"):
     api_version = '0.10'
 elif IPython.__version__.startswith("0.11") or \


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -544,6 +544,91 @@
         return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
             e0_vector[2], tilt)
 
+class RockstarHalo(Halo):
+    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
+                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
+        """Implement the properties reported by Rockstar: ID, Descendant ID,
+           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
+           Most defaults are removed since we do not read in which particles
+           belong to which halos.
+        """
+        #we can still use get_sphere!
+        self.ID = ID #from rockstar
+        self.id = index #index in the halo list
+        self.pf = halo_list.pf
+
+        self.DescID = DescID
+        self.Mvir = Mvir
+        self.Vmax = Vmax
+        self.Vrms = Vrms
+        self.Rvir = Rvir
+        self.Rs   = Rs
+        self.Np   = Np
+        self.X    = X
+        self.Y    = Y
+        self.Z    = Z
+        self.VX   = VX
+        self.VY   = VY
+        self.VZ   = VZ
+        self.JX   = JX
+        self.JY   = JY
+        self.JZ   = JZ
+        self.Spin = Spin
+
+        #Halo.__init__(self,halo_list,index,
+        self.size=Np 
+        self.CoM=na.array([X,Y,Z])
+        self.max_dens_point=-1
+        self.group_total_mass=-1
+        self.max_radius=Rvir
+        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.rms_vel=-1
+        self.group_total_mass = -1 #not implemented 
+    
+    def maximum_density(self):
+        r"""Not implemented."""
+        return -1
+
+    def maximum_density_location(self):
+        r"""Not implemented."""
+        return self.center_of_mass()
+
+    def total_mass(self):
+        r"""Not implemented."""
+        return -1
+
+    def get_size(self):
+        r"""Return the number of particles belonging to the halo."""
+        return self.Np
+
+    def write_particle_list(self,handle):
+        r"""Not implemented."""
+        return -1
+
+    def virial_mass(self):
+        r"""Virial mass in Msun/h"""
+        return self.Mvir
+
+    def virial_radius(self):
+        r"""Virial radius in Mpc/h comoving"""
+        return self.Rvir
+
+    def virial_bin(self):
+        r"""Not implemented"""
+        return -1
+
+    def virial_density(self):
+        r"""Not implemented """
+        return -1
+
+    def virial_info(self):
+        r"""Not implemented"""
+        return -1 
+
+    def __getitem__(self,key):
+        r"""Not implemented"""
+        return None
+
 
     def get_ellipsoid_parameters(self):
         r"""Calculate the parameters that describe the ellipsoid of
@@ -1184,6 +1269,97 @@
             f.flush()
         f.close()
 
+class RockstarHaloList(HaloList):
+    #because we don't yet know halo-particle affiliations
+    #most of the halo list methods are not implemented
+    #furthermore, Rockstar only accepts DM particles of
+    #a fixed mass, so we don't allow stars at all
+    #Still, we inherit from HaloList because in the future
+    #we might implement halo-particle affiliations
+    def __init__(self,pf,out_list):
+        mylog.info("Initializing Rockstar List")
+        self._data_source = None
+        self._groups = []
+        self._max_dens = -1
+        self.pf = pf
+        self.out_list = out_list
+        mylog.info("Parsing Rockstar halo list")
+        self._parse_output(out_list)
+        mylog.info("Finished %s"%out_list)
+
+    def _run_finder(self):
+        pass
+
+    def __obtain_particles(self):
+        pass
+
+    def _get_dm_indices(self):
+        pass
+
+    def _parse_output(self,out_list=None):
+        """
+        Read the out_*.list text file produced
+        by Rockstar into memory."""
+        
+        pf = self.pf
+
+        if out_list is None:
+            out_list = self.out_list
+
+        lines = open(out_list).readlines()
+        names = []
+        formats = []
+        
+        #find the variable names from the first defining line
+        names = lines[0].replace('#','').split(' ')
+        for j,line in enumerate(lines):
+            if not line.startswith('#'): break
+
+        #find out the table datatypes by evaluating the first data line
+        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
+        for num in splits:
+            if 'nan' not in num:
+                formats += na.array(eval(num)).dtype,
+            else:
+                formats += na.dtype('float'),
+        assert len(formats) == len(names)
+
+        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        Jc = 1.0
+        conv = dict(X=1.0/pf['mpchcm'],
+                    Y=1.0/pf['mpchcm'],
+                    Z=1.0/pf['mpchcm'], #to unitary
+                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    Mvir=1.0, #Msun/h
+                    Vmax=1e0,Vrms=1e0,
+                    Rvir=1.0/pf['kpchcm'],
+                    Rs=1.0/pf['kpchcm'],
+                    JX=Jc,JY=Jc,JZ=Jc)
+        dtype = {'names':names,'formats':formats}
+        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        #convert position units  
+        for name in names:
+            halo_table[name]=halo_table[name]*conv.get(name,1)
+        
+        for k,row in enumerate(halo_table):
+            args = tuple([val for val in row])
+            halo = RockstarHalo(self,k,*args)
+            self._groups.append(halo)
+    
+
+    #len is ok
+    #iter is OK
+    #getitem is ok
+    #nn is ok I think
+    #nn2d is ok I think
+
+    def write_out(self):
+        pass
+    def write_particle_list(self):
+        pass
+    
+
+    
 
 class HOPHaloList(HaloList):
 
@@ -1558,7 +1734,7 @@
         while index < self.group_count:
             self._groups[index] = self._halo_class(self, index, \
                 size=self.group_sizes[index], CoM=self.CoM[index], \
-                max_dens_point=self.max_dens_point[i], \
+                max_dens_point=self.max_dens_point[index], \
                 group_total_mass=self.Tot_M[index],
                 max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],

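The new _parse_output above infers the column names from the '#' header line and the dtypes from the first data row before calling na.loadtxt. A self-contained sketch of that inference; the file name and the exact skiprows handling are illustrative:

    import numpy as np

    lines = open("out_0.list").readlines()       # Rockstar catalog written by the finder
    names = lines[0].replace('#', '').split()    # column names come from the header line
    for j, line in enumerate(lines):
        if not line.startswith('#'):
            break                                # j is the index of the first data row

    formats = []
    for num in line.split():
        # eval() a sample value to guess int vs. float; 'nan' falls back to float
        if 'nan' in num:
            formats.append(np.dtype('float'))
        else:
            formats.append(np.array(eval(num)).dtype)

    dtype = {'names': names, 'formats': formats}
    halo_table = np.loadtxt("out_0.list", skiprows=j, dtype=dtype, comments='#')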

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -25,9 +25,11 @@
 
 from yt.mods import *
 from os import environ
+from os import mkdir
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
 import rockstar_interface
 import socket
 import time
@@ -45,14 +47,28 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0):
+    def __init__(self, pf, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,overwrite=False,
+            left_edge = None, right_edge = None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
         self.hierarchy = pf.h
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
+        self.particle_mass = particle_mass 
+        self.overwrite = overwrite
+        if left_edge is None:
+            left_edge = pf.domain_left_edge
+        if right_edge is None:
+            right_edge = pf.domain_right_edge
+        self.le = left_edge
+        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
+            print '%i readers + %i writers != %i mpi ranks'%\
+                    (self.num_readers,self.num_writers,self.comm.size)
             raise RuntimeError
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = None
@@ -64,6 +80,9 @@
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
         data_source = self.pf.h.all_data()
+        if outbase is None:
+            outbase = str(self.pf)+'_rockstar'
+        self.outbase = outbase        
         self.handler = rockstar_interface.RockstarInterface(
                 self.pf, data_source)
 
@@ -80,16 +99,29 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1):
+    def run(self, block_ratio = 1,**kwargs):
+        """
+        Spawn the Rockstar server, reader, and writer tasks and run the halo finder.
+        """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
-                    block_ratio = block_ratio)
+                    block_ratio = block_ratio,
+                    outbase = self.outbase,
+                    particle_mass = float(self.particle_mass),
+                    **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -97,9 +129,17 @@
             if self.workgroup.name == "server":
                 self.handler.start_server()
             elif self.workgroup.name == "readers":
-                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
             elif self.workgroup.name == "writers":
-                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
+        #quickly rename the out_0.list 
+    
+    def halo_list(self,file_name='out_0.list'):
+        """
+        Reads in the out_0.list file and generates RockstarHaloList
+        and RockstarHalo objects.
+        """
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)

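A usage sketch for the extended finder; the import path simply follows the file path above (the module's public api may re-export it), the dataset name is illustrative, and the script is assumed to run under MPI with at least num_readers + num_writers + 1 ranks, as the constructor now checks:

    from yt.mods import load
    from yt.analysis_modules.halo_finding.rockstar.rockstar import RockstarHaloFinder

    pf = load("RedshiftOutput0005/RD0005")        # illustrative dataset
    rh = RockstarHaloFinder(pf, num_readers=1,    # num_writers defaults to ranks - readers - 1
                            outbase=str(pf) + '_rockstar',
                            particle_mass=-1.0)   # < 0: assume single-mass particles
    rh.run()
    halos = rh.halo_list('out_0.list')            # RockstarHaloList of RockstarHalo objects
    print halos[0].virial_mass(), halos[0].virial_radius()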

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -241,7 +241,7 @@
 cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
     cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6]
+    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
     dd = rh.data_source
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
@@ -257,9 +257,12 @@
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = rh.le[0]
+    left_edge[1] = rh.le[1]
+    left_edge[2] = rh.le[2]
+    right_edge[0] = rh.re[0]
+    right_edge[1] = rh.re[1]
+    right_edge[2] = rh.re[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
@@ -274,11 +277,15 @@
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
+                if fi<3: 
+                    if  left_edge[i] > arr[i]: continue
+                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
     num_p[0] = tnpart
-    print "TOTAL", block, pi, tnpart, len(grids)
+    print "Block #%i | Particles %i | Grids %i"%\
+            ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
@@ -296,12 +303,14 @@
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
-                       int writing_port = -1, int block_ratio = 1):
+                       int writing_port = -1, int block_ratio = 1,
+                       int periodic = 1, int min_halo_size = 20,
+                       char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -324,12 +333,18 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
+        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. since we can't change the output filenames
+            #workaround is to make a new directory
+            print 'using %s as outbase'%outbase
+            OUTBASE = outbase 
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
             particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
-        PERIODIC = 1
+        PERIODIC = periodic
         BOX_SIZE = (self.pf.domain_right_edge[0] -
                     self.pf.domain_left_edge[0]) * self.pf['mpchcm']
         setup_config()

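The reader above offsets particle positions by the (possibly restricted) left edge and applies conv[] to get Rockstar's units: comoving Mpc/h for positions and km/s for velocities (the 1e-5 factor converts from cm/s). A plain-numpy sketch of that per-particle transform, with made-up numbers:

    import numpy as np

    mpchcm = 128.0                                # illustrative code-length -> comoving Mpc/h
    left_edge = np.array([0.25, 0.25, 0.25])      # sub-volume left edge, code units
    pos_code  = np.array([0.50, 0.60, 0.70])      # one particle's position, code units
    vel_cgs   = np.array([1.0e7, -2.0e7, 5.0e6])  # velocity in cm/s

    pos_rockstar = (pos_code - left_edge) * mpchcm   # Mpc/h, relative to the edge
    vel_rockstar = vel_cgs * 1e-5                    # cm/s -> km/s
    print pos_rockstar, vel_rockstar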

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_profiler/api.py
--- a/yt/analysis_modules/halo_profiler/api.py
+++ b/yt/analysis_modules/halo_profiler/api.py
@@ -34,5 +34,4 @@
 from .multi_halo_profiler import \
     HaloProfiler, \
     FakeProfile, \
-    get_halo_sphere, \
     standard_fields


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_profiler/centering_methods.py
--- a/yt/analysis_modules/halo_profiler/centering_methods.py
+++ b/yt/analysis_modules/halo_profiler/centering_methods.py
@@ -43,14 +43,14 @@
 @add_function("Min_Dark_Matter_Density")
 def find_minimum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Dark_Matter_Density")
 def find_maximum_dm_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Dark_Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -58,7 +58,7 @@
 def find_CoM_dm_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=False, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -67,14 +67,14 @@
 @add_function("Min_Gas_Density")
 def find_minimum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Gas_Density")
 def find_maximum_gas_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -82,7 +82,7 @@
 def find_CoM_gas_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=False,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -91,14 +91,14 @@
 @add_function("Min_Total_Density")
 def find_minimum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MinLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Total_Density")
 def find_maximum_total_density(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Matter_Density',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
@@ -106,7 +106,7 @@
 def find_CoM_total_density(data):
    dc_x, dc_y, dc_z = data.quantities['CenterOfMass'](use_cells=True, 
                                                       use_particles=True,
-                                                      lazy_reader=False,
+                                                      lazy_reader=True,
                                                       preload=False)
    return (dc_x, dc_y, dc_z)
 
@@ -115,14 +115,14 @@
 @add_function("Min_Temperature")
 def find_minimum_temperature(data):
     ma, mini, mx, my, mz, mg = data.quantities['MinLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 
 @add_function("Max_Temperature")
 def find_maximum_temperature(data):
     ma, maxi, mx, my, mz, mg = data.quantities['MaxLocation']('Temperature',
-                                                              lazy_reader=False,
+                                                              lazy_reader=True,
                                                               preload=False)
     return (mx, my, mz)
 

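Each of these helpers is registered by name and can be selected with HaloProfiler(recenter="Max_Gas_Density") and friends; a user-defined function with the same one-argument, (x, y, z)-returning signature can be passed instead, as in this sketch:

    # Sketch of a custom recentering function; it receives the halo sphere and
    # must return the new center coordinates, like the registered helpers above.
    def my_center_of_mass(data):
        my_x, my_y, my_z = data.quantities['CenterOfMass'](use_cells=True,
                                                           use_particles=False,
                                                           lazy_reader=True,
                                                           preload=False)
        return (my_x, my_y, my_z)

    # hp = HaloProfiler("RedshiftOutput0005/RD0005", recenter=my_center_of_mass)
    # hp = HaloProfiler("RedshiftOutput0005/RD0005", recenter="Max_Gas_Density")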

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -42,6 +42,8 @@
     centering_registry
 from yt.data_objects.field_info_container import \
     add_field
+from yt.data_objects.static_output import \
+    StaticOutput
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, \
@@ -64,7 +66,7 @@
                                          dm_only=False, resize=True, 
                                          fancy_padding=True, rearrange=True),
                  halo_radius=None, radius_units='1', n_profile_bins=50,
-                 recenter = None,
+                 recenter=None,
                  profile_output_dir='radial_profiles', projection_output_dir='projections',
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'], 
@@ -111,8 +113,32 @@
             Args given with call to halo finder function.  Default: None.
         halo_finder_kwargs : dict
             kwargs given with call to halo finder function. Default: None.
-        recenter : {string, function
-            The name of a function that recenters the halo for analysis.
+        recenter : {string, function}
+            The exact location of the sphere center can significantly affect 
+            radial profiles.  The halo center loaded by the HaloProfiler will 
+            typically be the dark matter center of mass calculated by a halo 
+            finder.  However, this may not be the best location for centering 
+            profiles of baryon quantities.  For example, one may want to center 
+            on the maximum density.
+            If recenter is given as a string, one of the existing recentering 
+            functions will be used:
+                Min_Dark_Matter_Density : location of minimum dark matter density
+                Max_Dark_Matter_Density : location of maximum dark matter density
+                CoM_Dark_Matter_Density : dark matter center of mass
+                Min_Gas_Density : location of minimum gas density
+                Max_Gas_Density : location of maximum gas density
+                CoM_Gas_Density : gas center of mass
+                Min_Total_Density : location of minimum total density
+                Max_Total_Density : location of maximum total density
+                CoM_Total_Density : total center of mass
+                Min_Temperature : location of minimum temperature
+                Max_Temperature : location of maximum temperature
+            Alternately, a function can be supplied for custom recentering.
+            The function should take only one argument, a sphere object.
+                Example function:
+                    def my_center_of_mass(data):
+                       my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                       return (my_x, my_y, my_z)
             Default: None.
         halo_radius : float
             If no halo radii are provided in the halo list file, this
@@ -148,8 +174,7 @@
                 * ["bulk", "sphere"]: the bulk velocity of the sphere
                   centered on the halo center.
     	        * ["max", field]: the velocity of the cell that is the
-    	          location of the maximum of the field 
-                  specified (used only when halos set to single).
+    	          location of the maximum of the field specified.
         filter_quantities : array_like
             Quantities from the original halo list file to be written out in the 
             filtered list file.  Default: ['id','center'].
@@ -161,8 +186,8 @@
         
         Examples
         --------
-        >>> import yt.analysis_modules.halo_profiler.api as HP
-        >>> hp = HP.halo_profiler("DD0242/DD0242")
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
         
         """
         ParallelAnalysisInterface.__init__(self)
@@ -226,13 +251,9 @@
         # Option to recenter sphere someplace else.
         self.recenter = recenter
 
-        # Look for any field that might need to have the bulk velocity set.
+        # Flag for whether calculating halo bulk velocity is necessary.
         self._need_bulk_velocity = False
-        for field in [hp['field'] for hp in self.profile_fields]:
-            if 'Velocity' in field or 'Mach' in field:
-                self._need_bulk_velocity = True
-                break
-
+        
         # Check validity for VelocityCenter parameter which toggles how the 
         # velocity is zeroed out for radial velocity profiles.
         self.velocity_center = velocity_center[:]
@@ -250,15 +271,16 @@
                 mylog.error("Second value of VelocityCenter must be either 'halo' or 'sphere' if first value is 'bulk'.")
                 return None
         elif self.velocity_center[0] == 'max':
-            if self.halos is 'multiple':
-                mylog.error("Getting velocity center from a max field value only works with halos='single'.")
-                return None
+            mylog.info('Using position of max %s for velocity center.' % self.velocity_center[1])
         else:
             mylog.error("First value of parameter, VelocityCenter, must be either 'bulk' or 'max'.")
             return None
 
         # Create dataset object.
-        self.pf = load(self.dataset)
+        if isinstance(self.dataset, StaticOutput):
+            self.pf = self.dataset
+        else:
+            self.pf = load(self.dataset)
         self.pf.h
 
         # Figure out what max radius to use for profiling.
@@ -284,7 +306,7 @@
                 mylog.error("No halos loaded, there will be nothing to do.")
                 return None
         else:
-            mylog.error("I don't know whether to get halos from hop or from density maximum.  This should not have happened.")
+            mylog.error("Keyword, halos, must be either 'single' or 'multiple'.")
             return None
 
     def add_halo_filter(self, function, *args, **kwargs):
@@ -351,6 +373,10 @@
             
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.profile_fields.append({'field':field, 'weight_field':weight_field, 
                                     'accumulation':accumulation})
 
@@ -379,11 +405,15 @@
 
         """
 
+        # Check for any field that might need to have the bulk velocity set.
+        if 'Velocity' in field or 'Mach' in field:
+            self._need_bulk_velocity = True
+
         self.projection_fields.append({'field':field, 'weight_field':weight_field, 
                                        'cmap': cmap})
 
     @parallel_blocking_call
-    def make_profiles(self, filename=None, prefilters=None, **kwargs):
+    def make_profiles(self, filename=None, prefilters=None, njobs=-1):
         r"""Make radial profiles for all halos in the list.
         
         After all the calls to `add_profile`, this will trigger the actual
@@ -394,7 +424,7 @@
         filename : string
             If set, a file will be written with all of the filtered halos
             and the quantities returned by the filter functions.
-            Default=None.
+            Default: None.
         prefilters : array_like
             A single dataset can contain thousands or tens of thousands of
             halos. Significant time can be saved by not profiling halos
@@ -402,6 +432,11 @@
             Simple filters based on quantities provided in the initial
             halo list can be used to filter out unwanted halos using this
             parameter.
+            Default: None.
+        njobs : int
+            The number of jobs over which to split the profiling.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
         
         Examples
         --------
@@ -454,7 +489,7 @@
 
         # Profile all halos.
         updated_halos = []
-        for halo in parallel_objects(self.all_halos, -1):
+        for halo in parallel_objects(self.all_halos, njobs=njobs):
             # Apply prefilters to avoid profiling unwanted halos.
             filter_result = True
             haloQuantities = {}
@@ -468,7 +503,8 @@
 
                 profile_filename = "%s/Halo_%04d_profile.dat" % (my_output_dir, halo['id'])
 
-                profiledHalo = self._get_halo_profile(halo, profile_filename, virial_filter=virial_filter)
+                profiledHalo = self._get_halo_profile(halo, profile_filename,
+                                                      virial_filter=virial_filter)
 
                 if profiledHalo is None:
                     continue
@@ -487,26 +523,26 @@
                 for quantity in self.filter_quantities:
                     if halo.has_key(quantity): haloQuantities[quantity] = halo[quantity]
 
-                self.filtered_halos.append(haloQuantities)
+                only_on_root(self.filtered_halos.append, haloQuantities)
 
             # If we've gotten this far down, this halo is good and we want
             # to keep it. But we need to communicate the recentering changes
             # to all processors (the root one in particular) without having
             # one task clobber the other.
-            updated_halos.append(halo)
-        
+            only_on_root(updated_halos.append, halo)
+
         # And here is where we bring it all together.
         updated_halos = self.comm.par_combine_object(updated_halos,
                             datatype="list", op="cat")
-        updated_halos.sort(key = lambda a:a['id'])
+        updated_halos.sort(key=lambda a:a['id'])
         self.all_halos = updated_halos
 
         self.filtered_halos = self.comm.par_combine_object(self.filtered_halos,
                             datatype="list", op="cat")
-        self.filtered_halos.sort(key = lambda a:a['id'])
+        self.filtered_halos.sort(key=lambda a:a['id'])
 
         if filename is not None:
-            self._write_filtered_halo_list(filename, **kwargs)
+            self._write_filtered_halo_list(filename)
 
     def _get_halo_profile(self, halo, filename, virial_filter=True,
             force_write=False):
@@ -529,31 +565,13 @@
                 return None
 
             # get a sphere object to profile
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: return None
 
-            if self._need_bulk_velocity:
-                # Set bulk velocity to zero out radial velocity profiles.
-                if self.velocity_center[0] == 'bulk':
-                    if self.velocity_center[1] == 'halo':
-                        sphere.set_field_parameter('bulk_velocity', halo['velocity'])
-                    elif self.velocity_center[1] == 'sphere':
-                        sphere.set_field_parameter('bulk_velocity', 
-                                                   sphere.quantities['BulkVelocity'](lazy_reader=False, 
-                                                                                     preload=False))
-                    else:
-                        mylog.error("Invalid parameter: VelocityCenter.")
-                elif self.velocity_center[0] == 'max':
-                    max_grid, max_cell, max_value, max_location = \
-                        self.pf.h.find_max_cell_location(self.velocity_center[1])
-                    sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
-                                                                 max_grid['y-velocity'][max_cell],
-                                                                 max_grid['z-velocity'][max_cell]])
-
             try:
                 profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
                                                 r_min, halo['r_max'],
-                                                log_space=True, lazy_reader=False,
+                                                log_space=True, lazy_reader=True,
                                                 end_collect=True)
             except EmptyProfileData:
                 mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
@@ -586,9 +604,75 @@
 
         return profile
 
+    def _get_halo_sphere(self, halo):
+        """
+        Returns a sphere object for a given halo, performs the recentering,
+        and calculates bulk velocities.
+        """
+
+        sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+        if len(sphere._grids) == 0: return None
+        new_sphere = False
+
+        if self.recenter:
+            old = halo['center']
+            if self.recenter in centering_registry:
+                new_x, new_y, new_z = \
+                    centering_registry[self.recenter](sphere)
+            else:
+                # user supplied function
+                new_x, new_y, new_z = self.recenter(sphere)
+            if new_x < self.pf.domain_left_edge[0] or \
+                    new_y < self.pf.domain_left_edge[1] or \
+                    new_z < self.pf.domain_left_edge[2]:
+                mylog.info("Recentering rejected, skipping halo %d" % \
+                    halo['id'])
+                return None
+            halo['center'] = [new_x, new_y, new_z]
+            d = self.pf['kpc'] * periodic_dist(old, halo['center'],
+                self.pf.domain_right_edge - self.pf.domain_left_edge)
+            mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
+            # Expand the halo to account for recentering. 
+            halo['r_max'] += d / 1000. # d is in kpc -> want mpc
+            new_sphere = True
+
+        if new_sphere:
+            # Temporary solution to memory leak.
+            for g in self.pf.h.grids:
+                g.clear_data()
+            sphere.clear_data()
+            del sphere
+            sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
+
+        if self._need_bulk_velocity:
+            # Set bulk velocity to zero out radial velocity profiles.
+            if self.velocity_center[0] == 'bulk':
+                if self.velocity_center[1] == 'halo':
+                    sphere.set_field_parameter('bulk_velocity', halo['velocity'])
+                elif self.velocity_center[1] == 'sphere':
+                    mylog.info('Calculating sphere bulk velocity.')
+                    sphere.set_field_parameter('bulk_velocity', 
+                                               sphere.quantities['BulkVelocity'](lazy_reader=True, 
+                                                                                 preload=False))
+                else:
+                    mylog.error("Invalid parameter: velocity_center.")
+                    return None
+            elif self.velocity_center[0] == 'max':
+                mylog.info('Setting bulk velocity with value at max %s.' % self.velocity_center[1])
+                max_val, maxi, mx, my, mz, mg = sphere.quantities['MaxLocation'](self.velocity_center[1],
+                                                                                 lazy_reader=True)
+                max_grid = self.pf.h.grids[mg]
+                max_cell = na.unravel_index(maxi, max_grid.ActiveDimensions)
+                sphere.set_field_parameter('bulk_velocity', [max_grid['x-velocity'][max_cell],
+                                                             max_grid['y-velocity'][max_cell],
+                                                             max_grid['z-velocity'][max_cell]])
+            mylog.info('Bulk velocity set.')
+
+        return sphere
+
     @parallel_blocking_call
     def make_projections(self, axes=[0, 1, 2], halo_list='filtered',
-            save_images=False, save_cube=True):
+                         save_images=False, save_cube=True, njobs=-1):
         r"""Make projections of all halos using specified fields.
         
         After adding fields using `add_projection`, this starts the actual
@@ -608,6 +692,10 @@
         save_cube : bool
             Whether or not to save the HDF5 files of the halo projections.
             Default=True.
+        njobs : int
+            The number of jobs over which to split the projections.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
         
         Examples
         --------
@@ -656,7 +744,7 @@
                          self.pf.parameters['DomainRightEdge'][w])
                   for w in range(self.pf.parameters['TopGridRank'])]
 
-        for halo in parallel_objects(halo_projection_list, -1):
+        for halo in parallel_objects(halo_projection_list, njobs=njobs):
             if halo is None:
                 continue
             # Check if region will overlap domain edge.
@@ -745,7 +833,7 @@
 
     @parallel_blocking_call
     def analyze_halo_spheres(self, analysis_function, halo_list='filtered',
-                             analysis_output_dir=None):
+                             analysis_output_dir=None, njobs=-1):
         r"""Perform custom analysis on all halos.
         
         This will loop through all halo on the HaloProfiler's list, 
@@ -768,6 +856,10 @@
         analysis_output_dir : string, optional
             If specified, this directory will be created within the dataset to 
             contain any output from the analysis function.  Default: None.
+        njobs : int
+            The number of jobs over which to split the analysis.  Set
+            to -1 so that each halo is done by a single processor.
+            Default: -1.
 
         Examples
         --------
@@ -803,11 +895,11 @@
                 my_output_dir = "%s/%s" % (self.pf.fullpath, analysis_output_dir)
             self.__check_directory(my_output_dir)
 
-        for halo in parallel_objects(halo_analysis_list, -1):
+        for halo in parallel_objects(halo_analysis_list, njobs=njobs):
             if halo is None: continue
 
             # Get a sphere object to analze.
-            sphere = get_halo_sphere(halo, self.pf, recenter=self.recenter)
+            sphere = self._get_halo_sphere(halo)
             if sphere is None: continue
 
             # Call the given analysis function.
@@ -924,6 +1016,9 @@
         lines = f.readlines()
         f.close()
 
+        if not lines:
+            return None
+
         # Get fields from header.
         header = lines.pop(0)
         header = header.strip()
@@ -1042,94 +1137,6 @@
         else:
             os.mkdir(my_output_dir)
 
-def get_halo_sphere(halo, pf, recenter=None):
-    r"""Returns a sphere object for a given halo.
-        
-    With a dictionary containing halo properties, such as center 
-    and r_max, this creates a sphere object and optionally 
-    recenters and recreates the sphere using a recentering function.
-    This is to be used primarily to make spheres for a set of halos 
-    loaded by the HaloProfiler.
-    
-    Parameters
-    ----------
-    halo : dict, required
-        The dictionary containing halo properties used to make the sphere.
-        Required entries:
-            center : list with center coordinates.
-            r_max : sphere radius in Mpc.
-    pf : parameter file object, required
-        The parameter file from which the sphere will be made.
-    recenter : {None, string or function}
-        The exact location of the sphere center can significantly affect 
-        radial profiles.  The halo center loaded by the HaloProfiler will 
-        typically be the dark matter center of mass calculated by a halo 
-        finder.  However, this may not be the best location for centering 
-        profiles of baryon quantities.  For example, one may want to center 
-        on the maximum density.
-        If recenter is given as a string, one of the existing recentering 
-        functions will be used:
-            Min_Dark_Matter_Density : location of minimum dark matter density
-            Max_Dark_Matter_Density : location of maximum dark matter density
-            CoM_Dark_Matter_Density : dark matter center of mass
-            Min_Gas_Density : location of minimum gas density
-            Max_Gas_Density : location of maximum gas density
-            CoM_Gas_Density : gas center of mass
-            Min_Total_Density : location of minimum total density
-            Max_Total_Density : location of maximum total density
-            CoM_Total_Density : total center of mass
-            Min_Temperature : location of minimum temperature
-            Max_Temperature : location of maximum temperature
-        Alternately, a function can be supplied for custom recentering.
-        The function should take only one argument, a sphere object.
-            Example function:
-                def my_center_of_mass(data):
-                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
-                   return (my_x, my_y, my_z)
-
-        Examples: this should primarily be used with the halo list of the HaloProfiler.
-        This is an example with an abstract halo asssuming a pre-defined pf.
-        >>> halo = {'center': [0.5, 0.5, 0.5], 'r_max': 1.0}
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter='Max_Gas_Density')
-        >>> # Assuming the above example function has been defined.
-        >>> my_sphere = get_halo_sphere(halo, pf, recenter=my_center_of_mass)
-    """
-        
-    sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    if len(sphere._grids) == 0: return None
-    new_sphere = False
-
-    if recenter:
-        old = halo['center']
-        if recenter in centering_registry:
-            new_x, new_y, new_z = \
-                centering_registry[recenter](sphere)
-        else:
-            # user supplied function
-            new_x, new_y, new_z = recenter(sphere)
-        if new_x < pf.domain_left_edge[0] or \
-                new_y < pf.domain_left_edge[1] or \
-                new_z < pf.domain_left_edge[2]:
-            mylog.info("Recentering rejected, skipping halo %d" % \
-                halo['id'])
-            return None
-        halo['center'] = [new_x, new_y, new_z]
-        d = pf['kpc'] * periodic_dist(old, halo['center'],
-            pf.domain_right_edge - pf.domain_left_edge)
-        mylog.info("Recentered halo %d %1.3e kpc away." % (halo['id'], d))
-        # Expand the halo to account for recentering. 
-        halo['r_max'] += d / 1000 # d is in kpc -> want mpc
-        new_sphere = True
-
-    if new_sphere:
-        # Temporary solution to memory leak.
-        for g in pf.h.grids:
-            g.clear_data()
-        sphere.clear_data()
-        del sphere
-        sphere = pf.h.sphere(halo['center'], halo['r_max']/pf.units['mpc'])
-    return sphere
-
 def _shift_projections(pf, projections, oldCenter, newCenter, axis):
     """
     Shift projection data around.

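Putting the HaloProfiler changes together, a short driver now looks roughly like this; the dataset path matches the updated docstring example and the field names are the usual yt-2.x ones, both illustrative here:

    from yt.analysis_modules.halo_profiler.api import *

    hp = HaloProfiler("RedshiftOutput0005/RD0005", recenter="Max_Gas_Density")
    hp.add_profile('CellMassMsun', weight_field=None, accumulation=True)
    # A field containing 'Velocity' (or 'Mach') now flips _need_bulk_velocity.
    hp.add_profile('RadialVelocity', weight_field='CellMassMsun', accumulation=False)
    # njobs=-1 assigns each halo to a single processor, as documented above.
    hp.make_profiles(filename="FilteredQuantities.out", njobs=-1)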

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -190,7 +190,7 @@
             elif (child._isValid()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["CellMassMsun"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/simulation_handler/enzo_simulation.py
--- a/yt/analysis_modules/simulation_handler/enzo_simulation.py
+++ b/yt/analysis_modules/simulation_handler/enzo_simulation.py
@@ -49,7 +49,8 @@
                  initial_redshift=None, final_redshift=None,
                  links=False, enzo_parameters=None, 
                  get_time_outputs=True, get_redshift_outputs=True, 
-                 get_available_data=False, get_data_by_force=False):
+                 get_available_data=False, get_data_by_force=False,
+                 parallel = True):
         r"""Initialize an Enzo Simulation object.
         
         initial_time : float
@@ -128,9 +129,9 @@
         self._get_all_outputs(brute_force=get_data_by_force)
 
         # Instantiate a TimeSeriesData object.
-        time_series_outputs = [load(output['filename']) \
-                                   for output in self.allOutputs]
-        TimeSeriesData.__init__(self, outputs=time_series_outputs)
+        time_series_outputs = [output['filename'] for output in self.allOutputs]
+        TimeSeriesData.__init__(self, outputs=time_series_outputs,
+                                parallel = parallel)
 
     def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."

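With this change the simulation object hands filenames rather than pre-loaded datasets to TimeSeriesData, so outputs can be loaded as the series is traversed and distributed across processors when parallel=True. A usage sketch; the parameter file name is illustrative and the first positional argument is assumed to be the Enzo parameter file:

    from yt.analysis_modules.simulation_handler.enzo_simulation import EnzoSimulation

    es = EnzoSimulation("my_simulation.par",        # illustrative Enzo parameter file
                        initial_redshift=10.0, final_redshift=0.0,
                        get_time_outputs=True, get_redshift_outputs=True,
                        parallel=True)
    # EnzoSimulation is a TimeSeriesData, so outputs are handled one at a time.
    for pf in es:
        print pf, pf.current_redshift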

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -246,7 +246,7 @@
 """
 
 class SpectrumBuilder(object):
-    def __init__(self, pf, bcdir="", model="chabrier"):
+    def __init__(self, pf, bcdir="", model="chabrier", time_now=None):
         r"""Initialize the data to build a summed flux spectrum for a
         collection of stars using the models of Bruzual & Charlot (2003).
         This function loads the necessary data tables into memory and
@@ -280,8 +280,12 @@
              OmegaLambdaNow = self._pf.omega_lambda,
              InitialRedshift = self._pf['CosmologyInitialRedshift'])
         # Find the time right now.
-        self.time_now = self.cosm.ComputeTimeFromRedshift(
-            self._pf.current_redshift) # seconds
+        
+        if time_now is None:
+            self.time_now = self.cosm.ComputeTimeFromRedshift(
+                self._pf.current_redshift) # seconds
+        else:
+            self.time_now = time_now
         
         # Read the tables.
         self.read_bclib()
@@ -404,7 +408,8 @@
         self.star_metal = self.star_metal[sort]
         
         # Interpolate the flux for each star, adding to the total by weight.
-        for star in itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass):
+        pbar = get_pbar("Calculating fluxes",len(self.star_mass))
+        for i,star in enumerate(itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass)):
             # Pick the right age bin for the right flux array.
             flux = self.flux[star[0]][star[1],:]
             # Get the one just before the one above.
@@ -413,6 +418,9 @@
             int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
             # Add this flux to the total, weighted by mass.
             self.final_spec += na.power(10., int_flux) * star[4]
+            pbar.update(i)
+        pbar.finish()    
+        
         # Normalize.
         self.total_mass = na.sum(self.star_mass)
         self.avg_mass = na.mean(self.star_mass)

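The new time_now keyword lets the caller supply the epoch (in seconds) directly rather than deriving it from the dataset's current_redshift. A sketch with an illustrative dataset, table directory, and hand-picked time; the import path follows the file path above:

    from yt.mods import load
    from yt.analysis_modules.star_analysis.sfr_spectrum import SpectrumBuilder

    pf = load("RedshiftOutput0005/RD0005")   # illustrative dataset
    t_now = 13.7e9 * 3.156e7                 # ~13.7 Gyr expressed in seconds
    # bcdir points at the Bruzual & Charlot data tables (illustrative path)
    sb = SpectrumBuilder(pf, bcdir="bc03", model="chabrier", time_now=t_now)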

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -1,6 +1,8 @@
 """
 Code to export from yt to Sunrise
 
+Author: Chris Moody <juxtaposicion at gmail.com>
+Affiliation: UCSC
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Homepage: http://yt-project.org/
@@ -26,8 +28,7 @@
 
 try:
     import pyfits
-except ImportError:
-    # We silently fail here
+except ImportError: 
     pass
 
 import time
@@ -36,9 +37,11 @@
 from yt.funcs import *
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
+from yt.mods import *
 
-def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None):
+debug = True
+
+def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -54,18 +57,13 @@
     pf : `StaticOutput`
         The parameter file to convert.
     fn : string
-        The filename of the FITS file.
-    write_particles : bool or pyfits.ColDefs instance, default is True
-        Whether to write out the star particles or not.  If this variable is an
-        instance of pyfits.ColDefs, then this will be used to create a pyfits
-        table named PARTICLEDATA which will be appended.  If this is true, the
-        routine will attempt to create this table from hand.
-    subregion_bounds : list of tuples
-        This is a list of tuples describing the subregion of the top grid to
-        export.  This will only work when only *one* root grid exists.
-        It is of the format:
-        [ (start_index_x, nx), (start_index_y, ny), (start_index_z, nz) ]
-        where nx, ny, nz are the number of cells to extract.
+        The filename of the output FITS file.
+    dle : The domain left edge to extract
+    dre : The domain right edge to extract
+        Array format is (nx,ny,nz) where each element is floating point
+        in unitary position units where 0 is leftmost edge and 1
+        the rightmost. 
+        
 
     Notes
     -----
@@ -74,144 +72,250 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-    # Now particles
-    #  output_file->addTable("PARTICLEDATA" , 0);
-    # addKey("timeunit", time_unit, "Time unit is "+time_unit);
-    # addKey("tempunit", temp_unit, "Temperature unit is "+temp_unit);
-    # 
-    # addColumn(Tint, "ID", 1, "" );
-    # addColumn(Tdouble, "position", 3, length_unit );
-    # addColumn(Tdouble, "stellar_radius", 1, length_unit );
-    # addColumn(Tdouble, "L_bol", 1, L_bol_unit );
-    # addColumn(Tdouble, "mass_stars", 1, mass_unit );
-    # addColumn(Tdouble, "mass_stellar_metals", 1, mass_unit );
-    # addColumn(Tdouble, "age_m", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tdouble, "age_l", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tfloat, "L_lambda", L_lambda.columns(), 
-    #			L_lambda_unit );
-    #	output->addKey("logflux", true, "Column L_lambda values are log (L_lambda)");
+    
+    #we must round the dle,dre to the nearest root grid cells
+    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
+    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
+    mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
-    col_list = []
-    if subregion_bounds == None:    
-        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        DX = pf.domain_dimensions
-    else:
-        DLE, DX = zip(*subregion_bounds)
-        DLE, DX = na.array(DLE), na.array(DX)
-        DRE = DLE + DX
-    reg = pf.h.region((DRE+DLE)/2.0, DLE, DRE)
 
-    if write_particles is True:
-        pi = reg["particle_type"] == 2
-        pos = na.array([reg["particle_position_%s" % ax][pi]*pf['kpc']
-                            for ax in 'xyz']).transpose()
-        vel = na.array([reg["particle_velocity_%s" % ax][pi]
-                            for ax in 'xyz']).transpose()
-        # Velocity is cm/s, we want it to be kpc/yr
-        vel *= (pf["kpc"]/pf["cm"]) / (365*24*3400.)
-        age = pf["years"] * (pf.current_time - reg["creation_time"][pi])
-        creation_time = reg["creation_time"][pi] * pf["years"]
+    #Create the refinement hilbert octree in GRIDSTRUCTURE
+    #For every leaf (not-refined) cell we have a column n GRIDDATA
+    #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
+    #since the octree always starts with one cell, an our 0-level mesh
+    #may have many cells, we must #create the octree region sitting 
+    #ontop of the first mesh by providing a negative level
+    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
 
-        initial_mass = reg["InitialMassCenOstriker"][pi]
-        current_mass = reg["ParticleMassMsun"][pi]
-        col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-        col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-        col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
-        col_list.append(pyfits.Column("formation_time", format="D", array=creation_time, unit="yr"))
-        col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-        col_list.append(pyfits.Column("age_m", format="D", array=age))
-        col_list.append(pyfits.Column("age_l", format="D", array=age))
-        #For particles, Sunrise takes 
-        #the dimensionless metallicity, not the mass of the metals
-        col_list.append(pyfits.Column("metallicity", format="D",
-            array=reg["metallicity_fraction"][pi],unit="Msun")) # wrong?
-        col_list.append(pyfits.Column("L_bol", format="D",
-            array=na.zeros(particle_mass.size)))
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
 
-        cols = pyfits.ColDefs(col_list)
-        pd_table = pyfits.new_table(cols)
-        pd_table.name = "PARTICLEDATA"
-    elif isinstance(write_particles, pyfits.ColDefs):
-        pd_table = pyfits.new_table(write_particles)
-        pd_table.name = "PARTICLEDATA"
-        write_particles = True
+    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
 
-    def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
-        
-    def _convMetalMass(data):
-        return 1.0/1.989e33
-        
-    add_field("MetalMass", function=_MetalMass,
-              convert_function=_convMetalMass)
+def prepare_octree(pf,ile,start_level=0):
+    add_fields() #add the metal mass field that sunrise wants
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    
+    #gather the field data from octs
+    pbar = get_pbar("Retrieving field data",len(fields))
+    field_data = [] 
+    dd = pf.h.all_data()
+    for fi,f in enumerate(fields):
+        field_data += dd[f],
+        pbar.update(fi)
+    pbar.finish()
+    del field_data
 
-    output, refined = generate_flat_octree(pf,
-            ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds)
-    cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
+    #first we cast every cell as an oct
+    #ngrids = na.max([g.id for g in pf._grids])
+    grids = {}
+    levels_all = {} 
+    levels_finest = {}
+    for l in range(100): 
+        levels_finest[l]=0
+        levels_all[l]=0
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    for gi,g in enumerate(pf.h.grids):
+        ff = na.array([g[f] for f in fields])
+        og = amr_utils.OctreeGrid(
+                g.child_index_mask.astype('int32'),
+                ff.astype("float64"),
+                g.LeftEdge.astype("float64"),
+                g.ActiveDimensions.astype("int32"),
+                na.ones(1,dtype="float64")*g.dds[0],
+                g.Level,
+                g.id)
+        grids[g.id] = og
+        #how many refinement cells will we have?
+        #measure the 'volume' of each mesh, but many
+        #cells do not exist. an overestimate
+        levels_all[g.Level] += g.ActiveDimensions.prod()
+        #how many leaves do we have?
+        #this overestimates. a child of -1 means no child,
+        #but that cell may still be expanded on a submesh because
+        #(at least in ART) the meshes are inefficient.
+        g.clear_data()
+        pbar.update(gi)
+    pbar.finish()
+    
+    #create the octree grid list
+    oct_list =  amr_utils.OctreeGridList(grids)
+    
+    #initialize arrays to be passed to the recursion algo
+    o_length = na.sum(levels_all.values())
+    r_length = na.sum(levels_all.values())
+    output   = na.zeros((o_length,len(fields)), dtype='float64')
+    refined  = na.zeros(r_length, dtype='int32')
+    levels   = na.zeros(r_length, dtype='int32')
+    pos = position()
+    hs       = hilbert_state()
+    refined[0] = 1 #introduce the first cell as divided
+    levels[0]  = start_level-1 #introduce the first cell as divided
+    pos.refined_pos += 1
+    RecurseOctreeDepthFirstHilbert(
+            ile[0],ile[1],ile[2],
+            pos,0, hs, 
+            output,refined,levels,
+            grids,
+            start_level,
+            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
+            physical_center = ile,
+            #physical_width  = pf['kpc'])
+            physical_width  = pf.domain_dimensions)
+    #by the time we get here, the 'current' position is actually
+    #for the next spot, so we're off by 1
+    print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    output  = output[:pos.output_pos]
+    refined = refined[:pos.refined_pos] 
+    levels = levels[:pos.refined_pos] 
+    return output,refined
 
-    # First the structure
+def print_row(level,ple,pre,pw,pc,hs):
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
+    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
+    print hs.dim, hs.sgn
+
+def print_child(level,grid,i,j,k,pw,pc):
+    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
+    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple),
+    print '%1.5f %1.5f %1.5f '%tuple(pre)
+
+def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
+                            curpos, gi, 
+                            hs,
+                            output,
+                            refined,
+                            levels,
+                            grids,
+                            level,
+                            physical_center=None,
+                            physical_width=None,
+                            printr=False):
+    grid = grids[gi]
+    m = 2**(-level-1) if level < 0 else 1
+    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
+    pre = ple+grid.dx*m
+    if printr:
+        print_row(level,ple,pre,physical_width,physical_center,hs)
+
+    #here we go over the 8 octants
+    #in general, however, a mesh cell on this level
+    #may have more than 8 children on the next level,
+    #so we find the floating-point center (cx,cy,cz) of each child cell
+    #and from that recover the child cell indices on the finer mesh
+    for iv, (vertex,hs_child) in enumerate(hs):
+        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
+        #negative level indicates that we need to build a super-octree
+        if level < 0: 
+            #print ' '
+            #we are not on the root grid yet, but this is 
+            #how many equivalent root grid cells we would have
+            #level -1 means our oct grid's children are the same size
+            #as the root grid (hence the -level-1)
+            dx = 2**(-level-1) #this is the child width 
+            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
+            #we always refine the negative levels
+            refined[curpos.refined_pos] = 1
+            levels[curpos.refined_pos] = level
+            curpos.refined_pos += 1
+            RecurseOctreeDepthFirstHilbert(i, j, k,
+                                curpos, 0, hs_child, output, refined, levels, grids,
+                                level+1,
+                                physical_center=physical_center,
+                                physical_width=physical_width,)
+        else:
+            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
+            ci = grid.child_indices[i,j,k] #is this oct subdivided?
+            if ci == -1:
+                for fi in range(grid.fields.shape[0]):
+                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
+                refined[curpos.refined_pos] = 0
+                levels[curpos.refined_pos] = level
+                curpos.output_pos += 1 #position updated after write
+                curpos.refined_pos += 1
+                if printr:
+                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
+            else:
+                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
+                cy = (grid.left_edges[1] + j*grid.dx[0])
+                cz = (grid.left_edges[2] + k*grid.dx[0])
+                refined[curpos.refined_pos] = 1
+                levels[curpos.refined_pos] = level
+                curpos.refined_pos += 1 #position updated after write
+                child_grid = grids[ci]
+                child_dx = child_grid.dx[0]
+                child_leftedges = child_grid.left_edges
+                child_i = int((cx - child_leftedges[0])/child_dx)
+                child_j = int((cy - child_leftedges[1])/child_dx)
+                child_k = int((cz - child_leftedges[2])/child_dx)
+                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
+                                    curpos, ci, hs_child, output, refined, levels, grids,
+                                    level+1,
+                                    physical_center=physical_center,
+                                    physical_width=physical_width)
+
+def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+
+    #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     st_table = pyfits.new_table(cols)
     st_table.name = "GRIDSTRUCTURE"
+    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
+    fdx = fre-fle
+    for i,a in enumerate('xyz'):
+        st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        #st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("n%s" % a, fdx[i])
+        st_table.header.update("subdiv%s" % a, 2)
+    st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
 
-    # Now we update our table with units
-    # ("lengthunit", length_unit, "Length unit for grid");
-    # ("minx", getmin () [0], length_unit_comment);
-    # ("miny", getmin () [1], length_unit_comment);
-    # ("minz", getmin () [2], length_unit_comment);
-    # ("maxx", getmax () [0], length_unit_comment);
-    # ("maxy", getmax () [1], length_unit_comment);
-    # ("maxz", getmax () [2], length_unit_comment);
-    # ("nx", g_.getn () [0], "");
-    # ("ny", g_.getn () [1], "");
-    # ("nz", g_.getn () [2], "");
-    # ("subdivtp", subdivtp, "Type of grid subdivision");
-    # ("subdivx", sub_div[0], "");
-    # ("subdivy", sub_div[1], "");
-    # ("subdivz", sub_div[2], "");
-
-    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
-    for i,a in enumerate('xyz'):
-        st_table.header.update("min%s" % a, DLE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("max%s" % a, DRE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("n%s" % a, DX[i])
-        st_table.header.update("subdiv%s" % a, 2)
-    st_table.header.update("subdivtp", "UNIFORM", "Type of grid subdivision")
-
-    # Now grid data itself
-    # ("M_g_tot", total_quantities.m_g(), "[" + mass_unit +
-    #         "] Total gas mass in all cells");
-    # ("SFR_tot", total_quantities.SFR, "[" + SFR_unit +
-    #         "] Total star formation rate of all cells");
-    # ("timeunit", time_unit, "Time unit is "+time_unit);
-    # ("tempunit", temp_unit, "Temperature unit is "+time_unit);
-
-    # (Tdouble, "mass_gas", 1, mass_unit );
-    # (Tdouble, "SFR", 1, SFR_unit );
-    # (Tdouble, "mass_metals", 1, mass_unit );
-    # (Tdouble, "gas_temp_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "gas_teff_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "cell_volume", 1, length_unit + "^3" );
-
+    #now the hydro grid data
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    fd = {}
+    for i,f in enumerate(fields): 
+        fd[f]=output[:,i]
+    del output
     col_list = []
-    size = output["CellMassMsun"].size
-    tm = output["CellMassMsun"].sum()
+    size = fd["CellMassMsun"].size
+    tm = fd["CellMassMsun"].sum()
     col_list.append(pyfits.Column("mass_gas", format='D',
-                    array=output.pop('CellMassMsun'), unit="Msun"))
+                    array=fd['CellMassMsun'], unit="Msun"))
     col_list.append(pyfits.Column("mass_metals", format='D',
-                    array=output.pop('MetalMass'), unit="Msun"))
+                    array=fd['MetalMass'], unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stars", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("age_m", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("age_l", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("L_bol", format='D',
+    #                 array=na.zeros(size,dtype='D')))
+    # col_list.append(pyfits.Column("L_lambda", format='D',
+    #                 array=na.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
-                    array=output['TemperatureTimesCellMassMsun'], unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("gas_teff_m", format='D',
-                    array=output.pop('TemperatureTimesCellMassMsun'), unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("cell_volume", format='D',
-                    array=output.pop('CellVolumeCode').astype('float64')*pf['kpc']**3.0,
+                    array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
                     array=na.zeros(size, dtype='D')))
@@ -229,101 +333,216 @@
     md_table.header.update("snaptime", pf.current_time*pf['years'])
     md_table.name = "YT"
 
-    hls = [pyfits.PrimaryHDU(), st_table, mg_table,md_table]
-    if write_particles: hls.append(pd_table)
+    phdu = pyfits.PrimaryHDU()
+    phdu.header.update('nbodycod','yt')
+    hls = [phdu, st_table, mg_table,md_table]
+    hls.append(particle_data)
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list(pf, fields):
-    o_length = r_length = 0
-    grids = []
-    levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
-    for g in pf.h.grids:
-        ff = na.array([g[f] for f in fields])
-        grids.append(amr_utils.OctreeGrid(
-                        g.child_index_mask.astype('int32'),
-                        ff.astype("float64"),
-                        g.LeftEdge.astype('float64'),
-                        g.ActiveDimensions.astype('int32'),
-                        na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                        g._id_offset))
-        levels_all[g.Level] += g.ActiveDimensions.prod()
-        levels_finest[g.Level] += g.child_mask.ravel().sum()
-        g.clear_data()
-    ogl = amr_utils.OctreeGridList(grids)
-    return ogl, levels_finest, levels_all
+def nearest_power(x):
+    #round up to the next power of 2
+    x-=1
+    x |= x >> 1
+    x |= x >> 2 
+    x |= x >> 4
+    x |= x >> 8
+    x |= x >> 16
+    x+=1 
+    return x
 
-def generate_flat_octree(pf, fields, subregion_bounds = None):
-    """
-    Generates two arrays, one of the actual values in a depth-first flat
-    octree array, and the other of the values describing the refinement.
-    This allows for export to a code that understands this.  *field* is the
-    field used in the data array.
-    """
-    fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((o_length,len(fields)), dtype='float64')
-    refined = na.zeros(r_length, dtype='int32')
-    position = amr_utils.position()
-    if subregion_bounds is None:
-        sx, sy, sz = 0, 0, 0
-        nx, ny, nz = ogl[0].dimensions
-    else:
-        ss, ns = zip(*subregion_bounds)
-        sx, sy, sz = ss
-        nx, ny, nz = ns
-    print "Running from %s for %s cells" % (
-            (sx,sy,sz), (nx,ny,nz))
-    t1 = time.time()
-    amr_utils.RecurseOctreeDepthFirst(
-               sx, sy, sz, nx, ny, nz,
-               position, 0,
-               output, refined, ogl)
-    t2 = time.time()
-    print "Finished.  Took %0.3e seconds." % (t2-t1)
-    dd = {}
-    for i, field in enumerate(fields):
-        dd[field] = output[:position.output_pos,i]
-    return dd, refined[:position.refined_pos]
+def round_nearest_edge(pf,dle,dre):
+    dds = pf.domain_dimensions
+    ile = na.floor(dle*dds).astype('int')
+    ire = na.ceil(dre*dds).astype('int') 
+    
+    #this is the number of root-grid cells the super octree needs to span,
+    #rounded up to the next power of 2
+    width = na.max(ire-ile)
+    width = nearest_power(width)
+    
+    maxlevel = na.rint(na.log2(width)).astype('int')
+    return ile,ire,maxlevel
 
-def generate_levels_octree(pf, fields):
-    fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
-    corners = na.zeros((r_length, 3), dtype='float64')
-    position = na.add.accumulate(
-                na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
-    pp = position.copy()
-    amr_utils.RecurseOctreeByLevels(0, 0, 0,
-               ogl[0].dimensions[0],
-               ogl[0].dimensions[1],
-               ogl[0].dimensions[2],
-               position.astype('int64'), 1,
-               output, genealogy, corners, ogl)
-    return output, genealogy, levels_all, levels_finest, pp, corners
+def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
+                          creation_time=None,initial_mass=None,
+                          current_mass=None,metallicity=None,
+                          radius = None,
+                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
+    dd = pf.h.all_data()
+    idx = dd["particle_type"] == star_type
+    if pos is None:
+        pos = na.array([dd["particle_position_%s" % ax]
+                        for ax in 'xyz']).transpose()
+    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    pos = pos[idx]*pf['kpc'] #unitary units -> kpc
+    if age is None:
+        age = dd["particle_age"][idx]*pf['years'] # seconds->years
+    if vel is None:
+        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+                        for ax in 'xyz']).transpose()
+        # Velocity is cm/s, we want it to be kpc/yr
+        #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
+        vel *= 1.02268944e-14 
+    if initial_mass is None:
+        #in solar masses
+        initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']
+    if current_mass is None:
+        #in solar masses
+        current_mass = dd["particle_mass"][idx]*pf['Msun']
+    if metallicity is None:
+        #this should be in dimensionless units, metals mass / particle mass
+        metallicity = dd["particle_metallicity"][idx]
+    if radius is None:
+        radius = initial_mass*0.0+10.0/1000.0 #10 pc radius, expressed in kpc
 
-def _initial_mass_cen_ostriker(field, data):
-    # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
-    # Check Grid_AddToDiskProfile.C and star_maker7.src
-    star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
-    star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-    dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
-    xv1 = ((data.pf["InitialTime"] - data["creation_time"])
-            / data["dynamical_time"])
-    xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
-            / data["dynamical_time"])
-    denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
-    minitial = data["ParticleMassMsun"] / denom
-    return minitial
-add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+    formation_time = pf.current_time-age
+    #create every column
+    col_list = []
+    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
+    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
+    col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
+    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
+    col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
+    col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
+    col_list.append(pyfits.Column("age_m", format="D", array=age))
+    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    #For particles, Sunrise takes 
+    #the dimensionless metallicity, not the mass of the metals
+    col_list.append(pyfits.Column("metallicity", format="D",
+        array=metallicity,unit="Msun")) 
+    col_list.append(pyfits.Column("L_bol", format="D",
+        array=na.zeros(current_mass.size)))
+    
+    #make the table
+    cols = pyfits.ColDefs(col_list)
+    pd_table = pyfits.new_table(cols)
+    pd_table.name = "PARTICLEDATA"
+    return pd_table
 
-def _temp_times_mass(field, data):
-    return data["Temperature"]*data["CellMassMsun"]
-add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
+def add_fields():
+    """Add three Eulerian fields Sunrise uses"""
+    def _MetalMass(field, data):
+        return data["Metal_Density"] * data["CellVolume"]
+        
+    def _convMetalMass(data):
+        return 1.0/1.989e33
+    
+    add_field("MetalMass", function=_MetalMass,
+              convert_function=_convMetalMass)
+
+    def _initial_mass_cen_ostriker(field, data):
+        # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
+        # Check Grid_AddToDiskProfile.C and star_maker7.src
+        star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
+        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
+        dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
+        xv1 = ((data.pf["InitialTime"] - data["creation_time"])
+                / data["dynamical_time"])
+        xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
+                / data["dynamical_time"])
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        minitial = data["ParticleMassMsun"] / denom
+        return minitial
+
+    add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
+
+class position:
+    def __init__(self):
+        self.output_pos = 0
+        self.refined_pos = 0
+
+class hilbert_state():
+    def __init__(self,dim=None,sgn=None,octant=None):
+        if dim is None: dim = [0,1,2]
+        if sgn is None: sgn = [1,1,1]
+        if octant is None: octant = 5
+        self.dim = dim
+        self.sgn = sgn
+        self.octant = octant
+    def flip(self,i):
+        self.sgn[i]*=-1
+    def swap(self,i,j):
+        temp = self.dim[i]
+        self.dim[i]=self.dim[j]
+        self.dim[j]=temp
+        axis = self.sgn[i]
+        self.sgn[i] = self.sgn[j]
+        self.sgn[j] = axis
+    def reorder(self,i,j,k):
+        ndim = [self.dim[i],self.dim[j],self.dim[k]] 
+        nsgn = [self.sgn[i],self.sgn[j],self.sgn[k]]
+        self.dim = ndim
+        self.sgn = nsgn
+    def copy(self):
+        return hilbert_state([self.dim[0],self.dim[1],self.dim[2]],
+                             [self.sgn[0],self.sgn[1],self.sgn[2]],
+                             self.octant)
+    def descend(self,o):
+        child = self.copy()
+        child.octant = o
+        if o==0:
+            child.swap(0,2)
+        elif o==1:
+            child.swap(1,2)
+        elif o==2:
+            pass
+        elif o==3:
+            child.flip(0)
+            child.flip(2)
+            child.reorder(2,0,1)
+        elif o==4:
+            child.flip(0)
+            child.flip(1)
+            child.reorder(2,0,1)
+        elif o==5:
+            pass
+        elif o==6:
+            child.flip(1)
+            child.flip(2)
+            child.swap(1,2)
+        elif o==7:
+            child.flip(0)
+            child.flip(2)
+            child.swap(0,2)
+        return child
+
+    def __iter__(self):
+        vertex = [0,0,0]
+        j=0
+        for i in range(3):
+            vertex[self.dim[i]] = 0 if self.sgn[i]>0 else 1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0]
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] += self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[2]] += self.sgn[2] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] -= self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+
+
+
+
+


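Aside (not part of the patch): the nearest_power() helper added above uses a
standard bit-twiddling trick to round an integer up to the next power of two.
A minimal, self-contained sketch of the same trick; the name
next_power_of_two below is ours, not the changeset's:

    def next_power_of_two(x):
        # propagate the highest set bit into every lower bit, then add one;
        # identical in spirit to nearest_power() in the diff above
        x -= 1
        x |= x >> 1
        x |= x >> 2
        x |= x >> 4
        x |= x >> 8
        x |= x >> 16
        return x + 1

    for n in (1, 5, 8, 100):
        print n, next_power_of_two(n)   # prints 1 1, 5 8, 8 8, 100 128
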
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/arraytypes.py
--- a/yt/arraytypes.py
+++ b/yt/arraytypes.py
@@ -43,14 +43,3 @@
     for atype in desc['formats']:
         blanks.append(na.zeros(elements, dtype=atype))
     return rec.fromarrays(blanks, **desc)
-
-class YTArrayHandler(object):
-    def __getattr__(self, name):
-        try:
-            return object.__getattribute__(self, name)
-        except AttributeError:
-            return getattr(na, name)
-        raise
-
-#na = YTArrayHandler()
-#print na.zeros




diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/astro_objects/api.py
--- a/yt/astro_objects/api.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-API for yt.astro_objects
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-
-from .simulation_volume import \
-    SimulationVolume
-
-from .clumped_region import \
-    ClumpedRegion


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/astro_objects/astrophysical_object.py
--- a/yt/astro_objects/astrophysical_object.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-A base-class representing an astrophysical object
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-astro_object_registry = {}
-
-class AstrophysicalObject(object):
-    # No _type_name
-    _skip_add = False
-
-    class __metaclass__(type):
-        def __init__(cls, name, b, d):
-            type.__init__(cls, name, b, d)
-            if hasattr(cls, "_type_name") and not cls._skip_add:
-                astro_object_registry[cls._type_name] = cls
-            cls.identification_methods = {}
-            cls.correlation_methods = {}
-
-    def _lookup_object(self, obj_name):
-        if obj_name not in astro_object_registry:
-            raise KeyError(obj_name)
-        return astro_object_registry[obj_name]
-
-    def correlate(self, other_collection, correlation_name):
-        pass
-
-    def __init__(self, data_source):
-        self.objects = {}
-        # We mandate that every object have a corresponding AMR3DData source
-        # affiliated with it.
-        self.data_source = data_source
-
-    def find(self, obj_name, identification_name, *args, **kwargs):
-        obj = self._lookup_object(obj_name)
-        if callable(identification_name):
-            identification_method = identification_name
-        else:
-            if identification_name not in obj.identification_methods:
-                raise KeyError(identification_name)
-            identification_method = \
-                obj.identification_methods[identification_name]
-        new_objs = identification_method(self, *args, **kwargs)
-        setattr(self, obj_name, new_objs)
-        self.objects[obj_name] = new_objs
-        return new_objs
-
-    def correlate(self, other_set, correlation_name, *args, **kwargs):
-        if callable(correlation_name):
-            correlation_method = correlation_name
-        else:
-            if correlation_name not in self.correlation_methods:
-                raise KeyError(correlation_name)
-            correlation_method = self.correlation_methods[correlation_name]
-        linked_objs = correlation_method(self, *args, **kwargs)
-        return linked_objs
-
-def correlation_method(obj_name, link_name):
-    def passthrough(func):
-        obj = astro_object_registry[obj_name]
-        obj.correlation_methods[link_name] = func
-        return func
-    return passthrough
-
-def identification_method(obj_name, id_name):
-    def passthrough(func):
-        obj = astro_object_registry[obj_name]
-        obj.identification_methods[id_name] = func
-        return func
-    return passthrough


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/astro_objects/clumped_region.py
--- a/yt/astro_objects/clumped_region.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-A base-class representing an astrophysical object
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-    
-class ClumpedRegion(AstrophysicalObject):
-    _type_name = "clumped_region"
-    def __init__(self, data_source):
-        AstrophysicalObject.__init__(self, data_source)
-
- at identification_method("clumped_region", "level_set")
-def clumps(obj, field, min_val):
-    ds = obj.data_source
-    mi, ma = ds.quantities["Extrema"](field)[0]
-    cls = obj.data_source.extract_connected_sets(field, 1, min_val, ma)
-    return [ClumpedRegion(o) for o in cls[1][0]]


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/astro_objects/setup.py
--- a/yt/astro_objects/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('astro_objects', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/astro_objects/simulation_volume.py
--- a/yt/astro_objects/simulation_volume.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""
-An AstrophysicalObject that represents a simulation volume
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-from .astrophysical_object import \
-    AstrophysicalObject, identification_method, correlation_method
-    
-class SimulationVolume(AstrophysicalObject):
-    _type_name = "simulation_volume"
-    def __init__(self, data_source):
-        AstrophysicalObject.__init__(self, data_source)


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -35,35 +35,6 @@
     output_type_registry, \
     EnzoRunDatabase
 
-def all_pfs(basedir='.', skip=None, max_depth=1, name_spec="*.hierarchy", **kwargs):
-    """
-    This function searchs a directory and its sub-directories, up to a
-    depth of *max_depth*, for parameter files.  It looks for the
-    *name_spec* and then instantiates an EnzoStaticOutput from
-    each. You can skip every *skip* parameter files, if *skip* is not
-    None; otherwise it will return all files.  All subsequent *kwargs*
-    are passed on to the EnzoStaticOutput constructor.
-    """
-    list_of_names = []
-    basedir = os.path.expanduser(basedir)
-    for i in range(max_depth):
-        bb = list('*' * i) + [name_spec]
-        list_of_names += glob.glob(os.path.join(basedir,*bb))
-    list_of_names.sort(key=lambda b: os.path.basename(b))
-    for fn in list_of_names[::skip]:
-        yield load(fn[:-10], **kwargs)
-
-def max_spheres(width, unit, **kwargs):
-    """
-    This calls :func:`~yt.convenience.all_pfs` and then for each parameter file
-    creates a :class:`~yt.data_objects.api.AMRSphereBase` for each one,
-    centered on the point of highest density, with radius *width* in units of
-    *unit*.
-    """
-    for pf in all_pfs(**kwargs):
-        v, c = pf.h.find_max("Density")
-        yield pf.h.sphere(c, width/pf[unit])
-
 def load(*args ,**kwargs):
     """
     This function attempts to determine the base data type of a filename or
@@ -76,22 +47,22 @@
         try:
             import Tkinter, tkFileDialog
         except ImportError:
-            return None
+            raise YTOutputNotIdentified(args, kwargs)
         root = Tkinter.Tk()
         filename = tkFileDialog.askopenfilename(parent=root,title='Choose a file')
         if filename != None:
             return load(filename)
         else:
-            return None
+            raise YTOutputNotIdentified(args, kwargs)
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
-    valid_file = [os.path.isfile(arg) if isinstance(arg, types.StringTypes) 
+    valid_file = [os.path.exists(arg) if isinstance(arg, types.StringTypes) 
             else False for arg in args]
     if not any(valid_file):
         mylog.error("None of the arguments provided to load() is a valid file")
         mylog.error("Please check that you have used a correct path")
-        return None
+        raise YTOutputNotIdentified(args, kwargs)
     for n, c in output_type_registry.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)
@@ -108,11 +79,11 @@
                and output_type_registry[n]._is_valid(fn):
                 return output_type_registry[n](fn)
         mylog.error("Couldn't figure out output type for %s", args[0])
-        return None
+        raise YTOutputNotIdentified(args, kwargs)
     mylog.error("Multiple output type candidates for %s:", args[0])
     for c in candidates:
         mylog.error("    Possible: %s", c)
-    return None
+    raise YTOutputNotIdentified(args, kwargs)
 
 def projload(pf, axis, weight_field = None):
     # This is something of a hack, so that we can just get back a projection
@@ -140,77 +111,3 @@
     f.close()
     return proj
 
-def _chunk(arrlike, chunksize = 800000):
-    total_size = arrlike.shape[0]
-    pbar = get_pbar("Transferring %s " % (arrlike.name), total_size)
-    start = 0; end = 0
-    bits = []
-    while start < total_size:
-        bits.append(arrlike[start:start+chunksize])
-        pbar.update(start)
-        start += chunksize
-    pbar.finish()
-    return na.concatenate(bits)
-
-def dapload(p, axis, weight_field = None):
-    r"""Load a projection dataset from a DAP server.
-
-    If you have projections stored externally on a DAP server, this function
-    can load them (transferring in chunks to avoid overloading) locally and
-    display them.
-
-    Parameters
-    ----------
-    p : string
-        URL for the dataset on the DAP server
-    axis : int
-        The axis of projection to load (0, 1, 2)
-    weight_field : string
-        The weight_field used in the projection
-
-    Returns
-    -------
-    projmock : ProjMock
-        This is a mockup of a projection that mostly fills the API.  It can be
-        used with `yt.visualization.image_panner.api.VariableMeshPanner`
-        objects.
-
-    See Also
-    --------
-    http://www.opendap.org/ and http://pydap.org/2.x/ . (Note that HDF5 is not
-    supported on PyDAP 3.x servers.)
-
-    Examples
-    --------
-
-    >>> p = "http://datasets-r-us.org/output_0013.h5"
-    >>> proj = dapload(p, 0, "Density")
-    >>> vmp = VariableMeshPanner(proj, (512, 512), "Density", ImageSaver(0))
-    >>> vmp.zoom(1.0)
-    """
-    class PFMock(dict):
-        domain_left_edge = na.zeros(3, dtype='float64')
-        domain_right_edge = na.ones(3, dtype='float64')
-    pf = PFMock()
-    class ProjMock(dict):
-        pass
-    import dap.client
-    f = dap.client.open(p)
-    b = f["Projections"]["%s" % (axis)]
-    wf = "weight_field_%s" % weight_field
-    if wf not in b: raise KeyError(wf)
-    fields = []
-    for k in b:
-        if k.name.startswith("weight_field"): continue
-        if k.name.endswith("_%s" % weight_field):
-            fields.append(k.name)
-    proj = ProjMock()
-    for f in ["px","py","pdx","pdy"]:
-        proj[f] = _chunk(b[f])
-    for f in fields:
-        new_name = f[:-(len(str(weight_field)) + 1)]
-        proj[new_name] = _chunk(b[f])
-    proj.axis = axis
-    proj.pf = pf
-    return proj
-


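Aside (not part of the patch): with this change load() raises instead of
returning None, so scripts should catch the exception. A hedged sketch,
assuming YTOutputNotIdentified is importable from yt.utilities.exceptions and
using an illustrative, nonexistent path:

    from yt.mods import load
    from yt.utilities.exceptions import YTOutputNotIdentified

    try:
        pf = load("not_a_real_output")
    except YTOutputNotIdentified:
        print "load() could not identify the dataset"
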
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -57,6 +57,7 @@
     ParameterFileStore
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
+from yt.utilities.orientation import Orientation
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -241,6 +242,8 @@
             pass
         elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
             center = na.array(center)
+        elif center in ("c", "center"):
+            center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
             center = self.pf.h.find_max("Density")[1]
         elif center.startswith("max_"):
@@ -494,7 +497,7 @@
         self._sorted = {}
 
     def get_data(self, fields=None, in_grids=False):
-        if self._grids == None:
+        if self._grids is None:
             self._get_list_of_grids()
         points = []
         if not fields:
@@ -1153,6 +1156,9 @@
     def _mrep(self):
         return MinimalSliceData(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1160,7 +1166,7 @@
     _type_name = "cutting"
     _con_args = ('normal', 'center')
     def __init__(self, normal, center, fields = None, node_name = None,
-                 **kwargs):
+                 north_vector = None, **kwargs):
         """
         This is a data object corresponding to an oblique slice through the
         simulation domain.
@@ -1209,16 +1215,11 @@
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
+        self.orienter = Orientation(normal, north_vector = north_vector)
+        self._norm_vec = self.orienter.normal_vector
         self._d = -1.0 * na.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = self.orienter.unit_vectors[0]
+        self._y_vec = self.orienter.unit_vectors[1]
         self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
         self._inv_mat = na.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
@@ -1338,7 +1339,7 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
-        height : height specifier
+        height : height specifier, optional
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
@@ -1679,6 +1680,9 @@
     def _mrep(self):
         return MinimalProjectionData(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
     def _convert_field_name(self, field):
         if field == "weight_field": return "weight_field_%s" % self._weight
         if field in self._key_fields: return field
@@ -2541,7 +2545,18 @@
     def cut_region(self, field_cuts):
         """
         Return an InLineExtractedRegion, where the grid cells are cut on the
-        fly with a set of field_cuts.
+        fly with a set of field_cuts.  It is very useful for applying 
+        conditions to the fields in your data object.
+        
+        Examples
+        --------
+        To find the total mass of gas above 10^6 K in your volume:
+
+        >>> pf = load("RedshiftOutput0005")
+        >>> ad = pf.h.all_data()
+        >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+        >>> print cr.quantities["TotalQuantity"]("CellMassMsun")
+
         """
         return InLineExtractedRegionBase(self, field_cuts)
 
@@ -3290,6 +3305,40 @@
         pointI = na.where(k == True)
         return pointI
 
+class AMRMaxLevelCollection(AMR3DData):
+    _type_name = "grid_collection_max_level"
+    _con_args = ("center", "max_level")
+    def __init__(self, center, max_level, fields = None,
+                 pf = None, **kwargs):
+        """
+        By selecting an arbitrary *max_level*, we can act on all grids at
+        or below that level.  Child cells are masked only where the grid's
+        level is below *max_level*.
+        """
+        AMR3DData.__init__(self, center, fields, pf, **kwargs)
+        self.max_level = max_level
+        self._refresh_data()
+
+    def _get_list_of_grids(self):
+        if self._grids is not None: return
+        gi = (self.pf.h.grid_levels <= self.max_level)[:,0]
+        self._grids = self.pf.h.grids[gi]
+
+    def _is_fully_enclosed(self, grid):
+        return True
+
+    @cache_mask
+    def _get_cut_mask(self, grid):
+        return na.ones(grid.ActiveDimensions, dtype='bool')
+
+    def _get_point_indices(self, grid, use_child_mask=True):
+        k = na.ones(grid.ActiveDimensions, dtype='bool')
+        if use_child_mask and grid.Level < self.max_level:
+            k[grid.child_indices] = False
+        pointI = na.where(k == True)
+        return pointI
+
+
 class AMRSphereBase(AMR3DData):
     """
     A sphere of points
@@ -3513,9 +3562,16 @@
             The resolution level data is uniformly gridded at
         left_edge : array_like
             The left edge of the region to be extracted
-        right_edge : array_like
-            The right edge of the region to be extracted
-
+        dims : array_like
+            Number of cells along each axis of resulting covering_grid
+        fields : array_like, optional
+            A list of fields that you'd like pre-generated for your object
+
+        Example
+        -------
+        cube = pf.h.covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+                                  dims=[128, 128, 128])
         """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
@@ -3651,7 +3707,8 @@
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
         """A 3D region with all data extracted and interpolated to a
-        single, specified resolution.
+        single, specified resolution. (Identical to covering_grid,
+        except that it interpolates.)
 
         Smoothed covering grids start at level 0, interpolating to
         fill the region to level 1, replacing any cells actually
@@ -3664,9 +3721,15 @@
             The resolution level data is uniformly gridded at
         left_edge : array_like
             The left edge of the region to be extracted
-        right_edge : array_like
-            The right edge of the region to be extracted
-
+        dims : array_like
+            Number of cells along each axis of resulting covering_grid.
+        fields : array_like, optional
+            A list of fields that you'd like pre-generated for your object
+
+        Example
+        -------
+        cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+                                  dims=[128, 128, 128])
         """
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
@@ -3704,10 +3767,16 @@
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
+                mylog.debug("Updating level state to %s", last_level + 1)
                 self._update_level_state(last_level + 1)
                 self._refine(1, fields_to_get)
                 last_level = grid.Level
             self._get_data_from_grid(grid, fields_to_get)
+        while last_level < self.level:
+            mylog.debug("Grid-free refinement %s to %s", last_level, last_level + 1)
+            self._update_level_state(last_level + 1)
+            self._refine(1, fields_to_get)
+            last_level += 1
         if self.level > 0:
             for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]


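Aside (not part of the patch): a hedged usage sketch for the new
grid_collection_max_level object, assuming it is exposed through the
hierarchy like other registered data objects; the dataset name is
illustrative:

    from yt.mods import load

    pf = load("RedshiftOutput0005")
    # all grids at levels <= 2; on levels 0-1, cells covered by children are masked
    coarse = pf.h.grid_collection_max_level([0.5, 0.5, 0.5], 2)
    print coarse["CellMassMsun"].sum()
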
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -354,7 +354,7 @@
         #   1 = number of cells
         #   2 = blank
         desc = {'names': ['numgrids','numcells','level'],
-                'formats':['Int32']*3}
+                'formats':['Int64']*3}
         self.level_stats = blankRecordArray(desc, MAXLEVEL)
         self.level_stats['level'] = [i for i in range(MAXLEVEL)]
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
@@ -381,18 +381,18 @@
         """
         Prints out (stdout) relevant information about the simulation
         """
-        header = "%3s\t%6s\t%11s" % ("level","# grids", "# cells")
+        header = "%3s\t%6s\t%14s" % ("level","# grids", "# cells")
         print header
         print "%s" % (len(header.expandtabs())*"-")
         for level in xrange(MAXLEVEL):
             if (self.level_stats['numgrids'][level]) == 0:
                 break
-            print "% 3i\t% 6i\t% 11i" % \
+            print "% 3i\t% 6i\t% 14i" % \
                   (level, self.level_stats['numgrids'][level],
                    self.level_stats['numcells'][level])
             dx = self.select_grids(level)[0].dds[0]
         print "-" * 28
-        print "   \t% 6i\t% 11i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
+        print "   \t% 6i\t% 14i" % (self.level_stats['numgrids'].sum(), self.level_stats['numcells'].sum())
         print "\n"
         try:
             print "z = %0.8f" % (self["CosmologyCurrentRedshift"])


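Aside (not part of the patch): the widened level_stats dtype matters because
a signed 32-bit cell counter overflows well before modern cell counts; a
quick check of our own:

    print 2**31 - 1     # 2147483647, the Int32 ceiling
    print 2048**3       # 8589934592 cells in a single 2048^3 root grid
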
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -128,6 +128,9 @@
     def _mrep(self):
         return MinimalStaticOutput(self)
 
+    def hub_upload(self):
+        self._mrep.upload()
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         return False


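Aside (not part of the patch): hub_upload() simply forwards to the object's
minimal representation. A hedged sketch of how it might be called, assuming
Hub credentials are already configured and using an illustrative dataset:

    from yt.mods import load

    pf = load("RedshiftOutput0005")
    proj = pf.h.proj(0, "Density")
    proj.hub_upload()   # uploads the MinimalProjectionData record
    pf.hub_upload()     # uploads the MinimalStaticOutput record
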
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -76,11 +76,10 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs = None, parallel = True):
-        if outputs is None: outputs = []
-        self.outputs = outputs
+    def __init__(self, outputs, parallel = True):
         self.tasks = AnalysisTaskProxy(self)
         self.params = TimeSeriesParametersContainer(self)
+        self._pre_outputs = outputs[:]
         for type_name in data_object_registry:
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
@@ -88,29 +87,38 @@
 
     def __iter__(self):
         # We can make this fancier, but this works
-        return self.outputs.__iter__()
+        for o in self._pre_outputs:
+            if isinstance(o, types.StringTypes):
+                yield load(o)
+            else:
+                yield o
 
     def __getitem__(self, key):
         if isinstance(key, types.SliceType):
             if isinstance(key.start, types.FloatType):
                 return self.get_range(key.start, key.stop)
-        return self.outputs[key]
+            # This will return a sliced up object!
+            return TimeSeriesData(self._pre_outputs[key], self.parallel)
+        o = self._pre_outputs[key]
+        if isinstance(o, types.StringTypes):
+            o = load(o)
+        return o
         
-    def _insert(self, pf):
-        # We get handed an instantiated parameter file
-        # Here we'll figure out a couple things about it, and then stick it
-        # inside our list.
-        self.outputs.append(pf)
-        
-    def eval(self, tasks, obj=None):
-        tasks = ensure_list(tasks)
-        return_values = {}
+    def __len__(self):
+        return len(self._pre_outputs)
+
+    def piter(self, storage = None):
         if self.parallel == False:
             njobs = 1
         else:
             if self.parallel == True: njobs = -1
             else: njobs = self.parallel
-        for store, pf in parallel_objects(self.outputs, njobs, return_values):
+        return parallel_objects(self, njobs, storage)
+        
+    def eval(self, tasks, obj=None):
+        tasks = ensure_list(tasks)
+        return_values = {}
+        for store, pf in self.piter(return_values):
             store.result = []
             for task in tasks:
                 try:
@@ -132,23 +140,20 @@
 
     @classmethod
     def from_filenames(cls, filename_list, parallel = True):
-        outputs = []
-        for fn in filename_list:
-            outputs.append(load(fn))
-        obj = cls(outputs, parallel = parallel)
+        obj = cls(filename_list[:], parallel = parallel)
         return obj
 
     @classmethod
     def from_output_log(cls, output_log,
                         line_prefix = "DATASET WRITTEN",
                         parallel = True):
-        outputs = []
+        filenames = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
             cut_line = line[len(line_prefix):].strip()
             fn = cut_line.split()[0]
-            outputs.append(load(fn))
-        obj = cls(outputs, parallel = parallel)
+            filenames.append(fn)
+        obj = cls(filenames, parallel = parallel)
         return obj
 
 class TimeSeriesQuantitiesContainer(object):


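Aside (not part of the patch): TimeSeriesData now stores filenames and defers
load() until iteration, and piter() hands the outputs out across processors.
A hedged sketch, assuming TimeSeriesData is exported by yt.mods; the filename
pattern is illustrative:

    from glob import glob
    from yt.mods import TimeSeriesData

    ts = TimeSeriesData.from_filenames(sorted(glob("DD????/DD????")))
    for pf in ts.piter():
        # each pf is loaded lazily, only when this processor reaches it
        print pf, pf.current_time
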
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -467,7 +467,7 @@
           units=r"\rm{s}^{-1}")
 
 def _Contours(field, data):
-    return na.ones(data["Density"].shape)*-1
+    return -na.ones_like(data["Ones"])
 add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
           display_field=False, function=_Contours)
 add_field("tempContours", function=_Contours,
@@ -483,20 +483,8 @@
     zv = data["z-velocity"] - bv[2]
     return xv, yv, zv
 
-def _SpecificAngularMomentum(field, data):
-    """
-    Calculate the angular velocity.  Returns a vector for each cell.
-    """
-    r_vec = obtain_rvec(data)
-    xv, yv, zv = obtain_velocities(data)
-    v_vec = na.array([xv,yv,zv], dtype='float64')
-    return na.cross(r_vec, v_vec, axis=0)
 def _convertSpecificAngularMomentum(data):
     return data.convert("cm")
-add_field("SpecificAngularMomentum",
-          function=_SpecificAngularMomentum,
-          convert_function=_convertSpecificAngularMomentum, vector_field=True,
-          units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
 def _convertSpecificAngularMomentumKMSMPC(data):
     return data.convert("mpc")/1e5
 
@@ -518,21 +506,6 @@
               convert_function=_convertSpecificAngularMomentum,
               units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
 
-add_field("SpecificAngularMomentumKMSMPC",
-          function=_SpecificAngularMomentum,
-          convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
-          units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
-def _AngularMomentum(field, data):
-    return data["CellMass"] * data["SpecificAngularMomentum"]
-add_field("AngularMomentum", function=_AngularMomentum,
-         units=r"\rm{g}\/\rm{cm}^2/\rm{s}", vector_field=True,
-         validators=[ValidateParameter('center')])
-def _AngularMomentumMSUNKMSMPC(field, data):
-    return data["CellMassMsun"] * data["SpecificAngularMomentumKMSMPC"]
-add_field("AngularMomentumMSUNKMSMPC", function=_AngularMomentum,
-          units=r"M_{\odot}\rm{km}\rm{Mpc}/\rm{s}", vector_field=True,
-         validators=[ValidateParameter('center')])
-
 def _AngularMomentumX(field, data):
     return data["CellMass"] * data["SpecificAngularMomentumX"]
 add_field("AngularMomentumX", function=_AngularMomentumX,
@@ -614,6 +587,9 @@
     add_field(n, function=eval("_%s" % n), particle_type=True,
               convert_function=_convertSpecificAngularMomentum,
               units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+    add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
+              convert_function=_convertSpecificAngularMomentumKMSMPC,
+              units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
 
 def _ParticleAngularMomentum(field, data):
     return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
@@ -887,3 +863,376 @@
           units=r"\rm{s}^{-2}",
           convert_function=_convertVorticitySquared)
 
+def _gradPressureX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dx'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][sl_right,1:-1,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][sl_left ,1:-1,1:-1]/ds
+    return new_field
+def _gradPressureY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dy'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,sl_right,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,sl_left ,1:-1]/ds
+    return new_field
+def _gradPressureZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Pressure"].shape, dtype='float64')
+    ds = div_fac * data['dz'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Pressure"][1:-1,1:-1,sl_right]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Pressure"][1:-1,1:-1,sl_left ]/ds
+    return new_field
+def _convertgradPressure(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "gradPressure%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertgradPressure,
+              validators=[ValidateSpatial(1, ["Pressure"])],
+              units=r"\rm{dyne}/\rm{cm}^{3}")
+
+def _gradPressureMagnitude(field, data):
+    return na.sqrt(data["gradPressureX"]**2 +
+                   data["gradPressureY"]**2 +
+                   data["gradPressureZ"]**2)
+add_field("gradPressureMagnitude", function=_gradPressureMagnitude,
+          validators=[ValidateSpatial(1, ["Pressure"])],
+          units=r"\rm{dyne}/\rm{cm}^{3}")
+
+def _gradDensityX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dx'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][sl_right,1:-1,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][sl_left ,1:-1,1:-1]/ds
+    return new_field
+def _gradDensityY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dy'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,sl_right,1:-1]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,sl_left ,1:-1]/ds
+    return new_field
+def _gradDensityZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["Density"].shape, dtype='float64')
+    ds = div_fac * data['dz'].flat[0]
+    new_field[1:-1,1:-1,1:-1]  = data["Density"][1:-1,1:-1,sl_right]/ds
+    new_field[1:-1,1:-1,1:-1] -= data["Density"][1:-1,1:-1,sl_left ]/ds
+    return new_field
+def _convertgradDensity(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "gradDensity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertgradDensity,
+              validators=[ValidateSpatial(1, ["Density"])],
+              units=r"\rm{g}/\rm{cm}^{4}")
+
+def _gradDensityMagnitude(field, data):
+    return na.sqrt(data["gradDensityX"]**2 +
+                   data["gradDensityY"]**2 +
+                   data["gradDensityZ"]**2)
+add_field("gradDensityMagnitude", function=_gradDensityMagnitude,
+          validators=[ValidateSpatial(1, ["Density"])],
+          units=r"\rm{g}/\rm{cm}^{4}")
+
+def _BaroclinicVorticityX(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureY"] * data["gradDensityZ"] -
+            data["gradPressureZ"] * data["gradDensityY"]) / rho2
+def _BaroclinicVorticityY(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureZ"] * data["gradDensityX"] -
+            data["gradPressureX"] * data["gradDensityZ"]) / rho2
+def _BaroclinicVorticityZ(field, data):
+    rho2 = data["Density"].astype('float64')**2
+    return (data["gradPressureX"] * data["gradDensityY"] -
+            data["gradPressureY"] * data["gradDensityX"]) / rho2
+for ax in 'XYZ':
+    n = "BaroclinicVorticity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
+          units=r"\rm{s}^{-1}")
+
+def _BaroclinicVorticityMagnitude(field, data):
+    return na.sqrt(data["BaroclinicVorticityX"]**2 +
+                   data["BaroclinicVorticityY"]**2 +
+                   data["BaroclinicVorticityZ"]**2)
+add_field("BaroclinicVorticityMagnitude",
+          function=_BaroclinicVorticityMagnitude,
+          validators=[ValidateSpatial(1, ["Density", "Pressure"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityX(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["z-velocity"][1:-1,sl_right,1:-1] -
+                                 data["z-velocity"][1:-1,sl_left,1:-1]) \
+                                 / (div_fac*data["dy"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["y-velocity"][1:-1,1:-1,sl_right] -
+                                  data["y-velocity"][1:-1,1:-1,sl_left]) \
+                                  / (div_fac*data["dz"].flat[0])
+    return new_field
+def _VorticityY(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["z-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["x-velocity"][1:-1,1:-1,sl_right] -
+                                 data["x-velocity"][1:-1,1:-1,sl_left]) \
+                                 / (div_fac*data["dz"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["z-velocity"][sl_right,1:-1,1:-1] -
+                                  data["z-velocity"][sl_left,1:-1,1:-1]) \
+                                  / (div_fac*data["dx"].flat[0])
+    return new_field
+def _VorticityZ(field, data):
+    # We need to set up stencils
+    if data.pf["HydroMethod"] == 2:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(1,-1,None)
+        div_fac = 1.0
+    else:
+        sl_left = slice(None,-2,None)
+        sl_right = slice(2,None,None)
+        div_fac = 2.0
+    new_field = na.zeros(data["x-velocity"].shape, dtype='float64')
+    new_field[1:-1,1:-1,1:-1] = (data["y-velocity"][sl_right,1:-1,1:-1] -
+                                 data["y-velocity"][sl_left,1:-1,1:-1]) \
+                                 / (div_fac*data["dx"].flat[0])
+    new_field[1:-1,1:-1,1:-1] -= (data["x-velocity"][1:-1,sl_right,1:-1] -
+                                  data["x-velocity"][1:-1,sl_left,1:-1]) \
+                                  / (div_fac*data["dy"].flat[0])
+    return new_field
+def _convertVorticity(data):
+    return 1.0/data.convert("cm")
+for ax in 'XYZ':
+    n = "Vorticity%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertVorticity,
+              validators=[ValidateSpatial(1, 
+                          ["x-velocity", "y-velocity", "z-velocity"])],
+              units=r"\rm{s}^{-1}")
+
+def _VorticityMagnitude(field, data):
+    return na.sqrt(data["VorticityX"]**2 +
+                   data["VorticityY"]**2 +
+                   data["VorticityZ"]**2)
+add_field("VorticityMagnitude", function=_VorticityMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityStretchingX(field, data):
+    return data["DivV"] * data["VorticityX"]
+def _VorticityStretchingY(field, data):
+    return data["DivV"] * data["VorticityY"]
+def _VorticityStretchingZ(field, data):
+    return data["DivV"] * data["VorticityZ"]
+for ax in 'XYZ':
+    n = "VorticityStretching%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(0)])
+def _VorticityStretchingMagnitude(field, data):
+    return na.sqrt(data["VorticityStretchingX"]**2 +
+                   data["VorticityStretchingY"]**2 +
+                   data["VorticityStretchingZ"]**2)
+add_field("VorticityStretchingMagnitude", 
+          function=_VorticityStretchingMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityGrowthX(field, data):
+    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"]
+def _VorticityGrowthY(field, data):
+    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"]
+def _VorticityGrowthZ(field, data):
+    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"]
+for ax in 'XYZ':
+    n = "VorticityGrowth%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(1, 
+                          ["x-velocity", "y-velocity", "z-velocity"])],
+              units=r"\rm{s}^{-2}")
+def _VorticityGrowthMagnitude(field, data):
+    result = na.sqrt(data["VorticityGrowthX"]**2 +
+                     data["VorticityGrowthY"]**2 +
+                     data["VorticityGrowthZ"]**2)
+    dot = na.zeros(result.shape)
+    for ax in "XYZ":
+        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
+    result = na.sign(dot) * result
+    return result
+add_field("VorticityGrowthMagnitude", function=_VorticityGrowthMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}",
+          take_log=False)
+def _VorticityGrowthMagnitudeABS(field, data):
+    return na.sqrt(data["VorticityGrowthX"]**2 +
+                   data["VorticityGrowthY"]**2 +
+                   data["VorticityGrowthZ"]**2)
+add_field("VorticityGrowthMagnitudeABS", function=_VorticityGrowthMagnitudeABS,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityGrowthTimescale(field, data):
+    domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
+    domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
+    domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
+    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
+          validators=[ValidateSpatial(1, 
+                      ["x-velocity", "y-velocity", "z-velocity"])],
+          units=r"\rm{s}")
+
+########################################################################
+# With radiation pressure
+########################################################################
+
+def _VorticityRadPressureX(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel2"] * data["gradDensityZ"] -
+            data["RadAccel3"] * data["gradDensityY"]) / rho
+def _VorticityRadPressureY(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel3"] * data["gradDensityX"] -
+            data["RadAccel1"] * data["gradDensityZ"]) / rho
+def _VorticityRadPressureZ(field, data):
+    rho = data["Density"].astype('float64')
+    return (data["RadAccel1"] * data["gradDensityY"] -
+            data["RadAccel2"] * data["gradDensityX"]) / rho
+def _convertRadAccel(data):
+    return data.convert("x-velocity")/data.convert("Time")
+for ax in 'XYZ':
+    n = "VorticityRadPressure%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              convert_function=_convertRadAccel,
+              validators=[ValidateSpatial(1, 
+                   ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+              units=r"\rm{s}^{-1}")
+
+def _VorticityRadPressureMagnitude(field, data):
+    return na.sqrt(data["VorticityRadPressureX"]**2 +
+                   data["VorticityRadPressureY"]**2 +
+                   data["VorticityRadPressureZ"]**2)
+add_field("VorticityRadPressureMagnitude",
+          function=_VorticityRadPressureMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityRPGrowthX(field, data):
+    return -data["VorticityStretchingX"] - data["BaroclinicVorticityX"] \
+           -data["VorticityRadPressureX"]
+def _VorticityRPGrowthY(field, data):
+    return -data["VorticityStretchingY"] - data["BaroclinicVorticityY"] \
+           -data["VorticityRadPressureY"]
+def _VorticityRPGrowthZ(field, data):
+    return -data["VorticityStretchingZ"] - data["BaroclinicVorticityZ"] \
+           -data["VorticityRadPressureZ"]
+for ax in 'XYZ':
+    n = "VorticityRPGrowth%s" % ax
+    add_field(n, function=eval("_%s" % n),
+              validators=[ValidateSpatial(1, 
+                       ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+              units=r"\rm{s}^{-1}")
+def _VorticityRPGrowthMagnitude(field, data):
+    result = na.sqrt(data["VorticityRPGrowthX"]**2 +
+                     data["VorticityRPGrowthY"]**2 +
+                     data["VorticityRPGrowthZ"]**2)
+    dot = na.zeros(result.shape)
+    for ax in "XYZ":
+        dot += data["Vorticity%s" % ax] * data["VorticityGrowth%s" % ax]
+    result = na.sign(dot) * result
+    return result
+add_field("VorticityRPGrowthMagnitude", function=_VorticityGrowthMagnitude,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}",
+          take_log=False)
+def _VorticityRPGrowthMagnitudeABS(field, data):
+    return na.sqrt(data["VorticityRPGrowthX"]**2 +
+                   data["VorticityRPGrowthY"]**2 +
+                   data["VorticityRPGrowthZ"]**2)
+add_field("VorticityRPGrowthMagnitudeABS", 
+          function=_VorticityRPGrowthMagnitudeABS,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")
+
+def _VorticityRPGrowthTimescale(field, data):
+    domegax_dt = data["VorticityX"] / data["VorticityRPGrowthX"]
+    domegay_dt = data["VorticityY"] / data["VorticityRPGrowthY"]
+    domegaz_dt = data["VorticityZ"] / data["VorticityRPGrowthZ"]
+    return na.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
+add_field("VorticityRPGrowthTimescale", function=_VorticityRPGrowthTimescale,
+          validators=[ValidateSpatial(1, 
+                      ["Density", "RadAccel1", "RadAccel2", "RadAccel3"])],
+          units=r"\rm{s}^{-1}")


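A quick note on the block above: each gradient and vorticity component is a one-zone finite-difference stencil, which is why every add_field call carries ValidateSpatial(1, [...]); yt then supplies one layer of ghost zones per grid before the stencil is evaluated, and the *Magnitude fields simply combine the components in quadrature. A minimal usage sketch, assuming a yt-2.x style dataset at the made-up path "DD0010/DD0010":

from yt.mods import load

pf = load("DD0010/DD0010")              # hypothetical dataset path
sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)  # radius in code units
# ValidateSpatial(1) on the definitions above makes yt build the ghost
# zones automatically, so the stencil fields act like any other field.
print "peak |grad rho|:", sp["gradDensityMagnitude"].max()
print "peak vorticity: ", sp["VorticityMagnitude"].max()
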
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,8 +30,6 @@
 import os
 import struct
 
-import pdb
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
@@ -56,6 +54,21 @@
 
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
+    
+from yt.frontends.art.definitions import art_particle_field_names
+
+from yt.frontends.art.io import _read_child_mask_level
+from yt.frontends.art.io import read_particles
+from yt.frontends.art.io import read_stars
+from yt.frontends.art.io import _count_art_octs
+from yt.frontends.art.io import _read_art_level_info
+from yt.frontends.art.io import _read_art_child
+from yt.frontends.art.io import _skip_record
+from yt.frontends.art.io import _read_record
+from yt.frontends.art.io import _read_frecord
+from yt.frontends.art.io import _read_record_size
+from yt.frontends.art.io import _read_struct
+from yt.frontends.art.io import b2t
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -68,14 +81,21 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, start_index):
+    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
+        start_index = props[0]
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
+        
+        self.LeftEdge = props[0]
+        self.RightEdge = props[1]
+        self.ActiveDimensions = props[2] 
+        #if child_mask is not None:
+        #    self._set_child_mask(child_mask)
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -118,92 +138,58 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
-
+        self._setup_field_list()
+        
     def _initialize_data_storage(self):
         pass
 
     def _detect_fields(self):
         # This will need to be generalized to be used elsewhere.
         self.field_list = [ 'Density','TotalEnergy',
-                            'x-momentum','y-momentum','z-momentum',
-                            'Pressure','Gamma','GasEnergy',
-                            'Metal_DensitySNII', 'Metal_DensitySNIa',
-                            'Potential_New','Potential_Old']
-    
+             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+             'Pressure','Gamma','GasEnergy',
+             'MetalDensitySNII', 'MetalDensitySNIa',
+             'PotentialNew','PotentialOld']
+        self.field_list += art_particle_field_names
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
 
     def _count_grids(self):
-        # We have to do all the patch-coalescing here.
-        #level_info is used by the IO so promoting it to the static
-        # output class
-        #self.pf.level_info = [self.pf.ncell] # skip root grid for now
-        #leve_info = []
-        # amr_utils.count_art_octs(
-        #         self.pf.parameter_filename, self.pf.child_grid_offset,
-        #         self.pf.min_level, self.pf.max_level, self.pf.nhydro_vars,
-        #         self.pf.level_info)
+        LEVEL_OF_EDGE = 7
+        MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
+        
+        min_eff = 0.30
+        
+        vol_max = 128**3
         
         f = open(self.pf.parameter_filename,'rb')
-        self.pf.nhydro_vars, self.pf.level_info = _count_art_octs(f, 
+        
+        
+        (self.pf.nhydro_vars, self.pf.level_info,
+        self.pf.level_oct_offsets, 
+        self.pf.level_child_offsets) = \
+                         _count_art_octs(f, 
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        f.close()
-        self.pf.level_info = na.array(self.pf.level_info)
-        num_ogrids = sum(self.pf.level_info) + self.pf.iOctFree
-        print 'found %i oct grids'%num_ogrids
-        num_ogrids *=7
-        print 'instantiating... %i grids'%num_ogrids
-        ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
-        ogrid_levels = na.zeros(num_ogrids, dtype='int64')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        
-        #don't need parents?
-        #ogrid_parents = na.zeros(num_ogrids, dtype="int64")
-        
-        #don't need masks?
-        #ochild_masks = na.zeros((num_ogrids, 8), dtype='int64').ravel()
-        
-        self.pf.level_offsets = amr_utils.read_art_tree(
-                                self.pf.parameter_filename, 
-                                self.pf.child_grid_offset,
-                                self.pf.min_level, self.pf.max_level,
-                                ogrid_left_indices, ogrid_levels,
-                                ogrid_file_locations)
-                                #ochild_masks,
-                                #ogrid_parents, 
-                                
+        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_offsets = self.pf.level_child_offsets
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
-        #ochild_masks.reshape((num_ogrids, 8), order="F")
-        ogrid_levels[ogrid_left_indices[:,0] == -999] = -1
-        # This bit of code comes from Chris, and I'm still not sure I have a
-        # handle on what it does.
-        final_indices =  ogrid_left_indices[na.where(ogrid_levels==self.pf.max_level)[0]]
-        divisible=[na.all((final_indices%2**(level))==0) 
-            for level in xrange(self.pf.max_level*2)]
-        root_level = self.pf.max_level+na.where(na.logical_not(divisible))[0][0] 
-        ogrid_dimension = na.zeros(final_indices.shape,dtype='int')+2
-        ogrid_left_indices = ogrid_left_indices/2**(root_level - ogrid_levels[:,None] - 1) - 1
-
-        # Now we can rescale
-        # root_psg = _ramses_reader.ProtoSubgrid(
-        #                 na.zeros(3, dtype='int64'), # left index of PSG
-        #                 self.pf.domain_dimensions, # dim of PSG
-        #                 na.zeros((1,3), dtype='int64'), # left edges of grids
-        #                 self.pf.domain_dimensions[None,:], # right edges of grids
-        #                 self.pf.domain_dimensions[None,:], # dims of grids
-        #                 na.zeros((1,6), dtype='int64') # empty
-        #                 )
+        
+        self.pf.level_art_child_masks = {}
+        cm = self.pf.root_iOctCh>0
+        cm_shape = (1,)+cm.shape 
+        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
+        del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
@@ -217,198 +203,315 @@
             if self.pf.level_info[level] == 0:
                 self.proto_grids.append([])
                 continue
-            ggi = (ogrid_levels == level).ravel()
-            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
-            nd = self.pf.domain_dimensions * 2**level
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
-            fl = ogrid_file_locations[ggi,:]
-            # Now our initial protosubgrid
-            #if level == 6: raise RuntimeError
-            # We want grids that cover no more than MAX_EDGE cells in every direction
-            MAX_EDGE = 128
             psgs = []
+            effs,sizes = [], []
+
+            if level > self.pf.limit_level : continue
+            
             #refers to the left index for the art octgrid
-            left_index = ogrid_left_indices[ggi,:]
-            right_index = left_index + 2
-            #Since we are re-gridding these octs on larger meshes
-            #each sub grid has length MAX_EDGE, and so get the LE of
-            #grids fit inside the domain
-            # nd is the dimensions of the domain at this level
-            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
-            #lefts = zip(*[l.ravel() for l in lefts])
-            pbar = get_pbar("Re-gridding ", lefts[0].size)
-            min_ind = na.min(left_index, axis=0)
-            max_ind = na.max(right_index, axis=0)
+            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
-            #iterate over the ith dimension of the yt grids
-            for i,dli in enumerate(lefts[0]):
-                pbar.update(i)
+            #read in the child masks for this level and save them
+            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
+                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+            self.pf.level_art_child_masks[level]=art_child_mask
+            #child_mask is zero where child grids exist and
+            #thus where higher resolution data is available
+            
+            
+            #compute the hilbert indices up to a certain level
+            #the indices will associate an oct grid to the nearest
+            #hilbert index?
+            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
+                              na.log10(2))
+            hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                    level + base_level, left_index)
+            #print base_level, hilbert_indices.max(),
+            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+            #print hilbert_indices.max()
+            
+            # Strictly speaking, we don't care about the index of any
+            # individual oct at this point.  So we can then split them up.
+            unique_indices = na.unique(hilbert_indices)
+            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                        level, unique_indices.size, hilbert_indices.size)
+            
+            #use the hilbert indices to order oct grids so that consecutive
+            #items on a list are spatially near each other
+            #this is useful because we will define grid patches over these
+            #octs, which are more efficient if the octs are spatially close
+            
+            #split into list of lists, with domains containing 
+            #lists of sub octgrid left indices and an index
+            #referring to the domain on which they live
+            pbar = get_pbar("Calc Hilbert Indices ",1)
+            locs, lefts = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices, left_index, fl)
+            pbar.finish()
+            
+            #iterate over the domains    
+            step=0
+            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+            psg_eff = []
+            for ddleft_index, ddfl in zip(lefts, locs):
+                #iterate over just the unique octs
+                #why would we ever have non-unique octs?
+                #perhaps the hilbert ordering may visit the same
+                #oct multiple times - review only unique octs 
+                #for idomain in na.unique(ddfl[:,1]):
+                #dom_ind = ddfl[:,1] == idomain
+                #dleft_index = ddleft_index[dom_ind,:]
+                #dfl = ddfl[dom_ind,:]
                 
-                #skip this grid if there are no art grids inside
-                #of the zeroeth dimension
-                if min_ind[0] > dli + nd[0]: continue
-                if max_ind[0] < dli: continue
+                dleft_index = ddleft_index
+                dfl = ddfl
+                initial_left = na.min(dleft_index, axis=0)
+                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                #this creates a grid patch that doesn't cover the whole level
+                #necessarily, but with other patches covers all the regions
+                #with octs. This object automatically shrinks its size
+                #to barely encompass the octs inside of it.
+                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                dleft_index, dfl)
+                if psg.efficiency <= 0: continue
                 
-                # span of the current domain limited to max_edge
-                idim = min(nd[0] - dli, MAX_EDGE)
-
-                #gdi finds all of the art octs grids inside the 
-                #ith dimension of our current grid
-                gdi = ((dli  <= right_index[:,0])
-                     & (dli + idim >= left_index[:,0]))
-                     
-
-                #if none of our art octs fit inside, skip                    
-                if not na.any(gdi): continue
+                #because grid patches may still be mostly empty, and the octs
+                #may only partially fill them, it can be more efficient
+                #to split large patches into smaller ones. We split a patch
+                #if its volume exceeds vol_max or its efficiency is below min_eff
+                if idims.prod() > vol_max or psg.efficiency < min_eff:
+                    psg_split = _ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl,min_eff=min_eff,use_center=True,
+                        split_on_vol=vol_max)
+                    
+                    psgs.extend(psg_split)
+                    psg_eff += [x.efficiency for x in psg_split] 
+                else:
+                    psgs.append(psg)
+                    psg_eff += [psg.efficiency,]
                 
-                #iterate over the jth dimension of the yt grids
-                for dlj in lefts[1]:
-                    
-                    #this is the same process as in the previous dimension
-                    #find art octs inside this grid's jth dimension, 
-                    #skip if there are none
-                    if min_ind[1] > dlj + nd[1]: continue
-                    if max_ind[1] < dlj: continue
-                    idim = min(nd[1] - dlj, MAX_EDGE)
-                    gdj = ((dlj  <= right_index[:,1])
-                         & (dlj + idim >= left_index[:,1])
-                         & (gdi))
-                    if not na.any(gdj): continue
-                    
-                    #Same story: iterate over kth dimension grids
-                    for dlk in lefts[2]:
-                        if min_ind[2] > dlk + nd[2]: continue
-                        if max_ind[2] < dlk: continue
-                        idim = min(nd[2] - dlk, MAX_EDGE)
-                        gdk = ((dlk  <= right_index[:,2])
-                             & (dlk + idim >= left_index[:,2])
-                             & (gdj))
-                        if not na.any(gdk): continue
-                        
-                        #these are coordinates for yt grid
-                        left = na.array([dli, dlj, dlk])
-                        
-                        #does this ravel really do anything?
-                        domain_left = left.ravel()
-                        
-                        #why are we adding this to zero?
-                        initial_left = na.zeros(3, dtype='int64') + domain_left
-                        
-                        #still not sure why multiplying against one 
-                        #just type casting?
-                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
-                        
-                        # We want to find how many grids are inside.
-                        
-                        #this gives us the LE and RE, domain dims,
-                        # and file locations
-                        # for art octs within this grid
-                        dleft_index = left_index[gdk,:]
-                        dright_index = right_index[gdk,:]
-                        ddims = dims[gdk,:]
-                        dfl = fl[gdk,:]
-                        
-                        #create a sub grid composed
-                        #of the new yt grid LE, span,
-                        #and a series of the contained art grid properties:
-                        # left edge, right edge, (not sure what dims is) and file locations
-                        psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                        dleft_index, dfl)
-                        
-                        #print "Gridding from %s to %s + %s" % (
-                        #    initial_left, initial_left, idims)
-                        if psg.efficiency <= 0: continue
-                        self.num_deep = 0
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        
-                        #I'm not sure how this patch splitting process
-                        #does, or how it works
-                        psgs.extend(_ramses_reader.recursive_patch_splitting(
-                            psg, idims, initial_left, dleft_index, dfl))
-                        
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        psgs.extend([psg])
+                tol = 1.00001
+                
+                
+                step+=1
+                pbar.update(step)
+            eff_mean = na.mean(psg_eff)
+            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_nall = len(psg_eff)
+            mylog.info("Average subgrid efficiency %02.1f %%",
+                        eff_mean*100.0)
+            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
+            
+        
+            mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            sums = na.zeros(3, dtype='int64')
-            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
+            #print sum(len(psg.grid_file_locations) for psg in psgs)
+            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
-            # for g in self.proto_grids[level]:
-            #     sums += [s.sum() for s in g.sigs]
-            # assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
+                    
+            
+            
 
     num_deep = 0
 
-    # @num_deep_inc
-    # def _recursive_patch_splitting(self, psg, dims, ind,
-    #         left_index, right_index, gdims, fl):
-    #     min_eff = 0.1 # This isn't always respected.
-    #     if self.num_deep > 40:
-    #         # If we've recursed more than 100 times, we give up.
-    #         psg.efficiency = min_eff
-    #         return [psg]
-    #     if psg.efficiency > min_eff or psg.efficiency < 0.0:
-    #         return [psg]
-    #     tt, ax, fp = psg.find_split()
-    #     if (fp % 2) != 0:
-    #         if dims[ax] != fp + 1:
-    #             fp += 1
-    #         else:
-    #             fp -= 1
-    #     #print " " * self.num_deep + "Got ax", ax, "fp", fp
-    #     dims_l = dims.copy()
-    #     dims_l[ax] = fp
-    #     li_l = ind.copy()
-    #     if na.any(dims_l <= 0): return [psg]
-    #     L = _ramses_reader.ProtoSubgrid(
-    #             li_l, dims_l, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "L", tt, L.efficiency
-    #     #if L.efficiency > 1.0: raise RuntimeError
-    #     if L.efficiency <= 0.0: L = []
-    #     elif L.efficiency < min_eff:
-    #         L = self._recursive_patch_splitting(L, dims_l, li_l,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         L = [L]
-    #     dims_r = dims.copy()
-    #     dims_r[ax] -= fp
-    #     li_r = ind.copy()
-    #     li_r[ax] += fp
-    #     if na.any(dims_r <= 0): return [psg]
-    #     R = _ramses_reader.ProtoSubgrid(
-    #             li_r, dims_r, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "R", tt, R.efficiency
-    #     #if R.efficiency > 1.0: raise RuntimeError
-    #     if R.efficiency <= 0.0: R = []
-    #     elif R.efficiency < min_eff:
-    #         R = self._recursive_patch_splitting(R, dims_r, li_r,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         R = [R]
-    #     return L + R
         
     def _parse_hierarchy(self):
-        # We have important work to do
+        """ The root grid has no octs except one which is refined.
+        Still, it is the size of 128 cells along a length.
+        Ignore the proto subgrid created for the root grid - it is wrong.
+        """
         grids = []
         gi = 0
+        
         for level, grid_list in enumerate(self.proto_grids):
+            #The root level spans [0,2]
+            #The next level spans [0,256]
+            #The 3rd Level spans up to 128*2^3, etc.
+            #Correct root level to span up to 128
+            correction=1L
+            if level == 0:
+                correction=64L
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()
+                props = g.get_properties()*correction
                 dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
                 self.grid_left_edge[gi,:] = props[0,:] / dds
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                child_mask = na.zeros(props[2,:],'uint8')
+                amr_utils.fill_child_mask(fl,props[0],
+                    self.pf.level_art_child_masks[level],
+                    child_mask)
+                grids.append(self.grid(gi, self, level, fl, 
+                    props*na.array(correction).astype('int64')))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
+        
+
+        if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
+            lspecies = self.pf.parameters['lspecies']
+            wspecies = self.pf.parameters['wspecies']
+            Nrow     = self.pf.parameters['Nrow']
+            nstars = lspecies[-1]
+            a = self.pf.parameters['aexpn']
+            hubble = self.pf.parameters['hubble']
+            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
+            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
+            um  = self.pf.parameters['aM0'] #mass units in solar masses
+            um *= 1.989e33 #convert solar masses to grams 
+            pbar = get_pbar("Loading Particles   ",5)
+            self.pf.particle_position,self.pf.particle_velocity = \
+                read_particles(self.pf.file_particle_data,nstars,Nrow)
+            pbar.update(1)
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #self.pf.particle_position  -= 1.0 #fortran indices start at 1, shift to 0-based
+            pbar.update(2)
+            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            pbar.update(3)
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+            self.pf.particle_velocity  *= uv #to proper cm/s
+            pbar.update(4)
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            
+            dist = self.pf['cm']/self.pf.domain_dimensions[0]
+            self.pf.conversion_factors['particle_mass'] = 1.0 #masses already stored in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #masses already stored in g
+            self.pf.conversion_factors['particle_species'] = 1.0
+            for ax in 'xyz':
+                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
+                #already in unitary units
+                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
+            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
+            self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            
+
+            a,b=0,0
+            for i,(b,m) in enumerate(zip(lspecies,wspecies)):
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in grams
+                a=b
+            pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i particles of type %i'%(np,j))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
+            
+            self.pf.particle_star_index = i
+            
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
+                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
+                     = read_stars(self.pf.file_star_data,nstars,Nrow)
+                nstars = nstars[0] 
+                if nstars > 0 :
+                    n=min(1e2,len(tbirth))
+                    pbar = get_pbar("Stellar Ages        ",n)
+                    sages  = \
+                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
+                    pbar.finish()
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
+                    self.pf.particle_mass[-nstars:] = mass*um
+
+            done = 0
+            init = self.pf.particle_position.shape[0]
+            pos = self.pf.particle_position
+            #particle indices travel with the particle positions
+            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #if type(self.pf.grid_particles) == type(5):
+            #    max_level = min(max_level,self.pf.grid_particles)
+            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            
+            #grid particles at the finest level, removing them once gridded
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
+            pbar.finish()
+            
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
+                g.hierarchy.grid_particle_count = grid_particle_count
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
+            
+        pbar = get_pbar("Finalizing grids ",len(grids))
+        for gi, g in enumerate(grids): 
+            self.grids[gi] = g
+        pbar.finish()
+            
 
     def _get_grid_parents(self, grid, LE, RE):
         mask = na.zeros(self.num_grids, dtype='bool')
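
The regridding in _count_grids above delegates the heavy lifting to the _ramses_reader helpers, but the organizing idea is straightforward: key each oct by a coarsened spatial (Hilbert) index, collect the octs that share a key, and wrap each group in a ProtoSubgrid, splitting it further if it is too large or too empty. A self-contained numpy sketch of just the grouping step, using fake data and a plain coordinate key in place of the Hilbert index:

import numpy as na   # matching the alias used throughout this file

# fake oct left indices on a 256^3 fine mesh
left_index = na.random.randint(0, 256, size=(1000, 3))
# drop the LEVEL_OF_EDGE = 7 finest bits per axis, so octs inside the
# same 128-cell block end up with the same key
coarse = left_index >> 7
keys = coarse[:, 0] + (coarse[:, 1] << 8) + (coarse[:, 2] << 16)
for k in na.unique(keys):
    members = left_index[keys == k]
    # each group would seed one candidate patch spanning
    # members.min(axis=0) to members.max(axis=0) + 2
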
@@ -429,6 +532,54 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
+    # def _populate_grid_objects(self):
+    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     pb = get_pbar("Populating grids", len(self.grids))
+    #     for gi,g in enumerate(self.grids):
+    #         pb.update(gi)
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level - 1,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask)
+    #         parents = self.grids[mask.astype("bool")]
+    #         if len(parents) > 0:
+    #             g.Parent.extend((p for p in parents.tolist()
+    #                     if p.locations[0,0] == g.locations[0,0]))
+    #             for p in parents: p.Children.append(g)
+    #         # Now we do overlapping siblings; note that one has to "win" with
+    #         # siblings, so we assume the lower ID one will "win"
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask, gi)
+    #         mask[gi] = False
+    #         siblings = self.grids[mask.astype("bool")]
+    #         if len(siblings) > 0:
+    #             g.OverlappingSiblings = siblings.tolist()
+    #         g._prepare_grid()
+    #         g._setup_dx()
+    #     pb.finish()
+    #     self.max_level = self.grid_levels.max()
+
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=True, particle_type=True)
+
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
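A side note on _setup_field_list above: the external_wrapper indirection is needed because Python closures capture loop variables by reference, so without it every particle field would convert using whichever field name the loop finished on. A tiny standalone illustration of the pitfall and the fix (nothing here is yt-specific; the names are invented for the example):

class _Data(object):
    def convert(self, f):
        return "convert(%s)" % f

# without a wrapper, both callables close over the same loop variable
broken = [lambda data: data.convert(name) for name in ("a", "b")]
print broken[0](_Data())   # prints convert(b), not convert(a)

# external_wrapper freezes the current value inside its own scope
def make_converter(f):
    def _convert(data):
        return data.convert(f)
    return _convert
fixed = [make_converter(name) for name in ("a", "b")]
print fixed[0](_Data())    # prints convert(a)
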
@@ -446,16 +597,65 @@
     _handle = None
     
     def __init__(self, filename, data_style='art',
-                 storage_filename = None):
+                 storage_filename = None, 
+                 file_particle_header=None, 
+                 file_particle_data=None,
+                 file_star_data=None,
+                 discover_particles=False,
+                 use_particles=True,
+                 limit_level=None,
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
+        
+        
+        dirn = os.path.dirname(filename)
+        base = os.path.basename(filename)
+        aexp = base.split('_')[2].replace('.d','')
+        
+        self.file_particle_header = file_particle_header
+        self.file_particle_data = file_particle_data
+        self.file_star_data = file_star_data
+        self.only_particle_type = only_particle_type
+        self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
+        
+        if limit_level is None:
+            self.limit_level = na.inf
+        else:
+            mylog.info("Using maximum level: %i",limit_level)
+            self.limit_level = limit_level
+        
+        if discover_particles:
+            if file_particle_header is None:
+                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_header = loc
+                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
+            if file_particle_data is None:
+                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_data = loc
+                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
+            if file_star_data is None:
+                loc = filename.replace(base,'stars_%s.dat'%aexp)
+                if os.path.exists(loc):
+                    self.file_star_data = loc
+                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
+        
+        self.use_particles = any([self.file_particle_header,
+            self.file_star_data, self.file_particle_data])
         StaticOutput.__init__(self, filename, data_style)
-        self.storage_filename = storage_filename
         
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["InitialTime"]=self.current_time
+        self.storage_filename = storage_filename
+        
         
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
@@ -471,8 +671,10 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-
+        
+        
         z = self.current_redshift
+        
         h = self.hubble_constant
         boxcm_cal = self["boxh"]
         boxcm_uncal = boxcm_cal / h
@@ -505,29 +707,35 @@
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
         self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))      
-        self.conversion_factors["Density"] = \
-            self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = \
-            self.rho0*self.v0**2*(aexpn**-5.0)
+        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
         tr  = self.tr
+        
+        #factors that convert the native code units to CGS
+        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
+        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
+        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
+        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
+        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
         self.conversion_factors["Temperature"] = tr
-        self.conversion_factors["Metal_Density"] = 1
+        self.conversion_factors["Potential"] = 1.0
+        self.cosmological_simulation = True
         
         # Now our conversion factors
         for ax in 'xyz':
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
-        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
-        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
+
 
         #we were already in seconds, go back in to code units
-        self.current_time /= self.t0 
+        #self.current_time /= self.t0 
+        #self.current_time = b2t(self.current_time,n=1)
         
-        
+    
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
@@ -594,8 +802,14 @@
         self.parameters["Y_p"] = 0.245
         self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
         self.parameters["gamma"] = 5./3.
+        self.parameters["T_CMB0"] = 2.726  
+        self.parameters["T_min"] = 300.0 #T floor in K
+        self.parameters["boxh"] = header_vals['boxh']
+        self.parameters['ng'] = 128 # of 0 level cells in 1d 
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.parameters['CosmologyInitialRedshift']=self.current_redshift
         self.data_comment = header_vals['jname']
+        self.current_time_raw = header_vals['t']
         self.current_time = header_vals['t']
         self.omega_lambda = header_vals['Oml0']
         self.omega_matter = header_vals['Om0']
@@ -606,26 +820,62 @@
         #nchem is nhydrovars-8, so we typically have 2 extra chem species 
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
-        def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-            return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
-        integrand_arr = integrand(spacings)
-        self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
-        self.current_time *= self.hubble_time
-                
+        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
+        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
+        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        # integrand_arr = integrand(spacings)
+        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time *= self.hubble_time
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
+        
+        Om0 = self.parameters['Om0']
+        hubble = self.parameters['hubble']
+        dummy = 100.0 * hubble * na.sqrt(Om0)
+        ng = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh = header_vals['boxh'] 
+        
+        #distance unit #boxh is units of h^-1 Mpc
+        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
+        r0 = self.parameters["r0"]
+        #time, yrs
+        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
+        #velocity unit in km/s
+        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
+                na.sqrt(self.parameters["Om0"])
+        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
+        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
+        rho0 = self.parameters["rho0"]
+        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
+        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        #T_0 = unit of temperature in K (and in keV)
+        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
+        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        #S_0 = unit of entropy in keV * cm^2
+        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        
+        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3)
+        #     for non-cosmological run aM0 must be defined during initialization
+        #     [aM0] = [Msun]
+        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        
+        #CGS for everything in the next block
+    
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = na.log2(self.ncell) / 3
-        if int(est) != est: raise RuntimeError
+        est = int(na.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64') * int(2**est)
+        self.domain_dimensions = na.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
-        _skip_record(f) # iOctCh
+        #_skip_record(f) # iOctCh
+        root_cells = self.domain_dimensions.prod()
+        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
         _skip_record(f) # var
@@ -634,61 +884,71 @@
         self.child_grid_offset = f.tell()
 
         f.close()
+        
+        if self.file_particle_header is not None:
+            self._read_particle_header(self.file_particle_header)
+        
+    def _read_particle_header(self,fn):    
+        """ Reads control information, various parameters from the 
+            particle data set. Adapted from Daniel Ceverino's 
+            Read_Particles_Binary in analysis_ART.F   
+        """ 
+        header_struct = [
+            ('>i','pad'),
+            ('45s','header'), 
+            ('>f','aexpn'),
+            ('>f','aexp0'),
+            ('>f','amplt'),
+            ('>f','astep'),
+
+            ('>i','istep'),
+            ('>f','partw'),
+            ('>f','tintg'),
+
+            ('>f','Ekin'),
+            ('>f','Ekin1'),
+            ('>f','Ekin2'),
+            ('>f','au0'),
+            ('>f','aeu0'),
+
+
+            ('>i','Nrow'),
+            ('>i','Ngridc'),
+            ('>i','Nspecies'),
+            ('>i','Nseed'),
+
+            ('>f','Om0'),
+            ('>f','Oml0'),
+            ('>f','hubble'),
+            ('>f','Wp5'),
+            ('>f','Ocurv'),
+            ('>f','Omb0'),
+            ('>%ds'%(396),'extras'),
+            ('>f','unknown'),
+
+            ('>i','pad')]
+        fh = open(fn,'rb')
+        vals = _read_struct(fh,header_struct)
+        
+        for k,v in vals.iteritems():
+            self.parameters[k]=v
+        
+        seek_extras = 137
+        fh.seek(seek_extras)
+        n = self.parameters['Nspecies']
+        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
+        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        fh.close()
+        
+        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
+        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
+        
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-def _skip_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(s[0], 1)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
 
-def _read_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    ss = f.read(s)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    return ss
-
-def _read_record_size(f):
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def _count_art_octs(f, offset,
-                   MinLev, MaxLevelNow):
-    import gc
-    f.seek(offset)
-    nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
-        iOct = iHOLL[Lev] - 1
-        nLevel = iNOLL[Lev]
-        nLevCells = nLevel * nchild
-        ntot = ntot + nLevel
-
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel)
-        
-        #Skip the child vars data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel*nchild)
-        
-        #find nhydrovars
-        nhydrovars = 8+2
-    f.seek(offset)
-    return nhydrovars, iNOLL
-


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -0,0 +1,43 @@
+"""
+Definitions specific to ART
+
+Author: Christopher E. Moody <cemoody at ucsc.ed>
+Affiliation: UC Santa Cruz
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Christopher E. Moody.  All Rights
+  Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+art_particle_field_names = [
+'particle_age',
+'particle_index',
+'particle_mass',
+'particle_mass_initial',
+'particle_creation_time',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
+'particle_position_x',
+'particle_position_y',
+'particle_position_z',
+'particle_velocity_x',
+'particle_velocity_y',
+'particle_velocity_z',
+'particle_type']


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -37,32 +37,55 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
+
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-KnownARTFields = FieldInfoContainer()
-add_art_field = KnownARTFields.add_field
+import numpy as na
 
-translation_dict = {"Density":"density",
-                    "TotalEnergy":"TotalEnergy",
-                    "x-velocity":"velocity_x",
-                    "y-velocity":"velocity_y",
-                    "z-velocity":"velocity_z",
-                    "Pressure":"pressure",
-                    "Metallicity":"metallicity",
-                    "GasEnergy":"GasEnergy"
-                   }
+#these are just the hydro fields
+known_art_fields = [ 'Density','TotalEnergy',
+                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+                     'Pressure','Gamma','GasEnergy',
+                     'MetalDensitySNII', 'MetalDensitySNIa',
+                     'PotentialNew','PotentialOld']
 
-for f,v in translation_dict.items():
-    add_art_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)])
-    add_art_field(f, function=TranslationFunc(v), take_log=True)
+#Add the fields, then later we'll individually defined units and names
+for f in known_art_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+              validators = [ValidateDataField(f)])
 
-#def _convertMetallicity(data):
-#    return data.convert("Metal_Density1")
-#KnownARTFields["Metal_Density1"]._units = r"1"
-#KnownARTFields["Metal_Density1"]._projected_units = r"1"
-#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
+#Hydro Fields that are verified to be OK unit-wise:
+#Density
+#Temperature
+
+#Hydro Fields that need to be tested:
+#TotalEnergy
+#XYZMomentum
+#Pressure
+#Gamma
+#GasEnergy
+#MetalDensity SNII + SNia
+#Potentials
+
+#Hydro Derived fields that are untested:
+#metallicities
+#xyzvelocity
+
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
 
 
 def _convertDensity(data):
@@ -71,55 +94,143 @@
 KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["Density"]._convert_function=_convertDensity
 
-def _convertEnergy(data):
+def _convertTotalEnergy(data):
+    return data.convert("GasEnergy")
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
+KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
+
+def _convertXMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+
+def _convertYMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+
+def _convertZMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
+KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+
+def _convertPressure(data):
+    return data.convert("Pressure")
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
+KnownARTFields["Pressure"]._convert_function=_convertPressure
+
+def _convertGamma(data):
+    return 1.0
+KnownARTFields["Gamma"]._units = r""
+KnownARTFields["Gamma"]._projected_units = r""
+KnownARTFields["Gamma"]._convert_function=_convertGamma
+
+def _convertGasEnergy(data):
     return data.convert("GasEnergy")
 KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
+KnownARTFields["GasEnergy"]._projected_units = r""
+KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
 
-def _Temperature(field, data):
-    tr  = data["GasEnergy"] / data["Density"]
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
+def _convertMetalDensitySNII(data):
+    return data.convert("Density")
+KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+
+def _convertMetalDensitySNIa(data):
+    return data.convert("Density")
+KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+
+def _convertPotentialNew(data):
+    return data.convert("Potential")
+KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
+
+def _convertPotentialOld(data):
+    return data.convert("Potential")
+KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
+
+####### Derived fields
+
+def _temperature(field, data):
+    tr  = data["GasEnergy"].astype('float64') #~1
+    d = data["Density"].astype('float64')
+    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
+    tr /= d #
+    assert na.all(na.isfinite(tr)) #diagnosing some problem...
     return tr
-def _convertTemperature(data):
-    return data.convert("Temperature")
-add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
+def _converttemperature(data):
+    x  = data.pf.conversion_factors["Density"]
+    x /= data.pf.conversion_factors["GasEnergy"]
+    x *= data.pf.conversion_factors["Temperature"]
+    return x
+add_art_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_convertTemperature
+KnownARTFields["Temperature"]._projected_units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._convert_function=_converttemperature
 
-def _MetallicitySNII(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snII(field, data):
+    tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
+add_art_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNII"]._units = r""
+KnownARTFields["Metallicity_SNII"]._projected_units = r""
 
-def _MetallicitySNIa(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snIa(field, data):
+    tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
+add_art_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metallicity_SNIa"]._units = r""
+KnownARTFields["Metallicity_SNIa"]._projected_units = r""
 
-def _Metallicity(field, data):
-    #get the dimensionless mass fraction of the total metals
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr += data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _x_velocity(field, data):
+    tr  = data["XMomentumDensity"]/data["Density"]
     return tr
-    
-add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
+add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _Metal_Density(field,data):
-    return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
-def _convert_Metal_Density(data):
-    return data.convert("Metal_Density")
+def _y_velocity(field, data):
+    tr  = data["YMomentumDensity"]/data["Density"]
+    return tr
+add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
-KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
+def _z_velocity(field, data):
+    tr  = data["ZMomentumDensity"]/data["Density"]
+    return tr
+add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+
+
+def _metal_density(field, data):
+    tr  = data["MetalDensitySNIa"]
+    tr += data["MetalDensitySNII"]
+    return tr
+add_art_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+KnownARTFields["Metal_Density"]._units = r""
+KnownARTFields["Metal_Density"]._projected_units = r""
+
+
+#Particle fields
+
+#Derived particle fields
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -25,16 +25,19 @@
 
 import numpy as na
 import struct
-import pdb
+
+import os
+import os.path
 
 from yt.utilities.io_handler import \
     BaseIOHandler
-import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.amr_utils as au
 
+from yt.frontends.art.definitions import art_particle_field_names
+
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
 
@@ -47,7 +50,41 @@
         self.level_offsets = level_offsets
         self.level_data = {}
 
-    def preload_level(self, level):
+    def preload_level(self, level,field=None):
+        """ Reads in the full ART tree. From the ART source:
+            iOctLv :    >0   - level of an oct
+            iOctPr :         - parent of an oct
+            iOctCh :    >0   - pointer to an oct of children
+                        0   - there are no children; the cell is a leaf
+            iOctNb :    >0   - pointers to neighbouring cells 
+            iOctPs :         - coordinates of Oct centers
+            
+            iOctLL1:         - doubly linked list of octs
+            iOctLL2:         - doubly linked list of octs
+            
+            tl - current  time moment for level L
+            tlold - previous time moment for level L
+            dtl - dtime0/2**iTimeBin
+            dtlold -  previous time step for level L
+            iSO - sweep order
+            
+            hvar(1,*) - gas density 
+            hvar(2,*) - gas energy 
+            hvar(3,*) - x-momentum 
+            hvar(4,*) - y-momentum
+            hvar(5,*) - z-momentum
+            hvar(6,*) - pressure
+            hvar(7,*) - Gamma
+            hvar(8,*) - internal energy 
+
+            var (1,*) - total density 
+            var (2,*) - potential (new)
+            var (3,*) - potential (old)
+            
+            
+            
+        """
+        
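+        # Layout implied by the read below: each cell record carries
+        # [pad, idc, iOctCh, hvar(1..nhydro_vars), var(1..2), pad], i.e.
+        # nhydro_vars + 6 values; the slice arr[3:-1] keeps only hvar and var.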
         if level in self.level_data: return
         if level == 0:
             self.preload_root_level()
@@ -58,44 +95,88 @@
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
         arr = na.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        arr = arr[3:-1,:].astype("float64")
-        self.level_data[level] = arr
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
+        if field is None:
+            self.level_data[level] = arr.astype('float32')
+        else:
+            self.level_data[level] = arr.astype('float32')
+        del arr
 
     def preload_root_level(self):
         f = open(self.filename, 'rb')
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
-        #pdb.set_trace()
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float64")
+        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
         na.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float64")
+        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
         self.level_data[0] = arr
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
+
+    def _read_particle_field(self, grid, field):
+        #This will be cleaned up later
+        idx = na.array(grid.particle_indices)
+        if field == 'particle_index':
+            return na.array(idx)
+        if field == 'particle_type':
+            return grid.pf.particle_type[idx]
+        if field == 'particle_position_x':
+            return grid.pf.particle_position[idx][:,0]
+        if field == 'particle_position_y':
+            return grid.pf.particle_position[idx][:,1]
+        if field == 'particle_position_z':
+            return grid.pf.particle_position[idx][:,2]
+        if field == 'particle_mass':
+            return grid.pf.particle_mass[idx]
+        if field == 'particle_velocity_x':
+            return grid.pf.particle_velocity[idx][:,0]
+        if field == 'particle_velocity_y':
+            return grid.pf.particle_velocity[idx][:,1]
+        if field == 'particle_velocity_z':
+            return grid.pf.particle_velocity[idx][:,2]
+        
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
+        if field == 'particle_metallicity1':
+            return grid.pf.particle_metallicity1[idx]
+        if field == 'particle_metallicity2':
+            return grid.pf.particle_metallicity2[idx]
+        if field == 'particle_mass_initial':
+            return grid.pf.particle_mass_initial[idx]
+        
+        raise KeyError("Should have matched one of the particle fields, got %s" % field)
+
         
     def _read_data_set(self, grid, field):
+        if field in art_particle_field_names:
+            return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
         if grid.Level == 0: # We only have one root grid
             self.preload_level(0)
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
-            return tr.swapaxes(0, 2)
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
-        to_fill = grid.ActiveDimensions.prod()
+            return tr.swapaxes(0, 2).astype("float64")
+        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
+        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
             for g in grids:
-                self.preload_level(g.Level)
+                self.preload_level(g.Level,field=field_id)
                 #print "Filling %s from %s (%s)" % (grid, g, g.Level)
                 to_fill -= au.read_art_grid(field_id, 
                         grid.get_global_startindex(), grid.ActiveDimensions,
@@ -104,11 +185,294 @@
                 next_grids += g.Parent
             grids = next_grids
             l_delta += 1
-        return tr
+        return tr.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+def _count_art_octs(f, offset, 
+                   MinLev, MaxLevelNow):
+    level_oct_offsets= [0,]
+    level_child_offsets= [0,]
+    f.seek(offset)
+    nchild,ntot=8,0
+    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
+        level_oct_offsets.append(f.tell())
 
+        #Get the info for this level, skip the rest
+        #print "Reading oct tree data for level", Lev
+        #print 'offset:',f.tell()
+        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
+           '>iii', _read_record(f))
+        #print 'Level %i : '%Lev, iNOLL
+        #print 'offset after level record:',f.tell()
+        iOct = iHOLL[Lev] - 1
+        nLevel = iNOLL[Lev]
+        nLevCells = nLevel * nchild
+        ntot = ntot + nLevel
+
+        #Skip all the oct hierarchy data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel)
+
+        level_child_offsets.append(f.tell())
+        #Skip the child vars data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel*nchild)
+
+        #find nhydrovars
+        nhydrovars = 8+2
+    f.seek(offset)
+    return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
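+# The two offset lists returned above record the file position at the start of
+# each level's oct records and child-variable records; they are the seek
+# targets later used by _read_art_level_info and _read_child_mask_level.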
+
+def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+    pos = f.tell()
+    f.seek(level_oct_offsets[level])
+    #Get the info for this level, skip the rest
+    junk, nLevel, iOct = struct.unpack(
+       '>iii', _read_record(f))
+    
+    #fortran indices start at 1
+    
+    #Skip all the oct hierarchy data
+    le     = na.zeros((nLevel,3),dtype='int64')
+    fl     = na.ones((nLevel,6),dtype='int64')
+    iocts  = na.zeros(nLevel+1,dtype='int64')
+    idxa,idxb = 0,0
+    chunk = long(1e6) #about 111MB per chunk for a 15-column 64-bit array
+    left = nLevel
+    while left > 0 :
+        this_chunk = min(chunk,left)
+        idxb=idxa+this_chunk
+        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data=data.reshape(this_chunk,15)
+        left-=this_chunk
+        le[idxa:idxb,:] = data[:,1:4]
+        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        #the record ends with ..., ioct, LL2, pad, so ioct is the third entry from the end
+        iocts[idxa:idxb] = data[:,-3] 
+        idxa=idxa+this_chunk
+    del data
+    
+    #ioct always represents the index of the next variable
+    #not the current, so shift forward one index
+    #the last index isn't used
+    ioctso = iocts.copy()
+    iocts[1:]=iocts[:-1] #shift
+    iocts = iocts[:nLevel] #chop off the last index
+    iocts[0]=iOct #starting value
+
+    #now correct iocts for fortran indices start @ 1
+    iocts = iocts-1
+
+    assert na.unique(iocts).shape[0] == nLevel
+    
+    #ioct tries to access arrays much larger than le & fl
+    #just make sure they appear in the right order, skipping
+    #the empty space in between
+    idx = na.argsort(iocts)
+    
+    #now rearrange le & fl in order of the ioct
+    le = le[idx]
+    fl = fl[idx]
+
+    #left edges are expressed as if they were on 
+    #level 15, so no matter what level max(le)=2**15 
+    #correct to the yt convention
+    #le = le/2**(root_level-1-level)-1
+
+    #try with (root_level - 2 - level) rather than (root_level - 1 - level)
+    le = le/2**(root_level-2-level)-1
+
+    #now read the hvars and vars arrays
+    #we are looking for iOctCh
+    #we record if iOctCh is >0, in which it is subdivided
+    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    
+    
+    
+    f.seek(pos)
+    return le,fl,nLevel
+
+
+def read_particles(file,nstars,Nrow):
+    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
+    real_size = 4 # for file_particle_data; not always true?
+    np = nstars # number of particles including stars, should come from lspecies[-1]
+    np_per_page = Nrow**2 # defined in ART a_setup.h
+    num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
+
+    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    return data[:,0:3],data[:,3:]
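+# Illustrative usage sketch; `particle_data_file` is a placeholder path, and
+# nstars/Nrow would come from the particle header read in this changeset
+# (e.g. nstars = lspecies[-1]):
+#   pos, vel = read_particles(particle_data_file, lspecies[-1], Nrow)
+# pos and vel are (N, 3) float32 arrays assembled from pages of Nrow**2 particles.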
+
+def read_stars(file,nstars,Nrow):
+    fh = open(file,'rb')
+    tdum,adum   = _read_frecord(fh,'>d')
+    nstars      = _read_frecord(fh,'>i')
+    ws_old, ws_oldi = _read_frecord(fh,'>d')
+    mass    = _read_frecord(fh,'>f') 
+    imass   = _read_frecord(fh,'>f') 
+    tbirth  = _read_frecord(fh,'>f') 
+    metallicity1 = metallicity2 = None #guard for files without metallicity records
+    if fh.tell() < os.path.getsize(file):
+        metallicity1 = _read_frecord(fh,'>f') 
+    if fh.tell() < os.path.getsize(file):
+        metallicity2 = _read_frecord(fh,'>f')     
+    assert fh.tell() == os.path.getsize(file)
+    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+
+def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+    f.seek(level_child_offsets[level])
+    nvals = nLevel * (nhydro_vars + 6) # per-cell record: pad, idc, iOctCh, hydro vars, 2 vars, pad
+    ioctch = na.zeros(nLevel,dtype='uint8')
+    idc = na.zeros(nLevel,dtype='int32')
+    
+    chunk = long(1e6)
+    left = nLevel
+    width = nhydro_vars+6
+    a,b=0,0
+    while left > 0:
+        chunk = min(chunk,left)
+        b += chunk
+        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = arr.reshape((width, chunk), order="F")
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        idc[a:b]    = arr[1,:]-1 #fix fortran indexing
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        #zero in the mask means there is refinement available
+        a=b
+        left -= chunk
+    assert left==0
+    return idc,ioctch
+    
+nchem=8+2
+dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+                ",>%sf4"%(2)+",>i4")
+def _read_art_child(f, level_child_offsets,level,nLevel,field):
+    pos=f.tell()
+    f.seek(level_child_offsets[level])
+    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = arr.reshape((nLevel,16), order="F")
+    arr = arr[3:-1,:].astype("float64")
+    f.seek(pos)
+    return arr[field,:]
+
+def _skip_record(f):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(s[0], 1)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+
+def _read_frecord(f,fmt):
+    s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    count = s1/na.dtype(fmt).itemsize
+    ss = na.fromfile(f,fmt,count=count)
+    s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    assert s1==s2
+    return ss
+
+
+def _read_record(f,fmt=None):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    ss = f.read(s)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    if fmt is not None:
+        return struct.unpack(fmt, ss)
+    return ss
+
+def _read_record_size(f):
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def _read_struct(f,structure,verbose=False):
+    vals = {}
+    for format,name in structure:
+        size = struct.calcsize(format)
+        (val,) = struct.unpack(format,f.read(size))
+        vals[name] = val
+        if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
+    return vals
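+# Sketch of the expected input: _read_struct walks a list of (struct-format,
+# name) pairs, e.g.
+#   vals = _read_struct(fh, [('>i', 'pad'), ('>f', 'aexpn')])
+# and returns {'pad': ..., 'aexpn': ...}; the particle header_struct defined in
+# this changeset is read exactly this way.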
+
+
+
+#All of these functions are to convert from hydro time var to 
+#proper time
+sqrt = na.sqrt
+sign = na.sign
+
+def find_root(f,a,b,tol=1e-6):
+    c = (a+b)/2.0
+    last = -na.inf
+    assert(sign(f(a)) != sign(f(b)))  
+    while na.abs(f(c)-last) > tol:
+        last=f(c)
+        if sign(last)==sign(f(b)):
+            b=c
+        else:
+            a=c
+        c = (a+b)/2.0
+    return c
+
+def quad(fintegrand,xmin,xmax,n=1e4):
+    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    integrand_arr = fintegrand(spacings)
+    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    return val
+
+def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
+    def f_a2b(x):
+        val = 0.5*sqrt(Om0) / x**3.0
+        val /= sqrt(Om0/x**3.0 +Oml0 +(1.0 - Om0-Oml0)/x**2.0)
+        return val
+    #val, err = si.quad(f_a2b,1,at)
+    val = quad(f_a2b,1,at)
+    return val
+
+def b2a(bt,**kwargs):
+    #converts code time into expansion factor 
+    #if Om0 == 1 and OmL == 0 then b2a is (1 / (1-td))**2
+    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
+    f_b2a = lambda at: a2b(at,**kwargs)-bt
+    return find_root(f_b2a,1e-4,1.1)
+    #return so.brenth(f_b2a,1e-4,1.1)
+    #return brent.brent(f_b2a)
+
+def a2t(at,Om0=0.27,Oml0=0.73,h=0.700):
+    integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
+    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
+    current_time = quad(integrand,1e-4,at)
+    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #integrand_arr = integrand(spacings)
+    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    current_time *= 9.779/h
+    return current_time
+
+def b2t(tb,n = 1e2,logger=None,**kwargs):
+    tb = na.array(tb)
+    if tb.shape == ():
+        return a2t(b2a(float(tb)))
+    if len(tb) < n: n= len(tb)
+    age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
+    age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
+    tbs  = -1.*na.logspace(na.log10(-tb.min()),
+                          na.log10(-tb.max()),n)
+    ages = []
+    for i,tbi in enumerate(tbs):
+        ages.append(a2t(b2a(tbi)))
+        if logger: logger(i)
+    ages = na.array(ages)
+    fb2t = na.interp(tb,tbs,ages)
+    #fb2t = interp1d(tbs,ages)
+    return fb2t
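+# Sketch of the intended use, matching the data_structures change in this
+# changeset: a2t scales its integral by 9.779/h, so b2t yields an age in Gyr
+# and a proper time in seconds is b2t(t_code) * 1.0e9 * 365 * 3600 * 24.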
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os
-import sys
-import os.path
+import os, sys, os.path
 
-
-def configuration(parent_package='', top_path=None):
+def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -121,8 +121,37 @@
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
         self._levels = self._fhandle.keys()[1:]
         AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
         self._fhandle.close()
 
+    def _read_particles(self):
+        self.particle_filename = self.hierarchy_filename[:-4] + 'sink'
+        if not os.path.exists(self.particle_filename): return
+        with open(self.particle_filename, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip().split(' ')[0])
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=na.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = na.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = na.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
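+        # Layout assumed for the *.sink particle file: line 0 carries the
+        # particle count and each later line is whitespace-separated with the
+        # position in columns 1-3 (the full column map lives in chombo/io.py).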
+
     def _initialize_data_storage(self):
         pass
 


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -33,6 +33,7 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+import numpy as na
 
 KnownChomboFields = FieldInfoContainer()
 add_chombo_field = KnownChomboFields.add_field
@@ -76,12 +77,12 @@
                   units=r"",display_name=r"B_z")
 KnownChomboFields["Z-magnfield"]._projected_units=r""
 
-add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("energy-density", function=NullFunc, take_log=True,
                  validators = [ValidateDataField("energy-density")],
                  units=r"\rm{erg}/\rm{cm}^3")
 KnownChomboFields["energy-density"]._projected_units =r""
 
-add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+add_chombo_field("radiation-energy-density", function=NullFunc, take_log=True,
                  validators = [ValidateDataField("radiation-energy-density")],
                  units=r"\rm{erg}/\rm{cm}^3")
 KnownChomboFields["radiation-energy-density"]._projected_units =r""
@@ -125,3 +126,36 @@
     return data["Z-momentum"]/data["density"]
 add_field("z-velocity",function=_zVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return na.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -25,6 +25,7 @@
 """
 import h5py
 import re
+import numpy as na
 
 from yt.utilities.io_handler import \
            BaseIOHandler
@@ -70,3 +71,41 @@
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid,field)[sl]
 
+    def _read_particles(self, grid, field):
+        """
+        parses the Orion Star Particle text files
+             
+        """
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z")
+                    if ( (grid.LeftEdge < coord).all() and
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return na.array(particles)


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -38,6 +38,9 @@
       EnzoStaticOutput, \
       EnzoStaticOutputInMemory
 
+from .simulation_handling import \
+    EnzoSimulation
+
 from .fields import \
       EnzoFieldInfo, \
       Enzo2DFieldInfo, \


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -51,7 +51,7 @@
 _speciesList = ["HI", "HII", "Electron",
                 "HeI", "HeII", "HeIII",
                 "H2I", "H2II", "HM",
-                "DI", "DII", "HDI", "Metal", "PreShock"]
+                "DI", "DII", "HDI", "Metal", "MetalSNIa", "PreShock"]
 _speciesMass = {"HI": 1.0, "HII": 1.0, "Electron": 1.0,
                 "HeI": 4.0, "HeII": 4.0, "HeIII": 4.0,
                 "H2I": 2.0, "H2II": 2.0, "HM": 1.0,
@@ -224,7 +224,10 @@
 _default_fields = ["Density","Temperature",
                    "x-velocity","y-velocity","z-velocity",
                    "x-momentum","y-momentum","z-momentum",
-                   "Bx", "By", "Bz", "Dust_Temperature"]
+                   "Bx", "By", "Bz", "Dust_Temperature",
+                   "HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss", "PhotoGamma",
+                   "RadAccel1", "RadAccel2", "RadAccel3", "SN_Colour",
+                   "Ray_Segments"]
 # else:
 #     _default_fields = ["Density","Temperature","Gas_Energy","Total_Energy",
 #                        "x-velocity","y-velocity","z-velocity"]
@@ -247,11 +250,35 @@
     f._units=r"\mathrm{Gau\ss}"
     f.take_log=False
 
+def _convertkph(data):
+    return data.convert("Time")
+for field in ["HI_kph", "HeI_kph", "HeII_kph", "H2I_kdiss"]:
+    f = KnownEnzoFields[field]
+    f._convert_function = _convertkph
+    f._units=r"\rm{s}^{-1}"
+    f.take_log=True
+
+def _convertRadiationAccel(data):
+    return data.convert("cm") / data.convert("Time")
+for dim in range(1,4):
+    f = KnownEnzoFields["RadAccel%d" % dim]
+    f._convert_function = _convertRadiationAccel
+    f._units=r"\rm{cm}\ \rm{s}^{-2}"
+    f.take_log=False
+def _RadiationAccelerationMagnitude(field, data):
+    return ( data["RadAccel1"]**2 + data["RadAccel2"]**2 +
+             data["RadAccel3"]**2 )**(1.0/2.0)
+add_field("RadiationAcceleration", 
+          function=_RadiationAccelerationMagnitude,
+          validators=ValidateDataField(["RadAccel1", "RadAccel2", "RadAccel3"]),
+          display_name="Radiation\ Acceleration", units=r"\rm{cm} \rm{s}^{-2}")
+
 # Now we override
 
 def _convertDensity(data):
     return data.convert("Density")
-for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ]:
+for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
+        ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
     KnownEnzoFields[field]._projected_units = r"\rm{g}/\rm{cm}^2"
     KnownEnzoFields[field]._convert_function=_convertDensity


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -201,6 +201,8 @@
     _data_style = "enzo_packed_3d_gz"
 
     def modify(self, field):
+        if len(field.shape) < 3:
+            return field
         tr = field[3:-3,3:-3,3:-3].swapaxes(0,2)
         return tr.copy() # To ensure contiguous
 


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/enzo/simulation_handling.py
--- /dev/null
+++ b/yt/frontends/enzo/simulation_handling.py
@@ -0,0 +1,692 @@
+"""
+EnzoSimulation class and member functions.
+
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: Michigan State University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.funcs import *
+
+import numpy as na
+import glob
+import os
+
+from yt.data_objects.time_series import \
+    TimeSeriesData
+from yt.utilities.cosmology import \
+    Cosmology, \
+    EnzoCosmology
+from yt.utilities.exceptions import \
+    YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+
+from yt.convenience import \
+    load
+
+class EnzoSimulation(TimeSeriesData):
+    r"""Super class for performing the same operation over all data outputs in 
+    a simulation from one redshift to another.
+    """
+    def __init__(self, parameter_filename):
+        r"""Initialize an Enzo Simulation object.
+
+        Upon creation, the parameter file is parsed and the time and redshift
+        are calculated and stored in all_outputs.  A time units dictionary is
+        instantiated to allow for time outputs to be requested with physical
+        time units.  The get_time_series can be used to generate a
+        TimeSeriesData object.
+
+        parameter_filename : str
+            The simulation parameter file.
+        
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> es = ES.EnzoSimulation("my_simulation.par")
+        >>> print es.all_outputs
+
+        """
+        self.parameter_filename = parameter_filename
+        self.parameters = {}
+
+        # Set some parameter defaults.
+        self._set_parameter_defaults()
+        # Read the simulation parameter file.
+        self._parse_parameter_file()
+        # Set up time units dictionary.
+        self._set_time_units()
+
+        # Figure out the starting and stopping times and redshift.
+        self._calculate_simulation_bounds()
+        self.print_key_parameters()
+        
+        # Get all possible datasets.
+        self._get_all_outputs()
+
+    def get_time_series(self, time_data=True, redshift_data=True,
+                        initial_time=None, final_time=None, time_units='1',
+                        initial_redshift=None, final_redshift=None,
+                        initial_cycle=None, final_cycle=None,
+                        times=None, redshifts=None, tolerance=None,
+                        find_outputs=False, parallel=True):
+
+        """
+        Instantiate a TimeSeriesData object for a set of outputs.
+
+        If no additional keywords given, a TimeSeriesData object will be
+        created with all potential datasets created by the simulation.
+
+        Outputs can be gathered by specifying a time or redshift range
+        (or combination of time and redshift), with a specific list of
+        times or redshifts, a range of cycle numbers (for cycle based
+        output), or by simply searching all subdirectories within the
+        simulation directory.
+
+        time_data : bool
+            Whether or not to include time outputs when gathering
+            datasets for time series.
+            Default: True.
+        redshift_data : bool
+            Whether or not to include redshift outputs when gathering
+            datasets for time series.
+            Default: True.
+        initial_time : float
+            The earliest time for outputs to be included.  If None,
+            the initial time of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_time : float
+            The latest time for outputs to be included.  If None,
+            the final time of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        times : array_like
+            A list of times for which outputs will be found.
+            Default: None.
+        time_units : str
+            The time units used for requesting outputs by time.
+            Default: '1' (code units).
+        initial_redshift : float
+            The earliest redshift for outputs to be included.  If None,
+            the initial redshift of the simulation is used.  This can be
+            used in combination with either final_time or
+            final_redshift.
+            Default: None.
+        final_redshift : float
+            The latest redshift for outputs to be included.  If None,
+            the final redshift of the simulation is used.  This can be
+            used in combination with either initial_time or
+            initial_redshift.
+            Default: None.
+        redshifts : array_like
+            A list of redshifts for which outputs will be found.
+            Default: None.
+        initial_cycle : float
+            The earliest cycle for outputs to be included.  If None,
+            the initial cycle of the simulation is used.  This can
+            only be used with final_cycle.
+            Default: None.
+        final_cycle : float
+            The latest cycle for outputs to be included.  If None,
+            the final cycle of the simulation is used.  This can
+            only be used in combination with initial_cycle.
+            Default: None.
+        tolerance : float
+            Used in combination with "times" or "redshifts" keywords,
+            this is the tolerance within which outputs are accepted
+            given the requested times or redshifts.  If None, the
+            nearest output is always taken.
+            Default: None.
+        find_outputs : bool
+            If True, subdirectories within the GlobalDir directory are
+            searched one by one for datasets.  Time and redshift
+            information are gathered by temporarily instantiating each
+            dataset.  This can be used when simulation data was created
+            in a non-standard way, making it difficult to guess the
+            corresponding time and redshift information.
+            Default: False.
+        parallel : bool/int
+            If True, the generated TimeSeriesData will divide the work
+            such that a single processor works on each dataset.  If an
+            integer is supplied, the work will be divided into that
+            number of jobs.
+            Default: True.
+
+        Examples
+        --------
+        >>> es.get_time_series(initial_redshift=10, final_time=13.7,
+                               time_units='Gyr', redshift_data=False)
+
+        >>> es.get_time_series(redshifts=[3, 2, 1, 0])
+
+        >>> es.get_time_series(final_cycle=100000)
+
+        >>> es.get_time_series(find_outputs=True)
+
+        >>> # after calling get_time_series
+        >>> for pf in es.piter():
+        >>>     pc = PlotCollection(pf, 'c')
+        >>>     pc.add_projection('Density', 0)
+        >>>     pc.save()
+
+        """
+
+        if (initial_redshift is not None or \
+            final_redshift is not None) and \
+            not self.cosmological_simulation:
+            mylog.error('An initial or final redshift has been given for a noncosmological simulation.')
+            return
+
+        if find_outputs:
+            my_outputs = self._find_outputs()
+
+        else:
+            if time_data and redshift_data:
+                my_all_outputs = self.all_outputs
+            elif time_data:
+                my_all_outputs = self.all_time_outputs
+            elif redshift_data:
+                my_all_outputs = self.all_redshift_outputs
+            else:
+                mylog.error('Both time_data and redshift_data are False.')
+                return
+
+            if times is not None:
+                my_outputs = self._get_outputs_by_time(times, tolerance=tolerance,
+                                                       outputs=my_all_outputs,
+                                                       time_units=time_units)
+
+            elif redshifts is not None:
+                my_outputs = self._get_outputs_by_redshift(redshifts, tolerance=tolerance,
+                                                           outputs=my_all_outputs)
+
+            elif initial_cycle is not None or final_cycle is not None:
+                if initial_cycle is None:
+                    initial_cycle = 0
+                else:
+                    initial_cycle = max(initial_cycle, 0)
+                if final_cycle is None:
+                    final_cycle = self.parameters['StopCycle']
+                else:
+                    final_cycle = min(final_cycle, self.parameters['StopCycle'])
+                my_outputs = my_all_outputs[int(ceil(float(initial_cycle) /
+                                                     self.parameters['CycleSkipDataDump'])):
+                                            (final_cycle /  self.parameters['CycleSkipDataDump'])+1]
+
+            else:
+                if initial_time is not None:
+                    my_initial_time = initial_time / self.time_units[time_units]
+                elif initial_redshift is not None:
+                    my_initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(initial_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_initial_time = self.initial_time
+
+                if final_time is not None:
+                    my_final_time = final_time / self.time_units[time_units]
+                elif final_redshift is not None:
+                    my_final_time = self.enzo_cosmology.ComputeTimeFromRedshift(final_redshift) / \
+                        self.enzo_cosmology.TimeUnits
+                else:
+                    my_final_time = self.final_time
+                    
+                my_times = na.array(map(lambda a:a['time'], my_all_outputs))
+                my_indices = na.digitize([my_initial_time, my_final_time], my_times)
+                if my_initial_time == my_times[my_indices[0] - 1]: my_indices[0] -= 1
+                my_outputs = my_all_outputs[my_indices[0]:my_indices[1]]
+
+        TimeSeriesData.__init__(self, outputs=[output['filename'] for output in my_outputs],
+                                parallel=parallel)
+        mylog.info("%d outputs loaded into time series." % len(my_outputs))
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        """
+        Print out some key parameters for the simulation.
+        """
+        for a in ["domain_dimensions", "domain_left_edge",
+                  "domain_right_edge", "initial_time", "final_time",
+                  "stop_cycle", "cosmological_simulation"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+        if hasattr(self, "cosmological_simulation") and \
+           getattr(self, "cosmological_simulation"):
+            for a in ["omega_lambda", "omega_matter",
+                      "hubble_constant", "initial_redshift",
+                      "final_redshift"]:
+                if not hasattr(self, a):
+                    mylog.error("Missing %s in parameter file definition!", a)
+                    continue
+                v = getattr(self, a)
+                mylog.info("Parameters: %-25s = %s", a, v)
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+
+        self.conversion_factors = {}
+        redshift_outputs = []
+
+        # Let's read the file
+        lines = open(self.parameter_filename).readlines()
+        for line in (l.strip() for l in lines):
+            if '#' in line: line = line[0:line.find('#')]
+            if '//' in line: line = line[0:line.find('//')]
+            if len(line) < 2: continue
+            param, vals = (i.strip() for i in line.split("="))
+            # First we try to decipher what type of value it is.
+            vals = vals.split()
+            # Special case approaching.
+            if "(do" in vals: vals = vals[:1]
+            if len(vals) == 0:
+                pcast = str # Assume NULL output
+            else:
+                v = vals[0]
+                # Figure out if it's castable to floating point:
+                try:
+                    float(v)
+                except ValueError:
+                    pcast = str
+                else:
+                    if any("." in v or "e+" in v or "e-" in v for v in vals):
+                        pcast = float
+                    elif v == "inf":
+                        pcast = str
+                    else:
+                        pcast = int
+            # Now we figure out what to do with it.
+            if param.endswith("Units") and not param.startswith("Temperature"):
+                dataType = param[:-5]
+                # This one better be a float.
+                self.conversion_factors[dataType] = float(vals[0])
+            if param.startswith("CosmologyOutputRedshift["):
+                index = param[param.find("[")+1:param.find("]")]
+                redshift_outputs.append({'index':int(index), 'redshift':float(vals[0])})
+            elif len(vals) == 0:
+                vals = ""
+            elif len(vals) == 1:
+                vals = pcast(vals[0])
+            else:
+                vals = na.array([pcast(i) for i in vals if i != "-99999"])
+            self.parameters[param] = vals
+        self.refine_by = self.parameters["RefineBy"]
+        self.dimensionality = self.parameters["TopGridRank"]
+        if self.dimensionality > 1:
+            self.domain_dimensions = self.parameters["TopGridDimensions"]
+            if len(self.domain_dimensions) < 3:
+                tmp = self.domain_dimensions.tolist()
+                tmp.append(1)
+                self.domain_dimensions = na.array(tmp)
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64").copy()
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64").copy()
+        else:
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"],
+                                             "float64")
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"],
+                                             "float64")
+            self.domain_dimensions = na.array([self.parameters["TopGridDimensions"],1,1])
+
+        if self.parameters["ComovingCoordinates"]:
+            cosmo_attr = {'omega_lambda': 'CosmologyOmegaLambdaNow',
+                          'omega_matter': 'CosmologyOmegaMatterNow',
+                          'hubble_constant': 'CosmologyHubbleConstantNow',
+                          'initial_redshift': 'CosmologyInitialRedshift',
+                          'final_redshift': 'CosmologyFinalRedshift'}
+            self.cosmological_simulation = 1
+            for a, v in cosmo_attr.items():
+                if not v in self.parameters:
+                    raise MissingParameter(self.parameter_filename, v)
+                setattr(self, a, self.parameters[v])
+        else:
+            self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+        # make list of redshift outputs
+        self.all_redshift_outputs = []
+        if not self.cosmological_simulation: return
+        for output in redshift_outputs:
+            output['filename'] = os.path.join(self.parameters['GlobalDir'],
+                                              "%s%04d" % (self.parameters['RedshiftDumpDir'],
+                                                          output['index']),
+                                              "%s%04d" % (self.parameters['RedshiftDumpName'],
+                                                          output['index']))
+            del output['index']
+        self.all_redshift_outputs = redshift_outputs
+
+    def _calculate_redshift_dump_times(self):
+        "Calculates time from redshift of redshift outputs."
+
+        if not self.cosmological_simulation: return
+        for output in self.all_redshift_outputs:
+            output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
+                self.enzo_cosmology.TimeUnits
+
+    def _calculate_time_outputs(self):
+        "Calculate time outputs and their redshifts if cosmological."
+
+        if self.final_time is None or \
+            not 'dtDataDump' in self.parameters or \
+            self.parameters['dtDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        current_time = self.initial_time
+        while current_time <= self.final_time + self.parameters['dtDataDump']:
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'time': current_time}
+            output['time'] = min(output['time'], self.final_time)
+            if self.cosmological_simulation:
+                output['redshift'] = self.enzo_cosmology.ComputeRedshiftFromTime(
+                    current_time * self.enzo_cosmology.TimeUnits)
+
+            self.all_time_outputs.append(output)
+            if na.abs(self.final_time - current_time) / self.final_time < 1e-4: break
+            current_time += self.parameters['dtDataDump']
+            index += 1
+
+    def _calculate_cycle_outputs(self):
+        "Calculate cycle outputs."
+
+        mylog.warn('Calculating cycle outputs.  Dataset times will be unavailable.')
+
+        if self.stop_cycle is None or \
+            not 'CycleSkipDataDump' in self.parameters or \
+            self.parameters['CycleSkipDataDump'] <= 0.0: return []
+
+        self.all_time_outputs = []
+        index = 0
+        for cycle in range(0, self.stop_cycle+1, self.parameters['CycleSkipDataDump']):
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%04d" % (self.parameters['DataDumpDir'], index),
+                                    "%s%04d" % (self.parameters['DataDumpName'], index))
+
+            output = {'index': index, 'filename': filename, 'cycle': cycle}
+            self.all_time_outputs.append(output)
+            index += 1
+
+    def _get_all_outputs(self):
+        "Get all potential datasets and combine into a time-sorted list."
+
+        if self.parameters['dtDataDump'] > 0 and \
+            self.parameters['CycleSkipDataDump'] > 0:
+            raise AmbiguousOutputs(self.parameter_filename)
+
+        # Get all time or cycle outputs.
+        if self.parameters['CycleSkipDataDump'] > 0:
+            self._calculate_cycle_outputs()
+        else:
+            self._calculate_time_outputs()
+
+        # Calculate times for redshift outputs.
+        self._calculate_redshift_dump_times()
+
+        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
+        if self.parameters['CycleSkipDataDump'] <= 0:
+            self.all_outputs.sort(key=lambda obj:obj['time'])
+
+        mylog.info("Total datasets: %d." % len(self.all_outputs))
+
+    def _calculate_simulation_bounds(self):
+        """
+        Figure out the starting and stopping time and redshift for the simulation.
+        """
+
+        if 'StopCycle' in self.parameters:
+            self.stop_cycle = self.parameters['StopCycle']
+
+        # Convert initial/final redshifts to times.
+        if self.cosmological_simulation:
+            # Instantiate EnzoCosmology object for units and time conversions.
+            self.enzo_cosmology = EnzoCosmology(HubbleConstantNow=
+                                                (100.0 * self.parameters['CosmologyHubbleConstantNow']),
+                                                OmegaMatterNow=self.parameters['CosmologyOmegaMatterNow'],
+                                                OmegaLambdaNow=self.parameters['CosmologyOmegaLambdaNow'],
+                                                InitialRedshift=self.parameters['CosmologyInitialRedshift'])
+            self.initial_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.initial_redshift) / \
+                self.enzo_cosmology.TimeUnits
+            self.final_time = self.enzo_cosmology.ComputeTimeFromRedshift(self.final_redshift) / \
+                self.enzo_cosmology.TimeUnits
+
+        # If not a cosmology simulation, figure out the stopping criteria.
+        else:
+            if 'InitialTime' in self.parameters:
+                self.initial_time = self.parameters['InitialTime']
+            else:
+                self.initial_time = 0.
+
+            if 'StopTime' in self.parameters:
+                self.final_time = self.parameters['StopTime']
+            else:
+                self.final_time = None
+            if not ('StopTime' in self.parameters or
+                    'StopCycle' in self.parameters):
+                raise NoStoppingCondition(self.parameter_filename)
+            if self.final_time is None:
+                mylog.warn('Simulation %s has no stop time set, stopping condition will be based only on cycles.' %
+                           self.parameter_filename)
+
+    def _set_parameter_defaults(self):
+        "Set some default parameters to avoid problems if they are not in the parameter file."
+
+        self.parameters['GlobalDir'] = "."
+        self.parameters['DataDumpName'] = "data"
+        self.parameters['DataDumpDir'] = "DD"
+        self.parameters['RedshiftDumpName'] = "RedshiftOutput"
+        self.parameters['RedshiftDumpDir'] = "RD"
+        self.parameters['ComovingCoordinates'] = 0
+        self.parameters['TopGridRank'] = 3
+        self.parameters['DomainLeftEdge'] = na.zeros(self.parameters['TopGridRank'])
+        self.parameters['DomainRightEdge'] = na.ones(self.parameters['TopGridRank'])
+        self.parameters['RefineBy'] = 2 # technically not the enzo default
+        self.parameters['StopCycle'] = 100000
+        self.parameters['dtDataDump'] = 0.
+        self.parameters['CycleSkipDataDump'] = 0.
+        self.parameters['TimeUnits'] = 1.
+
+    def _set_time_units(self):
+        """
+        Set up a dictionary of time units conversions.
+        """
+
+        self.time_units = {}
+        if self.cosmological_simulation:
+            self.parameters['TimeUnits'] = 2.52e17 / na.sqrt(self.omega_matter) \
+                / self.hubble_constant / (1 + self.initial_redshift)**1.5
+        self.time_units['1'] = 1.
+        self.time_units['seconds'] = self.parameters['TimeUnits']
+        self.time_units['years'] = self.time_units['seconds'] / (365*3600*24.0)
+        self.time_units['days']  = self.time_units['seconds'] / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+
+    def _find_outputs(self):
+        """
+        Search for directories matching the data dump keywords.
+        If found, get dataset times by opening the pf.
+        """
+
+        # look for time outputs.
+        potential_outputs = glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['DataDumpDir'])) + \
+                            glob.glob(os.path.join(self.parameters['GlobalDir'],
+                                                   "%s*" % self.parameters['RedshiftDumpDir']))
+        time_outputs = []
+        mylog.info("Checking %d potential time outputs." % 
+                   len(potential_outputs))
+
+        for output in potential_outputs:
+            if self.parameters['DataDumpDir'] in output:
+                dir_key = self.parameters['DataDumpDir']
+                output_key = self.parameters['DataDumpName']
+            else:
+                dir_key = self.parameters['RedshiftDumpDir']
+                output_key = self.parameters['RedshiftDumpName']
+            index = output[output.find(dir_key) + len(dir_key):]
+            filename = os.path.join(self.parameters['GlobalDir'],
+                                    "%s%s" % (dir_key, index),
+                                    "%s%s" % (output_key, index))
+            if os.path.exists(filename):
+                pf = load(filename)
+                if pf is not None:
+                    time_outputs.append({'filename': filename, 'time': pf.current_time})
+                    if pf.cosmological_simulation:
+                        time_outputs[-1]['redshift'] = pf.current_redshift
+                del pf
+        mylog.info("Located %d time outputs." % len(time_outputs))
+        time_outputs.sort(key=lambda obj: obj['time'])
+        return time_outputs
+
+    def _get_outputs_by_key(self, key, values, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given values.
+        
+        Parameters
+        ----------
+        key: str
+            The key by which to retrieve outputs, usually 'time' or
+            'redshift'.
+        values: array_like
+            A list of values, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es.get_outputs_by_key('redshift', [0, 1, 2], tolerance=0.1)
+        
+        """
+
+        values = ensure_list(values)
+        if outputs is None:
+            outputs = self.all_outputs
+        my_outputs = []
+        for value in values:
+            outputs.sort(key=lambda obj:na.fabs(value - obj[key]))
+            if (tolerance is None or na.abs(value - outputs[0][key]) <= tolerance) \
+                    and outputs[0] not in my_outputs:
+                my_outputs.append(outputs[0])
+            else:
+                mylog.error("No dataset added for %s = %f." % (key, value))
+
+        outputs.sort(key=lambda obj: obj['time'])
+        return my_outputs
+
+    def _get_outputs_by_redshift(self, redshifts, tolerance=None, outputs=None):
+        r"""Get datasets at or near to given redshifts.
+        
+        Parameters
+        ----------
+        redshifts: array_like
+            A list of redshifts, given as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the value is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        
+        Examples
+        --------
+        >>> datasets = es.get_outputs_by_redshift([0, 1, 2], tolerance=0.1)
+        
+        """
+
+        return self._get_outputs_by_key('redshift', redshifts, tolerance=tolerance,
+                                     outputs=outputs)
+
+    def _get_outputs_by_time(self, times, tolerance=None, outputs=None,
+                             time_units='1'):
+        r"""Get datasets at or near to given times.
+        
+        Parameters
+        ----------
+        times: array_like
+            A list of times, given in code units as floats.
+        tolerance : float
+            If not None, do not return a dataset unless the time is
+            within the tolerance value.  If None, simply return the
+            nearest dataset.
+            Default: None.
+        outputs : list
+            The list of outputs from which to choose.  If None,
+            self.all_outputs is used.
+            Default: None.
+        time_units : str
+            The units of the list of times.
+            Default: '1' (code units).
+        
+        Examples
+        --------
+        >>> datasets = es.get_outputs_by_time([600, 500, 400], tolerance=10.)
+        
+        """
+
+        times = na.array(times) / self.time_units[time_units]
+        return self._get_outputs_by_key('time', times, tolerance=tolerance,
+                                        outputs=outputs)
+
+class MissingParameter(YTException):
+    def __init__(self, pf, parameter):
+        YTException.__init__(self, pf)
+        self.parameter = parameter
+
+    def __str__(self):
+        return "Parameter file %s is missing %s parameter." % \
+            (self.pf, self.parameter)
+
+class NoStoppingCondition(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has no stopping condition.  StopTime or StopCycle should be set." % \
+            self.pf
+
+class AmbiguousOutputs(YTException):
+    def __init__(self, pf):
+        YTException.__init__(self, pf)
+
+    def __str__(self):
+        return "Simulation %s has both dtDataDump and CycleSkipDataDump set.  Unable to calculate datasets." % \
+            self.pf
+


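For reference, the output selection in _get_outputs_by_key above sorts the candidate outputs by their distance to each requested value and keeps the nearest one, provided it falls within the optional tolerance. A minimal standalone sketch of that logic in plain Python (the output dictionaries below are made up for illustration; only the filename/redshift keys mirror the code above):

def nearest_outputs(outputs, key, values, tolerance=None):
    # keep, for each requested value, the nearest output within tolerance
    picked = []
    for value in values:
        outputs.sort(key=lambda obj: abs(value - obj[key]))
        nearest = outputs[0]
        if (tolerance is None or abs(value - nearest[key]) <= tolerance) \
                and nearest not in picked:
            picked.append(nearest)
    return picked

# illustrative data only
dumps = [{'filename': 'RD0000/RedshiftOutput0000', 'redshift': 3.00},
         {'filename': 'RD0001/RedshiftOutput0001', 'redshift': 1.02},
         {'filename': 'RD0002/RedshiftOutput0002', 'redshift': 0.05}]
print(nearest_outputs(dumps, 'redshift', [0, 1], tolerance=0.1))
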
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -163,16 +163,16 @@
                 for i in xrange(len(coord)):
                     na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
                     na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                    ind = na.where(mask == 1)
-                    selected_grids = self.grids[ind]
-                    # in orion, particles always live on the finest level.
-                    # so, we want to assign the particle to the finest of
-                    # the grids we just found
-                    if len(selected_grids) != 0:
-                        grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                        ind = na.where(self.grids == grid)[0][0]
-                        self.grid_particle_count[ind] += 1
-                        self.grids[ind].NumberOfParticles += 1
+                ind = na.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = na.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
         return True
                 
     def readGlobalHeader(self,filename,paranoid_read):


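The de-indentation above moves the grid lookup out of the per-axis loop, so the containment mask is accumulated over all three coordinates before the particle is assigned to the finest enclosing grid. A simplified NumPy restatement of the corrected logic (the grid edges and levels below are made-up illustrative arrays, not Orion data):

import numpy as np

grid_left_edge  = np.array([[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
grid_right_edge = np.array([[1.0, 1.0, 1.0], [0.75, 0.75, 0.75]])
levels = np.array([0, 1])
coord = [0.5, 0.5, 0.5]

mask = np.ones(len(levels), dtype=bool)
for i in range(3):
    # only after all three axes have been tested is the mask meaningful
    mask &= (grid_left_edge[:, i] <= coord[i]) & (coord[i] < grid_right_edge[:, i])
candidates = np.where(mask)[0]
if len(candidates) > 0:
    finest = candidates[np.argmax(levels[candidates])]
    print(finest)  # -> 1, the finer of the two enclosing grids
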
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -829,7 +829,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef void find_split(self, int *tr):
+    cdef void find_split(self, int *tr,):
         # First look for zeros
         cdef int i, center, ax
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
@@ -837,9 +837,9 @@
         axes = np.argsort(self.dd)[::-1]
         cdef np.int64_t *sig
         for axi in range(3):
-            ax = axes[axi]
-            center = self.dimensions[ax] / 2
-            sig = self.sigs[ax]
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
@@ -871,6 +871,61 @@
         tr[0] = 1; tr[1] = ax; tr[2] = zcp
         return
 
+    @cython.wraparound(False)
+    cdef void find_split_center(self, int *tr,):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef int flip
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
+            #frequently get stuck with many zeroes near the edge of the grid
+            #let's start from the middle, working out
+            for j in range(self.dimensions[ax]/2):
+                flip = 1
+                i = self.dimensions[ax]/2+j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                i = self.dimensions[ax]/2-j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                    
+                
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_properties(self):
@@ -970,21 +1025,29 @@
         hilbert_indices[o] = h
     return hilbert_indices
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 
                             np.ndarray[np.int64_t, ndim=1] uind,
                             np.ndarray[np.int64_t, ndim=2] lefts,
                             np.ndarray[np.int64_t, ndim=2] files):
+    #ind are the hilbert indices
+    #uind are the unique hilbert indices
+    #count[n] keeps track of how many times the nth index of uind occurs in ind
+    
     cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
     cdef int n, i
     cdef np.int64_t mi, mui
+    
+    #fill in the count array
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
             if uind[n] == mi:
                 count[n] += 1
                 break
+    
     cdef np.int64_t **alefts
     cdef np.int64_t **afiles
     afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
@@ -994,6 +1057,9 @@
     cdef np.ndarray[np.int64_t, ndim=2] left
     all_locations = []
     all_lefts = []
+    
+    #having measured the repetition of each hilbert index,
+    #we can now declare how much memory we will use
     for n in range(uind.shape[0]):
         locations = np.zeros((count[n], 6), 'int64')
         left = np.zeros((count[n], 3), 'int64')
@@ -1002,7 +1068,11 @@
         afiles[n] = <np.int64_t *> locations.data
         alefts[n] = <np.int64_t *> left.data
         li[n] = 0
+    
     cdef int fi
+    #now fill all_locations and all_lefts sequentially,
+    #so that when they return to python they come back
+    #as one array of locations and lefts per unique index
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
@@ -1022,19 +1092,31 @@
         np.ndarray[np.int64_t, ndim=1] ind,
         np.ndarray[np.int64_t, ndim=2] left_index,
         np.ndarray[np.int64_t, ndim=2] fl,
-        int num_deep = 0):
-    cdef float min_eff = 0.1
+        int num_deep = 0,
+        float min_eff = 0.1,
+        int use_center=0,
+        long split_on_vol = 0):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 40:
+    cdef long volume  =0
+    cdef int max_depth = 40
+    volume = dims[0]*dims[1]*dims[2]
+    if split_on_vol>0:
+        if volume < split_on_vol:
+            return [psg]
+    if num_deep > max_depth:
         psg.efficiency = min_eff
         return [psg]
-    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0):
         return [psg]
-    psg.find_split(tr)
+    if not use_center:    
+        psg.find_split(tr) #default
+    else:
+        psg.find_split_center(tr)    
+        
     tt = tr[0]
     ax = tr[1]
     fp = tr[2]
@@ -1059,7 +1141,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1067,7 +1149,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_r = [R]
     return rv_r + rv_l


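find_split_center above differs from find_split only in its scan order: instead of walking the signature array from the edge inward, it starts at the midpoint and works outward, so that long runs of zeros at the grid boundary do not capture the split point. A plain-Python sketch of just that search order (sig is an illustrative signature, i.e. cell counts along one axis):

def interior_zero_from_center(sig):
    # return the first interior index holding a zero, searching
    # outward from the midpoint; None if there is no interior zero
    n = len(sig)
    for j in range(n // 2):
        for i in (n // 2 + j, n // 2 - j):
            if 0 < i < n - 1 and sig[i] == 0:
                return i
    return None

print(interior_zero_from_center([0, 0, 3, 5, 0, 4, 2, 0, 0]))  # -> 4
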
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -125,6 +125,23 @@
     def _detect_fields(self):
         self.field_list = self.tree_proxy.field_names[:]
     
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=False, particle_type=True)
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)


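The external_wrapper indirection in _setup_field_list exists so that each conversion function closes over the field name current at that loop iteration rather than over the loop variable itself. A minimal illustration of the late-binding pitfall it avoids (nothing here is yt-specific):

# without the wrapper, every callback sees the last value of the loop variable
naive = [lambda: name for name in ("a", "b", "c")]
print([f() for f in naive])      # ['c', 'c', 'c']

# routing the value through an outer function binds it per iteration
def wrap(n):
    return lambda: n
wrapped = [wrap(name) for name in ("a", "b", "c")]
print([f() for f in wrapped])    # ['a', 'b', 'c']
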
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -94,22 +94,6 @@
 except ImportError:
     pass
 
-def __memory_fallback(pid):
-    """
-    Get process memory from a system call.
-    """
-    value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
-    if len(value) == 1: return float(value[0])
-    value.pop(0)
-    for line in value:
-        online = line.split()
-        if online[0] != pid: continue
-        try:
-            return float(online[2])
-        except:
-            return 0.0
-    return 0.0
-
 def get_memory_usage():
     """
     Returning resident size in megabytes
@@ -118,10 +102,10 @@
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return __memory_fallback(pid) / 1024
+        return -1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return __memory_fallback(pid) / 1024
+        return -1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
@@ -568,10 +552,11 @@
 def parallel_profile(prefix):
     import cProfile
     from yt.config import ytcfg
-    fn = "%s_%04i.cprof" % (prefix,
+    fn = "%s_%04i_%04i.cprof" % (prefix,
+                ytcfg.getint("yt", "__topcomm_parallel_size"),
                 ytcfg.getint("yt", "__topcomm_parallel_rank"))
     p = cProfile.Profile()
     p.enable()
-    yield
+    yield fn
     p.disable()
     p.dump_stats(fn)


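With the ps-based fallback removed, get_memory_usage relies entirely on /proc/<pid>/statm and returns -1024 when that path (or the resource module) is unavailable. A minimal standalone version of the statm route it keeps, assuming Linux (returns resident size in megabytes):

import os
import resource

def resident_megabytes(pid=None):
    # read the resident page count from /proc/<pid>/statm and convert to MB
    if pid is None:
        pid = os.getpid()
    status_file = "/proc/%s/statm" % pid
    if not os.path.isfile(status_file):
        return -1024  # same sentinel the patched get_memory_usage uses
    resident_pages = int(open(status_file).read().split()[1])
    return resident_pages * resource.getpagesize() / (1024 * 1024)

print(resident_megabytes())
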
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -26,6 +26,7 @@
 
 import json
 import os
+import stat
 import cStringIO
 import logging
 import uuid
@@ -276,7 +277,7 @@
         for i in range(30):
             # Check for stop
             if self.stopped: return {'type':'shutdown'} # No race condition
-            if self.payload_handler.event.wait(1): # One second timeout
+            if self.payload_handler.event.wait(0.01): # 10 millisecond timeout
                 return self.payload_handler.deliver_payloads()
         if self.debug: print "### Heartbeat ... finished: %s" % (time.ctime())
         return []
@@ -459,6 +460,36 @@
         return command
 
     @lockit
+    def load(self, base_dir, filename):
+        pp = os.path.join(base_dir, filename)
+        funccall = "pfs.append(load('%s'))" % pp
+        self.execute(funccall)
+        return []
+
+    def file_listing(self, base_dir, sub_dir):
+        if base_dir == "":
+            cur_dir = os.getcwd()
+        elif sub_dir == "":
+            cur_dir = base_dir
+        else:
+            cur_dir = os.path.join(base_dir, sub_dir)
+            cur_dir = os.path.abspath(cur_dir)
+        if not os.path.isdir(cur_dir):
+            return {'change':False}
+        fns = os.listdir(cur_dir)
+        results = [("..", 0, "directory")]
+        for fn in sorted((os.path.join(cur_dir, f) for f in fns)):
+            if not os.access(fn, os.R_OK): continue
+            if os.path.isfile(fn):
+                size = os.path.getsize(fn)
+                t = "file"
+            else:
+                size = 0
+                t = "directory"
+            results.append((os.path.basename(fn), size, t))
+        return dict(objs = results, cur_dir=cur_dir)
+
+    @lockit
     def create_phase(self, objname, field_x, field_y, field_z, weight):
         if weight == "None": weight = None
         else: weight = "'%s'" % (weight)


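The file_listing handler above returns {'change': False} when the requested directory does not exist; otherwise it returns the resolved directory plus a list of (name, size, type) tuples, which is what the ExtJS ArrayStore in file_open.js (added below) loads. An illustrative return value, with made-up paths:

{'cur_dir': '/home/user/data',
 'objs': [('..', 0, 'directory'),
          ('DD0000', 0, 'directory'),
          ('DD0000.tar', 104857600, 'file')]}
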
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/html/images/file_dialog_directory.png
Binary file yt/gui/reason/html/images/file_dialog_directory.png has changed


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/html/images/file_dialog_file.png
Binary file yt/gui/reason/html/images/file_dialog_file.png has changed


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -78,7 +78,8 @@
     <!-- FONTS --><!-- These will get pulled from Google, but Google might not be accessible.
          In that case, it will default to whatever is in the family. -->
-    <link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Inconsolata">
+    <!--<link rel="stylesheet" type="text/css"
+    href="http://fonts.googleapis.com/css?family=Inconsolata">--><!-- LEAFLET STUFF --><script type="text/javascript" src="leaflet/leaflet.js"></script>
@@ -103,6 +104,9 @@
     <script type="text/javascript" src="js/menu_items.js"></script><!-- THE PLOT WINDOW FUNCTIONS -->
+    <script type="text/javascript" src="js/file_open.js"></script>
+
+    <!-- THE PLOT WINDOW FUNCTIONS --><script type="text/javascript" src="js/widget_plotwindow.js"></script><!-- THE GRID VIEWER FUNCTIONS -->


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/html/js/file_open.js
--- /dev/null
+++ b/yt/gui/reason/html/js/file_open.js
@@ -0,0 +1,146 @@
+/**********************************************************************
+A file opener
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+function open_file() {
+    var filestore = new Ext.data.ArrayStore({
+      fields: ['filename', 
+               {name:'size', type:'float'},
+               'type'
+      ]
+    });
+    var cur_dir;
+    function fillStore(f, a){
+        if(a.status == false){
+          Ext.Msg.alert("Error", "Something has gone wrong.");
+          return;
+        }
+        if(a.result['change'] == false) {
+          win.get("current_file").setValue(cur_dir);
+          return;
+        }
+        filestore.removeAll();
+        var rec = [];
+        filestore.loadData(a.result['objs']);
+        cur_dir = a.result['cur_dir'];
+        win.get("current_file").setValue(cur_dir);
+    }
+
+    var win = new Ext.Window({
+        layout:'vbox',
+        layoutConfig: {
+            align: 'stretch',
+            pack: 'start',
+            defaultMargins: "5px 5px 5px 5px",
+        },
+        width:540,
+        height:480,
+        modal:true,
+        resizable:true,
+        draggable:true,
+        title:'Open File',
+        items: [
+            { xtype: 'textfield',
+              id: 'current_file',
+              listeners: {
+                specialkey: function(f, e) {
+                  if (e.getKey() != e.ENTER) { return; }
+                  yt_rpc.ExtDirectREPL.file_listing(
+                        {base_dir:f.getValue(), sub_dir:''}, fillStore);
+                }
+              }
+            }, {
+              xtype:'listview',
+              id: 'file_listing',
+              store: filestore ,
+              singleSelect:true,
+              emptyText: 'No files to display',
+              flex: 1.0,
+              columns: [
+              {
+                  header: 'Type',
+                  width: 0.1,
+                  tpl: '<img src="images/file_dialog_{type}.png" width=16 height=16>',
+                  dataIndex: 'type'
+              },{
+                  header: 'Filename',
+                  width: .75,
+                  dataIndex: 'filename'
+              },{
+                  header: 'Size',
+                  dataIndex: 'size',
+                  tpl: '{size:fileSize}',
+                  align: 'right',
+                  cls: 'listview-filesize'
+              }],
+              listeners: {
+                dblclick: function(view, index, node, e) {
+                    var fileRecord = filestore.getAt(index).data;
+                    if (fileRecord.type == 'directory') {
+                      yt_rpc.ExtDirectREPL.file_listing(
+                            {base_dir:cur_dir, sub_dir:fileRecord.filename},
+                            fillStore);
+                    } else {
+                      yt_rpc.ExtDirectREPL.load(
+                            {base_dir:cur_dir, filename:fileRecord.filename},
+                            handle_result);
+                      win.destroy();
+                    }
+                },
+                selectionchange: function(view, index, node, e) {
+                },
+              },
+            }, {
+              xtype: 'panel',
+              height: 40,
+              layout: 'hbox',
+              layoutConfig: {
+                  align: 'stretch',
+                  pack: 'start',
+                  defaultMargins: "5px 5px 5px 5px",
+              },
+              items: [
+                { flex: 1.0, xtype: 'button', text: 'Cancel',
+                    handler: function(b, e) { win.destroy(); } },
+                { flex: 1.0, xtype: 'button', text: 'Load',
+                    handler: function(b, e) {
+                      filename = "";
+                      var fl = win.get("file_listing");
+                      if (fl.getSelectionCount() == 1) {
+                        filename = fl.getSelectedRecords()[0].data.filename;
+                      }
+                      yt_rpc.ExtDirectREPL.load(
+                            {base_dir:cur_dir, filename:filename},
+                            handle_result);
+                      win.destroy();
+                    }
+                },
+              ],
+            },
+        ],
+    });
+    yt_rpc.ExtDirectREPL.file_listing(
+          {base_dir:"", sub_dir:""}, fillStore);
+    win.show(this);
+}


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/gui/reason/html/js/menu_items.js
--- a/yt/gui/reason/html/js/menu_items.js
+++ b/yt/gui/reason/html/js/menu_items.js
@@ -33,7 +33,11 @@
     text: 'Menu',
     id: 'main_menu',
     menu: [
-           {xtype:'menuitem', text: 'Open', disabled: true},
+           {xtype:'menuitem', text: 'Open File', 
+               handler: function(b,e) {
+                  open_file()
+               },
+           },
            {xtype:'menuitem', text: 'Open Directory', disabled: true},
            {xtype: 'menuseparator'},
            {xtype:'menuitem', text: 'Save Script',


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -67,7 +67,8 @@
     add_quantity, quantity_info
 
 from yt.frontends.enzo.api import \
-    EnzoStaticOutput, EnzoStaticOutputInMemory, EnzoFieldInfo, \
+    EnzoStaticOutput, EnzoStaticOutputInMemory, \
+    EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
 from yt.frontends.castro.api import \
@@ -128,7 +129,7 @@
 for name, cls in callback_registry.items():
     exec("%s = cls" % name)
 
-from yt.convenience import all_pfs, max_spheres, load, projload
+from yt.convenience import load, projload
 
 # Import some helpful math utilities
 from yt.utilities.math_utils import \


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -8,7 +8,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('yt', parent_package, top_path)
     config.add_subpackage('analysis_modules')
-    config.add_subpackage('astro_objects')
     config.add_subpackage('data_objects')
     config.add_subpackage('frontends')
     config.add_subpackage('gui')


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -124,6 +124,9 @@
     #sys.argv = [a for a in unparsed_args]
     if opts.parallel:
         parallel_capable = turn_on_parallelism()
+    subparsers = parser.add_subparsers(title="subcommands",
+                        dest='subcommands',
+                        description="Valid subcommands",)
 else:
     subparsers = parser.add_subparsers(title="subcommands",
                         dest='subcommands',


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -1,8 +1,10 @@
 """
-Simle integrators for the radiative transfer equation
+Simple integrators for the radiative transfer equation
 
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: CASA/University of Colorado
+Author: Christopher Moody <juxtaposicion at gmail.com>
+Affiliation: cemoody at ucsc.edu
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008 Matthew Turk.  All Rights Reserved.
@@ -107,3 +109,73 @@
         ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
         sample[i] = arr[ind[0], ind[1], ind[2]]
     return sample
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    cdef long level_max = np.max(levels)
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+    return assign
+
+
+
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.int64_t level_max, 
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    #TODO: instead of iterating over every particle, we could use a kdtree
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    index_lists = []
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            index_list = []
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+                            index_list += j,
+            index_lists += index_list,
+    return assign,index_lists
+
+    
+    


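Both new routines above walk the cells from the finest level downward and claim every not-yet-assigned particle whose position lies inside the cell. A vectorized NumPy restatement of that assignment rule, useful for checking the logic on small made-up arrays:

import numpy as np

def assign_particles(levels, left_edges, right_edges, pos):
    # pos has shape (npart, 3); returns the claiming cell index or -1
    assign = np.zeros(len(pos), dtype='int32') - 1
    for level in range(levels.max(), 0, -1):   # finest level first
        for i in np.where(levels == level)[0]:
            inside = np.all((left_edges[i] <= pos) & (pos <= right_edges[i]), axis=1)
            assign[(assign == -1) & inside] = i
    return assign

levels = np.array([1, 1], dtype='int32')
left_edges = np.array([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
right_edges = np.array([[0.5, 1.0, 1.0], [1.0, 1.0, 1.0]])
pos = np.array([[0.25, 0.5, 0.5], [0.75, 0.5, 0.5]])
print(assign_particles(levels, left_edges, right_edges, pos))  # -> [0 1]
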
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -27,21 +27,6 @@
 cimport numpy as np
 cimport cython
 
-cdef extern from "math.h":
-    double exp(double x)
-    float expf(float x)
-    long double expl(long double x)
-    double floor(double x)
-    double ceil(double x)
-    double fmod(double x, double y)
-    double log2(double x)
-    long int lrint(double x)
-    double fabs(double x)
-    double cos(double x)
-    double sin(double x)
-    double asin(double x)
-    double acos(double x)
-
 cdef class position:
     cdef public int output_pos, refined_pos
     def __cinit__(self):
@@ -81,6 +66,7 @@
                             np.ndarray[np.float64_t, ndim=2] output,
                             np.ndarray[np.int32_t, ndim=1] refined,
                             OctreeGridList grids):
+    #cdef int s = curpos
     cdef int i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
@@ -93,8 +79,13 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the center (cx, cy, cz) of each child cell
+    #and from that find the child cell indices
     for i_off in range(i_f):
-        i = i_off + i_i
+        i = i_off + i_i #index
         cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
@@ -118,19 +109,20 @@
                     child_i = int((cx - child_leftedges[0])/child_dx)
                     child_j = int((cy - child_leftedges[1])/child_dx)
                     child_k = int((cz - child_leftedges[2])/child_dx)
-                    s = RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
+                    # s = Recurs.....
+                    RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
                                         curpos, ci - grid.offset, output, refined, grids)
-    return s
 
+ at cython.boundscheck(False)
 def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
                           int i_f, int j_f, int k_f,
-                          np.ndarray[np.int64_t, ndim=1] curpos,
+                          np.ndarray[np.int32_t, ndim=1] curpos,
                           int gi, 
                           np.ndarray[np.float64_t, ndim=2] output,
-                          np.ndarray[np.int64_t, ndim=2] genealogy,
+                          np.ndarray[np.int32_t, ndim=2] genealogy,
                           np.ndarray[np.float64_t, ndim=2] corners,
                           OctreeGridList grids):
-    cdef np.int64_t i, i_off, j, j_off, k, k_off, ci, fi
+    cdef np.int32_t i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
     cdef OctreeGrid grid = grids[gi-1]
@@ -143,11 +135,11 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
-    cdef np.int64_t cp
-    cdef int s = 0
+    cdef int cp
     for i_off in range(i_f):
         i = i_off + i_i
         cx = (leftedges[0] + i*dx)
+        if i_f > 2: print k, cz
         for j_off in range(j_f):
             j = j_off + j_i
             cy = (leftedges[1] + j*dx)
@@ -167,15 +159,16 @@
                     child_grid = grids[ci-1]
                     child_dx = child_grid.dx[0]
                     child_leftedges = child_grid.left_edges
-                    child_i = lrint((cx-child_leftedges[0])/child_dx)
-                    child_j = lrint((cy-child_leftedges[1])/child_dx)
-                    child_k = lrint((cz-child_leftedges[2])/child_dx)
+                    child_i = int((cx-child_leftedges[0])/child_dx)
+                    child_j = int((cy-child_leftedges[1])/child_dx)
+                    child_k = int((cz-child_leftedges[2])/child_dx)
                     # set current child id to id of next cell to examine
                     genealogy[cp, 0] = curpos[level+1] 
                     # set next parent id to id of current cell
                     genealogy[curpos[level+1]:curpos[level+1]+8, 1] = cp
-                    RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
+                    s = RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
                                               curpos, ci, output, genealogy,
                                               corners, grids)
                 curpos[level] += 1
-    return
+    return s
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -142,8 +142,6 @@
     # points to the start of the record *following* the reading of iOctFree and
     # nOct.  For those following along at home, we only need to read:
     #   iOctPr, iOctLv
-    print min_level, max_level 
-    
     cdef int nchild = 8
     cdef int i, Lev, cell_ind, iOct, nLevel, nLevCells, ic1
     cdef np.int64_t next_record
@@ -170,7 +168,7 @@
         fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
         iOct = iHOLL[Level] - 1
         nLevel = iNOLL[Level]
-        print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
+        #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
         #print ftell(f)
         for ic1 in range(nLevel):
             iOctMax = max(iOctMax, iOct)
@@ -218,7 +216,7 @@
         
         #find the length of all of the children section
         child_record = ftell(f) +  (next_record+2*sizeof(int))*nLevel*nchild
-        print 'Skipping over hydro vars', ftell(f), child_record
+        #print 'Skipping over hydro vars', ftell(f), child_record
         fseek(f, child_record, SEEK_SET)
         
         # for ic1 in range(nLevel * nchild):
@@ -288,9 +286,9 @@
 def read_art_grid(int varindex, 
               np.ndarray[np.int64_t, ndim=1] start_index,
               np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float64_t, ndim=3] data,
-              np.ndarray[np.int32_t, ndim=3] filled,
-              np.ndarray[np.float64_t, ndim=2] level_data,
+              np.ndarray[np.float32_t, ndim=3] data,
+              np.ndarray[np.uint8_t, ndim=3] filled,
+              np.ndarray[np.float32_t, ndim=2] level_data,
               int level, int ref_factor,
               component_grid_info):
     cdef int gi, i, j, k, domain, offset, grid_id
@@ -312,7 +310,7 @@
         domain = ogrid_info[0]
         #print "Loading", domain, ogrid_info
         grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:]
+        og_start_index = ogrid_info[3:6] #the oct left edge
         for i in range(2*ref_factor):
             di = i + og_start_index[0] * ref_factor
             if di < start_index[0] or di >= end_index[0]: continue
@@ -350,6 +348,30 @@
     return to_fill
 
 @cython.cdivision(True)
+ at cython.boundscheck(True)
+ at cython.wraparound(False)
+def fill_child_mask(np.ndarray[np.int64_t, ndim=2] file_locations,
+                    np.ndarray[np.int64_t, ndim=1] grid_le,
+                    np.ndarray[np.uint8_t, ndim=4] art_child_masks,
+                    np.ndarray[np.uint8_t, ndim=3] child_mask):
+
+    #loop over file_locations, for each row extracting the index & LE
+    #of the oct we will pull from art_child_masks
+    #then use the art_child_masks info to fill in child_mask
+    cdef int i,ioct,x,y,z
+    cdef int nocts = file_locations.shape[0]
+    cdef int lex,ley,lez
+    for i in range(nocts):
+        ioct = file_locations[i,1] #from fortran to python indexing?
+        lex = file_locations[i,3] - grid_le[0] #the oct left edge x
+        ley = file_locations[i,4] - grid_le[1]
+        lez = file_locations[i,5] - grid_le[2]
+        for x in range(2):
+            for y in range(2):
+                for z in range(2):
+                    child_mask[lex+x,ley+y,lez+z] = art_child_masks[ioct,x,y,z]
+
+ at cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def read_castro_particles(char *fn, int offset, int fieldindex, int nfields,


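fill_child_mask above copies each oct's 2x2x2 child-mask block from art_child_masks into the grid-wide child_mask array, offset by the oct's left edge relative to the grid. A plain NumPy restatement of that copy, assuming the same column layout of file_locations as the Cython code (column 1 holds the oct index, columns 3:6 its left edge); the arrays below are illustrative only:

import numpy as np

def fill_child_mask(file_locations, grid_le, art_child_masks, child_mask):
    # copy each oct's 2x2x2 mask into child_mask at its offset within the grid
    for row in file_locations:
        ioct = row[1]
        lex, ley, lez = row[3:6] - grid_le
        child_mask[lex:lex+2, ley:ley+2, lez:lez+2] = art_child_masks[ioct]
    return child_mask

grid_le = np.array([10, 10, 10])
file_locations = np.array([[0, 0, 0, 10, 10, 12, 0]])  # one oct at offset (0, 0, 2)
masks = np.ones((1, 2, 2, 2), dtype='uint8')
child_mask = np.zeros((4, 4, 4), dtype='uint8')
fill_child_mask(file_locations, grid_le, masks, child_mask)
print(child_mask[0:2, 0:2, 2:4])  # the filled 2x2x2 block
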
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
         uniquedims[i] = <np.float64_t *> \
                 alloca(2*n_grids * sizeof(np.float64_t))
     my_max = 0
+    best_dim = -1
     for dim in range(3):
         n_unique = 0
         uniques = uniquedims[dim]


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1012,7 +1012,7 @@
                     # This node belongs to someone else, move along
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
-                
+
             # If we are down to one grid, we are either in it or the parent grid
             if len(current_node.grids) == 1:
                 thisgrid = current_node.grids[0]
@@ -1031,25 +1031,27 @@
                         if len(children) > 0:
                             current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
-                            # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+                            #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
                             continue
 
                     # Else make a leaf node (brick container)
+                    #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
 
             # If we don't have any grids, this volume belongs to the parent        
             if len(current_node.grids) == 0:
+                #print 'This volume does not have a child grid, so it belongs to my parent!'
                 set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
-                # print 'This volume does not have a child grid, so it belongs to my parent!'
                 current_node, previous_node = self.step_depth(current_node, previous_node)
                 continue
 
             # If we've made it this far, time to build a dividing node
-            self._build_dividing_node(current_node)
+            # print 'Building dividing node'
+            # Continue if building failed
+            if self._build_dividing_node(current_node): continue
 
             # Step to the nest node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1058,10 +1060,10 @@
         '''
         Given a node, finds all the choices for the next dividing plane.  
         '''
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1071,8 +1073,19 @@
         Makes the current node a dividing node, and initializes the
         left and right children.
         '''
-        
-        data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        best_dim, split, less_ids, greater_ids = \
+            kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+        del data
+
+        # Here we break out if no unique grids were found. In this case, there
+        # are likely overlapping grids, and we assume that the first grid takes
+        # precedence.  This is fragile.
+        if best_dim == -1:
+            current_node.grids = [current_node.grids[0]]
+            return 1
 
         current_node.split_ax = best_dim
         current_node.split_pos = split
@@ -1080,7 +1093,7 @@
         #greater_ids0 = (split < data[:,1])
         #assert(na.all(less_ids0 == less_ids))
         #assert(na.all(greater_ids0 == greater_ids))
-        
+
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
                                              parent_grid=current_node.parent_grid,
@@ -1099,7 +1112,9 @@
         # build to work.  The other deletions are just to save memory.
         del current_node.grids, current_node.parent_grid, current_node.brick,\
             current_node.li, current_node.ri, current_node.dims
-        
+
+        return 0
+
     def traverse(self, back_center, front_center, image):
         r"""Traverses the kd-Tree, casting the partitioned grids from back to
             front.


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -82,11 +82,15 @@
         if cls.npfs > 1:
             self(args)
         else:
-            if len(getattr(args, "pf", [])) > 1:
+            pf_args = getattr(args, "pf", [])
+            if len(pf_args) > 1:
                 pfs = args.pf
                 for pf in pfs:
                     args.pf = pf
                     self(args)
+            elif len(pf_args) == 0:
+                pfs = []
+                self(args)
             else:
                 args.pf = getattr(args, 'pf', [None])[0]
                 self(args)
@@ -105,6 +109,8 @@
 _common_options = dict(
     pf      = dict(short="pf", action=GetParameterFiles,
                    nargs="+", help="Parameter files to run on"),
+    opf     = dict(action=GetParameterFiles, dest="pf",
+                   nargs="*", help="(Optional) Parameter files to run on"),
     axis    = dict(short="-a", long="--axis",
                    action="store", type=int,
                    dest="axis", default=4,
@@ -1269,7 +1275,8 @@
                  help="At startup, find all *.hierarchy files in the CWD"),
             dict(short="-d", long="--debug", action="store_true",
                  default = False, dest="debug",
-                 help="Add a debugging mode for cell execution")
+                 help="Add a debugging mode for cell execution"),
+            "opf"
             )
     description = \
         """
@@ -1315,12 +1322,12 @@
         from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
         hr = ExtDirectREPL(base_extjs_path)
         hr.debug = PayloadHandler.debug = args.debug
+        command_line = ["pfs = []"]
         if args.find:
             # We just have to find them and store references to them.
-            command_line = ["pfs = []"]
             for fn in sorted(glob.glob("*/*.hierarchy")):
                 command_line.append("pfs.append(load('%s'))" % fn[:-10])
-            hr.execute("\n".join(command_line))
+        hr.execute("\n".join(command_line))
         bottle.debug()
         uuid_serve_functions(open_browser=args.open_browser,
                     port=int(args.port), repl=hr)
@@ -1430,7 +1437,7 @@
         if 'upload' in rv and 'links' in rv['upload']:
             print
             print "Image successfully uploaded!  You can find it at:"
-            print "    %s" % (rv['upload']['links']['imgur_page'])
+            print "    %s" % (rv['upload']['links']['original'])
             print
             print "If you'd like to delete it, visit this page:"
             print "    %s" % (rv['upload']['links']['delete_page'])


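The new "opf" option and the len(pf_args) == 0 branch hinge on argparse's nargs semantics: "+" requires at least one value, while "*" accepts none, so commands that declare "opf" can now be run with no parameter files at all. A minimal sketch using plain argparse, without the GetParameterFiles action; the dataset name is illustrative.

import argparse

# nargs="*" yields an empty list instead of an error when nothing is given.
parser = argparse.ArgumentParser()
parser.add_argument("pf", nargs="*", help="(Optional) Parameter files to run on")
print parser.parse_args([]).pf                    # -> []
print parser.parse_args(["DD0010/DD0010"]).pf     # -> ['DD0010/DD0010']
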
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -33,6 +33,15 @@
 
 # Data access exceptions:
 
+class YTOutputNotIdentified(YTException):
+    def __init__(self, args, kwargs):
+        self.args = args
+        self.kwargs = kwargs
+
+    def __str__(self):
+        return "Supplied %s %s, but could not load!" % (
+            self.args, self.kwargs)
+
 class YTSphereTooSmall(YTException):
     def __init__(self, pf, radius, smallest_cell):
         YTException.__init__(self, pf)


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/__init__.py
@@ -0,0 +1,2 @@
+from conversion import *
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/conversion/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/__init__.py
@@ -0,0 +1,3 @@
+from conversion_abc import Converter
+from conversion_athena import AthenaDistributedConverter, AthenaConverter
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/conversion/conversion_abc.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_abc.py
@@ -0,0 +1,7 @@
+
+class Converter(object):
+    def __init__(self, basename, outname=None):
+        self.basename = basename
+        self.outname = outname
+    def convert(self):
+        pass


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/conversion/conversion_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -0,0 +1,503 @@
+import os
+import weakref
+import numpy as na
+import h5py as h5
+from conversion_abc import *
+from glob import glob
+from collections import \
+    defaultdict
+from string import \
+    strip, \
+    rstrip
+from stat import \
+    ST_CTIME
+
+translation_dict = {}
+translation_dict['density'] = 'density'
+translation_dict['total_energy'] = 'specific_energy'
+translation_dict['velocity_x'] = 'velocity_x'
+translation_dict['velocity_y'] = 'velocity_y'
+translation_dict['velocity_z'] = 'velocity_z'
+translation_dict['cell_centered_B_x'] = 'mag_field_x'
+translation_dict['cell_centered_B_y'] = 'mag_field_y'
+translation_dict['cell_centered_B_z'] = 'mag_field_z'
+
+class AthenaDistributedConverter(Converter):
+    def __init__(self, basename, outname=None, source_dir=None, field_conversions=None):
+        self.fields = []
+        self.current_time=0.0
+        name = basename.split('.')
+        self.ddn = int(name[1])
+        if source_dir is None:
+            source_dir = './'
+        self.source_dir = source_dir+'/'
+        self.basename = name[0]
+        if outname is None:
+            outname = self.basename+'.%04i'%self.ddn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+        self.handle = None
+
+    def parse_line(self,line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+            self.current_time = grid['time']
+        elif 'PRIMITIVE' in splitup:
+            grid['time'] = float(splitup[4].rstrip(','))
+            grid['level'] = int(splitup[6].rstrip(','))
+            grid['domain'] = int(splitup[8].rstrip(','))
+            self.current_time = grid['time']
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
+
+    def write_gdf_field(self, fn, grid_number, field, data):
+        f = self.handle
+        ## --------- Store Grid Data --------- ##
+        if 'grid_%010i'%grid_number not in f['data'].keys():
+            g = f['data'].create_group('grid_%010i'%grid_number)
+        else:
+            g = f['data']['grid_%010i'%grid_number]
+        name = field
+        try:
+            name = translation_dict[name]
+        except:
+            pass
+        # print 'Writing %s' % name
+        if not name in g.keys(): 
+            g.create_dataset(name,data=data)
+        
+
+
+    def read_and_write_hierarchy(self,basename, ddn, gdf_name):
+        """ Read Athena legacy vtk file from multiple cpus """
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        grid_dims = na.empty([N,3],dtype='int64')
+        grid_left_edges = na.empty([N,3],dtype='float64')
+        grid_dds = na.empty([N,3],dtype='float64')
+        grid_levels = na.zeros(N,dtype='int64')
+        grid_parent_ids = -1*na.ones(N,dtype='int64')
+        grid_particle_counts = na.zeros([N,1],dtype='int64')
+
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+
+            print 'Reading file %s' % fn
+            f = open(fn,'rb')
+            grid = {}
+            grid['read_field'] = None
+            grid['read_type'] = None
+            table_read=False
+            line = f.readline()
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if "SCALAR" in line.strip().split():
+                    break
+                if "VECTOR" in line.strip().split():
+                    break
+                if 'TABLE' in line.strip().split():
+                    break
+                if len(line) == 0: break
+                del line
+                line = f.readline()
+
+            if len(line) == 0: break
+            
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+                grid['dimensions'][grid['dimensions']==0]=1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            # Append all hierarchy info before reading this grid's data
+            grid_dims[i]=grid['dimensions']
+            grid_left_edges[i]=grid['left_edge']
+            grid_dds[i]=grid['dds']
+            #grid_ncells[i]=grid['ncells']
+            del grid
+
+            f.close()
+            del f
+        f = self.handle 
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+
+        gles = grid_left_edges
+        gdims = grid_dims
+        dle = na.min(gles,axis=0)
+        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        glis = ((gles - dle)/grid_dds).astype('int64')
+        gris = glis + gdims
+
+        ddims = (dre-dle)/grid_dds[0]
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        # grid_level
+        level = f.create_dataset('grid_level',data=grid_levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        part_count = f.create_dataset('grid_particle_count',data=grid_particle_counts)
+
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=grid_parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = ddims
+        pars_g.attrs['current_time'] = self.current_time
+        pars_g.attrs['domain_left_edge'] = dle
+        pars_g.attrs['domain_right_edge'] = dre
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(1)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        # pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = 1.0
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        #f.close()
+
+
+    def read_and_write_data(self, basename, ddn, gdf_name):
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+            f = open(fn,'rb')
+            #print 'Reading data from %s' % fn
+            line = f.readline()
+            while line != '':
+                # print line
+                if len(line) == 0: break
+                splitup = line.strip().split()
+
+                if "DIMENSIONS" in splitup:
+                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    line = f.readline()
+                    continue
+                elif "CELL_DATA" in splitup:
+                    grid_ncells = int(splitup[-1])
+                    line = f.readline()
+                    if na.prod(grid_dims) != grid_ncells:
+                        grid_dims -= 1
+                        grid_dims[grid_dims==0]=1
+                    if na.prod(grid_dims) != grid_ncells:
+                        print 'product of dimensions %i not equal to number of cells %i' % \
+                              (na.prod(grid_dims), grid_ncells)
+                        raise TypeError
+                    break
+                else:
+                    del line
+                    line = f.readline()
+            read_table = False
+            while line != '':
+                if len(line) == 0: break
+                splitup = line.strip().split()
+                if 'SCALARS' in splitup:
+                    field = splitup[1]
+                    if not read_table:
+                        line = f.readline() # Read the lookup table line
+                        read_table = True
+                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field)
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field, data)
+                    read_table=False
+
+                elif 'VECTORS' in splitup:
+                    field = splitup[1]
+                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data_x = data[0::3].reshape(grid_dims,order='F')
+                    data_y = data[1::3].reshape(grid_dims,order='F')
+                    data_z = data[2::3].reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field+'_x')
+                        self.fields.append(field+'_y')
+                        self.fields.append(field+'_z')
+
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field+'_x', data_x)
+                    self.write_gdf_field(gdf_name, i, field+'_y', data_y)
+                    self.write_gdf_field(gdf_name, i, field+'_z', data_z)
+                    del data, data_x, data_y, data_z
+                del line
+                line = f.readline()
+            f.close()
+            del f
+
+        f = self.handle 
+        field_g = f['field_types']
+        # Add Field Attributes
+        for name in self.fields:
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            
+
+    def convert(self, hierarchy=True, data=True):
+        self.handle = h5.File(self.outname, 'a')
+        if hierarchy:
+            self.read_and_write_hierarchy(self.basename, self.ddn ,self.outname)
+        if data:
+            self.read_and_write_data(self.basename, self.ddn ,self.outname)
+        self.handle.close()
+
+class AthenaConverter(Converter):
+    def __init__(self, basename, outname=None, field_conversions=None):
+        self.fields = []
+        self.basename = basename
+        name = basename.split('.')
+        fn = '%s.%04i'%(name[0],int(name[1]))
+        self.ddn = int(name[1])
+        self.basename = fn
+        if outname is None:
+            outname = fn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+
+
+    def parse_line(self, line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
+        
+    def read_grid(self, filename):
+        """ Read Athena legacy vtk file from single cpu """
+        f = open(filename,'rb')
+        #print 'Reading from %s'%filename
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while line != '':
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if grid['read_type'] == 'vector':
+                    break
+                if table_read is False:             
+                    line = f.readline()
+                if 'TABLE' in line.strip().split():
+                    table_read = True
+                if len(line) == 0: break
+            #    print line
+
+            if len(line) == 0: break
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            if grid['read_type'] == 'scalar':
+                grid[grid['read_field']] = \
+                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field'])
+            elif grid['read_type'] == 'vector':
+                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field']+'_x')
+                self.fields.append(grid['read_field']+'_y')
+                self.fields.append(grid['read_field']+'_z')
+            else:
+                raise TypeError
+            grid['read_field'] = None
+            grid['read_type'] = None
+            line = f.readline()
+            if len(line) == 0: break
+        grid['right_edge'] = grid['left_edge']+grid['dds']*(grid['dimensions'])
+        return grid
+
+    def write_to_gdf(self, fn, grid):
+        f = h5.File(fn,'a')
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+        dle = grid['left_edge'] # True only in this case of one grid for the domain
+        gles = na.array([grid['left_edge']])
+        gdims = na.array([grid['dimensions']])
+        glis = ((gles - dle)/grid['dds']).astype('int64')
+        gris = glis + gdims
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        levels = na.array([0]).astype('int64') # unigrid example
+        # grid_level
+        level = f.create_dataset('grid_level',data=levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        n_particles = na.array([[0]]).astype('int64')
+        #grid_particle_count
+        part_count = f.create_dataset('grid_particle_count',data=n_particles)
+
+        # Assume -1 means no parent.
+        parent_ids = na.array([-1]).astype('int64')
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        f.create_group('hierarchy')
+
+        ## --------- Store Grid Data --------- ##
+
+        g0 = data_g.create_group('grid_%010i'%0)
+        for field in self.fields:
+            name = field
+            if field in translation_dict.keys():
+                name = translation_dict[name]
+            if not name in g0.keys(): 
+                g0.create_dataset(name,data=grid[field])
+
+        ## --------- Store Particle Data --------- ##
+
+        # Nothing to do
+
+        ## --------- Attribute Tables --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = grid['dimensions']
+        try:
+            pars_g.attrs['current_time'] = grid['time']
+        except:
+            pars_g.attrs['current_time'] = 0.0
+        pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
+        pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(0)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = grid['vtk_version']
+
+        # Add Field Attributes
+        for name in g0.keys():
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        f.close()
+
+    def convert(self):
+        grid = self.read_grid(self.basename+'.vtk')
+        self.write_to_gdf(self.outname,grid)
+        
+# import sys
+# if __name__ == '__main__':
+#     n = sys.argv[-1]
+#     n = n.split('.')
+#     fn = '%s.%04i'%(n[0],int(n[1]))
+#     grid = read_grid(fn+'.vtk')
+#     write_to_hdf5(fn+'.gdf',grid)
+    


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/conversion/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('conversion', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/docs/IRATE_notes.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/IRATE_notes.txt
@@ -0,0 +1,39 @@
+Here is info from Erik Tollerud about the IRATE data format.
+
+The bitbucket project is at https://bitbucket.org/eteq/irate-format
+and I've posted a copy of the docs at
+http://www.physics.uci.edu/~etolleru/irate-docs/ , in particular
+http://www.physics.uci.edu/~etolleru/irate-docs/formatspec.html ,
+which details the actual requirements for data to fit in the format.
+As far as I can tell, the following steps are needed to make GDF fit
+inside the IRATE format:
+
+*move everything except "/simulation_parameters" into a group named "/GridData"
+
+*rename "/simulation_parameters" to "SimulationParameters"
+
+*remove the 'field_types' group (this is not absolutely necessary, but
+the convention we had in mind for IRATE is that the dataset names
+themselves (e.g. the datasets like /data/gridxxxxxx/density)  serve as
+the field definitions.)
+
+* The unit information that's in 'field_types' should then be
+attributes in either "/GridData" or "/GridData/data" following the
+naming scheme e.g. "densityunitcgs" following the unit form given in
+the IRATE doc and an additional attribute e.g. "densityunitname"
+should be added with the human-readable name of the unit. This unit
+information can also live at the dataset level, but it probably makes
+more sense to put it instead at the higher level (IRATE supports both
+ways of doing it)
+
+* The Cosmology group (as defined in the IRATE specification) must be
+added - for simulations that are not technically "cosmological", you
+can just use one of the default cosmologies (WMAP7 is a reasonable
+choice - there's a function in the IRATE tools that automatically
+takes care of all the details for this).
+
+* optional: redo all the group names to follow the CamelCase
+convention - that's what we've been using elsewhere in IRATE.  This is
+an arbitrary choice, but it would be nice for it to be consistent
+throughout the format.
+

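A rough h5py sketch of the first three restructuring steps listed above, assuming an existing GDF file such as the RD0005.gdf example referenced in the spec; the unit attributes and the Cosmology group described in the remaining steps would still have to be added afterwards.

import h5py as h5

f = h5.File("RD0005.gdf", "a")
f.create_group("GridData")
for name in list(f.keys()):
    if name in ("GridData", "simulation_parameters"):
        continue
    f.move(name, "GridData/" + name)       # everything else goes under /GridData
f.move("simulation_parameters", "SimulationParameters")
if "GridData/field_types" in f:
    del f["GridData/field_types"]          # dataset names serve as field definitions
f.close()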

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/docs/gdf_specification.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt
@@ -0,0 +1,282 @@
+Gridded Data Format
+===================
+
+This is a pre-release of version 1.0 of this format.  Lots of formats have come
+before, but this one is simple and will work with yt; the idea is to create an
+import and export function in yt that will read this, so that other codes (such
+as ZEUS-MP) can export directly to it or convert their data to it, and so that
+yt can export to it from any format it recognizes and reads.
+
+Caveats and Notes
+-----------------
+
+#. We avoid having many attributes on many nodes, as access can be quite slow
+#. Cartesian data only for now
+#. All grids must have the same number of ghost zones.
+#. If “/grid_parent” does not exist, parentage relationships will be
+   reconstructed and assumed to allow multiple grids
+#. No parentage can skip levels
+#. All grids are at the same time
+#. This format is designed for single-fluid calculations (with color fields)
+   but it should be viewed as extensible to multiple-fluids.
+#. All fluid quantities are assumed to be in every grid, filling every zone.  Inside
+   a given grid, for a given particle type, all the affiliated fields must be the
+   same length.  (i.e., dark matter's velocity must be the same in all dimensions.)
+#. Everything is in a single file; for extremely large datasets, the user may
+   utilize HDF5 external links to link to files other than the primary.  (This
+   enables, for instance, Enzo datasets to have only a thin wrapper that creates
+   this format.)
+#. All fluid fields in this version of the format are assumed to have the
+   dimensionality of the grid they reside in plus any ghost zones, plus any
+   additional dimensionality required by the staggering property.
+#. Particles may have dataspaces affiliated with them.  (See Enzo's
+   OutputParticleTypeGrouping for more information.)  This enables a light
+   wrapper around data formats with interspersed particle types.
+#. Boundary conditions are very simply specified -- future revisions
+   will feature more complicated and rich specifications for the boundary.
+
+Furthermore, we make a distinction between fluid quantities and particle
+quantities.  Particles remain affiliated with grid nodes.  Positions of
+particles are global, but this will change with future versions of this
+document.
+
+Format Declaration
+------------------
+
+The file type is HDF5.  We require version 1.8 or greater.  At the root level,
+this group must exist: ::
+
+   /gridded_data_format
+
+This must contain the (float) attribute ``format_version``.  This document
+describes version 1.0.  Optional attributes may exist:
+
+``data_software``
+   string, references the application creating the file, not the
+   author of the data
+``data_software_version``
+   string, should reference a unique version number
+``data_author``
+   string, references the person or persons who created the data,
+   should include an email address
+``data_comment``
+   string, anything about the data
+
+Top Level Nodes
+---------------
+
+At least five top-level groups must exist, although some may be empty. ::
+
+   /gridded_data_format
+   /data
+   /simulation_parameters
+   /field_types
+   /particle_types
+
+Additionally, the grid structure elements must exist.  The 0-indexed index into this array
+defines a unique "Grid ID".
+
+``/grid_left_index``
+   (int64, Nx3): global, relative to current level, and only the active region
+``/grid_dimensions``
+   (int64, Nx3): only the active regions
+``/grid_level``
+   (int64, N): level, indexed by zero
+``/grid_particle_count``
+   (int64, N): total number of particles.  (May change in subsequent versions.)
+``/grid_parent_id``
+   (int64, N): optional, may only reference a single parent
+
+Grid Fields
+-----------
+
+Underneath ``/data/`` there must be entries for every grid, of the format
+``/data/grid_%010i``.  These grids need no attributes, and underneath them
+datasets live.
+
+Fluid Fields
+++++++++++++
+
+For every grid we then define ``/data/grid_%010i/%(field)s``.
+
+Where ``%(field)s`` draws from all of the fields defined.  We define no
+standard for which fields must be present, only the names and units.  Units
+should always be ''proper'' cgs (or conversion factors should be supplied, below), and
+field names should be drawn from this list, with these names.  Not all fields
+must be represented.  Fields must extend beyond the active region if ghost zones
+are included.  All pre-defined fields are assumed to be cell-centered unless this
+is overridden in ``field_types``.
+
+  * ``density`` (g/cc)
+  * ``temperature`` (K)
+  * ``specific_thermal_energy`` (erg/g)
+  * ``specific_energy`` (erg/g, includes kinetic and magnetic)
+  * ``magnetic_energy`` (erg/g)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``species_density_%s`` (g/cc) where %s is the species name including ionization
+    state, such as H2I, HI, HII, CO, "elec" for electron
+  * ``mag_field_x``
+  * ``mag_field_y``
+  * ``mag_field_z``
+
+Particle Fields
++++++++++++++++
+
+Particles are more expensive to sort and identify based on "type" -- for
+instance, dark matter versus star particles.  The particles should be separated
+based on type, under the group ``/data/grid_%010i/particles/``.
+
+The particles group will have sub-groups, each of which will be named after the
+type of particle it represents.  We only specify "dark_matter" as a type;
+anything else must be specified as described below.
+
+Each node, for instance ``/data/grid_%010i/particles/dark_matter/``, must
+contain the following fields:
+
+  * ``mass`` (g)
+  * ``id``
+  * ``position_x`` (in physical units)
+  * ``position_y`` (in physical units)
+  * ``position_z`` (in physical units)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``dataspace`` (optional) an HDF5 dataspace to be used when opening
+    all affiliated fields.   If this is to be used, it must be appropriately set in
+    the particle type definition.  This is of type ``H5T_STD_REF_DSETREG``.
+    (See Enzo's OutputParticleTypeGrouping for an example.)
+
+Additional Fields
++++++++++++++++++
+
+Any additional fields from the data can be added, but must have a corresponding
+entry in the root field table (described below.)  The naming scheme is to be as
+explicit as possible, with units in cgs (or a conversion factor to the standard
+cgs unit, in the field table.)
+
+Attribute Table
+---------------
+
+In the root node, we define several groups which contain attributes.
+
+Simulation Parameters
++++++++++++++++++++++
+
+These attributes will all be associated with ``/simulation_parameters``.
+
+``refine_by``
+   relative global refinement
+``dimensionality``
+   1-, 2- or 3-D data
+``domain_dimensions``
+   dimensions in the top grid
+``current_time``
+   current time in simulation, in seconds, from “start” of simulation
+``domain_left_edge``
+   the left edge of the domain, in cm
+``domain_right_edge``
+   the right edge of the domain, in cm
+``unique_identifier``
+   regarded as a string, but can be anything
+``cosmological_simulation``
+   0 or 1
+``num_ghost_zones``
+   integer
+``field_ordering``
+   integer: 0 for C, 1 for Fortran
+``boundary_conditions``
+   integer (6): 0 for periodic, 1 for mirrored, 2 for outflow.  Needs one for each face
+   of the cube.  Any past the dimensionality should be set to -1.  The order of specification
+   goes left in 0th dimension, right in 0th dimension, left in 1st dimension, right in 1st dimension,
+   left in 2nd dimension, right in 2nd dimension.  Note also that yt does not currently support non-periodic
+   boundary conditions, and that the assumption of periodicity shows up primarily in plots and
+   covering grids.
+
+Optionally, attributes for cosmological simulations can be provided, if
+cosmological_simulation above is set to 1:
+
+  * current_redshift
+  * omega_matter (at z=0)
+  * omega_lambda (at z=0)
+  * hubble_constant (h100)
+
+Fluid Field Attributes
+++++++++++++++++++++++
+
+Every field that is included that is not both in CGS already and in the list
+above requires parameters.  If a field is in the above list but is not in CGS,
+only the field_to_cgs attribute is necessary.  These will be stored under
+``/field_types`` and each must possess the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.  Note that if non-CGS units are desired
+   this field should simply be viewed as the value by which field values are
+   multiplied to get to some internally consistent unit system.
+``field_units``
+   a string that names the units.
+``staggering``
+   an integer: 0 for cell-centered, 1 for face-centered, 2 for vertex-centered.
+   Non-cell-centered data will be linearly interpolated; more complicated
+   reconstruction will be defined in a future version of this standard; for 1.0
+   we only allow for simple definitions.
+
+Particle Types
+++++++++++++++
+
+Every particle type that is not recognized (i.e., all non-Dark Matter types)
+needs to have an entry under ``/particle_types``.  Each entry must possess the
+following attributes:
+
+``particle_type_name``
+   a string that will be used to describe the field; can contain spaces.
+``particle_use_dataspace``
+   (optional) if 1, the dataspace (see particle field definition above) will be used
+   for all particle fields for this type of particle.  Useful if a given type of particle
+   is embedded inside a larger list of different types of particle.
+``particle_type_num``
+   an integer giving the total number of particles of this type.
+
+For instance, to define a particle of type ``accreting_black_hole``, the file
+must contain ``/particle_types/accreting_black_hole``, with the
+``particle_type_name`` attribute of "Accreting Black Hole".
+
+Particle Field Attributes
++++++++++++++++++++++++++
+
+Every particle type that contains a new field (for instance, ``accretion_rate``)
+needs to have an entry under ``/particle_types/{particle_type_name}/{field_name}``
+containing the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.
+``field_units``
+   a string that names the units.
+
+Role of YT
+----------
+
+yt will provide a reader for this data, so that any data in this format can be
+used by the code.  Additionally, the names and specifications in this code
+reflect the internal yt data structures.
+
+yt will also provide a writer for this data, which will operate on any existing
+data format.  Provided that a simulation code can read this data, this will
+enable cross-platform comparison.  Furthermore, any external piece of software
+(i.e., Stranger) that implements reading this format will be able to read any
+format of data that yt understands.
+
+Example File
+------------
+
+An example file constructed from the ``RD0005-mine`` dataset is available
+at http://yt.enzotools.org/files/RD0005.gdf .  It is not yet a complete
+conversion, but it is a working proof of concept.  Readers and writers are
+forthcoming.


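To make the required layout concrete, here is a hedged sketch of a minimal single-grid file that satisfies the nodes described in the specification above, written with h5py and the same na alias the converters use; the 16^3 grid, the field values, and the file name are all illustrative.

import numpy as na
import h5py as h5

f = h5.File("minimal.gdf", "w")

# Format declaration.
g = f.create_group("gridded_data_format")
g.attrs["format_version"] = na.float32(1.0)

# Required top-level groups.
data_g = f.create_group("data")
field_g = f.create_group("field_types")
f.create_group("particle_types")
pars_g = f.create_group("simulation_parameters")

# Grid structure elements; one grid, so N = 1.
f.create_dataset("grid_left_index", data=na.zeros((1, 3), dtype='int64'))
f.create_dataset("grid_dimensions", data=na.array([[16, 16, 16]], dtype='int64'))
f.create_dataset("grid_level", data=na.zeros(1, dtype='int64'))
f.create_dataset("grid_particle_count", data=na.zeros((1, 1), dtype='int64'))
f.create_dataset("grid_parent_id", data=-1 * na.ones(1, dtype='int64'))

# A single cell-centered fluid field, in cgs, filling the active region.
grid0 = data_g.create_group("grid_%010i" % 0)
grid0.create_dataset("density", data=na.ones((16, 16, 16), dtype='float64'))

ft = field_g.create_group("density")
ft.attrs["field_name"] = "Density"
ft.attrs["field_to_cgs"] = 1.0
ft.attrs["field_units"] = "g/cc"
ft.attrs["staggering"] = 0

# Simulation parameters (values illustrative).
pars_g.attrs["refine_by"] = na.int64(2)
pars_g.attrs["dimensionality"] = na.int64(3)
pars_g.attrs["domain_dimensions"] = na.array([16, 16, 16], dtype='int64')
pars_g.attrs["current_time"] = 0.0
pars_g.attrs["domain_left_edge"] = na.zeros(3)
pars_g.attrs["domain_right_edge"] = na.ones(3)
pars_g.attrs["unique_identifier"] = "minimal_example"
pars_g.attrs["cosmological_simulation"] = na.int64(0)
pars_g.attrs["num_ghost_zones"] = na.int64(0)
pars_g.attrs["field_ordering"] = na.int64(0)
pars_g.attrs["boundary_conditions"] = na.int64([0] * 6)

f.close()
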
diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
@@ -0,0 +1,22 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaDistributedConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaDistributedConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/scripts/convert_single_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_single_athena.py
@@ -0,0 +1,23 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# i.e. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaDistributedConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+
+


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/grid_data_format/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('grid_data_format', parent_package, top_path)
+    config.add_subpackage("conversion")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -619,3 +619,16 @@
         # and check, use out array.
         result.append(na.mean(sorted[indexer], axis=axis, out=out))
     return na.array(result)
+
+def get_rotation_matrix(theta, rot_vector):
+    ux = rot_vector[0]
+    uy = rot_vector[1]
+    uz = rot_vector[2]
+    cost = na.cos(theta)
+    sint = na.sin(theta)
+    
+    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+                  [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
+                  [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+    
+    return R

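The helper added to math_utils.py is the standard axis-angle (Rodrigues) rotation matrix, R = cos(t) I + sin(t) [u]_x + (1 - cos(t)) u u^T. A quick sketch of the expected behavior, assuming the module-level signature (i.e. without a self argument):

import numpy as na
from yt.utilities.math_utils import get_rotation_matrix

# Rotating the x unit vector by 90 degrees about the z axis should give
# (approximately) the y unit vector.
R = get_rotation_matrix(na.pi / 2, na.array([0.0, 0.0, 1.0]))
print na.dot(R, na.array([1.0, 0.0, 0.0]))   # ~ [0. 1. 0.]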

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -58,6 +58,7 @@
             setattr(self, attr, getattr(obj, attr, None))
         if hasattr(obj, "pf"):
             self.output_hash = obj.pf._hash()
+            self._pf_mrep = obj.pf._mrep
 
     def __init__(self, obj):
         self._update_attrs(obj, self._attr_list)
@@ -93,6 +94,8 @@
         api_key = ytcfg.get("yt","hub_api_key")
         url = ytcfg.get("yt","hub_url")
         metadata, (final_name, chunks) = self._generate_post()
+        if hasattr(self, "_pf_mrep"):
+            self._pf_mrep.upload()
         for i in metadata:
             if isinstance(metadata[i], na.ndarray):
                 metadata[i] = metadata[i].tolist()
@@ -110,7 +113,15 @@
                                              'api_key' : api_key})
         request = urllib2.Request(url, datagen, headers)
         # Actually do the request, and get the response
-        rv = urllib2.urlopen(request).read()
+        try:
+            rv = urllib2.urlopen(request).read()
+        except urllib2.HTTPError as ex:
+            if ex.code == 401:
+                mylog.error("You must create an API key before uploading.")
+                mylog.error("https://data.yt-project.org/getting_started.html")
+                return
+            else:
+                raise ex
         uploader_info = json.loads(rv)
         new_url = url + "/handler/%s" % uploader_info['handler_uuid']
         for i, (cn, cv) in enumerate(chunks):
@@ -125,8 +136,9 @@
 
         datagen, headers = multipart_encode({'status' : 'FINAL'})
         request = urllib2.Request(new_url, datagen, headers)
-        rv = urllib2.urlopen(request).read()
-        return json.loads(rv)
+        rv = json.loads(urllib2.urlopen(request).read())
+        mylog.info("Upload succeeded!  View here: %s", rv['url'])
+        return rv
 
 class FilteredRepresentation(MinimalRepresentation):
     def _generate_post(self):
@@ -180,3 +192,25 @@
         chunks = [(fn, d) for fn, d in self.images]
         return (metadata, ('images', chunks))
 
+_hub_categories = ("News", "Documents", "Simulation Management",
+                   "Data Management", "Analysis and Visualization",
+                   "Paper Repositories", "Astrophysical Utilities",
+                   "yt Scripts")
+
+class MinimalProjectDescription(MinimalRepresentation):
+    type = "project"
+    _attr_list = ("title", "url", "description", "category", "image_url")
+
+    def __init__(self, title, url, description,
+                 category, image_url = ""):
+        assert(category in _hub_categories)
+        self.title = title
+        self.url = url
+        self.description = description
+        self.category = category
+        self.image_url = image_url
+
+    def _generate_post(self):
+        metadata = self._attrs
+        chunks = []
+        return (metadata, ("chunks", []))


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/orientation.py
--- /dev/null
+++ b/yt/utilities/orientation.py
@@ -0,0 +1,101 @@
+"""
+A class that manages the coordinate system for orientable data
+containers and cameras.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+License:
+  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+
+from yt.funcs import *
+from yt.utilities.math_utils import get_rotation_matrix
+
+class Orientation:
+    def __init__(self, normal_vector, north_vector=None, steady_north=False):
+        r"""An object that returns a set of basis vectors for orienting
+        cameras and data containers.
+
+        Parameters
+        ----------
+        center        : array_like
+           The current "center" of the view port -- the normal_vector connects
+           the center and the origin
+        normal_vector : array_like
+           A vector normal to the image plane
+        north_vector  : array_like, optional
+           The 'up' direction to orient the image plane.  
+           If not specified, gets calculated automatically
+        steady_north  : bool, optional
+           Boolean to control whether to normalize the north_vector
+           by subtracting off its projection onto the normal
+           vector.  Makes it easier to do rotations along a single
+           axis.  If north_vector is specified, is switched to
+           True.  Default: False
+           
+        """
+        self.steady_north = steady_north
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
+        if north_vector is not None: self.steady_north = True
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def _setup_normalized_vectors(self, normal_vector, north_vector):
+        # Now we set up our various vectors
+        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        if north_vector is None:
+            vecs = na.identity(3)
+            t = na.cross(normal_vector, vecs).sum(axis=1)
+            ax = t.argmax()
+            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = na.cross(normal_vector, east_vector).ravel()
+        else:
+            if self.steady_north:
+                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
+            east_vector = na.cross(north_vector, normal_vector).ravel()
+        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+        self.normal_vector = normal_vector
+        self.north_vector = north_vector
+        self.unit_vectors = [east_vector, north_vector, normal_vector]
+        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        
+    def switch_orientation(self, normal_vector=None, north_vector=None):
+        r"""Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes related
+        to an orientable object.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+        

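A short usage sketch of the new Orientation class; the viewing direction below is arbitrary, and unit_vectors comes back as the normalized (east, north, normal) basis built in _setup_normalized_vectors.

import numpy as na
from yt.utilities.orientation import Orientation

orient = Orientation(na.array([0.3, 0.4, 0.5]))
east, north, normal = orient.unit_vectors
# The basis vectors are unit length and mutually orthogonal:
print na.dot(east, north), na.dot(north, normal)   # both ~ 0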

diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -288,7 +288,7 @@
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
-            print 'Not enough resources available'
+            print 'Not enough resources available', size, self.available_ranks
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
@@ -315,15 +315,34 @@
         for wg in self.workgroups:
             self.free_workgroup(wg)
 
+    @classmethod
+    def from_sizes(cls, sizes):
+        sizes = ensure_list(sizes)
+        pool = cls()
+        rank = pool.comm.rank
+        for i,size in enumerate(sizes):
+            if iterable(size):
+                size, name = size
+            else:
+                name = "workgroup_%02i" % i
+            pool.add_workgroup(size, name = name)
+        for wg in pool.workgroups:
+            if rank in wg.ranks: workgroup = wg
+        return pool, workgroup
+
+    def __getitem__(self, key):
+        for wg in self.workgroups:
+            if wg.name == key: return wg
+        raise KeyError(key)
+
 class ResultsStorage(object):
     slots = ['result', 'result_id']
     result = None
     result_id = None
 
-def parallel_objects(objects, njobs, storage = None):
+def parallel_objects(objects, njobs = 0, storage = None, barrier = True):
     if not parallel_capable:
         njobs = 1
-        mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
     if njobs <= 0:
@@ -343,7 +362,11 @@
     obj_ids = na.arange(len(objects))
 
     to_share = {}
-    for result_id, obj in zip(obj_ids, objects)[my_new_id::njobs]:
+    # If our objects object is slice-aware, like time series data objects are,
+    # this will prevent intermediate objects from being created.
+    oiter = itertools.izip(obj_ids[my_new_id::njobs],
+                           objects[my_new_id::njobs])
+    for result_id, obj in oiter:
         if storage is not None:
             rstore = ResultsStorage()
             rstore.result_id = result_id
@@ -358,6 +381,8 @@
         new_storage = my_communicator.par_combine_object(
                 to_share, datatype = 'dict', op = 'join')
         storage.update(new_storage)
+    if barrier:
+        my_communicator.barrier()
 
 class CommunicationSystem(object):
     communicators = []
@@ -391,6 +416,9 @@
         self.communicators.pop()
         self._update_parallel_state(self.communicators[-1])
 
+def _reconstruct_communicator():
+    return communication_system.communicators[-1]
+
 class Communicator(object):
     comm = None
     _grids = None
@@ -405,6 +433,11 @@
     functions for analyzing something in parallel.
     """
 
+    def __reduce__(self):
+        # We don't try to reconstruct any of the properties of the communicator
+        # or the processors.  In general, we don't want to.
+        return (_reconstruct_communicator, ())
+
     def barrier(self):
         if not self._distributed: return
         mylog.debug("Opening MPI Barrier on %s", self.comm.rank)
@@ -503,29 +536,30 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def mpi_bcast(self, data):
+    def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
         if isinstance(data, na.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
-            if self.comm.rank == 0:
+            if self.comm.rank == root:
                 info = (data.shape, data.dtype)
             else:
                 info = ()
-            info = self.comm.bcast(info, root=0)
-            if self.comm.rank != 0:
+            info = self.comm.bcast(info, root=root)
+            if self.comm.rank != root:
                 data = na.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
-            self.comm.Bcast([data, mpi_type], root = 0)
+            self.comm.Bcast([data, mpi_type], root = root)
             return data
         else:
             # Use pickled methods.
-            data = self.comm.bcast(data, root = 0)
+            data = self.comm.bcast(data, root = root)
             return data
 
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
+        if len(fields) == 0: return
         mylog.debug("Preloading %s from %s grids", fields, len(grids))
         if not self._distributed: return
         io_handler.preload(grids, fields)
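
A minimal usage sketch of the new ProcessorPool.from_sizes and the revised
parallel_objects keywords, assuming yt has been started in parallel (e.g. under
mpirun with --parallel) on exactly six ranks; the workgroup names, sizes, and
object list below are illustrative only:

    from yt.mods import *
    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool, parallel_objects

    # Partition the available ranks into two named workgroups; every rank gets
    # back the shared pool plus the workgroup it was assigned to.
    pool, workgroup = ProcessorPool.from_sizes([(4, "render"), (2, "analyze")])
    if workgroup.name == "render":
        pass  # ranks in the "render" group do their work here
    wg = pool["analyze"]  # workgroups can also be looked up by name

    # njobs now defaults to 0 (one job per available rank), and a closing
    # barrier is issued unless barrier=False is passed.
    for obj in parallel_objects(range(8), njobs=2):
        pass  # each group of ranks handles its share of the objects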


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -168,6 +168,7 @@
     config.add_subpackage("kdtree")
     config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
+    config.add_subpackage("grid_data_format")
     config.add_subpackage("parallel_tools")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -374,6 +374,6 @@
                                self.data_source.center, self.data_source._inv_mat, indices,
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
-                               self.bounds).transpose()
+                               self.bounds)
         self[item] = buff
         return buff


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -150,6 +150,8 @@
         s1, s2 = bitmap_array.shape[:2]
         alpha_channel = 255*na.ones((s1,s2,1), dtype='uint8')
         bitmap_array = na.concatenate([bitmap_array, alpha_channel], axis=-1)
+    for channel in range(bitmap_array.shape[2]):
+        bitmap_array[:,:,channel] = bitmap_array[:,:,channel].T
     au.write_png(bitmap_array.copy(), filename)
     return bitmap_array
 


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -24,7 +24,10 @@
 """
 
 from matplotlib import figure
+import shutil
+import tempfile
 import numpy as na
+import os
 
 from yt.funcs import *
 
@@ -47,6 +50,8 @@
     PhasePlot, \
     LineQueryPlot, \
     ScatterPlot
+from yt.utilities.minimal_representation import \
+    MinimalImageCollectionData
 
 # No better place to put this
 def concatenate_pdfs(output_fn, input_fns):
@@ -60,6 +65,18 @@
 def _fix_axis(axis):
     return inv_axis_names.get(axis, axis)
 
+
+class ImageCollection(object):
+    def __init__(self, pf, name):
+        self.pf = pf
+        self.name = name
+        self.images = []
+        self.image_metadata = []
+
+    def add_image(self, fn, descr):
+        self.image_metadata.append(descr)
+        self.images.append((os.path.basename(fn), na.fromfile(fn, dtype='c')))
+
 class PlotCollection(object):
     __id_counter = 0
     def __init__(self, pf, center=None):
@@ -117,6 +134,19 @@
         for p in self.plots:
             yield p
 
+    @property
+    def _mrep(self):
+        ic = ImageCollection(self.pf, "Plot Collection with center %s" % self.c)
+        dd = tempfile.mkdtemp()
+        fns = self.save(os.path.join(dd, "temp"))
+        for fn, p in zip(fns, self.plots):
+            ic.add_image(fn, p._pretty_name())
+        shutil.rmtree(dd)
+        return MinimalImageCollectionData(ic)
+
+    def hub_upload(self):
+        self._mrep.upload()
+
     def save(self, basename=None, format="png", override=False, force_save=False):
         r"""Save out all the plots hanging off this plot collection, using
         generated names.
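
A short sketch of the new hub_upload entry point; the dataset path is
hypothetical and working yt Hub credentials are assumed.  Under the hood,
_mrep saves the collection's plots to a temporary directory, bundles the PNGs
into a MinimalImageCollectionData, and removes the directory again:

    from yt.mods import *

    pf = load("DD0010/DD0010")  # hypothetical dataset path
    pc = PlotCollection(pf, center=[0.5, 0.5, 0.5])
    pc.add_slice("Density", 0)
    pc.hub_upload()  # renders the plots and pushes them to the yt Hub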


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -679,6 +679,7 @@
         self.circle_args = circle_args
         self.text = text
         self.text_args = text_args
+        if self.text_args is None: self.text_args = {}
 
     def __call__(self, plot):
         from matplotlib.patches import Circle
@@ -696,7 +697,7 @@
         cir = Circle((center_x, center_y), radius, **self.circle_args)
         plot._axes.add_patch(cir)
         if self.text is not None:
-            plot._axes.text(center_x, center_y, "%s" % halo.id,
+            plot._axes.text(center_x, center_y, self.text,
                             **self.text_args)
 
 class HopCircleCallback(PlotCallback):
@@ -934,14 +935,15 @@
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
                  ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None):
+                 minimum_mass=None, alpha=1.0):
         """
         Adds particle positions, based on a thick slab along *axis* with a
         *width* along the line of sight.  *p_size* controls the number of
         pixels per particle, and *col* governs the color.  *ptype* will
         restrict plotted particles to only those that are of a given type.
         *minimum_mass* will require that the particles be of a given mass,
-        calculated via ParticleMassMsun, to be plotted.
+        calculated via ParticleMassMsun, to be plotted. *alpha* determines
+        each particle's opacity.
         """
         PlotCallback.__init__(self)
         self.width = width
@@ -953,6 +955,7 @@
         self.stars_only = stars_only
         self.dm_only = dm_only
         self.minimum_mass = minimum_mass
+        self.alpha = alpha
 
     def __call__(self, plot):
         data = plot.data
@@ -983,7 +986,7 @@
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
-                           s=self.p_size, c=self.color)
+                           s=self.p_size, c=self.color,alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
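
The new alpha keyword on the particle callback is passed straight through to
matplotlib's scatter call; a small illustrative sketch (dataset path
hypothetical):

    from yt.mods import *

    pf = load("DD0010/DD0010")  # hypothetical dataset path
    pc = PlotCollection(pf, center=[0.5, 0.5, 0.5])
    p = pc.add_projection("Density", "x")
    # Overlay particles from a slab 0.05 code units deep along the line of
    # sight, drawn at 30% opacity via the new alpha keyword.
    p.modify["particles"](0.05, p_size=2.0, col="k", alpha=0.3)
    pc.save("with_particles")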


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -295,6 +295,17 @@
             if not hasattr(c, '_type_name'): continue
             self.modify[c._type_name] = c
 
+    def _pretty_name(self):
+        width = self.im.get("Width", "NA")
+        unit = self.im.get("Unit", "NA")
+        field = self.axis_names.get("Z", self.axis_names.get("Field1"))
+        if hasattr(self.data, "_data_source"):
+            data = self.data._data_source
+        else:
+            data = self.data
+        return "%s: %s (%s %s) %s" % (self._type_name,
+            field, width, unit, data)
+
 class VMPlot(RavenPlot):
     _antialias = True
     _period = (0.0, 0.0)
@@ -493,6 +504,7 @@
         if self.colorbar != None:
             self.colorbar.set_label(str(data_label), **self.label_kws)
 
+
 class FixedResolutionPlot(VMPlot):
 
     # This is a great argument in favor of changing the name


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -39,5 +39,6 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
+
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -33,12 +33,13 @@
 from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
     arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
     arr_ang2pix_nest, arr_fisheye_vectors, rotate_vectors
+from yt.utilities.math_utils import get_rotation_matrix
+from yt.utilities.orientation import Orientation
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from numpy import pi
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -48,8 +49,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         r"""A viewpoint into a volume, for volume rendering.
 
         The camera represents the eye of an observer, which will be used to
@@ -64,7 +64,7 @@
             The vector between the camera position and the center.
         width : float or list of floats
             The current width of the image.  If a single float, the volume is
-            cubical, but if not, it is front/back, left/right, top/bottom.
+            cubical, but if not, it is left/right, top/bottom, front/back.
         resolution : int or list of ints
             The number of pixels in each direction.
         north_vector : array_like, optional
@@ -74,7 +74,7 @@
             Boolean to control whether to normalize the north_vector
             by subtracting off the dot product of it and the normal
             vector.  Makes it easier to do rotations along a single
-            axis.  If north_vector is specifies, is switched to
+            axis.  If north_vector is specified, this is switched to
             True. Default: False
         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
             The volume to ray cast through.  Can be specified for finer-grained
@@ -136,12 +136,6 @@
             prone to longer data IO times.  If all the data can fit in
             memory on each cpu, this can be the fastest option for
             multiple ray casts on the same dataset.
-        expand_factor: float, optional
-            A parameter to be used with the PerspectiveCamera.
-            Controls how much larger a volume to render, which is
-            currently difficult to gauge for the PerspectiveCamera.
-            For full box renders, values in the 2.0-3.0 range seem to
-            produce desirable results. Default: 1.0
         le: array_like, optional
             Specifies the left edge of the volume to be rendered.
             Currently only works with use_kd=True.
@@ -187,24 +181,14 @@
         self.resolution = resolution
         self.sub_samples = sub_samples
         if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
-        self.width = width
-        self.center = center
-        self.steady_north = steady_north
-        self.expand_factor = expand_factor
-        # This seems to be necessary for now.  Not sure what goes wrong when not true.
-        if na.all(north_vector == normal_vector):
-            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
-            north_vector == None
-        if north_vector is not None: self.steady_north=True
-        self.north_vector = north_vector
-        self.rotation_vector = north_vector
+            width = (width, width, width) # left/right, top/bottom, front/back 
+        self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
-        self._setup_normalized_vectors(normal_vector, north_vector)
         self.log_fields = log_fields
         self.use_kd = use_kd
         self.l_max = l_max
@@ -223,40 +207,21 @@
             self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def _setup_normalized_vectors(self, normal_vector, north_vector):
-        # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            if self.rotation_vector is None:
-                self.rotation_vector=north_vector
-        else:
-            if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.normal_vector = normal_vector
-        self.unit_vectors = [north_vector, east_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
-                                  - 0.5*self.width[1]*self.unit_vectors[1] \
-                                  - 0.5*self.width[2]*self.unit_vectors[2]
-        self.back_center = self.center - 0.5*self.width[0]*self.unit_vectors[2]
-        self.front_center = self.center + 0.5*self.width[0]*self.unit_vectors[2]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+    def _setup_box_properties(self, width, center, unit_vectors):
+        self.width = width
+        self.center = center
+        self.box_vectors = na.array([unit_vectors[0]*width[0],
+                                     unit_vectors[1]*width[1],
+                                     unit_vectors[2]*width[2]])
+        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.back_center =  center - 0.5*width[2]*unit_vectors[2]
+        self.front_center = center + 0.5*width[2]*unit_vectors[2]         
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera to point at a new location.
+        This will recalculate all the necessary vectors and vector planes to orient
+        the image plane so that it points at a new location.
 
         Parameters
         ----------
@@ -268,13 +233,14 @@
             calculated automatically.
         """
         normal_vector = self.front_center - new_center
-        self._setup_normalized_vectors(normal_vector, north_vector)
+        self.orienter.switch_orientation(normal_vector=normal_vector,
+                                         north_vector = north_vector)
 
     def switch_view(self, normal_vector=None, width=None, center=None, north_vector=None):
-        r"""Change the view direction based on any of the view parameters.
+        r"""Change the view based on any of the view parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera with new normal vectors, widths, centers, or north vectors.
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
 
         Parameters
         ----------
@@ -282,7 +248,7 @@
             The new looking vector.
         width: float or array of floats, optional
             The new width.  Can be a single value W -> [W,W,W] or an
-            array [W1, W2, W3]
+            array [W1, W2, W3] (left/right, top/bottom, front/back)
         center: array_like, optional
             Specifies the new center.
         north_vector : array_like, optional
@@ -292,16 +258,18 @@
         if width is None:
             width = self.width
         if not iterable(width):
-            width = (width, width, width) # front/back, left/right, top/bottom
+            width = (width, width, width) # left/right, top/bottom, front/back
         self.width = width
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.north_vector
+            north_vector = self.orienter.north_vector
         if normal_vector is None:
-            normal_vector = self.front_center-self.center
-        self._setup_normalized_vectors(normal_vector, north_vector)
-        
+            normal_vector = self.front_center - self.center
+        self.orienter.switch_orientation(normal_vector = normal_vector,
+                                         north_vector = north_vector)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
+
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -310,8 +278,7 @@
                          self.resolution[0])[:,None]
         py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -320,8 +287,8 @@
         bounds = (px.min(), px.max(), py.min(), py.max())
         vector_plane = VectorPlane(positions, self.box_vectors[2],
                                       self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
         return vector_plane
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False):
@@ -393,8 +360,7 @@
 
         """
         self.width = [w / factor for w in self.width]
-        self._setup_normalized_vectors(
-                self.unit_vectors[2], self.unit_vectors[0])
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
 
     def zoomin(self, final, n_steps, clip_ratio = None):
         r"""Loop over a zoomin and return snapshots along the way.
@@ -426,8 +392,7 @@
             yield self.snapshot(clip_ratio = clip_ratio)
 
     def move_to(self, final, n_steps, final_width=None, exponential=False, clip_ratio = None):
-        r"""Loop over a look_at
-
+        r"""
         This will yield `n_steps` snapshots until the current view has been
         moved to a final center of `final` with a final width of final_width.
 
@@ -459,23 +424,23 @@
             if final_width is not None:
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
-                    # front/back, left/right, top/bottom
+                    # left/right, top/bottom, front/back 
                 if (self.center == 0.0).all():
                     self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
             else:
-                dW = 1.0
+                dW = na.array([1.0,1.0,1.0])
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
             if final_width is not None:
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
-                    # front/back, left/right, top/bottom
+                    # left/right, top/bottom, front/back
                 dW = (1.0*final_width-na.array(self.width))/n_steps
             else:
-                dW = 1.0
+                dW = na.array([0.0,0.0,0.0])
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
@@ -507,15 +472,7 @@
         if rot_vector is None:
             rot_vector = self.rotation_vector
             
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+        R = get_rotation_matrix(theta, rot_vector)
 
         normal_vector = self.front_center-self.center
 
@@ -564,8 +521,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         self.frames = []
         Camera.__init__(self, center, normal_vector, width,
                  resolution, transfer_function,
@@ -574,8 +530,7 @@
                  log_fields = log_fields,
                  sub_samples = sub_samples, pf = pf,
                  use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type,expand_factor=expand_factor,
-                 le=le, re=re)
+                 tree_type=tree_type,le=le, re=re)
 
     def snapshot(self, fn = None, clip_ratio = None):
         import matplotlib
@@ -613,6 +568,26 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain', expand_factor = 1.0,
+                 le=None, re=None):
+        self.expand_factor = expand_factor
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = north_vector, steady_north=steady_north,
+                 volume = volume, fields = fields,
+                 log_fields = log_fields,
+                 sub_samples = sub_samples, pf = pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type, le=le, re=re)
+        
+
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -624,8 +599,7 @@
                          self.resolution[0])[:,None]
         py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -640,8 +614,8 @@
 
         vector_plane = VectorPlane(positions, vectors,
                                       self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
         return vector_plane
 
 def corners(left_edge, right_edge):
@@ -1065,7 +1039,7 @@
             rot_vector = na.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
-            self.rotation_matrix = self.get_rotation_matrix(angle,rot_vector)
+            self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
@@ -1182,7 +1156,7 @@
         
         dist = ((self.focal_center - self.center)**2).sum()**0.5
         
-        R = self.get_rotation_matrix(theta, rot_vector)
+        R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
         self.normal_vector = na.dot(R,self.normal_vector)
@@ -1256,19 +1230,6 @@
                 self.center += dx
             yield self.snapshot()
 
-    def get_rotation_matrix(self, theta, rot_vector):
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
-
-        return R
-
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, volume = None, no_ghost = True,
                         north_vector = None):
@@ -1291,7 +1252,7 @@
         The vector between the camera position and the center.
     width : float or list of floats
         The current width of the image.  If a single float, the volume is
-        cubical, but if not, it is front/back, left/right, top/bottom.
+        cubical, but if not, it is left/right, top/bottom, front/back.
     resolution : int or list of ints
         The number of pixels in each direction.
     field : string
@@ -1330,8 +1291,13 @@
     fields = [field]
     if weight is not None:
         # This is a temporary field, which we will remove at the end.
+        def _wf(f1, w1):
+            def WeightField(field, data):
+                return data[f1].astype("float64") * \
+                       data[w1].astype("float64")
+            return WeightField
         pf.field_info.add_field("temp_weightfield",
-            function=lambda a,b:b[field]*b[weight])
+                    function=_wf(field, weight))
         fields = ["temp_weightfield", weight]
         tf = ProjectionTransferFunction(n_fields = 2)
     tf = ProjectionTransferFunction(n_fields = len(fields))
@@ -1342,10 +1308,17 @@
                       north_vector = north_vector)
     vals = cam.snapshot()
     image = vals[:,:,0]
+    if iterable(width):
+        depth = width[2]
+    else:
+        depth = width
     if weight is None:
-        dl = width * pf.units[pf.field_info[field].projection_conversion]
+        dl = depth * pf.units[pf.field_info[field].projection_conversion]
         image *= dl
     else:
         image /= vals[:,:,1]
         pf.field_info.pop("temp_weightfield")
+        for g in pf.h.grids:
+            if "temp_weightfield" in g.keys():
+                del g["temp_weightfield"]
     return image
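
With the orientation refactor a list-valued width is ordered left/right,
top/bottom, front/back, and off_axis_projection now takes its integration
depth from the third entry; a hedged usage sketch (dataset path and bounds are
hypothetical):

    import numpy as na
    from yt.mods import *
    from yt.visualization.volume_rendering.api import off_axis_projection
    from yt.visualization.image_writer import write_image

    pf = load("DD0010/DD0010")      # hypothetical dataset path
    L = [1.0, 0.4, 0.2]             # line-of-sight (normal) vector
    W = [0.3, 0.3, 0.1]             # left/right, top/bottom, front/back (depth)
    # The weighted branch builds its temporary weight field with a closure and
    # scrubs it from the grids once the projection is done.
    image = off_axis_projection(pf, [0.5, 0.5, 0.5], L, W, 512,
                                "Density", weight="Temperature")
    write_image(na.log10(image), "offaxis_density.png")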


diff -r 2bbd3f126279501d3aa2d6acd308e80006078ca3 -r 65db3370fd9b8f113d595fb295ad3182952f04ca yt/visualization/volume_rendering/grid_partitioner.py
--- a/yt/visualization/volume_rendering/grid_partitioner.py
+++ b/yt/visualization/volume_rendering/grid_partitioner.py
@@ -41,7 +41,8 @@
 class HomogenizedVolume(ParallelAnalysisInterface):
     bricks = None
     def __init__(self, fields = "Density", source = None, pf = None,
-                 log_fields = None, no_ghost = False):
+                 log_fields = None, no_ghost = False,
+                 max_level = 48):
         # Typically, initialized as hanging off a hierarchy.  But, not always.
         ParallelAnalysisInterface.__init__(self)
         self.no_ghost = no_ghost
@@ -54,6 +55,7 @@
         else:
             log_fields = [self.pf.field_info[field].take_log
                          for field in self.fields]
+        self.max_level = max_level
         self.log_fields = log_fields
 
     def traverse(self, back_point, front_point, image):
@@ -84,8 +86,13 @@
         PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
 
         pgs = []
+        cm = grid.child_mask.copy()
+        if grid.Level > self.max_level:
+            return pgs
+        elif grid.Level == self.max_level:
+            cm[:] = 1
         for P in PP.sweep(0):
-            sl = P.get_brick(grid.LeftEdge, grid.dds, grid.child_mask)
+            sl = P.get_brick(grid.LeftEdge, grid.dds, cm)
             if len(sl) == 0: continue
             dd = [d[sl[0][0]:sl[0][1]+1,
                     sl[1][0]:sl[1][1]+1,
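
The new max_level keyword caps how deep the brick decomposition descends:
grids above max_level are skipped outright, and grids at max_level are treated
as leaves by overriding their child_mask.  A sketch of feeding such a volume
to a camera (dataset path and transfer-function bounds are hypothetical):

    from yt.mods import *
    from yt.visualization.volume_rendering.grid_partitioner import \
        HomogenizedVolume

    pf = load("DD0010/DD0010")  # hypothetical dataset path
    # Only partition grids up to level 6; coarser image, much smaller memory use.
    vol = HomogenizedVolume(fields=["Density"], pf=pf, max_level=6)
    tf = ColorTransferFunction((-30.0, -25.0))  # illustrative log-density bounds
    tf.add_layers(4)
    cam = pf.h.camera([0.5, 0.5, 0.5], [1.0, 1.0, 1.0], 0.3, 512, tf, volume=vol)
    cam.snapshot("capped_render.png")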



https://bitbucket.org/yt_analysis/yt/changeset/8394fd8f42d0/
changeset:   8394fd8f42d0
branch:      yt
user:        gsiisg
date:        2012-06-22 23:35:48
summary:     moved RX,Y,Z to math_utils.py
affected #:  4 files

diff -r 65db3370fd9b8f113d595fb295ad3182952f04ca -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -58,22 +58,14 @@
     ParallelAnalysisInterface, \
     parallel_blocking_call
 
+from yt.utilities.math_utils import RX
+from yt.utilities.math_utils import RY
+from yt.utilities.math_utils import RZ
+
 TINY = 1.e-40
 
 # Ellipsoid functions.
-# define the rotation matrix needed later
-def RX(ax):
-    rot_matrix = na.array([[1, 0, 0], [0, na.cos(ax), na.sin(ax)],
-        [0, -na.sin(ax), na.cos(ax)]])
-    return rot_matrix
-def RY(ay):
-    rot_matrix = na.array([[na.cos(ay), 0, -na.sin(ay)], [0, 1, 0],
-        [na.sin(ay), 0, na.cos(ay)]])
-    return rot_matrix
-def RZ(az):
-    rot_matrix = na.array([[na.cos(az), na.sin(az), 0],
-        [-na.sin(az), na.cos(az), 0], [0, 0, 1]])
-    return rot_matrix
+# Rotation matrices RX, RY, RZ are imported from math_utils at the top.
 
 class Halo(object):
     """
@@ -463,8 +455,7 @@
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
         if na.size(self["particle_position_x"]) < 4:
-            print "not enough particles to form ellipsoid returning zeros"
-            return (0, 0, 0, 0, 0, 0, 0)
+            raise YTNotEnoughParticles(na.size(self["particle_position_x"]))
         # Calculate the parameters that describe the ellipsoid of
         # the particles that constitute the halo. This function returns
         # all the parameters except for the center of mass.


diff -r 65db3370fd9b8f113d595fb295ad3182952f04ca -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -58,6 +58,9 @@
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
 from yt.utilities.orientation import Orientation
+from yt.utilities.math_utils import RX
+from yt.utilities.math_utils import RY
+from yt.utilities.math_utils import RZ
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -3422,23 +3425,7 @@
         self._e0 = e0
         self._tilt = tilt
         
-        # define the rotation matrix needed later
-        def RX(ax):
-            rot_matrix = na.array([[1, 0, 0], \
-                                   [0, na.cos(ax), na.sin(ax)], \
-                                   [0,-na.sin(ax), na.cos(ax)]])
-            return rot_matrix
-        def RY(ay):
-            rot_matrix = na.array([[na.cos(ay), 0,-na.sin(ay)], \
-                                   [0, 1, 0], \
-                                   [na.sin(ay), 0, na.cos(ay)]])
-            return rot_matrix
-        def RZ(az):
-            rot_matrix = na.array([[na.cos(az), na.sin(az), 0], \
-                                   [-na.sin(az), na.cos(az), 0], \
-                                   [0, 0, 1]])
-            return rot_matrix
-
+        # Rotation matrices RX, RY, RZ are imported from math_utils at the top.
         # find the t1 angle needed to rotate about z axis to align e0 to x
         t1 = na.arctan(e0[1] / e0[0])
         # rotate e0 by -t1


diff -r 65db3370fd9b8f113d595fb295ad3182952f04ca -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -67,3 +67,11 @@
         if self.obj_type == "slice":
             s += "  It may lie on a grid face.  Try offsetting slightly."
         return s
+
+class YTNotEnoughParticles(YTException):
+    def __init__(self, nparticles):
+        self.nparticles = nparticles
+        
+    def __str__(self):
+        return "There are %i particles, not enough for ellipsoid" % \
+            (self.nparticles)


diff -r 65db3370fd9b8f113d595fb295ad3182952f04ca -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -5,6 +5,8 @@
 Affiliation: UCSD Physics/CASS
 Author: Stephen Skory <s at skory.us>
 Affiliation: UCSD Physics/CASS
+Author: Geoffrey So <gsiisg at gmail.com>
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -632,3 +634,61 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def RX(ax):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the x-axis as an array
+
+    Example
+    -------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RX
+    >>> RX(na.pi)
+    array([[  1.00000000e+00,   0.00000000e+00,   0.00000000e+00],
+           [  0.00000000e+00,  -1.00000000e+00,   1.22464680e-16],
+           [  0.00000000e+00,  -1.22464680e-16,  -1.00000000e+00]])
+    """
+    rot_matrix = na.array([[1, 0, 0], \
+                           [0, na.cos(ax), na.sin(ax)], \
+                           [0,-na.sin(ax), na.cos(ax)]])
+    return rot_matrix
+def RY(ay):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the y-axis as an array
+
+    Example
+    -------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RY
+    >>> RY(na.pi)
+    array([[ -1.00000000e+00,   0.00000000e+00,  -1.22464680e-16],
+           [  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
+           [  1.22464680e-16,   0.00000000e+00,  -1.00000000e+00]])
+    """
+    rot_matrix = na.array([[na.cos(ay), 0,-na.sin(ay)], \
+                           [0, 1, 0], \
+                           [na.sin(ay), 0, na.cos(ay)]])
+    return rot_matrix
+def RZ(az):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the z-axis as an array
+
+    Example
+    -------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RZ
+    >>> RZ(na.pi)
+    array([[ -1.00000000e+00,   1.22464680e-16,   0.00000000e+00],
+           [ -1.22464680e-16,  -1.00000000e+00,   0.00000000e+00],
+           [  0.00000000e+00,   0.00000000e+00,   1.00000000e+00]])
+    """
+    rot_matrix = na.array([[na.cos(az), na.sin(az), 0], \
+                           [-na.sin(az), na.cos(az), 0], \
+                           [0, 0, 1]])
+    return rot_matrix
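
For reference, a small sketch of how these rotations compose to line an
arbitrary unit vector up with the x-axis, mirroring the two-step alignment
(rotate about z, then about y) used by the ellipsoid parameter code; the
vector here is made up:

    import numpy as na
    from yt.utilities.math_utils import RY, RZ

    e0 = na.array([0.6, 0.48, 0.64])   # arbitrary unit vector
    t1 = na.arctan(e0[1] / e0[0])      # angle about z that zeroes the y component
    r1 = na.dot(RZ(t1), e0)            # r1 = (sqrt(e0_x**2 + e0_y**2), 0, e0_z)
    t2 = na.arctan(-r1[2] / r1[0])     # angle about y that zeroes the z component
    r2 = na.dot(RY(t2), r1)            # r2 is (1, 0, 0) up to roundoff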



https://bitbucket.org/yt_analysis/yt/changeset/4cdfc5f75d9a/
changeset:   4cdfc5f75d9a
branch:      yt
user:        gsiisg
date:        2012-06-22 23:51:13
summary:     changed the not-enough-particles-for-ellipsoid exception to a mylog.warning instead
affected #:  2 files

diff -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 -r 4cdfc5f75d9a1278964f7391727114abefdbfcfe yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -455,7 +455,8 @@
         # that is almost certainly never to occur,
         # will deal with it later if it ever comes up
         if na.size(self["particle_position_x"]) < 4:
-            raise YTNotEnoughParticles(na.size(self["particle_position_x"]))
+            mylog.warning("Too few particles for ellipsoid parameters.")
+            return (0, 0, 0, 0, 0, 0, 0)
         # Calculate the parameters that describe the ellipsoid of
         # the particles that constitute the halo. This function returns
         # all the parameters except for the center of mass.


diff -r 8394fd8f42d0125e583b50cf0ba344b4b48e35a1 -r 4cdfc5f75d9a1278964f7391727114abefdbfcfe yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -67,11 +67,3 @@
         if self.obj_type == "slice":
             s += "  It may lie on a grid face.  Try offsetting slightly."
         return s
-
-class YTNotEnoughParticles(YTException):
-    def __init__(self, nparticles):
-        self.nparticles = nparticles
-        
-    def __str__(self):
-        return "There are %i particles, not enough for ellipsoid" % \
-            (self.nparticles)



https://bitbucket.org/yt_analysis/yt/changeset/552bf6a9b55f/
changeset:   552bf6a9b55f
branch:      yt
user:        gsiisg
date:        2012-06-23 00:01:03
summary:     merged
affected #:  124 files
Diff too large to display.

https://bitbucket.org/yt_analysis/yt/changeset/d4b17fba139c/
changeset:   d4b17fba139c
branch:      yt
user:        gsiisg
date:        2012-06-23 00:20:03
summary:     redefined RX, RY, RZ using Nathan's get_rotation_matrix; mine apparently uses the opposite sign convention from his, so the input angles are negated
affected #:  1 file

diff -r 552bf6a9b55fcaf455712c221f0d4f3cfc18bfd0 -r d4b17fba139ce9815177b1838e11c03ee9ee8265 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -650,9 +650,7 @@
                [  0.00000000e+00,  -1.00000000e+00,   1.22464680e-16],
                [  0.00000000e+00,  -1.22464680e-16,  -1.00000000e+00]])
     """
-    rot_matrix = na.array([[1, 0, 0], \
-                           [0, na.cos(ax), na.sin(ax)], \
-                           [0,-na.sin(ax), na.cos(ax)]])
+    rot_matrix = get_rotation_matrix(-ax,(1,0,0))
     return rot_matrix
 def RY(ay):
     """
@@ -669,9 +667,7 @@
                [  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
                [  1.22464680e-16,   0.00000000e+00,  -1.00000000e+00]])
     """
-    rot_matrix = na.array([[na.cos(ay), 0,-na.sin(ay)], \
-                           [0, 1, 0], \
-                           [na.sin(ay), 0, na.cos(ay)]])
+    rot_matrix = get_rotation_matrix(-ay,(0,1,0))
     return rot_matrix
 def RZ(az):
     """
@@ -688,7 +684,5 @@
                [ -1.22464680e-16,  -1.00000000e+00,   0.00000000e+00],
                [  0.00000000e+00,   0.00000000e+00,   1.00000000e+00]])
     """
-    rot_matrix = na.array([[na.cos(az), na.sin(az), 0], \
-                           [-na.sin(az), na.cos(az), 0], \
-                           [0, 0, 1]])
+    rot_matrix = get_rotation_matrix(-az,(0,0,1))
     return rot_matrix



https://bitbucket.org/yt_analysis/yt/changeset/5052c5ca60c3/
changeset:   5052c5ca60c3
branch:      yt
user:        MatthewTurk
date:        2012-06-23 14:29:25
summary:     Merged in gsiisg/yt (pull request #173)
affected #:  4 files

diff -r 01d7918185b68265f5ad41bbe332ae3aa71e7205 -r 5052c5ca60c3aa8f309af54b2a4cf490a4391698 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -5,6 +5,8 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Stephen Skory <s at skory.us>
 Affiliation: UCSD Physics/CASS
+Author: Geoffrey So <gsiisg at gmail.com> (Ellipsoidal functions)
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -56,8 +58,14 @@
     ParallelAnalysisInterface, \
     parallel_blocking_call
 
+from yt.utilities.math_utils import RX
+from yt.utilities.math_utils import RY
+from yt.utilities.math_utils import RZ
+
 TINY = 1.e-40
 
+# Ellipsoid functions.
+# Rotation matrices RX, RY, RZ are imported from math_utils at the top.
 
 class Halo(object):
     """
@@ -79,6 +87,8 @@
         self.id = id
         self.data = halo_list._data_source
         self.pf = self.data.pf
+        self.gridsize = (self.pf.domain_right_edge - \
+                 self.pf.domain_left_edge)
         if indices is not None:
             self.indices = halo_list._base_indices[indices]
         else:
@@ -435,8 +445,96 @@
             self.mass_bins[i + 1] += self.mass_bins[i]
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
-        (4. / 3. * math.pi * rho_crit * \
-        (self.radial_bins * cm) ** 3.0)
+        (4./3. * math.pi * rho_crit * \
+        (self.radial_bins * cm)**3.0)
+        
+    def _get_ellipsoid_parameters_basic(self):
+        na.seterr(all='ignore')
+        # check if there are 4 particles to form an ellipsoid
+        # neglecting to check if the 4 particles in the same plane,
+        # that is almost certainly never to occur,
+        # will deal with it later if it ever comes up
+        if na.size(self["particle_position_x"]) < 4:
+            mylog.warning("Too few particles for ellipsoid parameters.")
+            return (0, 0, 0, 0, 0, 0, 0)
+        # Calculate the parameters that describe the ellipsoid of
+        # the particles that constitute the halo. This function returns
+        # all the parameters except for the center of mass.
+        com = self.center_of_mass()
+        position = [self["particle_position_x"],
+		    self["particle_position_y"],
+		    self["particle_position_z"]]
+        # Locate the furthest particle from com, its vector length and index
+	DW = na.array([self.gridsize[0],self.gridsize[1],self.gridsize[2]])
+	position = [position[0] - com[0],
+		    position[1] - com[1],
+		    position[2] - com[2]]
+	# different cases of particles being on other side of boundary
+	for axis in range(na.size(DW)):
+	    cases = na.array([position[axis],
+	  		      position[axis] + DW[axis],
+			      position[axis] - DW[axis]])        
+            # pick out the smallest absolute distance from com
+            position[axis] = na.choose(na.abs(cases).argmin(axis=0), cases)
+	# find the furthest particle's index
+	r = na.sqrt(position[0]**2 +
+		    position[1]**2 +
+		    position[2]**2)
+        A_index = r.argmax()
+        mag_A = r.max()
+        # designate the A vector
+	A_vector = (position[0][A_index],
+		    position[1][A_index],
+		    position[2][A_index])
+        # designate the e0 unit vector
+        e0_vector = A_vector / mag_A
+        # locate the tB particle position by finding the max B
+	e0_vector_copy = na.empty((na.size(position[0]), 3), dtype='float64')
+        for i in range(3):
+            e0_vector_copy[:, i] = e0_vector[i]
+        rr = na.array([position[0],
+		       position[1],
+		       position[2]]).T # Similar to tB_vector in old code.
+        tC_vector = na.cross(e0_vector_copy, rr)
+        te2 = tC_vector.copy()
+        for dim in range(3):
+            te2[:,dim] *= na.sum(tC_vector**2., axis = 1)**(-0.5)
+        te1 = na.cross(te2, e0_vector_copy)
+        length = na.abs(-na.sum(rr * te1, axis = 1) * \
+            (1. - na.sum(rr * e0_vector_copy, axis = 1)**2. * \
+            mag_A**-2.)**(-0.5))
+        # This problem apparently happens sometimes, that the NaNs are turned
+        # into infs, which messes up the nanargmax below.
+        length[length == na.inf] = 0.
+        tB_index = na.nanargmax(length) # ignores NaNs created above.
+        mag_B = length[tB_index]
+        e1_vector = te1[tB_index]
+        e2_vector = te2[tB_index]
+        temp_e0 = rr.copy()
+        temp_e1 = rr.copy()
+        temp_e2 = rr.copy()
+        for dim in range(3):
+            temp_e0[:,dim] = e0_vector[dim]
+            temp_e1[:,dim] = e1_vector[dim]
+            temp_e2[:,dim] = e2_vector[dim]
+        length = na.abs(na.sum(rr * temp_e2, axis = 1) * (1 - \
+            na.sum(rr * temp_e0, axis = 1)**2. * mag_A**-2. - \
+            na.sum(rr * temp_e1, axis = 1)**2. * mag_B**-2)**(-0.5))
+        length[length == na.inf] = 0.
+        tC_index = na.nanargmax(length)
+        mag_C = length[tC_index]
+        # tilt is calculated from the rotation about x axis
+        # needed to align e1 vector with the y axis
+        # after e0 is aligned with x axis
+        # find the t1 angle needed to rotate about z axis to align e0 to x
+        t1 = na.arctan(e0_vector[1] / e0_vector[0])
+        r1 = (e0_vector * RZ(t1).transpose()).sum(axis = 1)
+        # find the t2 angle needed to rotate about y axis to align e0 to x
+        t2 = na.arctan(-r1[2] / r1[0])
+        r2 = na.dot(RY(t2), na.dot(RZ(t1), e1_vector))
+        tilt = na.arctan(r2[2]/r2[1])
+        return (mag_A, mag_B, mag_C, e0_vector[0], e0_vector[1],
+            e0_vector[2], tilt)
 
 class RockstarHalo(Halo):
     def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
@@ -524,6 +622,61 @@
         return None
 
 
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+        
+        Parameters
+        ----------
+        None
+        
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e0_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e0_vector as an array.
+              #. tilt as a float.
+        
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+        """
+        basic_parameters = self._get_ellipsoid_parameters_basic()
+        toreturn = [self.center_of_mass()]
+        updated = [basic_parameters[0], basic_parameters[1],
+            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
+        toreturn.extend(updated)
+        return tuple(toreturn)
+    
+    def get_ellipsoid(self):
+        r"""Returns an ellipsoidal data object.
+        
+        This will generate a new, empty ellipsoidal data object for this
+        halo.
+        
+        Parameters
+        ----------
+        None.
+        
+        Returns
+        -------
+        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+            The ellipsoidal data object.
+        
+        Examples
+        --------
+        >>> ell = halos[0].get_ellipsoid()
+        """
+        ep = self.get_ellipsoid_parameters()
+        ell = self.data.hierarchy.ellipsoid(ep[0], ep[1], ep[2], ep[3],
+            ep[4], ep[5])
+        return ell
+    
 class HOPHalo(Halo):
     _name = "HOPHalo"
     pass
@@ -600,6 +753,39 @@
         (4. / 3. * math.pi * rho_crit * \
         (self.radial_bins * self.data.pf["cm"]) ** 3.0)
 
+    def _get_ellipsoid_parameters_basic(self):
+        mylog.error("Ellipsoid calculation does not work for parallelHF halos." + \
+        " Please save the halos using .dump(), and reload them using" + \
+        " LoadHaloes() to use this function.")
+        return None
+
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+        
+        Parameters
+        ----------
+        None
+        
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e1_vector as an array.
+              #. tilt as a float.
+        
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+        """
+        mylog.error("get_ellipsoid_parameters does not work for parallelHF halos." + \
+        " Please save the halos using .dump(), and reload them using" + \
+        " LoadHaloes() to use this function.")
+        return None
 
 class FOFHalo(Halo):
 
@@ -614,10 +800,14 @@
 
 class LoadedHalo(Halo):
     def __init__(self, pf, id, size=None, CoM=None,
-        max_dens_point=None, group_total_mass=None,
-        max_radius=None, bulk_vel=None,
-        rms_vel=None, fnames=None):
+
+        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
+        e1_vec=None, tilt=None):
+
         self.pf = pf
+        self.gridsize = (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
         self.id = id
         self.size = size
         self.CoM = CoM
@@ -626,6 +816,11 @@
         self.max_radius = max_radius
         self.bulk_vel = bulk_vel
         self.rms_vel = rms_vel
+        self.mag_A = mag_A
+        self.mag_B = mag_B
+        self.mag_C = mag_C
+        self.e1_vec = e1_vec
+        self.tilt = tilt
         # locs=the names of the h5 files that have particle data for this halo
         self.fnames = fnames
         self.bin_count = None
@@ -706,6 +901,68 @@
             del f
         return field_data
 
+    def _get_ellipsoid_parameters_basic_loadedhalo(self):
+        if self.mag_A is not None:
+            return (self.mag_A, self.mag_B, self.mag_C, self.e1_vec[0],
+                self.e1_vec[1], self.e1_vec[2], self.tilt)
+        else:
+            return self._get_ellipsoid_parameters_basic()
+
+    def get_ellipsoid_parameters(self):
+        r"""Calculate the parameters that describe the ellipsoid of
+        the particles that constitute the halo.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        tuple : (cm, mag_A, mag_B, mag_C, e1_vector, tilt)
+            The 6-tuple has in order:
+              #. The center of mass as an array.
+              #. mag_A as a float.
+              #. mag_B as a float.
+              #. mag_C as a float.
+              #. e1_vector as an array.
+              #. tilt as a float.
+
+        Examples
+        --------
+        >>> params = halos[0].get_ellipsoid_parameters()
+	"""
+
+        basic_parameters = self._get_ellipsoid_parameters_basic_loadedhalo()
+        toreturn = [self.center_of_mass()]
+        updated = [basic_parameters[0], basic_parameters[1],
+            basic_parameters[2], na.array([basic_parameters[3],
+            basic_parameters[4], basic_parameters[5]]), basic_parameters[6]]
+        toreturn.extend(updated)
+        return tuple(toreturn)
+    
+    def get_ellipsoid(self):
+        r"""Returns an ellipsoidal data object.        
+        This will generate a new, empty ellipsoidal data object for this
+        halo.
+        
+        Parameters
+        ----------
+        None.
+        
+        Returns
+        -------
+        ellipsoid : `yt.data_objects.api.AMREllipsoidBase`
+            The ellipsoidal data object.
+        
+        Examples
+        --------
+        >>> ell = halos[0].get_ellipsoid()
+        """
+        ep = self.get_ellipsoid_parameters()
+        ell = self.pf.hierarchy.ellipsoid(ep[0], ep[1], ep[2], ep[3],
+            ep[4], ep[5])
+        return ell
+
     def get_sphere(self):
         r"""Returns a sphere source.
 
@@ -917,7 +1174,7 @@
             n_points.append([math.sqrt(n[0]), n[1].haloID])
         return n_points
 
-    def write_out(self, filename):
+    def write_out(self, filename, ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
 
         Parameters
@@ -925,6 +1182,10 @@
         filename : String
             The name of the file to write to.
 
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
+        
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -934,10 +1195,20 @@
         else:
             f = open(filename, "w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
-        f.write("\t".join(["# Group", "Mass", "# part", "max dens"
-                           "x", "y", "z", "center-of-mass",
-                           "x", "y", "z",
-                           "vx", "vy", "vz", "max_r", "rms_v", "\n"]))
+
+        if not ellipsoid_data:
+            f.write("\t".join(["# Group","Mass","# part","max dens",
+                               "x","y","z", "center-of-mass",
+                               "x","y","z",
+                               "vx","vy","vz","max_r","rms_v","\n"]))
+        else:
+            f.write("\t".join(["# Group","Mass","# part","max dens",
+                               "x","y","z", "center-of-mass",
+                               "x","y","z",
+                               "vx","vy","vz","max_r","rms_v",
+                               "mag_A", "mag_B", "mag_C", "e1_vec0",
+                               "e1_vec1", "e1_vec2", "tilt", "\n"]))
+
         for group in self:
             f.write("%10i\t" % group.id)
             f.write("%0.9e\t" % group.total_mass())
@@ -952,6 +1223,8 @@
             f.write("\t")
             f.write("%0.9e\t" % group.maximum_radius())
             f.write("%0.9e\t" % group.rms_velocity())
+            if ellipsoid_data:
+                f.write("\t".join(["%0.9e" % v for v in group._get_ellipsoid_parameters_basic()]))
             f.write("\n")
             f.flush()
         f.close()
@@ -1107,7 +1380,7 @@
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
-    def write_out(self, filename="HopAnalysis.out"):
+    def write_out(self, filename="HopAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
 
         Parameters
@@ -1115,11 +1388,15 @@
         filename : String
             The name of the file to write to. Default = "HopAnalysis.out".
 
+        ellipsoid_data : bool.
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 
 class FOFHaloList(HaloList):
@@ -1141,7 +1418,7 @@
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
-    def write_out(self, filename="FOFAnalysis.out"):
+    def write_out(self, filename="FOFAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
 
         Parameters
@@ -1149,11 +1426,15 @@
         filename : String
             The name of the file to write to. Default = "FOFAnalysis.out".
 
+        ellipsoid_data : bool
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
+
         Examples
         --------
         >>> halos.write_out("FOFAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 
 class LoadedHaloList(HaloList):
@@ -1173,6 +1454,7 @@
         locations = self._collect_halo_data_locations()
         halo = 0
         for line in lines:
+            orig = line
             # Skip the comment lines at top.
             if line[0] == "#": continue
             line = line.split()
@@ -1188,9 +1470,32 @@
             bulk_vel = na.array([float(line[10]), float(line[11]),
                 float(line[12])])
             rms_vel = float(line[14])
-            self._groups.append(LoadedHalo(self.pf, halo, size, CoM,
-                max_dens_point, group_total_mass, max_radius, bulk_vel,
-                rms_vel, fnames))
+            if len(line) == 15:
+                # No ellipsoid information
+                self._groups.append(LoadedHalo(self.pf, halo, size = size,
+                    CoM = CoM,
+                    max_dens_point = max_dens_point,
+                    group_total_mass = group_total_mass, max_radius = max_radius,
+                    bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames))
+            elif len(line) == 22:
+                # Ellipsoid information
+                mag_A = float(line[15])
+                mag_B = float(line[16])
+                mag_C = float(line[17])
+                e1_vec0 = float(line[18])
+                e1_vec1 = float(line[19])
+                e1_vec2 = float(line[20])
+                e1_vec = na.array([e1_vec0, e1_vec1, e1_vec2])
+                tilt = float(line[21])
+                self._groups.append(LoadedHalo(self.pf, halo, size = size,
+                    CoM = CoM,
+                    max_dens_point = max_dens_point,
+                    group_total_mass = group_total_mass, max_radius = max_radius,
+                    bulk_vel = bulk_vel, rms_vel = rms_vel, fnames = fnames,
+                    mag_A = mag_A, mag_B = mag_B, mag_C = mag_C, e1_vec = e1_vec,
+                    tilt = tilt))
+            else:
+                mylog.error("I am unable to parse this line. Too many or too few items. %s" % orig)
             halo += 1
 
     def _collect_halo_data_locations(self):
@@ -1443,7 +1748,7 @@
     def __len__(self):
         return self.group_count
 
-    def write_out(self, filename="parallelHopAnalysis.out"):
+    def write_out(self, filename="parallelHopAnalysis.out", ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
 
         Parameters
@@ -1456,7 +1761,7 @@
         --------
         >>> halos.write_out("parallelHopAnalysis.out")
         """
-        HaloList.write_out(self, filename)
+        HaloList.write_out(self, filename, ellipsoid_data)
 
 
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
@@ -1551,7 +1856,7 @@
             arr[arr < LE[i] - self.padding] += dw[i]
             arr[arr > RE[i] + self.padding] -= dw[i]
 
-    def write_out(self, filename):
+    def write_out(self, filename, ellipsoid_data=False):
         r"""Write out standard halo information to a text file.
 
         Parameters
@@ -1559,12 +1864,16 @@
         filename : String
             The name of the file to write to.
 
+        ellipsoid_data : bool
+            Whether to print the ellipsoidal information to the file.
+            Default = False.
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
         f = self.comm.write_on_root(filename)
-        HaloList.write_out(self, f)
+        HaloList.write_out(self, f, ellipsoid_data)
 
     def write_particle_lists_txt(self, prefix):
         r"""Write out the names of the HDF5 files containing halo particle data
@@ -1612,7 +1921,7 @@
             halo.write_particle_list(f)
         f.close()
 
-    def dump(self, basename="HopAnalysis"):
+    def dump(self, basename="HopAnalysis", ellipsoid_data=False):
         r"""Save the full halo data to disk.
 
         This function will save the halo data in such a manner that it can be
@@ -1630,11 +1939,15 @@
             The base name for the files the data will be written to. Default =
             "HopAnalysis".
 
+        ellipsoid_data : bool
+            Whether to save the ellipsoidal information to the files.
+            Default = False.
+        
         Examples
         --------
         >>> halos.dump("MyHalos")
         """
-        self.write_out("%s.out" % basename)
+        self.write_out("%s.out" % basename, ellipsoid_data)
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
 


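A usage sketch of the new flag (hedged: the dataset path and the LoadHaloes entry point are assumptions; HaloFinder, write_out, dump and ellipsoid_data come from the diff above):

    from yt.mods import *

    pf = load("DD0010/moving7_0010")             # hypothetical dataset
    halos = HaloFinder(pf)                       # HOP halo finder
    # Writes the usual 15 columns plus mag_A, mag_B, mag_C, e1_vec0..2, tilt.
    halos.dump("MyHalos", ellipsoid_data=True)
    # Reloading later (e.g. via LoadHaloes, assumed available here) goes
    # through LoadedHaloList above, which accepts 15- or 22-column rows.
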
diff -r 01d7918185b68265f5ad41bbe332ae3aa71e7205 -r 5052c5ca60c3aa8f309af54b2a4cf490a4391698 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -5,6 +5,8 @@
 Affiliation: KIPAC/SLAC/Stanford
 Author: Britton Smith <Britton.Smith at colorado.edu>
 Affiliation: University of Colorado at Boulder
+Author: Geoffrey So <gsiisg at gmail.com> (AMREllipsoidBase)
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
@@ -56,6 +58,9 @@
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
 from yt.utilities.orientation import Orientation
+from yt.utilities.math_utils import RX
+from yt.utilities.math_utils import RY
+from yt.utilities.math_utils import RZ
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -3397,6 +3402,138 @@
             self._cut_masks[grid.id] = cm
         return cm
 
+class AMREllipsoidBase(AMR3DData):
+    """
+    We can define an ellipsoid to act as a data object.
+    """
+    _type_name = "ellipsoid"
+    _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')
+    def __init__(self, center, A, B, C, e0, tilt, fields=None,
+                 pf=None, **kwargs):
+        """
+        By providing a *center*, *A*, *B*, *C*, *e0*, and *tilt*, we can
+        define an ellipsoid of any proportion.  Only cells whose centers are
+        within the ellipsoid will be selected.
+        """
+        AMR3DData.__init__(self, na.array(center), fields, pf, **kwargs)
+        # make sure the smallest side is not smaller than dx
+        if C < self.hierarchy.get_smallest_dx():
+            raise YTSphereTooSmall(pf, C, self.hierarchy.get_smallest_dx())
+        self._A = A
+        self._B = B
+        self._C = C
+        self._e0 = e0
+        self._tilt = tilt
+        
+        # uses the rotation matrices RX, RY, RZ imported at the top of this module
+        # find the t1 angle needed to rotate about z axis to align e0 to x
+        t1 = na.arctan(e0[1] / e0[0])
+        # rotate e0 by -t1
+        r1 = (e0 * RZ(-t1).transpose()).sum(axis = 1)
+        # find the t2 angle needed to rotate about y axis to align e0 to x
+        t2 = na.arctan(-r1[2] / r1[0])
+        """
+        calculate the original e1
+        given the tilt about the x axis when e0 was aligned 
+        to x after t1, t2 rotations about z, y
+        """
+        e1 = ((0, 1, 0) * RX(tilt).transpose()).sum(axis = 1)
+        e1 = (e1 * RY(t2).transpose()).sum(axis = 1)
+        e1 = (e1 * RZ(t1).transpose()).sum(axis = 1)
+        e2 = na.cross(e0, e1)
+
+        self._e1 = e1
+        self._e2 = e2
+
+        self.set_field_parameter('A', A)
+        self.set_field_parameter('B', B)
+        self.set_field_parameter('C', C)
+        self.set_field_parameter('e0', e0)
+        self.set_field_parameter('e1', e1)
+        self.set_field_parameter('e2', e2)
+        self.DW = self.pf.domain_right_edge - self.pf.domain_left_edge
+        self._refresh_data()
+
+        """
+        Having another function find_ellipsoid_grids is too much work, 
+        can just use the sphere one and forget about checking orientation
+        but feed in the A parameter for radius
+        """
+    def _get_list_of_grids(self, field = None):
+        """
+        This returns the grids that are possibly within the ellipsoid.
+        """
+        grids,ind = self.hierarchy.find_sphere_grids(self.center, self._A)
+        # Now we sort by level
+        grids = grids.tolist()
+        grids.sort(key=lambda x: (x.Level, \
+                                  x.LeftEdge[0], \
+                                  x.LeftEdge[1], \
+                                  x.LeftEdge[2]))
+        self._grids = na.array(grids, dtype = 'object')
+
+    def _is_fully_enclosed(self, grid):
+        """
+        check if all grid corners are inside the ellipsoid
+        """
+        # vector from corner to center
+        vr = (grid._corners - self.center)
+        # three possible locations of each corner once periodic BCs are
+        # taken into account; list all components and pick the smallest later
+        dotarr = na.array([vr, vr + self.DW, vr - self.DW])
+        # vrdote#_2 is the squared projection of vr onto e#: multiply by e#,
+        # square, keep the smallest periodic image per component, then sum
+        vrdote0_2 = (na.multiply(dotarr, self._e0)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        vrdote1_2 = (na.multiply(dotarr, self._e1)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        vrdote2_2 = (na.multiply(dotarr, self._e2)**2).min(axis \
+                                                           = 0).sum(axis = 1)
+        return na.all(vrdote0_2 / self._A**2 + \
+                      vrdote1_2 / self._B**2 + \
+                      vrdote2_2 / self._C**2 <=1.0)
+
+    @restore_grid_state # Pains me not to decorate with cache_mask here
+    def _get_cut_mask(self, grid, field = None):
+        """
+        This checks if each cell is inside the ellipsoid
+        """
+        # We have the *property* center, which is not necessarily
+        # the same as the field_parameter
+        if self._is_fully_enclosed(grid):
+            return True # We do not want child masking here
+        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)) \
+           and grid.id in self._cut_masks:
+            return self._cut_masks[grid.id]
+        Inside = na.zeros(grid["x"].shape, dtype = 'float64')
+        dim = grid["x"].shape
+        # needed to handle root grid tiles that are not cubes
+        dot_evec = na.zeros([3, dim[0], dim[1], dim[2]])
+        for i, ax in enumerate('xyz'):
+            # distance to center
+            ar  = grid[ax]-self.center[i]
+            # cases to take into account periodic BC
+            case = na.array([ar, ar + self.DW[i], ar - self.DW[i]])
+            # find which of the 3 cases is smallest in magnitude
+            index = na.abs(case).argmin(axis = 0)
+            # restrict distance to only the smallest cases
+            vec = na.choose(index, case)
+            # sum up to get the dot product with e_vectors
+            dot_evec += na.array([vec * self._e0[i], \
+                                  vec * self._e1[i], \
+                                  vec * self._e2[i]])
+        # Calculate the eqn of ellipsoid, if it is inside
+        # then result should be <= 1.0
+        Inside = dot_evec[0]**2 / self._A**2 + \
+                 dot_evec[1]**2 / self._B**2 + \
+                 dot_evec[2]**2 / self._C**2
+        cm = ((Inside <= 1.0) & grid.child_mask)
+        if not isinstance(grid, (FakeGridForParticles, GridChildMaskWrapper)):
+            self._cut_masks[grid.id] = cm
+        return cm
+
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"




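A minimal sketch of building the new ellipsoid data object by hand (the dataset path and numeric values are made up; pf.h.ellipsoid() is the factory registered by _type_name = "ellipsoid" above, and halos[0].get_ellipsoid() from the halo_objects.py diff returns the same kind of object):

    import numpy as na
    from yt.mods import load

    pf = load("DD0010/moving7_0010")             # hypothetical dataset
    center = na.array([0.5, 0.5, 0.5])
    A, B, C = 0.1, 0.05, 0.02                    # semi-principal axes, A >= B >= C
    e0 = na.array([1.0, 0.0, 0.0])               # unit vector along the A axis
    tilt = 0.0                                   # rotation of the e1 axis about e0 (see __init__ above)
    ell = pf.h.ellipsoid(center, A, B, C, e0, tilt)
    total_mass = ell["CellMassMsun"].sum()       # sum over cells inside the ellipsoid
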
diff -r 01d7918185b68265f5ad41bbe332ae3aa71e7205 -r 5052c5ca60c3aa8f309af54b2a4cf490a4391698 yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -5,6 +5,8 @@
 Affiliation: UCSD Physics/CASS
 Author: Stephen Skory <s at skory.us>
 Affiliation: UCSD Physics/CASS
+Author: Geoffrey So <gsiisg at gmail.com>
+Affiliation: UCSD Physics/CASS
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
@@ -632,3 +634,55 @@
                   [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
     
     return R
+
+def RX(ax):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the x-axis as an array.
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RX
+    >>> RX(na.pi)
+    array([[  1.00000000e+00,   0.00000000e+00,   0.00000000e+00],
+           [  0.00000000e+00,  -1.00000000e+00,   1.22464680e-16],
+           [  0.00000000e+00,  -1.22464680e-16,  -1.00000000e+00]])
+    """
+    rot_matrix = get_rotation_matrix(-ax, (1, 0, 0))
+    return rot_matrix
+
+def RY(ay):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the y-axis as an array.
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RY
+    >>> RY(na.pi)
+    array([[ -1.00000000e+00,   0.00000000e+00,  -1.22464680e-16],
+           [  0.00000000e+00,   1.00000000e+00,   0.00000000e+00],
+           [  1.22464680e-16,   0.00000000e+00,  -1.00000000e+00]])
+    """
+    rot_matrix = get_rotation_matrix(-ay, (0, 1, 0))
+    return rot_matrix
+
+def RZ(az):
+    """
+    Returns
+    -------
+    Gives the rotation matrix about the z-axis as an array.
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> from yt.utilities.math_utils import RZ
+    >>> RZ(na.pi)
+    array([[ -1.00000000e+00,   1.22464680e-16,   0.00000000e+00],
+           [ -1.22464680e-16,  -1.00000000e+00,   0.00000000e+00],
+           [  0.00000000e+00,   0.00000000e+00,   1.00000000e+00]])
+    """
+    rot_matrix = get_rotation_matrix(-az, (0, 0, 1))
+    return rot_matrix

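A quick consistency check of the convention used by RX, RY, RZ (a sketch; because the angle is negated before being passed to get_rotation_matrix, RZ(t) maps the y unit vector toward the x unit vector and RZ(-t) undoes it):

    import numpy as na
    from yt.utilities.math_utils import RZ

    v = na.array([0.0, 1.0, 0.0])
    w = na.dot(RZ(na.pi / 2), v)                 # approximately (1, 0, 0)
    assert na.allclose(na.dot(RZ(-na.pi / 2), w), v)
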
Repository URL: https://bitbucket.org/yt_analysis/yt/
