[yt-svn] commit/yt: 46 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri May 4 13:30:53 PDT 2012


46 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/45c690fab939/
changeset:   45c690fab939
branch:      yt
user:        Christopher Moody
date:        2012-01-30 23:46:27
summary:     special exception for the root grid
affected #:  1 file

diff -r f0450f016f159c4463c8b31b20a198dee4aa709c -r 45c690fab939acfa02e67630b92fecbce805cca6 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -204,7 +204,7 @@
         #                 self.pf.domain_dimensions[None,:], # dims of grids
         #                 na.zeros((1,6), dtype='int64') # empty
         #                 )
-        
+        #import pdb; pdb.set_trace()
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
@@ -393,10 +393,25 @@
     #     return L + R
         
     def _parse_hierarchy(self):
-        # We have important work to do
+        """ The root grid has no octs except one which is refined.
+        Still, it is the size of 128 cells along a length.
+        Ignore the proto subgrid created for the root grid - it is wrong.
+        """
+        import pdb; pdb.set_trace()
         grids = []
         gi = 0
         for level, grid_list in enumerate(self.proto_grids):
+            if level ==0:
+                assert len(grid_list)==1 #there should only be one grid on the root level
+                fl = grid_list[0].grid_file_locations
+                props = grid_list[0].get_properties()
+                self.grid_left_edge[gi,:] = na.array([0,0,0])
+                self.grid_right_edge[gi,:] = na.array(self.pf.domain_dimensions)
+                self.grid_dimensions[gi,:] = na.array(self.pf.domain_dimensions)
+                self.grid_levels[gi,:] = level
+                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                gi+=1
+                continue
             for g in grid_list:
                 fl = g.grid_file_locations
                 props = g.get_properties()
@@ -513,6 +528,7 @@
         tr  = self.tr
         self.conversion_factors["Temperature"] = tr
         self.conversion_factors["Metal_Density"] = 1
+        self.cosmological_simulation = True
         
         # Now our conversion factors
         for ax in 'xyz':
@@ -616,11 +632,10 @@
 
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = na.log2(self.ncell) / 3
-        if int(est) != est: raise RuntimeError
+        est = int(na.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64') * int(2**est)
+        self.domain_dimensions = na.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
         _skip_record(f) # iOctCh


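For readers who want to check the new root-grid estimate in isolation: the last hunk above replaces the exact log2 test (which raised RuntimeError whenever ncell was not a power of 8) with a rounded cube root. A minimal standalone sketch of that calculation, using plain numpy and an illustrative 128^3 cell count:

    import numpy as np

    ncell = 128 ** 3                              # total root-grid cells (illustrative value)
    est = int(np.rint(ncell ** (1.0 / 3.0)))      # cube root, rounded to the nearest integer
    domain_dimensions = np.ones(3, dtype='int64') * est
    print(domain_dimensions)                      # -> [128 128 128]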

https://bitbucket.org/yt_analysis/yt/changeset/41c966d5bcdc/
changeset:   41c966d5bcdc
branch:      yt
user:        Christopher Moody
date:        2012-01-31 00:30:10
summary:     record arrays overflow silently. changed the numgrid calculation to 64bit.
affected #:  1 file

diff -r 45c690fab939acfa02e67630b92fecbce805cca6 -r 41c966d5bcdc85b23da3e6a4e67defdd33461855 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -366,7 +366,7 @@
         #   1 = number of cells
         #   2 = blank
         desc = {'names': ['numgrids','numcells','level'],
-                'formats':['Int32']*3}
+                'formats':['Int64']*3}
         self.level_stats = blankRecordArray(desc, MAXLEVEL)
         self.level_stats['level'] = [i for i in range(MAXLEVEL)]
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]


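The "silent overflow" in the summary refers to numpy integer arithmetic wrapping around rather than raising. A quick standalone illustration of why 'Int32' fields are risky for cell counts (values are made up purely to trigger the wrap; the fix above is simply to declare the record-array fields as 'Int64'):

    import numpy as np

    cells_per_grid = np.array([8192], dtype='int32')
    numgrids = np.array([300000], dtype='int32')
    # 8192 * 300000 = 2,457,600,000 exceeds 2**31 - 1, so the int32 product wraps
    print(cells_per_grid * numgrids)              # -> [-1837367296], no exception raised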

https://bitbucket.org/yt_analysis/yt/changeset/8ae85483aad1/
changeset:   8ae85483aad1
branch:      yt
user:        Christopher Moody
date:        2012-01-31 01:18:48
summary:     fixing root grid size. projection of Ones field works.
affected #:  1 file

diff -r 41c966d5bcdc85b23da3e6a4e67defdd33461855 -r 8ae85483aad17c933ab69489e7b73187901cd1d8 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,8 +30,6 @@
 import os
 import struct
 
-import pdb
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
@@ -204,7 +202,6 @@
         #                 self.pf.domain_dimensions[None,:], # dims of grids
         #                 na.zeros((1,6), dtype='int64') # empty
         #                 )
-        #import pdb; pdb.set_trace()
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
@@ -397,28 +394,23 @@
         Still, it is the size of 128 cells along a length.
         Ignore the proto subgrid created for the root grid - it is wrong.
         """
-        import pdb; pdb.set_trace()
         grids = []
         gi = 0
         for level, grid_list in enumerate(self.proto_grids):
-            if level ==0:
-                assert len(grid_list)==1 #there should only be one grid on the root level
-                fl = grid_list[0].grid_file_locations
-                props = grid_list[0].get_properties()
-                self.grid_left_edge[gi,:] = na.array([0,0,0])
-                self.grid_right_edge[gi,:] = na.array(self.pf.domain_dimensions)
-                self.grid_dimensions[gi,:] = na.array(self.pf.domain_dimensions)
-                self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props[0,:]))
-                gi+=1
-                continue
+            #The root level spans [0,2]
+            #The next level spans [0,256]
+            #The 3rd Level spans up to 128*2^3, etc.
+            #Correct root level to span up to 128
+            correction=1.0
+            if level == 0:
+                correction=64.0
             for g in grid_list:
                 fl = g.grid_file_locations
                 props = g.get_properties()
                 dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                self.grid_left_edge[gi,:] = props[0,:]*correction / dds
+                self.grid_right_edge[gi,:] = props[1,:]*correction / dds
+                self.grid_dimensions[gi,:] = props[2,:]*correction
                 self.grid_levels[gi,:] = level
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1


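A compact restatement of the correction applied in _parse_hierarchy above: the proto-subgrid built for the root level reports its extent on a [0, 2] mesh, so a factor of 64 rescales it to the 128-cell convention shared by the other levels before the edges are normalized by that level's dimensions. (Standalone numpy sketch; the 128^3 domain and the [0, 2] root extent come straight from the comments in the hunk, everything else is illustrative.)

    import numpy as np

    domain_dimensions = np.array([128, 128, 128])
    level = 0
    props_left = np.zeros(3)                      # root proto-subgrid left edge
    props_right = np.array([2.0, 2.0, 2.0])       # root proto-subgrid right edge, spans [0, 2]

    correction = 64.0 if level == 0 else 1.0
    dds = ((2 ** level) * domain_dimensions).astype('float64')

    left_edge = props_left * correction / dds     # -> [0. 0. 0.]
    right_edge = props_right * correction / dds   # -> [1. 1. 1.] in code units
    print(right_edge)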

https://bitbucket.org/yt_analysis/yt/changeset/ca8b7105e354/
changeset:   ca8b7105e354
branch:      yt
user:        Christopher Moody
date:        2012-01-31 01:50:03
summary:     implemented matt's hilbert-ordered patch-splitting
affected #:  1 file

diff -r 8ae85483aad17c933ab69489e7b73187901cd1d8 -r ca8b7105e354573ebf7a44c17e193fd2743062a5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -140,15 +140,8 @@
         self.object_types.sort()
 
     def _count_grids(self):
-        # We have to do all the patch-coalescing here.
-        #level_info is used by the IO so promoting it to the static
-        # output class
-        #self.pf.level_info = [self.pf.ncell] # skip root grid for now
-        #leve_info = []
-        # amr_utils.count_art_octs(
-        #         self.pf.parameter_filename, self.pf.child_grid_offset,
-        #         self.pf.min_level, self.pf.max_level, self.pf.nhydro_vars,
-        #         self.pf.level_info)
+        LEVEL_OF_EDGE = 7
+        MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
         f = open(self.pf.parameter_filename,'rb')
         self.pf.nhydro_vars, self.pf.level_info = _count_art_octs(f, 
@@ -219,175 +212,73 @@
             nd = self.pf.domain_dimensions * 2**level
             dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
             fl = ogrid_file_locations[ggi,:]
-            # Now our initial protosubgrid
-            #if level == 6: raise RuntimeError
-            # We want grids that cover no more than MAX_EDGE cells in every direction
-            MAX_EDGE = 128
             psgs = []
+            
             #refers to the left index for the art octgrid
             left_index = ogrid_left_indices[ggi,:]
-            right_index = left_index + 2
-            #Since we are re-gridding these octs on larger meshes
-            #each sub grid has length MAX_EDGE, and so get the LE of
-            #grids fit inside the domain
-            # nd is the dimensions of the domain at this level
-            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
-            #lefts = zip(*[l.ravel() for l in lefts])
-            pbar = get_pbar("Re-gridding ", lefts[0].size)
-            min_ind = na.min(left_index, axis=0)
-            max_ind = na.max(right_index, axis=0)
             
-            #iterate over the ith dimension of the yt grids
-            for i,dli in enumerate(lefts[0]):
-                pbar.update(i)
-                
-                #skip this grid if there are no art grids inside
-                #of the zeroeth dimension
-                if min_ind[0] > dli + nd[0]: continue
-                if max_ind[0] < dli: continue
-                
-                # span of the current domain limited to max_edge
-                idim = min(nd[0] - dli, MAX_EDGE)
-
-                #gdi finds all of the art octs grids inside the 
-                #ith dimension of our current grid
-                gdi = ((dli  <= right_index[:,0])
-                     & (dli + idim >= left_index[:,0]))
-                     
-
-                #if none of our art octs fit inside, skip                    
-                if not na.any(gdi): continue
-                
-                #iterate over the jth dimension of the yt grids
-                for dlj in lefts[1]:
-                    
-                    #this is the same process as in the previous dimension
-                    #find art octs inside this grid's jth dimension, 
-                    #skip if there are none
-                    if min_ind[1] > dlj + nd[1]: continue
-                    if max_ind[1] < dlj: continue
-                    idim = min(nd[1] - dlj, MAX_EDGE)
-                    gdj = ((dlj  <= right_index[:,1])
-                         & (dlj + idim >= left_index[:,1])
-                         & (gdi))
-                    if not na.any(gdj): continue
-                    
-                    #Same story: iterate over kth dimension grids
-                    for dlk in lefts[2]:
-                        if min_ind[2] > dlk + nd[2]: continue
-                        if max_ind[2] < dlk: continue
-                        idim = min(nd[2] - dlk, MAX_EDGE)
-                        gdk = ((dlk  <= right_index[:,2])
-                             & (dlk + idim >= left_index[:,2])
-                             & (gdj))
-                        if not na.any(gdk): continue
-                        
-                        #these are coordinates for yt grid
-                        left = na.array([dli, dlj, dlk])
-                        
-                        #does this ravel really do anything?
-                        domain_left = left.ravel()
-                        
-                        #why are we adding this to zero?
-                        initial_left = na.zeros(3, dtype='int64') + domain_left
-                        
-                        #still not sure why multiplying against one 
-                        #just type casting?
-                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
-                        
-                        # We want to find how many grids are inside.
-                        
-                        #this gives us the LE and RE, domain dims,
-                        # and file locations
-                        # for art octs within this grid
-                        dleft_index = left_index[gdk,:]
-                        dright_index = right_index[gdk,:]
-                        ddims = dims[gdk,:]
-                        dfl = fl[gdk,:]
-                        
-                        #create a sub grid composed
-                        #of the new yt grid LE, span,
-                        #and a series of the contained art grid properties:
-                        # left edge, right edge, (not sure what dims is) and file locations
-                        psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                        dleft_index, dfl)
-                        
-                        #print "Gridding from %s to %s + %s" % (
-                        #    initial_left, initial_left, idims)
-                        if psg.efficiency <= 0: continue
-                        self.num_deep = 0
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        
-                        #I'm not sure how this patch splitting process
-                        #does, or how it works
-                        psgs.extend(_ramses_reader.recursive_patch_splitting(
-                            psg, idims, initial_left, dleft_index, dfl))
-                        
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        psgs.extend([psg])
+            # We now calculate the hilbert curve position of every left_index,
+            # of the octs, with respect to a lower order hilbert curve.
+            left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+            order = max(level + 1 - LEVEL_OF_EDGE, 0)
+            
+            #compute the hilbert indices up to a certain level
+            #this has nothing to do with our data yet
+            hilbert_indices = _ramses_reader.get_hilbert_indices(order, left_index_gridpatch)
+            
+            # Strictly speaking, we don't care about the index of any
+            # individual oct at this point.  So we can then split them up.
+            unique_indices = na.unique(hilbert_indices)
+            mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
+                        level, unique_indices.size, hilbert_indices.size)
+            
+            #use the hilbert indices to order oct grids so that consecutive
+            #items on a list are spatially near each other
+            #this is useful because we will define grid patches over these
+            #octs, which are more efficient if the octs are spatially close
+            
+            #split into list of lists, with domains containing 
+            #lists of sub octgrid left indices and an index
+            #referring to the domain on which they live
+            locs, lefts = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices, left_index, fl)
+            
+            #iterate over the domains    
+            step=0        
+            pbar = get_pbar("Re-gridding ", len(lefts))
+            for ddleft_index, ddfl in zip(lefts, locs):
+                #iterate over just the unique octs
+                #why would we ever have non-unique octs?
+                #perhaps the hilbert ordering may visit the same
+                #oct multiple times - review only unique octs 
+                for idomain in na.unique(ddfl[:,0]):
+                    dom_ind = ddfl[:,0] == idomain
+                    dleft_index = ddleft_index[dom_ind,:]
+                    dfl = ddfl[dom_ind,:]
+                    initial_left = na.min(dleft_index, axis=0)
+                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    self.num_deep = 0
+                    psgs.extend(_ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl))
+                pbar.updte(step)
+                step+=1
+            mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            sums = na.zeros(3, dtype='int64')
+            print sum(len(psg.grid_file_locations) for psg in psgs)
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
-            # for g in self.proto_grids[level]:
-            #     sums += [s.sum() for s in g.sigs]
-            # assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
+                    
+            
+            
 
     num_deep = 0
 
-    # @num_deep_inc
-    # def _recursive_patch_splitting(self, psg, dims, ind,
-    #         left_index, right_index, gdims, fl):
-    #     min_eff = 0.1 # This isn't always respected.
-    #     if self.num_deep > 40:
-    #         # If we've recursed more than 100 times, we give up.
-    #         psg.efficiency = min_eff
-    #         return [psg]
-    #     if psg.efficiency > min_eff or psg.efficiency < 0.0:
-    #         return [psg]
-    #     tt, ax, fp = psg.find_split()
-    #     if (fp % 2) != 0:
-    #         if dims[ax] != fp + 1:
-    #             fp += 1
-    #         else:
-    #             fp -= 1
-    #     #print " " * self.num_deep + "Got ax", ax, "fp", fp
-    #     dims_l = dims.copy()
-    #     dims_l[ax] = fp
-    #     li_l = ind.copy()
-    #     if na.any(dims_l <= 0): return [psg]
-    #     L = _ramses_reader.ProtoSubgrid(
-    #             li_l, dims_l, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "L", tt, L.efficiency
-    #     #if L.efficiency > 1.0: raise RuntimeError
-    #     if L.efficiency <= 0.0: L = []
-    #     elif L.efficiency < min_eff:
-    #         L = self._recursive_patch_splitting(L, dims_l, li_l,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         L = [L]
-    #     dims_r = dims.copy()
-    #     dims_r[ax] -= fp
-    #     li_r = ind.copy()
-    #     li_r[ax] += fp
-    #     if na.any(dims_r <= 0): return [psg]
-    #     R = _ramses_reader.ProtoSubgrid(
-    #             li_r, dims_r, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "R", tt, R.efficiency
-    #     #if R.efficiency > 1.0: raise RuntimeError
-    #     if R.efficiency <= 0.0: R = []
-    #     elif R.efficiency < min_eff:
-    #         R = self._recursive_patch_splitting(R, dims_r, li_r,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         R = [R]
-    #     return L + R
         
     def _parse_hierarchy(self):
         """ The root grid has no octs except one which is refined.


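The heart of the new _count_grids is the bucketing done by get_array_indices_lists: octs are grouped by a coarse Hilbert index so that each candidate patch only contains spatially nearby octs. A rough pure-numpy equivalent of that grouping, with a stand-in index array in place of the output of _ramses_reader.get_hilbert_indices:

    import numpy as np

    LEVEL_OF_EDGE = 7
    # stand-in data: six octs with their 3-d left indices on some refined level
    left_index = np.array([[  0,   0,   0], [640, 128,   0], [  2,   2,   0],
                           [642, 130,   0], [256, 256, 256], [  0,   4,   2]], dtype='int64')
    # coarsen positions to 128-cell blocks, as done before computing Hilbert indices
    left_index_gridpatch = left_index >> LEVEL_OF_EDGE

    # pretend these are the coarse Hilbert indices of the six octs
    hilbert_indices = np.array([5, 2, 5, 2, 7, 5], dtype='int64')

    unique_indices = np.unique(hilbert_indices)
    lefts = [left_index[hilbert_indices == u] for u in unique_indices]
    for u, le in zip(unique_indices, lefts):
        print("hilbert bucket %i holds %i octs" % (u, len(le)))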

https://bitbucket.org/yt_analysis/yt/changeset/d4d82426e66c/
changeset:   d4d82426e66c
branch:      yt
user:        Christopher Moody
date:        2012-01-31 02:46:40
summary:     fixed a bug in the hilbert grid patching. added lots of comments.
affected #:  2 files

diff -r ca8b7105e354573ebf7a44c17e193fd2743062a5 -r d4d82426e66ca80298a3d4e41e0ee9fe0acf678a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -143,8 +143,11 @@
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
+        min_eff = 0.2
+        
         f = open(self.pf.parameter_filename,'rb')
-        self.pf.nhydro_vars, self.pf.level_info = _count_art_octs(f, 
+        self.pf.nhydro_vars, self.pf.level_info, self.pf.level_offsetsa = \
+                         _count_art_octs(f, 
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
@@ -223,7 +226,8 @@
             order = max(level + 1 - LEVEL_OF_EDGE, 0)
             
             #compute the hilbert indices up to a certain level
-            #this has nothing to do with our data yet
+            #the indices will associate an oct grid to the nearest
+            #hilbert index?
             hilbert_indices = _ramses_reader.get_hilbert_indices(order, left_index_gridpatch)
             
             # Strictly speaking, we don't care about the index of any
@@ -244,28 +248,41 @@
                         hilbert_indices, unique_indices, left_index, fl)
             
             #iterate over the domains    
-            step=0        
-            pbar = get_pbar("Re-gridding ", len(lefts))
+            pbar = get_pbar("Re-gridding ", len(locs))
+            import pdb; pdb.set_trace()
             for ddleft_index, ddfl in zip(lefts, locs):
                 #iterate over just the unique octs
                 #why would we ever have non-unique octs?
                 #perhaps the hilbert ordering may visit the same
                 #oct multiple times - review only unique octs 
-                for idomain in na.unique(ddfl[:,0]):
-                    dom_ind = ddfl[:,0] == idomain
-                    dleft_index = ddleft_index[dom_ind,:]
-                    dfl = ddfl[dom_ind,:]
-                    initial_left = na.min(dleft_index, axis=0)
-                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
-                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                    dleft_index, dfl)
-                    if psg.efficiency <= 0: continue
-                    self.num_deep = 0
-                    psgs.extend(_ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl))
-                pbar.updte(step)
+                #for idomain in na.unique(ddfl[:,1]):
+                #dom_ind = ddfl[:,1] == idomain
+                #dleft_index = ddleft_index[dom_ind,:]
+                #dfl = ddfl[dom_ind,:]
+                
+                dleft_index = ddleft_index
+                dfl = ddfl
+                initial_left = na.min(dleft_index, axis=0)
+                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                
+                #this creates a grid patch that doesn't cover the whole level
+                #necessarily, but with other patches covers all the regions
+                #with octs. This object automatically shrinks its size
+                #to barely encompass the octs inside of it.
+                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                dleft_index, dfl)
+                if psg.efficiency <= 0: continue
+                self.num_deep = 0
+                
+                #because grid patches may still be mostly empty, and with octs
+                #that only partially fill the grid,it  may be more efficient
+                #to split large patches into smaller patches. We split
+                #if less than 10% the volume of a patch is covered with octs
+                psgs.extend(_ramses_reader.recursive_patch_splitting(
+                    psg, idims, initial_left, 
+                    dleft_index, dfl,min_eff=min_eff))
                 step+=1
+                pbar.update(step)
             mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
@@ -552,15 +569,17 @@
     f.seek(pos)
     return s[0]
 
-def _count_art_octs(f, offset,
+def _count_art_octs(f, offset, 
                    MinLev, MaxLevelNow):
-    import gc
+    level_offsets= []
     f.seek(offset)
     nchild,ntot=8,0
     Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
+        level_offsets.append(f.tell())
+        
         #Get the info for this level, skip the rest
         #print "Reading oct tree data for level", Lev
         #print 'offset:',f.tell()
@@ -586,5 +605,5 @@
         #find nhydrovars
         nhydrovars = 8+2
     f.seek(offset)
-    return nhydrovars, iNOLL
+    return nhydrovars, iNOLL, level_offsets
 


diff -r ca8b7105e354573ebf7a44c17e193fd2743062a5 -r d4d82426e66ca80298a3d4e41e0ee9fe0acf678a yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -972,19 +972,26 @@
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 
                             np.ndarray[np.int64_t, ndim=1] uind,
                             np.ndarray[np.int64_t, ndim=2] lefts,
                             np.ndarray[np.int64_t, ndim=2] files):
+    #ind are the hilbert indices 
+    #uind are the unique hilbert indices                        
+    #count[n] track of how many times the nth index of uind occurs in ind
+    
     cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
     cdef int n, i
     cdef np.int64_t mi, mui
+    
+    #fill in the count array
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
             if uind[n] == mi:
                 count[n] += 1
                 break
+    
     cdef np.int64_t **alefts
     cdef np.int64_t **afiles
     afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
@@ -994,6 +1001,9 @@
     cdef np.ndarray[np.int64_t, ndim=2] left
     all_locations = []
     all_lefts = []
+    
+    #having measure the repetition of each hilbert index,
+    #we can know declare how much memory we will use
     for n in range(uind.shape[0]):
         locations = np.zeros((count[n], 6), 'int64')
         left = np.zeros((count[n], 3), 'int64')
@@ -1002,7 +1012,11 @@
         afiles[n] = <np.int64_t *> locations.data
         alefts[n] = <np.int64_t *> left.data
         li[n] = 0
+    
     cdef int fi
+    #now arrange all_locations and all_lefts sequentially
+    #such that when they return to python
+    #the 1d array mutates into a list of lists?
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
@@ -1022,8 +1036,8 @@
         np.ndarray[np.int64_t, ndim=1] ind,
         np.ndarray[np.int64_t, ndim=2] left_index,
         np.ndarray[np.int64_t, ndim=2] fl,
-        int num_deep = 0):
-    cdef float min_eff = 0.1
+        int num_deep = 0,
+        float min_eff = 0.1):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r


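For readers not following the Cython: the min_eff argument threaded into recursive_patch_splitting above controls when a proto-subgrid is accepted as-is versus cut in two and re-examined, with each half recursing until it is either efficient enough or empty. A toy 1-d restatement of that control flow (pure Python, not the _ramses_reader implementation; the split point and efficiency measure are deliberately simplified):

    def efficiency(cells):
        # fraction of the candidate patch actually covered by octs (True entries)
        return sum(cells) / float(len(cells))

    def split_patch(cells, min_eff=0.1, depth=0, max_depth=300):
        if not any(cells):
            return []                             # drop completely empty patches
        if depth > max_depth or efficiency(cells) > min_eff:
            return [cells]                        # efficient enough (or recursed too far): keep
        mid = len(cells) // 2                     # simplified: always cut in the middle
        return (split_patch(cells[:mid], min_eff, depth + 1, max_depth) +
                split_patch(cells[mid:], min_eff, depth + 1, max_depth))

    patches = split_patch([True] * 4 + [False] * 60, min_eff=0.2)
    print([(len(p), efficiency(p)) for p in patches])   # -> [(16, 0.25)]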

https://bitbucket.org/yt_analysis/yt/changeset/b1cb776fed6f/
changeset:   b1cb776fed6f
branch:      yt
user:        Christopher Moody
date:        2012-01-31 07:30:09
summary:     adding log info. starting on memory optimization; loading left edges by level.
affected #:  3 files

diff -r d4d82426e66ca80298a3d4e41e0ee9fe0acf678a -r b1cb776fed6f9659525e9115d407cde66c696c9b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -143,7 +143,7 @@
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
-        min_eff = 0.2
+        min_eff = 0.40
         
         f = open(self.pf.parameter_filename,'rb')
         self.pf.nhydro_vars, self.pf.level_info, self.pf.level_offsetsa = \
@@ -211,7 +211,6 @@
                 self.proto_grids.append([])
                 continue
             ggi = (ogrid_levels == level).ravel()
-            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf.domain_dimensions * 2**level
             dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
             fl = ogrid_file_locations[ggi,:]
@@ -248,8 +247,10 @@
                         hilbert_indices, unique_indices, left_index, fl)
             
             #iterate over the domains    
-            pbar = get_pbar("Re-gridding ", len(locs))
-            import pdb; pdb.set_trace()
+            step=0
+            pbar = get_pbar("Re-gridding  Level %i"%level, len(locs))
+            psg_eff = []
+            psg_dep = []
             for ddleft_index, ddfl in zip(lefts, locs):
                 #iterate over just the unique octs
                 #why would we ever have non-unique octs?
@@ -278,16 +279,34 @@
                 #that only partially fill the grid,it  may be more efficient
                 #to split large patches into smaller patches. We split
                 #if less than 10% the volume of a patch is covered with octs
-                psgs.extend(_ramses_reader.recursive_patch_splitting(
+                psg_split = _ramses_reader.recursive_patch_splitting(
                     psg, idims, initial_left, 
-                    dleft_index, dfl,min_eff=min_eff))
+                    dleft_index, dfl,min_eff=min_eff)
+                    
+                psgs.extend(psg_split)
+                
+                tol = 1.00001
+                psg_eff  += [x.efficiency for x in psg_split] 
+                psg_dep  += [x.num_deep for x in psg_split] 
+                
                 step+=1
                 pbar.update(step)
+            eff_mean = na.mean(psg_eff)
+            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_nall = len(psg_eff)
+            dep_mean = na.rint(na.mean(psg_dep))
+            mylog.info("Average subgrid efficiency %02.1f %% and average depth %i",
+                        eff_mean*100.0, dep_mean)
+            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
+            mylog.info("Re-gridding level %i: %s octree grids", level, ggi.sum())
+            
+        
             mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            print sum(len(psg.grid_file_locations) for psg in psgs)
-            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
+            #print sum(len(psg.grid_file_locations) for psg in psgs)
+            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
                     
@@ -585,7 +604,7 @@
         #print 'offset:',f.tell()
         Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
            '>iii', _read_record(f))
-        print 'Level %i : '%Lev, iNOLL
+        #print 'Level %i : '%Lev, iNOLL
         #print 'offset after level record:',f.tell()
         iOct = iHOLL[Lev] - 1
         nLevel = iNOLL[Lev]
@@ -607,3 +626,35 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_offsets
 
+def _read_art_level(f, level_offsets,level):
+    pos = f.tell()
+    f.seek(level_offsets[leve])
+    #Get the info for this level, skip the rest
+    #print "Reading oct tree data for level", Lev
+    #print 'offset:',f.tell()
+    Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
+       '>iii', _read_record(f))
+    #print 'Level %i : '%Lev, iNOLL
+    #print 'offset after level record:',f.tell()
+    iOct = iHOLL[Lev] - 1
+    nLevel = iNOLL[Lev]
+    nLevCells = nLevel * nchild
+    ntot = ntot + nLevel
+
+    #Skip all the oct hierarchy data
+    #in the future, break this up into large chunks
+    count = nLevel*15
+    le  = numpy.zeros((count,3),dtype='int64')
+    fl  = numpy.zeros((count,6),dtype='int64')
+    idxa,idxb = 0,0
+    chunk = 1e9 #this is ~111MB for 15 dimensional 64 bit arrays
+    while left > 0 :
+        data = na.fromfile(f,dtype='>i',count=chunk*15)
+        data.reshape(chunk,15)
+        left = count-index
+        le[idxa:idxb,:] = data[0:3]
+        fl[idxa:idxb,1] = numpy.arange(chunk)
+    del data
+    f.seek(pos)
+    return le,fl
+


diff -r d4d82426e66ca80298a3d4e41e0ee9fe0acf678a -r b1cb776fed6f9659525e9115d407cde66c696c9b yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1043,7 +1043,7 @@
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 40:
+    if num_deep > 60:
         psg.efficiency = min_eff
         return [psg]
     if psg.efficiency > min_eff or psg.efficiency < 0.0:
@@ -1073,7 +1073,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1081,7 +1081,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff)
     else:
         rv_r = [R]
     return rv_r + rv_l


diff -r d4d82426e66ca80298a3d4e41e0ee9fe0acf678a -r b1cb776fed6f9659525e9115d407cde66c696c9b yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -142,8 +142,6 @@
     # points to the start of the record *following* the reading of iOctFree and
     # nOct.  For those following along at home, we only need to read:
     #   iOctPr, iOctLv
-    print min_level, max_level 
-    
     cdef int nchild = 8
     cdef int i, Lev, cell_ind, iOct, nLevel, nLevCells, ic1
     cdef np.int64_t next_record
@@ -170,7 +168,7 @@
         fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
         iOct = iHOLL[Level] - 1
         nLevel = iNOLL[Level]
-        print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
+        #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
         #print ftell(f)
         for ic1 in range(nLevel):
             iOctMax = max(iOctMax, iOct)
@@ -218,7 +216,7 @@
         
         #find the length of all of the children section
         child_record = ftell(f) +  (next_record+2*sizeof(int))*nLevel*nchild
-        print 'Skipping over hydro vars', ftell(f), child_record
+        #print 'Skipping over hydro vars', ftell(f), child_record
         fseek(f, child_record, SEEK_SET)
         
         # for ic1 in range(nLevel * nchild):


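The _read_art_level helper added here still references names that are never defined (leve, Level, nchild) and is reworked into _read_art_level_info in a later commit, but the intent is already visible: stream the per-oct integer records in bounded slices instead of one enormous fromfile call. A minimal, self-contained sketch of that chunked-read pattern, assuming 15 big-endian int32 values per oct as in the hunk (the function and file names are hypothetical):

    import numpy as np

    def read_level_left_edges(f, n_octs, values_per_oct=15, chunk=1000000):
        """Read n_octs records of values_per_oct big-endian int32s in bounded chunks."""
        le = np.zeros((n_octs, 3), dtype='int64')
        done = 0
        while done < n_octs:
            this_chunk = min(chunk, n_octs - done)
            data = np.fromfile(f, dtype='>i', count=this_chunk * values_per_oct)
            data = data.reshape(this_chunk, values_per_oct)
            le[done:done + this_chunk, :] = data[:, 1:4]   # columns 1-3 hold the oct left index
            done += this_chunk
        return le

    # usage sketch (hypothetical file positioned at the start of a level's oct records):
    # with open("art_oct_data.bin", "rb") as f:
    #     left_edges = read_level_left_edges(f, n_octs=5000000)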

https://bitbucket.org/yt_analysis/yt/changeset/4c7fcdbca996/
changeset:   4c7fcdbca996
branch:      yt
user:        Christopher Moody
date:        2012-01-31 19:05:06
summary:     bug fixes in the info messages
affected #:  2 files

diff -r b1cb776fed6f9659525e9115d407cde66c696c9b -r 4c7fcdbca996814da40bb626802512d7128a67b9 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -143,7 +143,7 @@
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
-        min_eff = 0.40
+        min_eff = 0.20
         
         f = open(self.pf.parameter_filename,'rb')
         self.pf.nhydro_vars, self.pf.level_info, self.pf.level_offsetsa = \
@@ -248,9 +248,8 @@
             
             #iterate over the domains    
             step=0
-            pbar = get_pbar("Re-gridding  Level %i"%level, len(locs))
+            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
             psg_eff = []
-            psg_dep = []
             for ddleft_index, ddfl in zip(lefts, locs):
                 #iterate over just the unique octs
                 #why would we ever have non-unique octs?
@@ -273,7 +272,6 @@
                 psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                 dleft_index, dfl)
                 if psg.efficiency <= 0: continue
-                self.num_deep = 0
                 
                 #because grid patches may still be mostly empty, and with octs
                 #that only partially fill the grid,it  may be more efficient
@@ -287,19 +285,17 @@
                 
                 tol = 1.00001
                 psg_eff  += [x.efficiency for x in psg_split] 
-                psg_dep  += [x.num_deep for x in psg_split] 
                 
                 step+=1
                 pbar.update(step)
             eff_mean = na.mean(psg_eff)
             eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
             eff_nall = len(psg_eff)
-            dep_mean = na.rint(na.mean(psg_dep))
-            mylog.info("Average subgrid efficiency %02.1f %% and average depth %i",
-                        eff_mean*100.0, dep_mean)
+            mylog.info("Average subgrid efficiency %02.1f %%",
+                        eff_mean*100.0)
             mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
                         eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            mylog.info("Re-gridding level %i: %s octree grids", level, ggi.sum())
+            mylog.info("Coalesced %s octree grids", ggi.sum())
             
         
             mylog.debug("Done with level % 2i", level)


diff -r b1cb776fed6f9659525e9115d407cde66c696c9b -r 4c7fcdbca996814da40bb626802512d7128a67b9 yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1043,7 +1043,7 @@
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 60:
+    if num_deep > 300:
         psg.efficiency = min_eff
         return [psg]
     if psg.efficiency > min_eff or psg.efficiency < 0.0:


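The efficiency report tweaked here is small enough to restate on its own: after a level is re-gridded, the mean patch efficiency is logged along with how many patches ended up pinned at the minimum (the tolerance absorbs floating-point equality with min_eff). Standalone version with a made-up efficiency list:

    import numpy as np

    min_eff = 0.20
    tol = 1.00001
    psg_eff = [0.20, 0.35, 0.61, 0.20, 0.95]      # example efficiencies returned by the splitter

    eff_mean = np.mean(psg_eff)
    eff_nmin = np.sum([e <= min_eff * tol for e in psg_eff])
    eff_nall = len(psg_eff)
    print("Average subgrid efficiency %02.1f %%" % (eff_mean * 100.0))
    print("%02.1f%% (%i/%i) of grids had minimum efficiency"
          % (eff_nmin * 100.0 / eff_nall, eff_nmin, eff_nall))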

https://bitbucket.org/yt_analysis/yt/changeset/dd43dbad7df5/
changeset:   dd43dbad7df5
branch:      yt
user:        Christopher Moody
date:        2012-02-02 03:25:30
summary:      working version of art reader. faster than the first pass, uses matt's hilbert mechanism.
affected #:  1 file

diff -r 4c7fcdbca996814da40bb626802512d7128a67b9 -r dd43dbad7df507dc7211d20a7b74a55e09469aaa yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -216,6 +216,8 @@
             fl = ogrid_file_locations[ggi,:]
             psgs = []
             
+            if level > 5: continue
+            
             #refers to the left index for the art octgrid
             left_index = ogrid_left_indices[ggi,:]
             



https://bitbucket.org/yt_analysis/yt/changeset/a1e85e8f0816/
changeset:   a1e85e8f0816
branch:      yt
user:        Christopher Moody
date:        2012-02-02 03:41:10
summary:      reduced memory
affected #:  2 files

diff -r dd43dbad7df507dc7211d20a7b74a55e09469aaa -r a1e85e8f0816f1c795ff65f9cdc300aa896fc9f3 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -55,6 +55,13 @@
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
 
+from yt.frontends.art.io import _count_art_octs
+from yt.frontends.art.io import _read_art_level_info
+from yt.frontends.art.io import _read_art_child
+from yt.frontends.art.io import _skip_record
+from yt.frontends.art.io import _read_record
+from yt.frontends.art.io import _read_record_size
+
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
         self.num_deep += 1
@@ -146,58 +153,20 @@
         min_eff = 0.20
         
         f = open(self.pf.parameter_filename,'rb')
-        self.pf.nhydro_vars, self.pf.level_info, self.pf.level_offsetsa = \
+        
+        
+        (self.pf.nhydro_vars, self.pf.level_info,
+        self.pf.level_oct_offsets, 
+        self.pf.level_child_offsets) = \
                          _count_art_octs(f, 
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        f.close()
-        self.pf.level_info = na.array(self.pf.level_info)
-        num_ogrids = sum(self.pf.level_info) + self.pf.iOctFree
-        print 'found %i oct grids'%num_ogrids
-        num_ogrids *=7
-        print 'instantiating... %i grids'%num_ogrids
-        ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
-        ogrid_levels = na.zeros(num_ogrids, dtype='int64')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        
-        #don't need parents?
-        #ogrid_parents = na.zeros(num_ogrids, dtype="int64")
-        
-        #don't need masks?
-        #ochild_masks = na.zeros((num_ogrids, 8), dtype='int64').ravel()
-        
-        self.pf.level_offsets = amr_utils.read_art_tree(
-                                self.pf.parameter_filename, 
-                                self.pf.child_grid_offset,
-                                self.pf.min_level, self.pf.max_level,
-                                ogrid_left_indices, ogrid_levels,
-                                ogrid_file_locations)
-                                #ochild_masks,
-                                #ogrid_parents, 
-                                
+        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_offsets = self.pf.level_child_offsets
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
-        #ochild_masks.reshape((num_ogrids, 8), order="F")
-        ogrid_levels[ogrid_left_indices[:,0] == -999] = -1
-        # This bit of code comes from Chris, and I'm still not sure I have a
-        # handle on what it does.
-        final_indices =  ogrid_left_indices[na.where(ogrid_levels==self.pf.max_level)[0]]
-        divisible=[na.all((final_indices%2**(level))==0) 
-            for level in xrange(self.pf.max_level*2)]
-        root_level = self.pf.max_level+na.where(na.logical_not(divisible))[0][0] 
-        ogrid_dimension = na.zeros(final_indices.shape,dtype='int')+2
-        ogrid_left_indices = ogrid_left_indices/2**(root_level - ogrid_levels[:,None] - 1) - 1
-
-        # Now we can rescale
-        # root_psg = _ramses_reader.ProtoSubgrid(
-        #                 na.zeros(3, dtype='int64'), # left index of PSG
-        #                 self.pf.domain_dimensions, # dim of PSG
-        #                 na.zeros((1,3), dtype='int64'), # left edges of grids
-        #                 self.pf.domain_dimensions[None,:], # right edges of grids
-        #                 self.pf.domain_dimensions[None,:], # dims of grids
-        #                 na.zeros((1,6), dtype='int64') # empty
-        #                 )
+        
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
@@ -206,23 +175,21 @@
                         )
         
         self.proto_grids = [[root_psg],]
+
         for level in xrange(1, len(self.pf.level_info)):
             if self.pf.level_info[level] == 0:
                 self.proto_grids.append([])
                 continue
-            ggi = (ogrid_levels == level).ravel()
-            nd = self.pf.domain_dimensions * 2**level
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
-            fl = ogrid_file_locations[ggi,:]
             psgs = []
-            
             if level > 5: continue
             
+
+            effs,sizes = [], []
+            
+            #if level > 6: continue
+            
             #refers to the left index for the art octgrid
-            left_index = ogrid_left_indices[ggi,:]
-            
-            # We now calculate the hilbert curve position of every left_index,
-            # of the octs, with respect to a lower order hilbert curve.
+            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
             left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             order = max(level + 1 - LEVEL_OF_EDGE, 0)
             
@@ -297,7 +264,6 @@
                         eff_mean*100.0)
             mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
                         eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            mylog.info("Coalesced %s octree grids", ggi.sum())
             
         
             mylog.debug("Done with level % 2i", level)
@@ -569,90 +535,4 @@
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-def _skip_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(s[0], 1)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
 
-def _read_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    ss = f.read(s)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    return ss
-
-def _read_record_size(f):
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def _count_art_octs(f, offset, 
-                   MinLev, MaxLevelNow):
-    level_offsets= []
-    f.seek(offset)
-    nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
-        level_offsets.append(f.tell())
-        
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        #print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
-        iOct = iHOLL[Lev] - 1
-        nLevel = iNOLL[Lev]
-        nLevCells = nLevel * nchild
-        ntot = ntot + nLevel
-
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel)
-        
-        #Skip the child vars data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel*nchild)
-        
-        #find nhydrovars
-        nhydrovars = 8+2
-    f.seek(offset)
-    return nhydrovars, iNOLL, level_offsets
-
-def _read_art_level(f, level_offsets,level):
-    pos = f.tell()
-    f.seek(level_offsets[leve])
-    #Get the info for this level, skip the rest
-    #print "Reading oct tree data for level", Lev
-    #print 'offset:',f.tell()
-    Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-       '>iii', _read_record(f))
-    #print 'Level %i : '%Lev, iNOLL
-    #print 'offset after level record:',f.tell()
-    iOct = iHOLL[Lev] - 1
-    nLevel = iNOLL[Lev]
-    nLevCells = nLevel * nchild
-    ntot = ntot + nLevel
-
-    #Skip all the oct hierarchy data
-    #in the future, break this up into large chunks
-    count = nLevel*15
-    le  = numpy.zeros((count,3),dtype='int64')
-    fl  = numpy.zeros((count,6),dtype='int64')
-    idxa,idxb = 0,0
-    chunk = 1e9 #this is ~111MB for 15 dimensional 64 bit arrays
-    while left > 0 :
-        data = na.fromfile(f,dtype='>i',count=chunk*15)
-        data.reshape(chunk,15)
-        left = count-index
-        le[idxa:idxb,:] = data[0:3]
-        fl[idxa:idxb,1] = numpy.arange(chunk)
-    del data
-    f.seek(pos)
-    return le,fl
-


diff -r dd43dbad7df507dc7211d20a7b74a55e09469aaa -r a1e85e8f0816f1c795ff65f9cdc300aa896fc9f3 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -111,4 +111,100 @@
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+def _count_art_octs(f, offset, 
+                   MinLev, MaxLevelNow):
+    level_oct_offsets= [0,]
+    level_child_offsets= [0,]
+    f.seek(offset)
+    nchild,ntot=8,0
+    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
+        level_oct_offsets.append(f.tell())
 
+        #Get the info for this level, skip the rest
+        #print "Reading oct tree data for level", Lev
+        #print 'offset:',f.tell()
+        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
+           '>iii', _read_record(f))
+        #print 'Level %i : '%Lev, iNOLL
+        #print 'offset after level record:',f.tell()
+        iOct = iHOLL[Lev] - 1
+        nLevel = iNOLL[Lev]
+        nLevCells = nLevel * nchild
+        ntot = ntot + nLevel
+
+        #Skip all the oct hierarchy data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel)
+
+        level_child_offsets.append(f.tell())
+        #Skip the child vars data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel*nchild)
+
+        #find nhydrovars
+        nhydrovars = 8+2
+    f.seek(offset)
+    return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
+
+def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+    pos = f.tell()
+    f.seek(level_oct_offsets[level])
+    #Get the info for this level, skip the rest
+    junk, nLevel, iOct = struct.unpack(
+       '>iii', _read_record(f))
+    iOct = iOct - 1
+
+    #Skip all the oct hierarchy data
+    #in the future, break this up into large chunks
+    le  = na.zeros((nLevel,3),dtype='int64')
+    fl  = na.zeros((nLevel,6),dtype='int64')
+    idxa,idxb = 0,0
+    chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
+    left = nLevel
+    while left > 0 :
+        this_chunk = min(chunk,left)
+        idxb=idxa+this_chunk
+        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data=data.reshape(this_chunk,15)
+        left-=this_chunk
+        le[idxa:idxb,:] = data[:,1:4]
+        fl[idxa:idxb,1] = na.arange(this_chunk)
+        idxa=idxb
+    del data
+    le = le/2**(root_level-1-level)-1
+    f.seek(pos)
+    return le,fl,nLevel
+
+nchem=8+2
+dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+                ",>%sf4"%(2)+",>i4")
+def _read_art_child(f, level_child_offsets,level,nLevel,field):
+    pos=f.tell()
+    f.seek(level_child_offsets[level])
+    arr = na.fromfile(f, dtype='>f', count=nLevel * 8)
+    arr = arr.reshape((nLevel,16), order="F")
+    arr = arr[3:-1,:].astype("float64")
+    f.seek(pos)
+    return arr[field,:]
+
+def _skip_record(f):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(s[0], 1)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+
+def _read_record(f):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    ss = f.read(s)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    return ss
+
+def _read_record_size(f):
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]


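All of the low-level helpers consolidated into yt/frontends/art/io.py above (_read_record, _skip_record, _read_record_size) deal with the same on-disk convention: each Fortran unformatted record is bracketed by a 4-byte big-endian length marker. A self-contained round-trip sketch of that convention (the payload values are purely illustrative):

    import io
    import struct

    def write_record(f, payload):
        # Fortran unformatted layout: <int32 length> <payload bytes> <int32 length>
        f.write(struct.pack('>i', len(payload)))
        f.write(payload)
        f.write(struct.pack('>i', len(payload)))

    def read_record(f):
        (n,) = struct.unpack('>i', f.read(struct.calcsize('>i')))
        data = f.read(n)
        f.read(struct.calcsize('>i'))             # trailing length marker, equals n
        return data

    buf = io.BytesIO()
    write_record(buf, struct.pack('>iii', 1, 42, 7))   # e.g. a (Level, iNOLL, iHOLL) triple
    buf.seek(0)
    print(struct.unpack('>iii', read_record(buf)))     # -> (1, 42, 7)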

https://bitbucket.org/yt_analysis/yt/changeset/98534589dd52/
changeset:   98534589dd52
branch:      yt
user:        Christopher Moody
date:        2012-02-03 02:10:51
summary:     new io is less memory intensive. seems to work too.
affected #:  2 files

diff -r a1e85e8f0816f1c795ff65f9cdc300aa896fc9f3 -r 98534589dd52e22db4b8db270808b262502cffd5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -167,6 +167,20 @@
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
+        
+        #double check bvalues
+        # num_ogrids = 75019704L/8
+        # ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
+        # ogrid_levels = na.zeros(num_ogrids, dtype='int64')
+        # ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
+        # self.pf.level_offsetso = amr_utils.read_art_tree(
+        #                         self.pf.parameter_filename, 
+        #                         self.pf.child_grid_offset,
+        #                         self.pf.min_level, self.pf.max_level,
+        #                         ogrid_left_indices, ogrid_levels,
+        #                         ogrid_file_locations)
+        # ogrid_left_indices = ogrid_left_indices/2**(15 - ogrid_levels[:,None] - 1) - 1                        
+        
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
                         self.pf.domain_dimensions, # dim of PSG
@@ -189,14 +203,14 @@
             #if level > 6: continue
             
             #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+            left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             order = max(level + 1 - LEVEL_OF_EDGE, 0)
             
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            hilbert_indices = _ramses_reader.get_hilbert_indices(order, left_index_gridpatch)
+            hilbert_indices = _ramses_reader.get_hilbert_indices(order, left_index)
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.


diff -r a1e85e8f0816f1c795ff65f9cdc300aa896fc9f3 -r 98534589dd52e22db4b8db270808b262502cffd5 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -157,12 +157,14 @@
     #Get the info for this level, skip the rest
     junk, nLevel, iOct = struct.unpack(
        '>iii', _read_record(f))
-    iOct = iOct - 1
+    
+    #fortran indices start at 1
 
     #Skip all the oct hierarchy data
     #in the future, break this up into large chunks
-    le  = na.zeros((nLevel,3),dtype='int64')
-    fl  = na.zeros((nLevel,6),dtype='int64')
+    le     = na.zeros((nLevel,3),dtype='int64')
+    fl     = na.ones((nLevel,6),dtype='int64')
+    iocts  = na.zeros(nLevel+1,dtype='int64')
     idxa,idxb = 0,0
     chunk = long(1e6) #this is ~111MB for 15 dimensional 64 bit arrays
     left = nLevel
@@ -173,12 +175,40 @@
         data=data.reshape(this_chunk,15)
         left-=this_chunk
         le[idxa:idxb,:] = data[:,1:4]
-        fl[idxa:idxb,1] = na.arange(this_chunk)
-        idxa=idxb
+        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        #pad byte is last, LL2, then ioct right before it
+        iocts[idxa:idxb] = data[:,-3] 
+        idxa=idxa+this_chunk
     del data
+    
+    #ioct always represents the index of the next variable
+    #not the current, so shift forward one index
+    #the last index isn't used
+    ioctso = iocts.copy()
+    iocts[1:]=iocts[:-1] #shift
+    iocts = iocts[:nLevel] #chop off the last index
+    iocts[0]=iOct #starting value
+
+    #now correct iocts for fortran indices start @ 1
+    iocts = iocts-1
+
+    assert na.unique(iocts).shape[0] == nLevel
+    
+    #ioct tries to access arrays much larger than le & fl
+    #just make sure they appear in the right order, skipping
+    #the empty space in between
+    idx = na.argsort(iocts)
+
+    #now rearrange le & fl in order of the ioct
+    le = le[idx]
+    fl = fl[idx]
+
+    #left edges are expressed as if they were on 
+    #level 15, so no matter what level max(le)=2**15 
+    #correct to the yt convention
     le = le/2**(root_level-1-level)-1
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,iocts,nLevel
 
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \


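A minimal standalone sketch of the ioct bookkeeping introduced above (the wrapper
function and its names are illustrative; le, fl, iocts and iOct are the arrays and
starting index read in the hunk):

    import numpy as na

    def reorder_by_ioct(le, fl, iocts, iOct):
        # each raw ioct entry points at the *next* oct, so shift forward one
        # slot, seed the first slot with the level's starting oct, and drop
        # the unused trailing entry
        iocts = iocts.copy()
        iocts[1:] = iocts[:-1]
        iocts = iocts[:le.shape[0]]
        iocts[0] = iOct
        iocts -= 1                      # Fortran indices start at 1
        # only the relative order matters; argsort skips the gaps between octs
        order = na.argsort(iocts)
        return le[order], fl[order]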

https://bitbucket.org/yt_analysis/yt/changeset/26eb767bac7f/
changeset:   26eb767bac7f
branch:      yt
user:        Christopher Moody
date:        2012-02-05 22:20:55
summary:     bug fixes to io. added new parameters and units to data structures.
affected #:  4 files

diff -r 98534589dd52e22db4b8db270808b262502cffd5 -r 26eb767bac7f27a9ecbb91025bb527e97a64166c yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -195,9 +195,6 @@
                 self.proto_grids.append([])
                 continue
             psgs = []
-            if level > 5: continue
-            
-
             effs,sizes = [], []
             
             #if level > 6: continue
@@ -505,6 +502,10 @@
         self.parameters["Y_p"] = 0.245
         self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
         self.parameters["gamma"] = 5./3.
+        self.parameters["T_CMB0"] = 2.726  
+        self.parameters["T_min"] = 300.0 #T floor in K
+        self.parameters["boxh"] = self.header_vals['boxh']
+        self.parameters['ng'] = 128 # of 0 level cells in 1d 
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.data_comment = header_vals['jname']
         self.current_time = header_vals['t']
@@ -527,6 +528,38 @@
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
+        
+        Om0 = self.parameters['Om0']
+        hubble = self.parameters['hubble']
+        dummy = 100.0 * hubble * na.sqrt(Om0)
+        ng = self.parameters['ng']
+
+        #distance unit #boxh is units of h^-1 Mpc
+        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
+        r0 = self.parameters["r0"]
+        #time, yrs
+        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
+        #velocity velocity units in km/s
+        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
+                na.sqrt(self.parameters["Om0"])
+        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
+        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
+        rho0 = self.parameters["rho0"]
+        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
+        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        #T_0 = unit of temperature in K and in keV)
+        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
+        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        #S_0 = unit of entropy in keV * cm^2
+        self.parameters["S_0"] = 52.077 * wmu53 * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        
+        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
+        #     for non-cosmological run aM0 must be defined during initialization
+        #     [aM0] = [Msun]
+        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        
+
+    
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
         est = int(na.rint(self.ncell**(1.0/3.0)))

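To make the new conversion factors concrete, a small worked example using only the
formulas from the hunk above; the box parameters are hypothetical:

    import numpy as na
    boxh, hubble, Om0, ng = 20.0, 0.70, 0.27, 128      # hypothetical box
    dummy = 100.0 * hubble * na.sqrt(Om0)
    r0   = boxh / ng                               # distance unit, h^-1 Mpc per root cell
    t0   = 2.0 / dummy * 3.0856e19 / 3.15e7        # time unit, yrs
    v0   = 50.0 * r0 * na.sqrt(Om0)                # velocity unit, km/s
    rho0 = 2.776e11 * hubble**2.0 * Om0            # density unit, Msun/Mpc^3
    aM0  = rho0 * (boxh / hubble)**3.0 / ng**3.0   # mass unit, Msun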

diff -r 98534589dd52e22db4b8db270808b262502cffd5 -r 26eb767bac7f27a9ecbb91025bb527e97a64166c yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -47,7 +47,7 @@
         self.level_offsets = level_offsets
         self.level_data = {}
 
-    def preload_level(self, level):
+    def preload_level(self, level,field=None):
         if level in self.level_data: return
         if level == 0:
             self.preload_root_level()
@@ -58,8 +58,12 @@
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
         arr = na.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        arr = arr[3:-1,:].astype("float64")
-        self.level_data[level] = arr
+        arr = arr[3:-1,:]
+        if field==None:
+            self.level_data[level] = arr
+        else:
+            self.level_data[level] = arr[:field+1,:]
+        del arr
 
     def preload_root_level(self):
         f = open(self.filename, 'rb')
@@ -67,11 +71,11 @@
         ncells = self.level_info[0]
         #pdb.set_trace()
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float64")
+        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
         na.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float64")
+        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
         self.level_data[0] = arr
@@ -87,15 +91,15 @@
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2)
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
         grids = [grid]
         l_delta = 0
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
             for g in grids:
-                self.preload_level(g.Level)
+                self.preload_level(g.Level,field=field_id)
                 #print "Filling %s from %s (%s)" % (grid, g, g.Level)
                 to_fill -= au.read_art_grid(field_id, 
                         grid.get_global_startindex(), grid.ActiveDimensions,
@@ -210,6 +214,10 @@
     f.seek(pos)
     return le,fl,iocts,nLevel
 
+
+def read_in_particles(f):
+    
+
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")


diff -r 98534589dd52e22db4b8db270808b262502cffd5 -r 26eb767bac7f27a9ecbb91025bb527e97a64166c yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1043,10 +1043,12 @@
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 300:
+    cdef long volume  =0
+    volume = dims[0]*dims[1]*dims[2]
+    if num_deep > 300 and volume < 452984832L:
         psg.efficiency = min_eff
         return [psg]
-    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0) and (volume < 452984832L):
         return [psg]
     psg.find_split(tr)
     tt = tr[0]

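For reference, the new volume guard 452984832 equals 768**3, so proto-subgrids
covering more than a 768^3 cube keep being split even when the recursion is already
deep or the packing efficiency already exceeds min_eff.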

diff -r 98534589dd52e22db4b8db270808b262502cffd5 -r 26eb767bac7f27a9ecbb91025bb527e97a64166c yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -286,9 +286,9 @@
 def read_art_grid(int varindex, 
               np.ndarray[np.int64_t, ndim=1] start_index,
               np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float64_t, ndim=3] data,
-              np.ndarray[np.int32_t, ndim=3] filled,
-              np.ndarray[np.float64_t, ndim=2] level_data,
+              np.ndarray[np.float32_t, ndim=3] data,
+              np.ndarray[np.uint8_t, ndim=3] filled,
+              np.ndarray[np.float32_t, ndim=2] level_data,
               int level, int ref_factor,
               component_grid_info):
     cdef int gi, i, j, k, domain, offset, grid_id
@@ -347,6 +347,72 @@
                     to_fill += 1
     return to_fill
 
+
+@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def read_art_grid_light(int varindex, 
+              np.ndarray[np.int64_t, ndim=1] start_index,
+              np.ndarray[np.int32_t, ndim=1] grid_dims,
+              np.ndarray[np.float64_t, ndim=3] data,
+              np.ndarray[np.float64_t, ndim=2] level_data,
+              int level, int ref_factor,
+              component_grid_info):
+    cdef int gi, i, j, k, domain, offset, grid_id
+    cdef int ir, jr, kr
+    cdef int offi, offj, offk, odind
+    cdef np.int64_t di, dj, dk
+    cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
+    cdef np.ndarray[np.int64_t, ndim=1] og_start_index
+    cdef np.float64_t temp_data
+    cdef np.int64_t end_index[3]
+    cdef int kr_offset, jr_offset, ir_offset
+    cdef int to_fill = 0
+    # Note that indexing into a cell is:
+    #   (k*2 + j)*2 + i
+    for i in range(3):
+        end_index[i] = start_index[i] + grid_dims[i]
+    for gi in range(len(component_grid_info)):
+        ogrid_info = component_grid_info[gi]
+        domain = ogrid_info[0]
+        #print "Loading", domain, ogrid_info
+        grid_id = ogrid_info[1]
+        og_start_index = ogrid_info[3:]
+        for i in range(2*ref_factor):
+            di = i + og_start_index[0] * ref_factor
+            if di < start_index[0] or di >= end_index[0]: continue
+            ir = <int> (i / ref_factor)
+            for j in range(2 * ref_factor):
+                dj = j + og_start_index[1] * ref_factor
+                if dj < start_index[1] or dj >= end_index[1]: continue
+                jr = <int> (j / ref_factor)
+                for k in range(2 * ref_factor):
+                    dk = k + og_start_index[2] * ref_factor
+                    if dk < start_index[2] or dk >= end_index[2]: continue
+                    kr = <int> (k / ref_factor)
+                    offi = di - start_index[0]
+                    offj = dj - start_index[1]
+                    offk = dk - start_index[2]
+                    #print offi, filled.shape[0],
+                    #print offj, filled.shape[1],
+                    #print offk, filled.shape[2]
+                    if level > 0:
+                        odind = (kr*2 + jr)*2 + ir
+                        # Replace with an ART-specific reader
+                        #temp_data = local_hydro_data.m_var_array[
+                        #        level][8*offset + odind]
+                        temp_data = level_data[varindex, 8*grid_id + odind]
+                    else:
+                        kr_offset = kr + <int> (start_index[0] / ref_factor)
+                        jr_offset = jr + <int> (start_index[1] / ref_factor)
+                        ir_offset = ir + <int> (start_index[2] / ref_factor)
+                        odind = (kr_offset * grid_dims[0] + jr_offset)*grid_dims[1] + ir_offset
+                        temp_data = level_data[varindex, odind]
+                    data[offi, offj, offk] = temp_data
+                    to_fill += 1
+    return to_fill
+
+
 @cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)



https://bitbucket.org/yt_analysis/yt/changeset/d202aaa72971/
changeset:   d202aaa72971
branch:      yt
user:        Christopher Moody
date:        2012-02-06 01:14:29
summary:     including particle headers
affected #:  2 files

diff -r 26eb767bac7f27a9ecbb91025bb527e97a64166c -r d202aaa72971d2be442744a338d65d3dc7ce0677 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -61,6 +61,7 @@
 from yt.frontends.art.io import _skip_record
 from yt.frontends.art.io import _read_record
 from yt.frontends.art.io import _read_record_size
+from yt.frontends.art.io import _read_struct
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -355,16 +356,47 @@
     _handle = None
     
     def __init__(self, filename, data_style='art',
-                 storage_filename = None):
+                 storage_filename = None, 
+                 file_particle_header=None, 
+                 file_particle_data=None,
+                 file_star_data=None):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
+        
+        
+        dirn = os.path.dirname(filename)
+        base = os.path.basename(filename)
+        aexp = base.split('_')[2].replace('.d','')
+        
+        self.file_particle_header = file_particle_header
+        self.file_particle_data = file_particle_data
+        self.file_star_data = file_star_data
+        
+        if file_particle_header is None:
+            loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+            if os.path.exists(loc):
+                self.file_particle_header = loc
+                mylog.info("Discovered particle header: %s",os.path.basename(loc))
+        if file_particle_data is None:
+            loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+            if os.path.exists(loc):
+                self.file_particle_data = loc
+                mylog.info("Discovered particle data:   %s",os.path.basename(loc))
+        if file_star_data is None:
+            loc = filename.replace(base,'stars_%s.dat'%aexp)
+            if os.path.exists(loc):
+                self.file_star_data = loc
+                mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
+        
+        import pdb; pdb.set_trace()
         StaticOutput.__init__(self, filename, data_style)
-        self.storage_filename = storage_filename
         
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["InitialTime"]=self.current_time
+        self.storage_filename = storage_filename
+        
         
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
@@ -435,7 +467,7 @@
         #we were already in seconds, go back in to code units
         self.current_time /= self.t0 
         
-        
+    
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
@@ -504,7 +536,7 @@
         self.parameters["gamma"] = 5./3.
         self.parameters["T_CMB0"] = 2.726  
         self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = self.header_vals['boxh']
+        self.parameters["boxh"] = header_vals['boxh']
         self.parameters['ng'] = 128 # of 0 level cells in 1d 
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.data_comment = header_vals['jname']
@@ -533,7 +565,9 @@
         hubble = self.parameters['hubble']
         dummy = 100.0 * hubble * na.sqrt(Om0)
         ng = self.parameters['ng']
-
+        wmu = self.parameters["wmu"]
+        boxh = header_vals['boxh'] 
+        
         #distance unit #boxh is units of h^-1 Mpc
         self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
         r0 = self.parameters["r0"]
@@ -551,7 +585,7 @@
         #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
         self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
         #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu53 * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
         
         #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
         #     for non-cosmological run aM0 must be defined during initialization
@@ -577,6 +611,63 @@
         self.child_grid_offset = f.tell()
 
         f.close()
+        
+        if self.file_particle_header is not None:
+            self._read_particle_header(self.file_particle_header)
+        
+    def _read_particle_header(self,fn):    
+        """ Reads control information, various parameters from the 
+            particle data set. Adapted from Daniel Ceverino's 
+            Read_Particles_Binary in analysis_ART.F   
+        """ 
+        header_struct = [
+            ('>i','pad'),
+            ('45s','header'), 
+            ('>f','aexpn'),
+            ('>f','aexp0'),
+            ('>f','amplt'),
+            ('>f','astep'),
+
+            ('>i','istep'),
+            ('>f','partw'),
+            ('>f','tintg'),
+
+            ('>f','Ekin'),
+            ('>f','Ekin1'),
+            ('>f','Ekin2'),
+            ('>f','au0'),
+            ('>f','aeu0'),
+
+
+            ('>i','Nrow'),
+            ('>i','Ngridc'),
+            ('>i','Nspecies'),
+            ('>i','Nseed'),
+
+            ('>f','Om0'),
+            ('>f','Oml0'),
+            ('>f','hubble'),
+            ('>f','Wp5'),
+            ('>f','Ocurv'),
+            ('>f','Omb0'),
+            ('>%ds'%(396),'extras'),
+            ('>f','unknown'),
+
+            ('>i','pad')]
+        fh = open(fn,'rb')
+        vals = _read_struct(fh,header_struct)
+        
+        for k,v in vals.iteritems():
+            self.parameters[k]=v
+        
+        seek_extras = 137
+        fh.seek(seek_extras)
+        n = self.parameters['Nspecies']
+        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
+        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        fh.close()
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

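The discovery logic above derives the companion file names from the gas file name; a
small illustration (the example file name is hypothetical):

    base = '10MpcBox_csf512_a0.300.d'              # hypothetical ART gas file
    aexp = base.split('_')[2].replace('.d','')     # -> 'a0.300'
    # companion files looked for alongside the gas file:
    #   'PMcrd%s.DAT'  % aexp   -> particle header
    #   'PMcrs0%s.DAT' % aexp   -> particle data
    #   'stars_%s.dat' % aexp   -> stellar data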

diff -r 26eb767bac7f27a9ecbb91025bb527e97a64166c -r d202aaa72971d2be442744a338d65d3dc7ce0677 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -216,7 +216,10 @@
 
 
 def read_in_particles(f):
-    
+    pass
+
+def read_in_stars():
+    pass
 
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
@@ -246,3 +249,12 @@
     s = struct.unpack('>i', f.read(struct.calcsize('>i')))
     f.seek(pos)
     return s[0]
+
+def _read_struct(f,structure,verbose=False):
+    vals = {}
+    for format,name in structure:
+        size = struct.calcsize(format)
+        (val,) = struct.unpack(format,f.read(size))
+        vals[name] = val
+        if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
+    return vals

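A brief usage sketch of the new header reader; the file name is hypothetical and
header_struct is the (format, name) table defined in _read_particle_header above:

    fh = open('PMcrda0.300.DAT', 'rb')         # hypothetical particle header
    vals = _read_struct(fh, header_struct)     # one struct.unpack per (format, name) pair
    fh.close()
    # vals['Nspecies'], vals['aexpn'], vals['Nrow'] are then plain scalars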


https://bitbucket.org/yt_analysis/yt/changeset/0fcc360c4d58/
changeset:   0fcc360c4d58
branch:      yt
user:        Christopher Moody
date:        2012-02-06 04:41:46
summary:     first pass at particle inclusion. changed recursive splitting to start from the middle dimension and work outward.
affected #:  3 files

diff -r d202aaa72971d2be442744a338d65d3dc7ce0677 -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -62,6 +62,7 @@
 from yt.frontends.art.io import _read_record
 from yt.frontends.art.io import _read_record_size
 from yt.frontends.art.io import _read_struct
+from yt.frontends.art.io import b2t
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -260,7 +261,7 @@
                 #if less than 10% the volume of a patch is covered with octs
                 psg_split = _ramses_reader.recursive_patch_splitting(
                     psg, idims, initial_left, 
-                    dleft_index, dfl,min_eff=min_eff)
+                    dleft_index, dfl,min_eff=min_eff,use_center=True)
                     
                 psgs.extend(psg_split)
                 
@@ -318,7 +319,61 @@
                 grids.append(self.grid(gi, self, level, fl, props[0,:]))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
+        
+        if self.file_particle_data:
+            lspecies = self.pf.parameters['lspecies']
+            Nrow     = self.pf.parameters['Nrow']
+            nstars = lspecies[-1]
+            a = self.pf.parameters['aexpn']
+            hubble = self.pf.parameters['hubble']
+            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
+            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
+            um  = self.pf.parameters['aM0'] #mass units in solar masses
+            um *= 1.989e33 #convert solar masses to grams 
+            pbar = get_pbar("Loading Particles ",len(g))
+            x,y,z,vx,vy,vz = io._read_in_particles(self.file_particle_data,
+                                                   nstars,Nrow)
+            self.particle_position_x = x*ud
+            self.particle_position_y = y*ud
+            self.particle_position_z = z*ud
+            self.particle_velocity_x = vx*uv
+            self.particle_velocity_y = vy*uv
+            self.particle_velocity_z = vz*uv
+            self.particle_species    = numpy.zeros(x.shape,dtype='int32')
+            self.particle_mass       = numpy.zeros(x.shape,dtype='float32')
+            
+            import pdb; pdb.set_trace()
+            a,b=0,0
+            for b,m in zip(self.pf.lspecies,pf.wspecies):
+                self.particle_species[a:b] = b
+                self.particle_mass[a:b]    = m*um
+                a=b
+            pbar.finish()
+            
+            
+            if self.file_star_data:
+                pbar = get_pbar("Loading Stars ",len(g))
+                data = io._read_in_stars(self.file_particle_data,nstars,nrow) 
+                tdum, adum, nstars, ws_old, ws_oldi, mass, initial_mass,\
+                    tbirth, metals1,metals2 = io._read_in_stars(self.file_star_data)
+                self.particle_star_ages = b2t(tbirth)
+                self.particle_star_metallicity1 = metals1*um
+                self.particle_star_metallicity2 = metals2*um
+                self.particle_star_mass_initial = initial_mass*um
+                self.particle_star_mass_current = mass*um
+                pbar.finish()
+            
+            pbar = get_pbar("Gridding  Particles ",len(g))
+            for gi, g in enumerate(grids): 
+                idx = self.particle_position_x                    
+                self.grids[gi] = g
+                pbar.update(gi)
+            pbar.finish()
+            
+        else:
+            for gi, g in enumerate(grids): 
+                self.grids[gi] = g
+            
 
     def _get_grid_parents(self, grid, LE, RE):
         mask = na.zeros(self.num_grids, dtype='bool')
@@ -387,7 +442,6 @@
                 self.file_star_data = loc
                 mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
         
-        import pdb; pdb.set_trace()
         StaticOutput.__init__(self, filename, data_style)
         
         self.dimensionality = 3
@@ -668,6 +722,11 @@
         self.parameters['wspecies'] = self.parameters['wspecies'][:n]
         self.parameters['lspecies'] = self.parameters['lspecies'][:n]
         fh.close()
+        
+        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
+        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
+        
 
     @classmethod
     def _is_valid(self, *args, **kwargs):


diff -r d202aaa72971d2be442744a338d65d3dc7ce0677 -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -218,8 +218,17 @@
 def read_in_particles(f):
     pass
 
-def read_in_stars():
-    pass
+def read_in_stars(file,nstars,Nrow):
+    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
+    real_size = 4 # for file_particle_data; not always true?
+    np = nstars # number of particles including stars, should come from lspecies[-1]
+    np_per_page = Nrow**2 # defined in ART a_setup.h
+    num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
+
+    f = na.fromfile(file, dtype='>f4') # direct access
+    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    x,y,z,vx,vy,vz = tuple(na.squeeze(na.dstack(pages))) # x,y,z,vx,vy,vz
+    return x,y,z,vx,vy,vz
 
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
@@ -258,3 +267,71 @@
         vals[name] = val
         if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
     return vals
+
+    sqrt = numpy.sqrt
+    sign = numpy.sign
+
+
+#All of these functions are to convert from hydro time var to 
+#proper time
+
+def find_root(f,a,b,tol=1e-6):
+    c = (a+b)/2.0
+    last = -numpy.inf
+    assert(sign(f(a)) != sign(f(b)))  
+    while numpy.abs(f(c)-last) > tol:
+        last=f(c)
+        if sign(last)==sign(f(b)):
+            b=c
+        else:
+            a=c
+        c = (a+b)/2.0
+    return c
+
+def quad(fintegrand,xmin,xmax,n=1e4):
+    spacings = numpy.logspace(numpy.log10(xmin),numpy.log10(xmax),n)
+    integrand_arr = fintegrand(spacings)
+    val = numpy.trapz(integrand_arr,dx=numpy.diff(spacings))
+    return val
+
+def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
+    def f_a2b(x):
+        val = 0.5*sqrt(Om0) / x**3.0
+        val /= sqrt(Om0/x**3.0 +Oml0 +(1.0 - Om0-Oml0)/x**2.0)
+        return val
+    #val, err = si.quad(f_a2b,1,at)
+    val = quad(f_a2b,1,at)
+    return val
+
+def b2a(bt,**kwargs):
+    #converts code time into expansion factor 
+    #if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2
+    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
+    f_b2a = lambda at: a2b(at,**kwargs)-bt
+    return find_root(f_b2a,1e-4,1.1)
+    #return so.brenth(f_b2a,1e-4,1.1)
+    #return brent.brent(f_b2a)
+
+def a2t(at,Om0=0.27,Oml0=0.73,h=0.700):
+    integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
+    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
+    current_time = quad(integrand,1e-4,at)
+    #spacings = numpy.logspace(-5,numpy.log10(at),1e5)
+    #integrand_arr = integrand(spacings)
+    #current_time = numpy.trapz(integrand_arr,dx=numpy.diff(spacings))
+    current_time *= 9.779/h
+    return current_time
+
+def b2t(tb,n = 1e2,**kwargs):
+    tb = numpy.array(tb)
+    if tb.shape == (): return a2t(b2a(tb))
+    age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
+    age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
+    tbs  = -1.*numpy.logspace(numpy.log10(-tb.min()),
+                          numpy.log10(-tb.max()),n)
+    ages = [a2t(b2a(tbi)) for tbi in tbs]
+    ages = numpy.array(ages)
+    fb2t = numpy.interp(tb,tbs,ages)
+    #fb2t = interp1d(tbs,ages)
+    return fb2t
+

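The chain above converts the ART hydro time variable to an expansion factor and then
to proper time; a hedged usage sketch, assuming the functions defined above and the
na alias for numpy (the input values are made up):

    a    = b2a(-150.0)        # bisection on a2b recovers the expansion factor
    t    = a2t(a)             # trapezoidal quadrature, scaled by 9.779/h, in Gyr
    ages = b2t(na.array([-150.0, -10.0]))   # vectorized via log-spaced interpolation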

diff -r d202aaa72971d2be442744a338d65d3dc7ce0677 -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -829,7 +829,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef void find_split(self, int *tr):
+    cdef void find_split(self, int *tr,):
         # First look for zeros
         cdef int i, center, ax
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
@@ -837,9 +837,9 @@
         axes = np.argsort(self.dd)[::-1]
         cdef np.int64_t *sig
         for axi in range(3):
-            ax = axes[axi]
-            center = self.dimensions[ax] / 2
-            sig = self.sigs[ax]
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
@@ -871,6 +871,61 @@
         tr[0] = 1; tr[1] = ax; tr[2] = zcp
         return
 
+    @cython.wraparound(False)
+    cdef void find_split_center(self, int *tr,):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef int flip
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
+            #frequently get stuck with many zeroes near the edge of the grid
+            #let's start from the middle, working out
+            for j in range(self.dimensions[ax]/2):
+                flip = 1
+                i = self.dimensions[ax]/2+j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                i = self.dimensions[ax]/2-j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                    
+                
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_properties(self):
@@ -1037,7 +1092,8 @@
         np.ndarray[np.int64_t, ndim=2] left_index,
         np.ndarray[np.int64_t, ndim=2] fl,
         int num_deep = 0,
-        float min_eff = 0.1):
+        float min_eff = 0.1,
+        bool use_center=False):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
@@ -1050,7 +1106,11 @@
         return [psg]
     if (psg.efficiency > min_eff or psg.efficiency < 0.0) and (volume < 452984832L):
         return [psg]
-    psg.find_split(tr)
+    if not use_center:    
+        psg.find_split(tr) #default
+    else:
+        psg.find_split_center(tr)    
+        
     tt = tr[0]
     ax = tr[1]
     fp = tr[2]
@@ -1075,7 +1135,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1, min_eff)
+                left_index, fl, num_deep + 1, min_eff,use_center)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1083,7 +1143,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1, min_eff)
+                left_index, fl, num_deep + 1, min_eff,use_center)
     else:
         rv_r = [R]
     return rv_r + rv_l


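find_split_center above looks for an empty plane starting at the middle of each axis
and working outward; a pure-Python sketch of that zero search, where sig stands for
the 1-D signature of cell counts along one axis:

    def find_zero_from_center(sig):
        n = len(sig)
        for j in range(n // 2):
            for i in (n // 2 + j, n // 2 - j):
                # an interior plane with no cells is a free split point
                if 0 < i < n - 1 and sig[i] == 0:
                    return i
        return None   # no empty plane; fall back to the second-derivative search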

https://bitbucket.org/yt_analysis/yt/changeset/e3a1f76ae1c5/
changeset:   e3a1f76ae1c5
branch:      yt
user:        Christopher Moody
date:        2012-02-06 05:54:06
summary:     forced 32-bit numpy arrays for IO-Cython compatibility
affected #:  4 files

diff -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -169,7 +169,6 @@
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
-        
         #double check bvalues
         # num_ogrids = 75019704L/8
         # ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
@@ -199,7 +198,7 @@
             psgs = []
             effs,sizes = [], []
             
-            #if level > 6: continue
+            if level > 5: continue
             
             #refers to the left index for the art octgrid
             left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
@@ -261,7 +260,7 @@
                 #if less than 10% the volume of a patch is covered with octs
                 psg_split = _ramses_reader.recursive_patch_splitting(
                     psg, idims, initial_left, 
-                    dleft_index, dfl,min_eff=min_eff,use_center=True)
+                    dleft_index, dfl,min_eff=min_eff,use_center=False)
                     
                 psgs.extend(psg_split)
                 
@@ -320,7 +319,9 @@
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
         
-        if self.file_particle_data:
+
+        if self.pf.file_particle_data:
+            import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             Nrow     = self.pf.parameters['Nrow']
             nstars = lspecies[-1]
@@ -333,16 +334,17 @@
             pbar = get_pbar("Loading Particles ",len(g))
             x,y,z,vx,vy,vz = io._read_in_particles(self.file_particle_data,
                                                    nstars,Nrow)
-            self.particle_position_x = x*ud
-            self.particle_position_y = y*ud
-            self.particle_position_z = z*ud
-            self.particle_velocity_x = vx*uv
-            self.particle_velocity_y = vy*uv
-            self.particle_velocity_z = vz*uv
+                                                   
+            lempc = le*pf['Mpc']/root_cells #left edge in kpc
+            rempc = re*pf['Mpc']/root_cells #right edge in kpc
+            wdmpc = rempc-lempc #width in mpc
+                                        
+            self.particle_position   = na.array([(x*ud-lempc)/wdmpc,
+                (y*ud-lempc)/wdmpc,(z*ud-lempc)/wdmpc]).T
+            self.particle_velocity   = na.array([vx*uv,vy*uv,vz*uv]).T
             self.particle_species    = numpy.zeros(x.shape,dtype='int32')
             self.particle_mass       = numpy.zeros(x.shape,dtype='float32')
             
-            import pdb; pdb.set_trace()
             a,b=0,0
             for b,m in zip(self.pf.lspecies,pf.wspecies):
                 self.particle_species[a:b] = b
@@ -363,16 +365,21 @@
                 self.particle_star_mass_current = mass*um
                 pbar.finish()
             
-            pbar = get_pbar("Gridding  Particles ",len(g))
+            pbar = get_pbar("Gridding  Particles ",len(grids))
             for gi, g in enumerate(grids): 
-                idx = self.particle_position_x                    
+                le,re = g.grid_left_edge, g.grid_right_edge
+                idx = na.logical_and(na.all(le < self.particle_position,axis=1),
+                                     na.all(re > self.particle_position,axis=1))
+                g.particle_indices = idx
                 self.grids[gi] = g
                 pbar.update(gi)
             pbar.finish()
             
         else:
+            pbar = get_pbar("Finalizing grids ",len(grids))
             for gi, g in enumerate(grids): 
                 self.grids[gi] = g
+            pbar.finish()
             
 
     def _get_grid_parents(self, grid, LE, RE):
@@ -414,7 +421,8 @@
                  storage_filename = None, 
                  file_particle_header=None, 
                  file_particle_data=None,
-                 file_star_data=None):
+                 file_star_data=None,
+                 discover_particles=False):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -426,21 +434,22 @@
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
         
-        if file_particle_header is None:
-            loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-            if os.path.exists(loc):
-                self.file_particle_header = loc
-                mylog.info("Discovered particle header: %s",os.path.basename(loc))
-        if file_particle_data is None:
-            loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-            if os.path.exists(loc):
-                self.file_particle_data = loc
-                mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-        if file_star_data is None:
-            loc = filename.replace(base,'stars_%s.dat'%aexp)
-            if os.path.exists(loc):
-                self.file_star_data = loc
-                mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
+        if discover_particles:
+            if file_particle_header is None:
+                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_header = loc
+                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
+            if file_particle_data is None:
+                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_data = loc
+                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
+            if file_star_data is None:
+                loc = filename.replace(base,'stars_%s.dat'%aexp)
+                if os.path.exists(loc):
+                    self.file_star_data = loc
+                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
         
         StaticOutput.__init__(self, filename, data_style)
         


diff -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -60,9 +60,9 @@
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
         arr = arr[3:-1,:]
         if field==None:
-            self.level_data[level] = arr
+            self.level_data[level] = arr.astype('float32')
         else:
-            self.level_data[level] = arr[:field+1,:]
+            self.level_data[level] = arr[:field+1,:].astype('float32')
         del arr
 
     def preload_root_level(self):


diff -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1093,7 +1093,7 @@
         np.ndarray[np.int64_t, ndim=2] fl,
         int num_deep = 0,
         float min_eff = 0.1,
-        bool use_center=False):
+        int use_center=0):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r


diff -r 0fcc360c4d58c0ec0cfd4daee09e007590795c8d -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -310,7 +310,7 @@
         domain = ogrid_info[0]
         #print "Loading", domain, ogrid_info
         grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:]
+        og_start_index = ogrid_info[3:6]
         for i in range(2*ref_factor):
             di = i + og_start_index[0] * ref_factor
             if di < start_index[0] or di >= end_index[0]: continue



https://bitbucket.org/yt_analysis/yt/changeset/b33d607ce6ec/
changeset:   b33d607ce6ec
branch:      yt
user:        Christopher Moody
date:        2012-02-06 08:08:29
summary:     particles read in and indices gridded. fields not set up yet.
affected #:  2 files

diff -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc -r b33d607ce6eca9f50815b4a1e157549b466a6c63 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -55,6 +55,8 @@
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
 
+from yt.frontends.art.io import read_particles
+from yt.frontends.art.io import read_stars
 from yt.frontends.art.io import _count_art_octs
 from yt.frontends.art.io import _read_art_level_info
 from yt.frontends.art.io import _read_art_child
@@ -321,8 +323,8 @@
         
 
         if self.pf.file_particle_data:
-            import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
+            wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
             nstars = lspecies[-1]
             a = self.pf.parameters['aexpn']
@@ -331,45 +333,46 @@
             uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
             um  = self.pf.parameters['aM0'] #mass units in solar masses
             um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles ",len(g))
-            x,y,z,vx,vy,vz = io._read_in_particles(self.file_particle_data,
-                                                   nstars,Nrow)
-                                                   
-            lempc = le*pf['Mpc']/root_cells #left edge in kpc
-            rempc = re*pf['Mpc']/root_cells #right edge in kpc
-            wdmpc = rempc-lempc #width in mpc
-                                        
-            self.particle_position   = na.array([(x*ud-lempc)/wdmpc,
-                (y*ud-lempc)/wdmpc,(z*ud-lempc)/wdmpc]).T
-            self.particle_velocity   = na.array([vx*uv,vy*uv,vz*uv]).T
-            self.particle_species    = numpy.zeros(x.shape,dtype='int32')
-            self.particle_mass       = numpy.zeros(x.shape,dtype='float32')
+            pbar = get_pbar("Loading Particles ",5)
+            self.pf.particle_position,self.pf.particle_velocity = \
+                read_particles(self.pf.file_particle_data,nstars,Nrow)
+            pbar.update(1)
+            np = self.pf.particle_position.shape[0]
+            self.pf.particle_position  -= 1.0 #fortran indices start with 0
+            pbar.update(2)
+            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            pbar.update(3)
+            self.pf.particle_velocity  *= uv #to proper cm/s
+            pbar.update(4)
+            self.pf.particle_species    = na.zeros(np,dtype='int32')
+            self.pf.particle_mass       = na.zeros(np,dtype='float32')
             
             a,b=0,0
-            for b,m in zip(self.pf.lspecies,pf.wspecies):
-                self.particle_species[a:b] = b
-                self.particle_mass[a:b]    = m*um
+            for i,(b,m) in enumerate(zip(lspecies,wspecies)):
+                self.pf.particle_species[a:b] = i #particle type
+                self.pf.particle_mass[a:b]    = m*um #mass in grams
                 a=b
             pbar.finish()
             
             
-            if self.file_star_data:
-                pbar = get_pbar("Loading Stars ",len(g))
-                data = io._read_in_stars(self.file_particle_data,nstars,nrow) 
-                tdum, adum, nstars, ws_old, ws_oldi, mass, initial_mass,\
-                    tbirth, metals1,metals2 = io._read_in_stars(self.file_star_data)
-                self.particle_star_ages = b2t(tbirth)
-                self.particle_star_metallicity1 = metals1*um
-                self.particle_star_metallicity2 = metals2*um
-                self.particle_star_mass_initial = initial_mass*um
-                self.particle_star_mass_current = mass*um
+            if self.pf.file_star_data:
+                nstars, mass, imass, tbirth, metals1, metals2 \
+                     = read_stars(self.pf.file_star_data,nstars,Nrow)
+                n=min(1e2,len(tbirth))
+                pbar = get_pbar("Stellar ages ",n)
+                self.pf.particle_star_ages = b2t(tbirth,n=n,logger=lambda x: pbar.update(x))
                 pbar.finish()
+                self.pf.particle_star_metallicity1 = metals1/mass
+                self.pf.particle_star_metallicity2 = metals2/mass
+                self.pf.particle_star_mass_initial = imass*um
+                self.pf.particle_star_mass_current = mass*um
+                
             
             pbar = get_pbar("Gridding  Particles ",len(grids))
             for gi, g in enumerate(grids): 
-                le,re = g.grid_left_edge, g.grid_right_edge
-                idx = na.logical_and(na.all(le < self.particle_position,axis=1),
-                                     na.all(re > self.particle_position,axis=1))
+                le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
+                idx = na.logical_and(na.all(le < self.pf.particle_position,axis=1),
+                                     na.all(re > self.pf.particle_position,axis=1))
                 g.particle_indices = idx
                 self.grids[gi] = g
                 pbar.update(gi)

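The gridding loop above assigns particles to grids with a simple bounding-box test;
a standalone sketch of that selection (names are illustrative, positions in the same
unitary coordinates):

    def particles_in_grid(le, re, pos):
        # boolean mask of the particles strictly inside one grid's box
        return na.logical_and(na.all(le < pos, axis=1),
                              na.all(re > pos, axis=1))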

diff -r e3a1f76ae1c5fc12a2510513d36fac5da2334bdc -r b33d607ce6eca9f50815b4a1e157549b466a6c63 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -25,11 +25,12 @@
 
 import numpy as na
 import struct
-import pdb
+
+import os
+import os.path
 
 from yt.utilities.io_handler import \
     BaseIOHandler
-import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
@@ -215,21 +216,32 @@
     return le,fl,iocts,nLevel
 
 
-def read_in_particles(f):
-    pass
-
-def read_in_stars(file,nstars,Nrow):
+def read_particles(file,nstars,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
     np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
-    f = na.fromfile(file, dtype='>f4') # direct access
+    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
     pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
-    x,y,z,vx,vy,vz = tuple(na.squeeze(na.dstack(pages))) # x,y,z,vx,vy,vz
-    return x,y,z,vx,vy,vz
+    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    return data[:,0:3],data[:,4:]
 
+def read_stars(file,nstars,Nrow):
+    fh = open(file,'rb')
+    tdum,adum   = _read_frecord(fh,'>d')
+    nstars      = _read_frecord(fh,'>i')
+    ws_old, ws_oldi = _read_frecord(fh,'>d')
+    mass    = _read_frecord(fh,'>f') 
+    imass   = _read_frecord(fh,'>f') 
+    tbirth  = _read_frecord(fh,'>f') 
+    if fh.tell() < os.path.getsize(file):
+        metals1 = _read_frecord(fh,'>f') 
+    if fh.tell() < os.path.getsize(file):
+        metals2 = _read_frecord(fh,'>f')     
+    return nstars, mass, imass, tbirth, metals1,metals2
+    
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
                 ",>%sf4"%(2)+",>i4")
@@ -247,10 +259,21 @@
     f.seek(s[0], 1)
     s = struct.unpack('>i', f.read(struct.calcsize('>i')))
 
-def _read_record(f):
+def _read_frecord(f,fmt):
+    s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    count = s1/na.dtype(fmt).itemsize
+    ss = na.fromfile(f,fmt,count=count)
+    s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    assert s1==s2
+    return ss
+
+
+def _read_record(f,fmt=None):
     s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
     ss = f.read(s)
     s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    if fmt is not None:
+        return struct.unpack(ss,fmt)
     return ss
 
 def _read_record_size(f):
@@ -268,18 +291,18 @@
         if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
     return vals
 
-    sqrt = numpy.sqrt
-    sign = numpy.sign
 
 
 #All of these functions are to convert from hydro time var to 
 #proper time
+sqrt = na.sqrt
+sign = na.sign
 
 def find_root(f,a,b,tol=1e-6):
     c = (a+b)/2.0
-    last = -numpy.inf
+    last = -na.inf
     assert(sign(f(a)) != sign(f(b)))  
-    while numpy.abs(f(c)-last) > tol:
+    while na.abs(f(c)-last) > tol:
         last=f(c)
         if sign(last)==sign(f(b)):
             b=c
@@ -289,9 +312,9 @@
     return c
 
 def quad(fintegrand,xmin,xmax,n=1e4):
-    spacings = numpy.logspace(numpy.log10(xmin),numpy.log10(xmax),n)
+    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
     integrand_arr = fintegrand(spacings)
-    val = numpy.trapz(integrand_arr,dx=numpy.diff(spacings))
+    val = na.trapz(integrand_arr,dx=na.diff(spacings))
     return val
 
 def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
@@ -316,22 +339,25 @@
     integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
     #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
     current_time = quad(integrand,1e-4,at)
-    #spacings = numpy.logspace(-5,numpy.log10(at),1e5)
+    #spacings = na.logspace(-5,na.log10(at),1e5)
     #integrand_arr = integrand(spacings)
-    #current_time = numpy.trapz(integrand_arr,dx=numpy.diff(spacings))
+    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
     current_time *= 9.779/h
     return current_time
 
-def b2t(tb,n = 1e2,**kwargs):
-    tb = numpy.array(tb)
+def b2t(tb,n = 1e2,logger=None,**kwargs):
+    tb = na.array(tb)
     if tb.shape == (): return a2t(b2a(tb))
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
-    tbs  = -1.*numpy.logspace(numpy.log10(-tb.min()),
-                          numpy.log10(-tb.max()),n)
-    ages = [a2t(b2a(tbi)) for tbi in tbs]
-    ages = numpy.array(ages)
-    fb2t = numpy.interp(tb,tbs,ages)
+    tbs  = -1.*na.logspace(na.log10(-tb.min()),
+                          na.log10(-tb.max()),n)
+    ages = []
+    for i,tbi in enumerate(tbs):
+        ages += a2t(b2a(tbi)),
+        if logger: logger(i)
+    ages = na.array(ages)
+    fb2t = na.interp(tb,tbs,ages)
     #fb2t = interp1d(tbs,ages)
     return fb2t
 


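A minimal standalone illustration of the PMcrs0 page layout assumed by read_particles
above; Nrow and the fake data are illustrative only:

    import numpy as na
    Nrow, words = 2, 6                      # words per particle: x,y,z,vx,vy,vz
    np_per_page = Nrow**2                   # particles per page, from a_setup.h
    fake = na.arange(2*words*np_per_page, dtype='>f4')    # two fake pages
    # each page is variable-major: all x's for the page, then all y's, ...
    pages = na.vsplit(fake.reshape(2, words, np_per_page), 2)
    data  = na.squeeze(na.dstack(pages)).T  # one row per particle
    assert data.shape == (2*np_per_page, words)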

https://bitbucket.org/yt_analysis/yt/changeset/9ee69e4f02cd/
changeset:   9ee69e4f02cd
branch:      yt
user:        Christopher Moody
date:        2012-02-06 08:17:13
summary:     added standard LeftEdge and ActiveDimensions attributes to ART grids
affected #:  1 file

diff -r b33d607ce6eca9f50815b4a1e157549b466a6c63 -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -77,14 +77,19 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, start_index):
+    def __init__(self, id, hierarchy, level, locations, props):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
+        start_index = props[0]
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
+        
+        self.LeftEdge = props[0]
+        self.RightEdge = props[1]
+        self.ActiveDimensions = props[2] 
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -317,7 +322,7 @@
                 self.grid_right_edge[gi,:] = props[1,:]*correction / dds
                 self.grid_dimensions[gi,:] = props[2,:]*correction
                 self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                grids.append(self.grid(gi, self, level, fl, props))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
         
@@ -333,7 +338,7 @@
             uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
             um  = self.pf.parameters['aM0'] #mass units in solar masses
             um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles ",5)
+            pbar = get_pbar("Loading Particles   ",5)
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
@@ -359,7 +364,7 @@
                 nstars, mass, imass, tbirth, metals1, metals2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 n=min(1e2,len(tbirth))
-                pbar = get_pbar("Stellar ages ",n)
+                pbar = get_pbar("Stellar ages          ",n)
                 self.pf.particle_star_ages = b2t(tbirth,n=n,logger=lambda x: pbar.update(x))
                 pbar.finish()
                 self.pf.particle_star_metallicity1 = metals1/mass



https://bitbucket.org/yt_analysis/yt/changeset/858542457124/
changeset:   858542457124
branch:      yt
user:        Christopher Moody
date:        2012-02-07 01:30:21
summary:     particle fields still not working.
affected #:  5 files

diff -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 -r 8585424571248390eea602248d2a6a6419c6ab12 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -54,6 +54,8 @@
 
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
+    
+from yt.frontends.art.definitions import art_particle_field_names
 
 from yt.frontends.art.io import read_particles
 from yt.frontends.art.io import read_stars
@@ -149,6 +151,7 @@
                             'Pressure','Gamma','GasEnergy',
                             'Metal_DensitySNII', 'Metal_DensitySNIa',
                             'Potential_New','Potential_Old']
+        #self.field_list += art_particle_field_names
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -359,18 +362,19 @@
                 a=b
             pbar.finish()
             
+            self.pf.particle_star_index = lspecies[-2]
             
             if self.pf.file_star_data:
                 nstars, mass, imass, tbirth, metals1, metals2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 n=min(1e2,len(tbirth))
-                pbar = get_pbar("Stellar ages          ",n)
+                pbar = get_pbar("Stellar Ages        ",n)
                 self.pf.particle_star_ages = b2t(tbirth,n=n,logger=lambda x: pbar.update(x))
                 pbar.finish()
                 self.pf.particle_star_metallicity1 = metals1/mass
                 self.pf.particle_star_metallicity2 = metals2/mass
                 self.pf.particle_star_mass_initial = imass*um
-                self.pf.particle_star_mass_current = mass*um
+                self.pf.particle_mass[-nstars:] = mass*um
                 
             
             pbar = get_pbar("Gridding  Particles ",len(grids))
@@ -379,6 +383,7 @@
                 idx = na.logical_and(na.all(le < self.pf.particle_position,axis=1),
                                      na.all(re > self.pf.particle_position,axis=1))
                 g.particle_indices = idx
+                g.NumberOfParticles = idx.sum()
                 self.grids[gi] = g
                 pbar.update(gi)
             pbar.finish()
@@ -408,6 +413,23 @@
             g._prepare_grid()
             g._setup_dx()
         self.max_level = self.grid_levels.max()
+        
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=False, particle_type=True)
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
@@ -430,7 +452,8 @@
                  file_particle_header=None, 
                  file_particle_data=None,
                  file_star_data=None,
-                 discover_particles=False):
+                 discover_particles=False,
+                 use_particles=True):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -459,6 +482,8 @@
                     self.file_star_data = loc
                     mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
         
+        self.use_particles = any([self.file_particle_header,
+            self.file_star_data, self.file_particle_data])
         StaticOutput.__init__(self, filename, data_style)
         
         self.dimensionality = 3
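
The external_wrapper helper in the _setup_field_list hunk above exists to freeze each field name at loop time; binding the loop variable directly inside the inner function would leave every conversion function pointing at the last field. A minimal standalone sketch of the pitfall (illustrative only, not yt code):

    fields = ["particle_mass", "particle_age"]

    # Broken: each lambda looks up `field` when called, after the loop is done.
    broken = [lambda data: ("convert", field) for field in fields]

    # Fixed: a wrapper call freezes the current value of `field` in a closure.
    def external_wrapper(f):
        def _convert_function(data):
            return ("convert", f)
        return _convert_function
    fixed = [external_wrapper(field) for field in fields]

    print(broken[0](None))   # ('convert', 'particle_age')  -- wrong field
    print(fixed[0](None))    # ('convert', 'particle_mass') -- intended field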


diff -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 -r 8585424571248390eea602248d2a6a6419c6ab12 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -0,0 +1,30 @@
+"""
+Definitions specific to ART
+
+Author: Christopher E. Moody <cemoody at ucsc.ed>
+Affiliation: UC Santa Cruz
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Christopher E. Moody.  All Rights
+  Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+art_particle_field_names = ['particle_position','particle_mass','particle_velocity',
+                            'particle_ages','particle_metallicity1',
+                            'particle_metallicity2','particle_mass_initial']


diff -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 -r 8585424571248390eea602248d2a6a6419c6ab12 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -51,12 +51,17 @@
                     "Pressure":"pressure",
                     "Metallicity":"metallicity",
                     "GasEnergy":"GasEnergy"
+                    #"ParticleMass": "particle_mass"
                    }
 
 for f,v in translation_dict.items():
+    pfield = v.startswith("particle")
     add_art_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)])
-    add_art_field(f, function=TranslationFunc(v), take_log=True)
+                  validators = [ValidateDataField(v)],
+                  particle_type = pfield)
+    add_art_field(f, function=TranslationFunc(v), take_log=True,
+                  particle_type = pfield)
+    
 
 #def _convertMetallicity(data):
 #    return data.convert("Metal_Density1")
@@ -123,3 +128,34 @@
 add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
 KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
 KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
+
+
+#Particle Fields
+
+import pdb; pdb.set_trace()
+
+
+# pm=add_art_field("particle_mass", particle_type=True,
+#     validators = [ValidateDataField("particle_mass")])
+# pm.particle_type = True
+
+#               
+# def _convertParticleMassMsun(data):
+#     return (1/1.989e33)
+# def _particle_mass_m_sun(field, data):
+#     import pdb; pdb.set_trace()
+#     return data['ParticleMass']
+# add_art_field("ParticleMassMsun", function=_particle_mass_m_sun,
+#           particle_type=True, convert_function=_convertParticleMassMsun,
+#           take_log=True, units=r"\rm{M_{\odot}}")
+
+def _convertParticleMassMsun(data):
+    return (1/1.989e33)
+def _particle_mass_m_sun(field, data):
+    import pdb;pdb.set_trace()
+    return data["particle_mass"]
+add_field("ParticleMassMsun", function=_particle_mass_m_sun,
+          validators=[ValidateSpatial(0), ValidateDataField("particle_mass")],
+          particle_type=True, convert_function=_convertParticleMassMsun,
+          take_log=True, units=r"\rm{M_{\odot}}")
+


diff -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 -r 8585424571248390eea602248d2a6a6419c6ab12 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,6 +36,8 @@
     BaseIOHandler
 import yt.utilities.amr_utils as au
 
+from yt.frontends.art.definitions import art_particle_field_names
+
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
 
@@ -83,8 +85,40 @@
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
+
+    def _read_particle_field(self, grid, field):
+        import pdb; pdb.set_trace()
+        idx = grid.particle_indices
+        if field == 'particle_position':
+            return grid.pf.particle_position[idx]
+        if field == 'particle_mass':
+            return grid.pf.particle_mass[idx]
+        if field == 'particle_velocity':
+            return grid.pf.particle_velocity[idx]
+        sidx = idx-self.pf.particle_star_index
+        sidx = sidx[sidx>=0]
+        if field == 'particle_ages':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            tr[idx] = grid.pf.particle_star_ages[sidx]
+            return tr
+        if field == 'particle_metallicity1':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            tr[idx] = grid.pf.particle_star_metallicity1[sidx]
+            return tr
+        if field == 'particle_metallicity2':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            tr[idx] = grid.pf.particle_star_metallicity2[sidx]
+            return tr
+        if field == 'particle_mass_initial':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            tr[idx] = grid.pf.particle_star_mass_initial[sidx]
+            return tr
+        raise 'Should have matched one of the particle fields...'
+
         
     def _read_data_set(self, grid, field):
+        if field in art_particle_field_names:
+            return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
         if grid.Level == 0: # We only have one root grid


diff -r 9ee69e4f02cdba8e1c1ba1e18c13e0809e1cde45 -r 8585424571248390eea602248d2a6a6419c6ab12 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -125,6 +125,23 @@
     def _detect_fields(self):
         self.field_list = self.tree_proxy.field_names[:]
     
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=False, particle_type=True)
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)



https://bitbucket.org/yt_analysis/yt/changeset/895621adae33/
changeset:   895621adae33
branch:      yt
user:        Christopher Moody
date:        2012-02-07 01:43:50
summary:     added time conversion to data_structures
affected #:  2 files

diff -r 8585424571248390eea602248d2a6a6419c6ab12 -r 895621adae336cdfc394a87e815b8edd40f2334f yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -561,7 +561,8 @@
         self.time_units['days']  = seconds / (3600*24.0)
 
         #we were already in seconds, go back in to code units
-        self.current_time /= self.t0 
+        #self.current_time /= self.t0 
+        #self.current_time = b2t(self.current_time,n=1)
         
     
     def _parse_parameter_file(self):
@@ -636,6 +637,7 @@
         self.parameters['ng'] = 128 # of 0 level cells in 1d 
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.data_comment = header_vals['jname']
+        self.current_time_raw = header_vals['t']
         self.current_time = header_vals['t']
         self.omega_lambda = header_vals['Oml0']
         self.omega_matter = header_vals['Om0']
@@ -646,13 +648,13 @@
         #nchem is nhydrovars-8, so we typically have 2 extra chem species 
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
-        def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-            return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
-        integrand_arr = integrand(spacings)
-        self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
-        self.current_time *= self.hubble_time
-                
+        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
+        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
+        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        # integrand_arr = integrand(spacings)
+        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time *= self.hubble_time
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
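
The block commented out above is the standard flat-LCDM age integral, t(a) = (1/H0) * integral from 0 to a of da' / (a' * sqrt(Omega_Lambda + Omega_m * a'^-3)); this commit swaps it for the b2t() conversion in io.py, which maps ART's internal time variable to an age directly. A standalone sketch of the integral with illustrative parameters (not values read from any ART header):

    import numpy as np

    def age_at_a(aexpn, om=0.27, oml=0.73, h=0.70):
        """Age of a flat LCDM universe at expansion factor aexpn, in seconds."""
        hubble_time = 1.0 / (h * 100 / 3.08568025e19)   # 1/H0 in seconds
        a = np.logspace(-5, np.log10(aexpn), 100000)
        integrand = 1.0 / (a * np.sqrt(oml + om * a ** -3.0))
        return hubble_time * np.trapz(integrand, x=a)

    print(age_at_a(1.0) / 3.15569e7 / 1e9)   # roughly 13.9 Gyr for these values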
 


diff -r 8585424571248390eea602248d2a6a6419c6ab12 -r 895621adae336cdfc394a87e815b8edd40f2334f yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -381,7 +381,11 @@
 
 def b2t(tb,n = 1e2,logger=None,**kwargs):
     tb = na.array(tb)
-    if tb.shape == (): return a2t(b2a(tb))
+    if type(tb) == type(1.1): 
+        return a2t(b2a(tb))
+    if tb.shape == (): 
+        return a2t(b2a(tb))
+    if len(tb) < n: n= len(tb)
     age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
     age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
     tbs  = -1.*na.logspace(na.log10(-tb.min()),
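
The type(tb) == type(1.1) test added above is there so b2t() accepts a plain float as well as an array of birth times before tb.shape is consulted. An equivalent, more explicit dispatch would look like this sketch (not the yt implementation):

    import numpy as np

    def as_scalar_or_array(tb):
        """Return (is_scalar, values) so one code path handles both cases."""
        arr = np.asarray(tb, dtype="float64")
        return arr.ndim == 0, np.atleast_1d(arr)

    print(as_scalar_or_array(0.5))          # (True, array([0.5]))
    print(as_scalar_or_array([0.1, 0.2]))   # (False, array([0.1, 0.2]))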



https://bitbucket.org/yt_analysis/yt/changeset/210c6d907441/
changeset:   210c6d907441
branch:      yt
user:        Christopher Moody
date:        2012-02-07 04:56:43
summary:     particle fields can be accessed
affected #:  3 files

diff -r 895621adae336cdfc394a87e815b8edd40f2334f -r 210c6d9074416e772014e334939033d881982bb0 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -140,7 +140,8 @@
 
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
-
+        self._setup_field_list()
+        
     def _initialize_data_storage(self):
         pass
 
@@ -150,9 +151,10 @@
                             'x-momentum','y-momentum','z-momentum',
                             'Pressure','Gamma','GasEnergy',
                             'Metal_DensitySNII', 'Metal_DensitySNIa',
-                            'Potential_New','Potential_Old']
-        #self.field_list += art_particle_field_names
-    
+                            'Potential_New','Potential_Old',
+                            'particle_mass']
+        self.field_list += art_particle_field_names
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
@@ -208,8 +210,6 @@
             psgs = []
             effs,sizes = [], []
             
-            if level > 5: continue
-            
             #refers to the left index for the art octgrid
             left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
@@ -355,6 +355,11 @@
             self.pf.particle_species    = na.zeros(np,dtype='int32')
             self.pf.particle_mass       = na.zeros(np,dtype='float32')
             
+            # self.pf.conversion_factors['particle_mass'] = 1.0
+            # self.pf.conversion_factors['particle_species'] = 1.0
+            # self.pf.conversion_factors['particle_velocity'] = 1.0
+            # self.pf.conversion_factors['particle_position'] = 1.0
+            
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 self.pf.particle_species[a:b] = i #particle type
@@ -376,14 +381,13 @@
                 self.pf.particle_star_mass_initial = imass*um
                 self.pf.particle_mass[-nstars:] = mass*um
                 
-            
             pbar = get_pbar("Gridding  Particles ",len(grids))
             for gi, g in enumerate(grids): 
                 le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
                 idx = na.logical_and(na.all(le < self.pf.particle_position,axis=1),
                                      na.all(re > self.pf.particle_position,axis=1))
                 g.particle_indices = idx
-                g.NumberOfParticles = idx.sum()
+                g.NumberOfParticles = na.sum(idx)
                 self.grids[gi] = g
                 pbar.update(gi)
             pbar.finish()
@@ -410,8 +414,10 @@
             if len(parents) > 0:
                 g.Parent.extend(parents.tolist())
                 for p in parents: p.Children.append(g)
+            gnop = g.NumberOfParticles
             g._prepare_grid()
             g._setup_dx()
+            g.NumberOfParticles = gnop
         self.max_level = self.grid_levels.max()
         
     def _setup_field_list(self):


diff -r 895621adae336cdfc394a87e815b8edd40f2334f -r 210c6d9074416e772014e334939033d881982bb0 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -50,26 +50,29 @@
                     "z-velocity":"velocity_z",
                     "Pressure":"pressure",
                     "Metallicity":"metallicity",
-                    "GasEnergy":"GasEnergy"
-                    #"ParticleMass": "particle_mass"
+                    "GasEnergy":"GasEnergy",
+                    "particle_mass":"ParticleMass"
                    }
 
 for f,v in translation_dict.items():
-    pfield = v.startswith("particle")
+    pfield = v.lower().startswith("particle")
     add_art_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)],
                   particle_type = pfield)
     add_art_field(f, function=TranslationFunc(v), take_log=True,
                   particle_type = pfield)
+
+#Particle Fields
+def _get_convert(fname):
+    def _conv(data):
+        return 1.0
+    return _conv
+
+add_art_field("particle_mass", function=NullFunc, take_log=False,
+              convert_function=_get_convert("particle_mass"),
+              units=r"\rm{g}", particle_type=True)
     
 
-#def _convertMetallicity(data):
-#    return data.convert("Metal_Density1")
-#KnownARTFields["Metal_Density1"]._units = r"1"
-#KnownARTFields["Metal_Density1"]._projected_units = r"1"
-#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
-
-
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
@@ -130,32 +133,4 @@
 KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
 
 
-#Particle Fields
 
-import pdb; pdb.set_trace()
-
-
-# pm=add_art_field("particle_mass", particle_type=True,
-#     validators = [ValidateDataField("particle_mass")])
-# pm.particle_type = True
-
-#               
-# def _convertParticleMassMsun(data):
-#     return (1/1.989e33)
-# def _particle_mass_m_sun(field, data):
-#     import pdb; pdb.set_trace()
-#     return data['ParticleMass']
-# add_art_field("ParticleMassMsun", function=_particle_mass_m_sun,
-#           particle_type=True, convert_function=_convertParticleMassMsun,
-#           take_log=True, units=r"\rm{M_{\odot}}")
-
-def _convertParticleMassMsun(data):
-    return (1/1.989e33)
-def _particle_mass_m_sun(field, data):
-    import pdb;pdb.set_trace()
-    return data["particle_mass"]
-add_field("ParticleMassMsun", function=_particle_mass_m_sun,
-          validators=[ValidateSpatial(0), ValidateDataField("particle_mass")],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          take_log=True, units=r"\rm{M_{\odot}}")
-


diff -r 895621adae336cdfc394a87e815b8edd40f2334f -r 210c6d9074416e772014e334939033d881982bb0 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -81,13 +81,12 @@
         var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
-        self.level_data[0] = arr
+        self.level_data[0]
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        import pdb; pdb.set_trace()
         idx = grid.particle_indices
         if field == 'particle_position':
             return grid.pf.particle_position[idx]
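
With this changeset the particle fields reach the regular field access path, so reading them looks like any other yt-2.x field access. A rough usage sketch only: the filename is a placeholder, discover_particles comes from the ARTStaticOutput signature in the earlier diffs, and the field names here are the ones in art_particle_field_names at this point in the series.

    from yt.mods import load

    pf = load("10MpcBox_csf512_a0.300.d", discover_particles=True)
    dd = pf.h.all_data()
    print(dd["particle_mass"][:5])   # one of art_particle_field_names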



https://bitbucket.org/yt_analysis/yt/changeset/a97c1ad2ec24/
changeset:   a97c1ad2ec24
branch:      yt
user:        Christopher Moody
date:        2012-02-07 18:48:30
summary:     fixed preload so that all fields of a level are loaded at once. Previously only the fields up to the one requested were cached, which caused problems when a second field was plotted while the level still appeared to be loaded.
affected #:  3 files

diff -r 210c6d9074416e772014e334939033d881982bb0 -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -137,7 +137,6 @@
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
@@ -147,12 +146,12 @@
 
     def _detect_fields(self):
         # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-                            'x-momentum','y-momentum','z-momentum',
-                            'Pressure','Gamma','GasEnergy',
-                            'Metal_DensitySNII', 'Metal_DensitySNIa',
-                            'Potential_New','Potential_Old',
-                            'particle_mass']
+        self.field_list = ['Density','TotalEnergy',
+                           'x-momentum','y-momentum','z-momentum',
+                           'Pressure','Gamma','GasEnergy',
+                           'Metal_DensitySNII', 'Metal_DensitySNIa',
+                           'Potential_New','Potential_Old',
+                           'particle_mass']
         self.field_list += art_particle_field_names
 
     def _setup_classes(self):
@@ -166,6 +165,8 @@
         
         min_eff = 0.20
         
+        vol_max = 256**3
+        
         f = open(self.pf.parameter_filename,'rb')
         
         
@@ -209,6 +210,8 @@
                 continue
             psgs = []
             effs,sizes = [], []
+
+            if level > self.pf.limit_level : continue
             
             #refers to the left index for the art octgrid
             left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
@@ -268,14 +271,19 @@
                 #that only partially fill the grid,it  may be more efficient
                 #to split large patches into smaller patches. We split
                 #if less than 10% the volume of a patch is covered with octs
-                psg_split = _ramses_reader.recursive_patch_splitting(
-                    psg, idims, initial_left, 
-                    dleft_index, dfl,min_eff=min_eff,use_center=False)
+                if idims.prod() > vol_max or psg.efficiency < min_eff:
+                    psg_split = _ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl,min_eff=min_eff,use_center=False)
                     
-                psgs.extend(psg_split)
+                    psgs.extend(psg_split)
+                    psg_eff += [x.efficiency for x in psg_split] 
+                else:
+                    psgs.append(psg)
+                    psg_eff =  [psg.efficiency,]
                 
                 tol = 1.00001
-                psg_eff  += [x.efficiency for x in psg_split] 
+                
                 
                 step+=1
                 pbar.update(step)
@@ -459,7 +467,8 @@
                  file_particle_data=None,
                  file_star_data=None,
                  discover_particles=False,
-                 use_particles=True):
+                 use_particles=True,
+                 limit_level=None):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -471,6 +480,12 @@
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
         
+        if limit_level is None:
+            self.limit_level = na.inf
+        else:
+            mylog.info("Using maximum level: %i",limit_level)
+            self.limit_level = limit_level
+        
         if discover_particles:
             if file_particle_header is None:
                 loc = filename.replace(base,'PMcrd%s.DAT'%aexp)


diff -r 210c6d9074416e772014e334939033d881982bb0 -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -44,14 +44,15 @@
 add_art_field = KnownARTFields.add_field
 
 translation_dict = {"Density":"density",
-                    "TotalEnergy":"TotalEnergy",
+                    "TotalEnergy":"total_energy",
                     "x-velocity":"velocity_x",
                     "y-velocity":"velocity_y",
                     "z-velocity":"velocity_z",
                     "Pressure":"pressure",
                     "Metallicity":"metallicity",
-                    "GasEnergy":"GasEnergy",
-                    "particle_mass":"ParticleMass"
+                    "GasEnergy":"gas_energy",
+                    "ParticleMass":"particle_mass",
+                    "Temperature":'temperature'
                    }
 
 for f,v in translation_dict.items():


diff -r 210c6d9074416e772014e334939033d881982bb0 -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -51,6 +51,40 @@
         self.level_data = {}
 
     def preload_level(self, level,field=None):
+        """ Reads in the full ART tree. From the ART source:
+            iOctLv :    >0   - level of an oct
+            iOctPr :         - parent of an oct
+            iOctCh :    >0   - pointer to an oct of children
+                        0   - there are no children; the cell is a leaf
+            iOctNb :    >0   - pointers to neighbouring cells 
+            iOctPs :         - coordinates of Oct centers
+            
+            iOctLL1:         - doubly linked list of octs
+            iOctLL2:         - doubly linked list of octs
+            
+            tl - current  time moment for level L
+            tlold - previous time moment for level L
+            dtl - dtime0/2**iTimeBin
+            dtlold -  previous time step for level L
+            iSO - sweep order
+            
+            hvar(1,*) - gas density 
+            hvar(2,*) - gas energy 
+            hvar(3,*) - x-momentum 
+            hvar(4,*) - y-momentum
+            hvar(5,*) - z-momentum
+            hvar(6,*) - pressure
+            hvar(7,*) - Gamma
+            hvar(8,*) - internal energy 
+
+            var (1,*) - total density 
+            var (2,*) - potential (new)
+            var (3,*) - potential (old)
+            
+            
+            
+        """
+        
         if level in self.level_data: return
         if level == 0:
             self.preload_root_level()
@@ -61,18 +95,18 @@
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
         arr = na.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        arr = arr[3:-1,:]
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
         if field==None:
             self.level_data[level] = arr.astype('float32')
         else:
-            self.level_data[level] = arr[:field+1,:].astype('float32')
+            self.level_data[level] = arr.astype('float32')
         del arr
 
     def preload_root_level(self):
         f = open(self.filename, 'rb')
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
-        #pdb.set_trace()
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
         hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
@@ -81,12 +115,13 @@
         var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
-        self.level_data[0]
+        self.level_data[0] = arr
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
+        #This will be cleaned up later
         idx = grid.particle_indices
         if field == 'particle_position':
             return grid.pf.particle_position[idx]
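
The one-line change from arr[:field+1,:] to arr in preload_level is the whole fix described in the summary: caching a truncated block makes the level look loaded, so a later request for a higher field index reads past the cached rows. A toy reproduction of that failure mode (not yt code):

    import numpy as np

    cache = {}

    def preload_level_buggy(level, nvars, ncells, field=None):
        if level in cache:
            return                    # "already loaded", even if truncated
        arr = np.arange(nvars * ncells).reshape(nvars, ncells)
        cache[level] = arr if field is None else arr[:field + 1, :]

    preload_level_buggy(1, nvars=8, ncells=4, field=2)  # caches rows 0..2 only
    preload_level_buggy(1, nvars=8, ncells=4, field=6)  # no-op: level "loaded"
    print(cache[1].shape)   # (3, 4): row 6 is missing, second field read fails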



https://bitbucket.org/yt_analysis/yt/changeset/69fee55a453e/
changeset:   69fee55a453e
branch:      yt
user:        Christopher Moody
date:        2012-02-07 21:25:09
summary:     added a few more options to patch splitting
affected #:  4 files

diff -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb -r 69fee55a453ea0c15273c607053f31d3bf229d01 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -274,7 +274,8 @@
                 if idims.prod() > vol_max or psg.efficiency < min_eff:
                     psg_split = _ramses_reader.recursive_patch_splitting(
                         psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=False)
+                        dleft_index, dfl,min_eff=min_eff,use_center=True,
+                        split_on_vol=512**3)
                     
                     psgs.extend(psg_split)
                     psg_eff += [x.efficiency for x in psg_split] 
@@ -422,10 +423,12 @@
             if len(parents) > 0:
                 g.Parent.extend(parents.tolist())
                 for p in parents: p.Children.append(g)
-            gnop = g.NumberOfParticles
+            if self.pf.file_particle_data:    
+                gnop = g.NumberOfParticles
             g._prepare_grid()
             g._setup_dx()
-            g.NumberOfParticles = gnop
+            if self.pf.file_particle_data:
+                g.NumberOfParticles = gnop
         self.max_level = self.grid_levels.max()
         
     def _setup_field_list(self):


diff -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb -r 69fee55a453ea0c15273c607053f31d3bf229d01 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -150,7 +150,7 @@
         raise 'Should have matched one of the particle fields...'
 
         
-    def _read_data_set(self, grid, field):
+    def _read_data_set(self, grid, field,add_parents=False):
         if field in art_particle_field_names:
             return self._read_particle_field(grid, field)
         pf = grid.pf
@@ -174,7 +174,8 @@
                         grid.get_global_startindex(), grid.ActiveDimensions,
                         tr, filled, self.level_data[g.Level],
                         g.Level, 2**l_delta, g.locations)
-                next_grids += g.Parent
+                if add_parents:
+                    next_grids += g.Parent
             grids = next_grids
             l_delta += 1
         return tr


diff -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb -r 69fee55a453ea0c15273c607053f31d3bf229d01 yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1093,18 +1093,23 @@
         np.ndarray[np.int64_t, ndim=2] fl,
         int num_deep = 0,
         float min_eff = 0.1,
-        int use_center=0):
+        int use_center=0,
+        long split_on_vol = 0):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
     cdef long volume  =0
+    cdef int max_depth = 40
     volume = dims[0]*dims[1]*dims[2]
-    if num_deep > 300 and volume < 452984832L:
+    if split_on_vol>0:
+        if volume > split_on_vol:
+            return [psg]
+    if num_deep > max_depth:
         psg.efficiency = min_eff
         return [psg]
-    if (psg.efficiency > min_eff or psg.efficiency < 0.0) and (volume < 452984832L):
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0):
         return [psg]
     if not use_center:    
         psg.find_split(tr) #default
@@ -1135,7 +1140,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1, min_eff,use_center)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1143,7 +1148,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1, min_eff,use_center)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_r = [R]
     return rv_r + rv_l
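
For orientation, the control flow being parameterized here is: accept a proto-subgrid once it is efficient enough, the recursion is too deep, or (after the volume comparison is corrected in a later changeset below) its volume drops under split_on_vol; otherwise bisect and recurse on both halves. A 1-D toy of that loop, standing in for the Cython implementation:

    import numpy as np

    def split(mask, lo, hi, min_eff=0.3, min_size=4, depth=0, max_depth=40):
        """1-D toy of recursive patch splitting; eff = fraction of covered cells."""
        size = hi - lo
        eff = mask[lo:hi].mean() if size else 0.0
        if eff == 0.0:
            return []
        if eff >= min_eff or size <= min_size or depth >= max_depth:
            return [(lo, hi, eff)]    # accept this patch
        mid = (lo + hi) // 2          # the real code picks a pivot, not the midpoint
        return (split(mask, lo, mid, min_eff, min_size, depth + 1, max_depth) +
                split(mask, mid, hi, min_eff, min_size, depth + 1, max_depth))

    mask = np.zeros(64, dtype=bool)
    mask[5:12] = True
    mask[40:44] = True
    print(split(mask, 0, 64))   # two tight patches instead of one sparse one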


diff -r a97c1ad2ec24a7b7deccd25eab132a9472bb18cb -r 69fee55a453ea0c15273c607053f31d3bf229d01 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -310,7 +310,7 @@
         domain = ogrid_info[0]
         #print "Loading", domain, ogrid_info
         grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:6]
+        og_start_index = ogrid_info[3:6] #the oct left edge
         for i in range(2*ref_factor):
             di = i + og_start_index[0] * ref_factor
             if di < start_index[0] or di >= end_index[0]: continue



https://bitbucket.org/yt_analysis/yt/changeset/12223d9fef86/
changeset:   12223d9fef86
branch:      yt
user:        Christopher Moody
date:        2012-02-09 19:20:59
summary:     particle field definitions
affected #:  6 files

diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -150,8 +150,7 @@
                            'x-momentum','y-momentum','z-momentum',
                            'Pressure','Gamma','GasEnergy',
                            'Metal_DensitySNII', 'Metal_DensitySNIa',
-                           'Potential_New','Potential_Old',
-                           'particle_mass']
+                           'Potential_New','Potential_Old']
         self.field_list += art_particle_field_names
 
     def _setup_classes(self):
@@ -163,9 +162,9 @@
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
-        min_eff = 0.20
+        min_eff = 0.05
         
-        vol_max = 256**3
+        vol_max = 500**3
         
         f = open(self.pf.parameter_filename,'rb')
         
@@ -275,7 +274,7 @@
                     psg_split = _ramses_reader.recursive_patch_splitting(
                         psg, idims, initial_left, 
                         dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=512**3)
+                        split_on_vol=vol_max)
                     
                     psgs.extend(psg_split)
                     psg_eff += [x.efficiency for x in psg_split] 
@@ -361,18 +360,23 @@
             pbar.update(3)
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_species    = na.zeros(np,dtype='int32')
-            self.pf.particle_mass       = na.zeros(np,dtype='float32')
+            self.pf.particle_species    = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass       = na.zeros(np,dtype='float64')
             
-            # self.pf.conversion_factors['particle_mass'] = 1.0
-            # self.pf.conversion_factors['particle_species'] = 1.0
-            # self.pf.conversion_factors['particle_velocity'] = 1.0
-            # self.pf.conversion_factors['particle_position'] = 1.0
+            dist = self.pf['cm']/self.pf.domain_dimensions[0]
+            self.pf.conversion_factors['particle_mass'] = um #solar mass in g
+            self.pf.conversion_factors['particle_species'] = 1.0
+            for ax in 'xyz':
+                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
+                self.pf.conversion_factors['particle_position_%s'%ax] = dist
+            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
+            self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            
             
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 self.pf.particle_species[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m*um #mass in grams
+                self.pf.particle_mass[a:b]    = m #mass in solar masses
                 a=b
             pbar.finish()
             
@@ -383,12 +387,13 @@
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 n=min(1e2,len(tbirth))
                 pbar = get_pbar("Stellar Ages        ",n)
-                self.pf.particle_star_ages = b2t(tbirth,n=n,logger=lambda x: pbar.update(x))
+                self.pf.particle_star_ages  = b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                self.pf.particle_star_ages *= 1.0e9
                 pbar.finish()
                 self.pf.particle_star_metallicity1 = metals1/mass
                 self.pf.particle_star_metallicity2 = metals2/mass
-                self.pf.particle_star_mass_initial = imass*um
-                self.pf.particle_mass[-nstars:] = mass*um
+                self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
+                self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
                 
             pbar = get_pbar("Gridding  Particles ",len(grids))
             for gi, g in enumerate(grids): 
@@ -532,8 +537,10 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-
+        
+        
         z = self.current_redshift
+        
         h = self.hubble_constant
         boxcm_cal = self["boxh"]
         boxcm_uncal = boxcm_cal / h
@@ -660,6 +667,7 @@
         self.parameters["boxh"] = header_vals['boxh']
         self.parameters['ng'] = 128 # of 0 level cells in 1d 
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.parameters['CosmologyInitialRedshift']=self.current_redshift
         self.data_comment = header_vals['jname']
         self.current_time_raw = header_vals['t']
         self.current_time = header_vals['t']
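
For context on the conversion factors set above: in the yt-2.x scheme these frontends use, on-disk (code-unit) values are multiplied by pf.conversion_factors[field] to obtain CGS, so particle positions stored in root-grid cell units become centimetres via dist = pf['cm'] / domain_dimensions[0], and creation times in years become seconds via 31556926.0. A small illustration with made-up numbers:

    import numpy as np

    box_cm = 2.5e25                        # made-up comoving box size in cm
    domain_dimensions = np.array([128, 128, 128])
    conversion_factors = {
        "particle_position_x": box_cm / domain_dimensions[0],  # cell units -> cm
        "particle_velocity_x": 1.0,                            # already proper cm/s
        "particle_creation_time": 31556926.0,                  # yr -> s
    }

    code_position_x = np.array([0.0, 64.0, 127.5])             # root-grid cell units
    print(code_position_x * conversion_factors["particle_position_x"])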


diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -25,6 +25,13 @@
 
 """
 
-art_particle_field_names = ['particle_position','particle_mass','particle_velocity',
-                            'particle_ages','particle_metallicity1',
-                            'particle_metallicity2','particle_mass_initial']
+art_particle_field_names = [
+'particle_mass',
+'particle_creation_time',
+'particle_metallicity_fraction',
+'particle_position_x',
+'particle_position_y',
+'particle_position_z',
+'particle_velocity_x',
+'particle_velocity_y',
+'particle_velocity_z']


diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -51,9 +51,10 @@
                     "Pressure":"pressure",
                     "Metallicity":"metallicity",
                     "GasEnergy":"gas_energy",
-                    "ParticleMass":"particle_mass",
-                    "Temperature":'temperature'
-                   }
+                    "Temperature":'temperature',
+                    "ParticleMassMsun":"particle_mass",
+                    'creation_time':'particle_creation_time',
+                    'metallicity_fraction':'particle_metallicity_fraction'}
 
 for f,v in translation_dict.items():
     pfield = v.lower().startswith("particle")


diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -123,34 +123,55 @@
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
         idx = grid.particle_indices
-        if field == 'particle_position':
-            return grid.pf.particle_position[idx]
+        if field == 'particle_position_x':
+            return grid.pf.particle_position[idx][:,0]
+        if field == 'particle_position_y':
+            return grid.pf.particle_position[idx][:,1]
+        if field == 'particle_position_z':
+            return grid.pf.particle_position[idx][:,2]
         if field == 'particle_mass':
             return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity':
-            return grid.pf.particle_velocity[idx]
-        sidx = idx-self.pf.particle_star_index
-        sidx = sidx[sidx>=0]
-        if field == 'particle_ages':
+        if field == 'particle_velocity_x':
+            return grid.pf.particle_velocity[idx][:,0]
+        if field == 'particle_velocity_y':
+            return grid.pf.particle_velocity[idx][:,1]
+        if field == 'particle_velocity_z':
+            return grid.pf.particle_velocity[idx][:,2]
+        
+        tridx = grid.particle_indices >= grid.pf.particle_star_index
+        sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
+        n = grid.particle_indices
+        if field == 'particle_creation_time':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-0.0
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_ages[sidx]
+            return tr
+        if field == 'particle_metallicity_fraction':
             tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            tr[idx] = grid.pf.particle_star_ages[sidx]
+            if sidx.shape[0]>0:
+                tr[tridx]  = grid.pf.particle_star_metallicity1[sidx]
+                tr[tridx] += grid.pf.particle_star_metallicity2[sidx]
             return tr
         if field == 'particle_metallicity1':
             tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            tr[idx] = grid.pf.particle_star_metallicity1[sidx]
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_metallicity1[sidx]
             return tr
         if field == 'particle_metallicity2':
             tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            tr[idx] = grid.pf.particle_star_metallicity2[sidx]
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_metallicity2[sidx]
             return tr
         if field == 'particle_mass_initial':
             tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            tr[idx] = grid.pf.particle_star_mass_initial[sidx]
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_mass_initial[sidx]
             return tr
         raise 'Should have matched one of the particle fields...'
 
         
-    def _read_data_set(self, grid, field,add_parents=False):
+    def _read_data_set(self, grid, field):
+        #import pdb; pdb.set_trace()
         if field in art_particle_field_names:
             return self._read_particle_field(grid, field)
         pf = grid.pf
@@ -161,10 +182,10 @@
                     pf.domain_dimensions, order="F").copy()
             return tr.swapaxes(0, 2)
         tr = na.zeros(grid.ActiveDimensions, dtype='float32')
+        grids = [grid]
+        l_delta = 0
         filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
         to_fill = grid.ActiveDimensions.prod()
-        grids = [grid]
-        l_delta = 0
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
             for g in grids:
@@ -174,8 +195,7 @@
                         grid.get_global_startindex(), grid.ActiveDimensions,
                         tr, filled, self.level_data[g.Level],
                         g.Level, 2**l_delta, g.locations)
-                if add_parents:
-                    next_grids += g.Parent
+                next_grids += g.Parent
             grids = next_grids
             l_delta += 1
         return tr
@@ -295,7 +315,7 @@
     f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
     pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
     data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
-    return data[:,0:3],data[:,4:]
+    return data[:,0:3],data[:,3:]
 
 def read_stars(file,nstars,Nrow):
     fh = open(file,'rb')


diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1104,7 +1104,7 @@
     cdef int max_depth = 40
     volume = dims[0]*dims[1]*dims[2]
     if split_on_vol>0:
-        if volume > split_on_vol:
+        if volume < split_on_vol:
             return [psg]
     if num_deep > max_depth:
         psg.efficiency = min_eff


diff -r 69fee55a453ea0c15273c607053f31d3bf229d01 -r 12223d9fef86516f7551fc2382ab0654b5378922 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -347,6 +347,61 @@
                     to_fill += 1
     return to_fill
 
+@cython.cdivision(True)
+@cython.boundscheck(True)
+@cython.wraparound(False)
+def read_art_grid_nocheck(int varindex, 
+              np.ndarray[np.int64_t, ndim=1] start_index,
+              np.ndarray[np.int32_t, ndim=1] grid_dims,
+              np.ndarray[np.float32_t, ndim=3] data,
+              np.ndarray[np.float32_t, ndim=2] level_data,
+              int level, int ref_factor,
+              component_grid_info):
+    cdef int gi, i, j, k, domain, offset, grid_id
+    cdef int ir, jr, kr
+    cdef int offi, offj, offk, odind
+    cdef np.int64_t di, dj, dk
+    cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
+    cdef np.ndarray[np.int64_t, ndim=1] og_start_index
+    cdef np.float64_t temp_data
+    cdef np.int64_t end_index[3]
+    cdef int kr_offset, jr_offset, ir_offset
+    cdef int to_fill = 0
+    # Note that indexing into a cell is:
+    #   (k*2 + j)*2 + i
+    for i in range(3):
+        end_index[i] = start_index[i] + grid_dims[i]
+    for gi in range(len(component_grid_info)):
+        ogrid_info = component_grid_info[gi]
+        #print "Loading", domain, ogrid_info
+        grid_id = ogrid_info[1]
+        og_start_index = ogrid_info[3:6] #the oct left edge
+        for i in range(2*ref_factor):
+            di = i + og_start_index[0] * ref_factor
+            ir = <int> (i / ref_factor)
+            for j in range(2 * ref_factor):
+                dj = j + og_start_index[1] * ref_factor
+                jr = <int> (j / ref_factor)
+                for k in range(2 * ref_factor):
+                    dk = k + og_start_index[2] * ref_factor
+                    kr = <int> (k / ref_factor)
+                    offi = di - start_index[0]
+                    offj = dj - start_index[1]
+                    offk = dk - start_index[2]
+                    odind = (kr*2 + jr)*2 + ir
+                    if level > 0:
+                        odind = (kr*2 + jr)*2 + ir
+                        temp_data = level_data[varindex, 8*grid_id + odind]
+                    else:
+                        kr_offset = kr + <int> (start_index[0] / ref_factor)
+                        jr_offset = jr + <int> (start_index[1] / ref_factor)
+                        ir_offset = ir + <int> (start_index[2] / ref_factor)
+                        odind = (kr_offset * grid_dims[0] + jr_offset)*grid_dims[1] + ir_offset
+                        temp_data = level_data[varindex, odind]
+                    data[offi, offj, offk] = temp_data
+                    to_fill += 1
+    return to_fill
+
 
 @cython.cdivision(True)
 @cython.boundscheck(False)
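
The indexing comment in read_art_grid_nocheck above, odind = (k*2 + j)*2 + i, enumerates an oct's 2x2x2 children with i (x) varying fastest and k (z) slowest; spelled out:

    for k in range(2):
        for j in range(2):
            for i in range(2):
                print((i, j, k), "->", (k * 2 + j) * 2 + i)
    # (0,0,0)->0, (1,0,0)->1, (0,1,0)->2, (1,1,0)->3, ..., (1,1,1)->7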



https://bitbucket.org/yt_analysis/yt/changeset/d03f9f1da6f3/
changeset:   d03f9f1da6f3
branch:      yt
user:        MatthewTurk
date:        2012-02-09 21:05:37
summary:     Adding a progressbar and a better parentage calculator to the ART frontend.
affected #:  1 file

diff -r 12223d9fef86516f7551fc2382ab0654b5378922 -r d03f9f1da6f3bab57055b13ce4dc5ed133f3d169 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -238,7 +238,6 @@
             #referring to the domain on which they live
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
-            
             #iterate over the domains    
             step=0
             pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
@@ -257,7 +256,6 @@
                 dfl = ddfl
                 initial_left = na.min(dleft_index, axis=0)
                 idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
-                
                 #this creates a grid patch that doesn't cover the whole level
                 #necessarily, but with other patches covers all the regions
                 #with octs. This object automatically shrinks its size
@@ -421,21 +419,36 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = na.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
-            if self.pf.file_particle_data:    
-                gnop = g.NumberOfParticles
+            # Now we do overlapping siblings; note that one has to "win" with
+            # siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
-            if self.pf.file_particle_data:
-                g.NumberOfParticles = gnop
+        pb.finish()
         self.max_level = self.grid_levels.max()
-        
+
     def _setup_field_list(self):
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further



https://bitbucket.org/yt_analysis/yt/changeset/761a9e79cf93/
changeset:   761a9e79cf93
branch:      yt
user:        MatthewTurk
date:        2012-02-09 22:27:04
summary:     Making regridding stricter and closer to what we want for the actual
selection of octs and locations.  The regridding is now somewhat slower, but
overall produces fewer and much smaller grids.
affected #:  1 file

diff -r d03f9f1da6f3bab57055b13ce4dc5ed133f3d169 -r 761a9e79cf9307ea7219d4081161b840da1e6520 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -162,9 +162,9 @@
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
         
-        min_eff = 0.05
+        min_eff = 0.30
         
-        vol_max = 500**3
+        vol_max = 128**3
         
         f = open(self.pf.parameter_filename,'rb')
         
@@ -215,12 +215,17 @@
             #refers to the left index for the art octgrid
             left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            order = max(level + 1 - LEVEL_OF_EDGE, 0)
             
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
-            hilbert_indices = _ramses_reader.get_hilbert_indices(order, left_index)
+            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
+                              na.log10(2))
+            hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                    level + base_level, left_index)
+            print base_level, hilbert_indices.max(),
+            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+            print hilbert_indices.max()
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
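
The right shift in the hunk above is what turns a fine-grained Hilbert index into a coarse domain label: octs whose keys share the same high bits land in the same bucket and are re-gridded together. Schematically, with arbitrary integers standing in for real Hilbert keys:

    import numpy as np

    LEVEL_OF_EDGE = 7
    base_level = 7                       # log2(128) for a 128^3 root grid
    shift = base_level + LEVEL_OF_EDGE

    hilbert_indices = np.array([5, 1 << 14, (1 << 14) + 3, 3 << 14], dtype=np.int64)
    buckets = hilbert_indices >> shift   # [0 1 1 3]
    unique, inverse = np.unique(buckets, return_inverse=True)
    print(unique, inverse)               # the groups each level is split into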



https://bitbucket.org/yt_analysis/yt/changeset/4433494b6bba/
changeset:   4433494b6bba
branch:      yt
user:        Christopher Moody
date:        2012-02-10 23:30:07
summary:     implemented child masks. Broken: currently causes a segfault
affected #:  3 files

diff -r 761a9e79cf9307ea7219d4081161b840da1e6520 -r 4433494b6bba04373e71867eb988303ab3a13cb9 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -57,6 +57,7 @@
     
 from yt.frontends.art.definitions import art_particle_field_names
 
+from yt.frontends.art.io import read_child_mask_level
 from yt.frontends.art.io import read_particles
 from yt.frontends.art.io import read_stars
 from yt.frontends.art.io import _count_art_octs
@@ -64,6 +65,7 @@
 from yt.frontends.art.io import _read_art_child
 from yt.frontends.art.io import _skip_record
 from yt.frontends.art.io import _read_record
+from yt.frontends.art.io import _read_frecord
 from yt.frontends.art.io import _read_record_size
 from yt.frontends.art.io import _read_struct
 from yt.frontends.art.io import b2t
@@ -79,7 +81,7 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props):
+    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
         start_index = props[0]
@@ -92,6 +94,8 @@
         self.LeftEdge = props[0]
         self.RightEdge = props[1]
         self.ActiveDimensions = props[2] 
+        if child_mask is not None:
+            self._set_child_mask(child_mask)
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -181,18 +185,11 @@
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
         
-        #double check bvalues
-        # num_ogrids = 75019704L/8
-        # ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
-        # ogrid_levels = na.zeros(num_ogrids, dtype='int64')
-        # ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        # self.pf.level_offsetso = amr_utils.read_art_tree(
-        #                         self.pf.parameter_filename, 
-        #                         self.pf.child_grid_offset,
-        #                         self.pf.min_level, self.pf.max_level,
-        #                         ogrid_left_indices, ogrid_levels,
-        #                         ogrid_file_locations)
-        # ogrid_left_indices = ogrid_left_indices/2**(15 - ogrid_levels[:,None] - 1) - 1                        
+        self.pf.level_art_child_masks = {}
+        cm = self.pf.root_iOctCh>0
+        cm_shape = (1,)+cm.shape 
+        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
+        del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
@@ -202,7 +199,6 @@
                         )
         
         self.proto_grids = [[root_psg],]
-
         for level in xrange(1, len(self.pf.level_info)):
             if self.pf.level_info[level] == 0:
                 self.proto_grids.append([])
@@ -213,16 +209,24 @@
             if level > self.pf.limit_level : continue
             
             #refers to the left index for the art octgrid
-            left_index, fl, iocts,  nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
+            #read in the child masks for this level and save them
+            idc, art_child_mask = read_child_mask_level(f, self.pf.level_child_offsets,
+                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+            self.pf.level_art_child_masks[level]=art_child_mask
+            
+            
+            
             #compute the hilbert indices up to a certain level
             #the indices will associate an oct grid to the nearest
             #hilbert index?
             base_level = int( na.log10(self.pf.domain_dimensions.max()) /
                               na.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
+                                    level + base_level-2, left_index)
             print base_level, hilbert_indices.max(),
             hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
             print hilbert_indices.max()
@@ -241,8 +245,11 @@
             #split into list of lists, with domains containing 
             #lists of sub octgrid left indices and an index
             #referring to the domain on which they live
+            pbar = get_pbar("Calc Hilbert Indices ",1)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
+            pbar.finish()
+            
             #iterate over the domains    
             step=0
             pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
@@ -336,7 +343,12 @@
                 self.grid_right_edge[gi,:] = props[1,:]*correction / dds
                 self.grid_dimensions[gi,:] = props[2,:]*correction
                 self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props))
+                child_mask = na.zeros(props[2,:]*correction,'uint8')
+                amr_utils.fill_child_mask(fl,
+                    self.pf.level_art_child_masks[level],
+                    child_mask)
+                grids.append(self.grid(gi, self, level, fl, 
+                    na.array(props*correction).astype('int64'), child_mask))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
         
@@ -750,7 +762,10 @@
         self.domain_dimensions = na.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
-        _skip_record(f) # iOctCh
+        #_skip_record(f) # iOctCh
+        root_cells = self.domain_dimensions.prod()
+        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
         _skip_record(f) # var

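The hunk above stops skipping the root grid's iOctCh record and turns it into a per-level child-mask dictionary: a nonzero iOctCh entry marks a root cell that is refined by an oct. A standalone sketch of that bookkeeping with made-up data (not the ART reader itself):

import numpy as na   # yt 2.x convention

# hypothetical 4^3 root grid; nonzero iOctCh means the cell is refined by an oct
domain_dimensions = na.array([4, 4, 4])
flat_iOctCh = na.zeros(domain_dimensions.prod(), dtype='int32')
flat_iOctCh[[0, 7, 21]] = [3, 1, 9]                     # three refined cells

root_iOctCh = flat_iOctCh.reshape(domain_dimensions, order='F')
cm = root_iOctCh > 0                                    # True where finer data exists
level_art_child_masks = {0: cm.reshape((1,) + cm.shape).astype('uint8')}
print level_art_child_masks[0].shape, level_art_child_masks[0].sum()   # (1, 4, 4, 4) 3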

diff -r 761a9e79cf9307ea7219d4081161b840da1e6520 -r 4433494b6bba04373e71867eb988303ab3a13cb9 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -253,9 +253,8 @@
        '>iii', _read_record(f))
     
     #fortran indices start at 1
-
+    
     #Skip all the oct hierarchy data
-    #in the future, break this up into large chunks
     le     = na.zeros((nLevel,3),dtype='int64')
     fl     = na.ones((nLevel,6),dtype='int64')
     iocts  = na.zeros(nLevel+1,dtype='int64')
@@ -301,8 +300,16 @@
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     le = le/2**(root_level-1-level)-1
+    
+    #now read the hvars and vars arrays
+    #we are looking for iOctCh
+    #we record if iOctCh is >0, in which case it is subdivided
+    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+    
+    
+    
     f.seek(pos)
-    return le,fl,iocts,nLevel
+    return le,fl,nLevel
 
 
 def read_particles(file,nstars,Nrow):
@@ -330,6 +337,57 @@
     if fh.tell() < os.path.getsize(file):
         metals2 = _read_frecord(fh,'>f')     
     return nstars, mass, imass, tbirth, metals1,metals2
+
+def read_child_mask_level(f,nLevel,nhydro_vars):
+    nchild = 8
+    read_struct(f,self.header_struct,deposit_obj=self)
+    
+    ss = read_record(f)
+    MinLev, MaxLevelNow = struct.unpack('>ii', ss)
+    self.MinLev, self.MaxLevelNow = MinLev, MaxLevelNow
+    
+    if verbose: print "MinLev", MinLev
+    if verbose: print "MaxLevelNow", MaxLevelNow
+    
+    tl = read_array(f, dtype='>d', count=MaxLevelNow+1)
+    dtl = read_array(f, dtype='>d', count=MaxLevelNow+1)
+    tlold = read_array(f, dtype='>d', count=MaxLevelNow+1)
+    dtlold = read_array(f, dtype='>d', count=MaxLevelNow+1)
+    iSO = read_array(f, dtype='>f', count=MaxLevelNow+1)
+    self.tl, self.dtl,self.tlold, self.dtlold, self.iSO = \
+        tl, dtl,tlold, dtlold, iSO
+    
+    ss = read_record(f)
+    ncell = struct.unpack('>l', ss)[0]
+    self.ncell = ncell
+    if verbose: print "NCELL", ncell
+    
+    iOctCh = read_array(f, dtype='>i', count=ncell)
+    
+    return idc,ioctch
+
+def read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+    f.seek(level_child_offsets[level])
+    nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
+    ioctch = na.zeros(nLevel,dtype='uint8')
+    idc = na.zeros(nLevel,dtype='int32')
+    
+    chunk = long(1e6)
+    left = nLevel
+    width = nhydro_vars+6
+    a,b=0,0
+    while left > 0:
+        chunk = min(chunk,left)
+        b += chunk
+        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = arr.reshape((width, chunk), order="F")
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        idc[a:b]    = arr[1,:]
+        ioctch[a:b] = arr[2,:]>0 #we only care if its above zero
+        a=b
+        left -= chunk
+    assert left==0
+    return idc,ioctch
     
 nchem=8+2
 dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \

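The chunked reader added above (and _read_record/_read_frecord elsewhere in this frontend) assumes the usual Fortran unformatted sequential layout, in which every record is bracketed by two identical byte-count markers; that is what the pads-must-be-equal assert appears to verify. A self-contained sketch of that layout, with made-up helper names:

import struct
from cStringIO import StringIO
import numpy as na

def _write_record(f, arr):
    # one unformatted sequential record: 4-byte length, payload, 4-byte length
    payload = arr.astype('>i4').tostring()
    f.write(struct.pack('>i', len(payload)))
    f.write(payload)
    f.write(struct.pack('>i', len(payload)))

def _read_record_checked(f):
    (nbytes,) = struct.unpack('>i', f.read(4))
    data = na.fromstring(f.read(nbytes), dtype='>i4')
    (nbytes2,) = struct.unpack('>i', f.read(4))
    assert nbytes == nbytes2          # leading and trailing markers must agree
    return data

buf = StringIO()
_write_record(buf, na.arange(10))
buf.seek(0)
print _read_record_checked(buf)       # [0 1 2 3 4 5 6 7 8 9]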

diff -r 761a9e79cf9307ea7219d4081161b840da1e6520 -r 4433494b6bba04373e71867eb988303ab3a13cb9 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -348,125 +348,27 @@
     return to_fill
 
 @cython.cdivision(True)
-@cython.boundscheck(True)
-@cython.wraparound(False)
-def read_art_grid_nocheck(int varindex, 
-              np.ndarray[np.int64_t, ndim=1] start_index,
-              np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float32_t, ndim=3] data,
-              np.ndarray[np.float32_t, ndim=2] level_data,
-              int level, int ref_factor,
-              component_grid_info):
-    cdef int gi, i, j, k, domain, offset, grid_id
-    cdef int ir, jr, kr
-    cdef int offi, offj, offk, odind
-    cdef np.int64_t di, dj, dk
-    cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
-    cdef np.ndarray[np.int64_t, ndim=1] og_start_index
-    cdef np.float64_t temp_data
-    cdef np.int64_t end_index[3]
-    cdef int kr_offset, jr_offset, ir_offset
-    cdef int to_fill = 0
-    # Note that indexing into a cell is:
-    #   (k*2 + j)*2 + i
-    for i in range(3):
-        end_index[i] = start_index[i] + grid_dims[i]
-    for gi in range(len(component_grid_info)):
-        ogrid_info = component_grid_info[gi]
-        #print "Loading", domain, ogrid_info
-        grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:6] #the oct left edge
-        for i in range(2*ref_factor):
-            di = i + og_start_index[0] * ref_factor
-            ir = <int> (i / ref_factor)
-            for j in range(2 * ref_factor):
-                dj = j + og_start_index[1] * ref_factor
-                jr = <int> (j / ref_factor)
-                for k in range(2 * ref_factor):
-                    dk = k + og_start_index[2] * ref_factor
-                    kr = <int> (k / ref_factor)
-                    offi = di - start_index[0]
-                    offj = dj - start_index[1]
-                    offk = dk - start_index[2]
-                    odind = (kr*2 + jr)*2 + ir
-                    if level > 0:
-                        odind = (kr*2 + jr)*2 + ir
-                        temp_data = level_data[varindex, 8*grid_id + odind]
-                    else:
-                        kr_offset = kr + <int> (start_index[0] / ref_factor)
-                        jr_offset = jr + <int> (start_index[1] / ref_factor)
-                        ir_offset = ir + <int> (start_index[2] / ref_factor)
-                        odind = (kr_offset * grid_dims[0] + jr_offset)*grid_dims[1] + ir_offset
-                        temp_data = level_data[varindex, odind]
-                    data[offi, offj, offk] = temp_data
-                    to_fill += 1
-    return to_fill
-
-
-@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def read_art_grid_light(int varindex, 
-              np.ndarray[np.int64_t, ndim=1] start_index,
-              np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float64_t, ndim=3] data,
-              np.ndarray[np.float64_t, ndim=2] level_data,
-              int level, int ref_factor,
-              component_grid_info):
-    cdef int gi, i, j, k, domain, offset, grid_id
-    cdef int ir, jr, kr
-    cdef int offi, offj, offk, odind
-    cdef np.int64_t di, dj, dk
-    cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
-    cdef np.ndarray[np.int64_t, ndim=1] og_start_index
-    cdef np.float64_t temp_data
-    cdef np.int64_t end_index[3]
-    cdef int kr_offset, jr_offset, ir_offset
-    cdef int to_fill = 0
-    # Note that indexing into a cell is:
-    #   (k*2 + j)*2 + i
-    for i in range(3):
-        end_index[i] = start_index[i] + grid_dims[i]
-    for gi in range(len(component_grid_info)):
-        ogrid_info = component_grid_info[gi]
-        domain = ogrid_info[0]
-        #print "Loading", domain, ogrid_info
-        grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:]
-        for i in range(2*ref_factor):
-            di = i + og_start_index[0] * ref_factor
-            if di < start_index[0] or di >= end_index[0]: continue
-            ir = <int> (i / ref_factor)
-            for j in range(2 * ref_factor):
-                dj = j + og_start_index[1] * ref_factor
-                if dj < start_index[1] or dj >= end_index[1]: continue
-                jr = <int> (j / ref_factor)
-                for k in range(2 * ref_factor):
-                    dk = k + og_start_index[2] * ref_factor
-                    if dk < start_index[2] or dk >= end_index[2]: continue
-                    kr = <int> (k / ref_factor)
-                    offi = di - start_index[0]
-                    offj = dj - start_index[1]
-                    offk = dk - start_index[2]
-                    #print offi, filled.shape[0],
-                    #print offj, filled.shape[1],
-                    #print offk, filled.shape[2]
-                    if level > 0:
-                        odind = (kr*2 + jr)*2 + ir
-                        # Replace with an ART-specific reader
-                        #temp_data = local_hydro_data.m_var_array[
-                        #        level][8*offset + odind]
-                        temp_data = level_data[varindex, 8*grid_id + odind]
-                    else:
-                        kr_offset = kr + <int> (start_index[0] / ref_factor)
-                        jr_offset = jr + <int> (start_index[1] / ref_factor)
-                        ir_offset = ir + <int> (start_index[2] / ref_factor)
-                        odind = (kr_offset * grid_dims[0] + jr_offset)*grid_dims[1] + ir_offset
-                        temp_data = level_data[varindex, odind]
-                    data[offi, offj, offk] = temp_data
-                    to_fill += 1
-    return to_fill
+def fill_child_mask(np.ndarray[np.int64_t, ndim=2] file_locations,
+                    np.ndarray[np.uint8_t, ndim=4] art_child_masks,
+                    np.ndarray[np.uint8_t, ndim=3] child_mask):
 
+    #loop over file_locations, for each row extracting the index & LE
+    #of the oct we will pull from art_child_masks
+    #then use the art_child_masks info to fill in child_mask
+    cdef int i,ioct,x,y,z
+    cdef int nocts = file_locations.shape[0]
+    cdef int lex,ley,lez
+    for i in range(nocts):
+        ioct = file_locations[i,1]
+        lex = file_locations[i,3] #the oct left edge
+        ley = file_locations[i,4]
+        lez = file_locations[i,5]
+        for x in range(2):
+            for y in range(2):
+                for z in range(2):
+                    child_mask[lex+x,ley+y,lez+z] = art_child_masks[ioct,x,y,z]
 
 @cython.cdivision(True)
 @cython.boundscheck(False)


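fill_child_mask above scatters each oct's 2x2x2 refinement flags into a grid-sized child_mask array using the oct's id and left-edge indices (columns 1 and 3:6 of file_locations). A pure-NumPy analogue with made-up inputs, where the left edges are taken to be already grid-local:

import numpy as na

# two hypothetical octs inside an 8^3 grid; columns: [.., oct id, .., le_x, le_y, le_z]
file_locations = na.array([[0, 0, 0, 0, 0, 0],
                           [0, 1, 0, 4, 2, 6]], dtype='int64')
art_child_masks = na.zeros((2, 2, 2, 2), dtype='uint8')
art_child_masks[1, 1, 0, 1] = 1                       # one flagged cell in the second oct

child_mask = na.zeros((8, 8, 8), dtype='uint8')
for i in range(file_locations.shape[0]):
    ioct = file_locations[i, 1]
    lex, ley, lez = file_locations[i, 3:6]
    child_mask[lex:lex+2, ley:ley+2, lez:lez+2] = art_child_masks[ioct]
print child_mask.sum()                                # 1, at cell (5, 2, 7)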

https://bitbucket.org/yt_analysis/yt/changeset/549c408579ef/
changeset:   549c408579ef
branch:      yt
user:        Christopher Moody
date:        2012-02-14 09:50:19
summary:     fixed a bug with particle arrays being the wrong length. affected stars.
affected #:  4 files

diff -r 4433494b6bba04373e71867eb988303ab3a13cb9 -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -57,7 +57,7 @@
     
 from yt.frontends.art.definitions import art_particle_field_names
 
-from yt.frontends.art.io import read_child_mask_level
+from yt.frontends.art.io import _read_child_mask_level
 from yt.frontends.art.io import read_particles
 from yt.frontends.art.io import read_stars
 from yt.frontends.art.io import _count_art_octs
@@ -213,11 +213,12 @@
             #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
             #read in the child masks for this level and save them
-            idc, art_child_mask = read_child_mask_level(f, self.pf.level_child_offsets,
+            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
                 level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
             art_child_mask = art_child_mask.reshape((nocts,2,2,2))
             self.pf.level_art_child_masks[level]=art_child_mask
-            
+            #child_mask is zero where child grids exist and
+            #thus where higher resolution data is available
             
             
             #compute the hilbert indices up to a certain level
@@ -226,15 +227,15 @@
             base_level = int( na.log10(self.pf.domain_dimensions.max()) /
                               na.log10(2))
             hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level-2, left_index)
-            print base_level, hilbert_indices.max(),
+                                    level + base_level, left_index)
+            #print base_level, hilbert_indices.max(),
             hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            print hilbert_indices.max()
+            #print hilbert_indices.max()
             
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
             unique_indices = na.unique(hilbert_indices)
-            mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
+            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
             
             #use the hilbert indices to order oct grids so that consecutive
@@ -327,28 +328,29 @@
         """
         grids = []
         gi = 0
+        
         for level, grid_list in enumerate(self.proto_grids):
             #The root level spans [0,2]
             #The next level spans [0,256]
             #The 3rd Level spans up to 128*2^3, etc.
             #Correct root level to span up to 128
-            correction=1.0
+            correction=1L
             if level == 0:
-                correction=64.0
+                correction=64L
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()
+                props = g.get_properties()*correction
                 dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:]*correction / dds
-                self.grid_right_edge[gi,:] = props[1,:]*correction / dds
-                self.grid_dimensions[gi,:] = props[2,:]*correction
+                self.grid_left_edge[gi,:] = props[0,:] / dds
+                self.grid_right_edge[gi,:] = props[1,:] / dds
+                self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                child_mask = na.zeros(props[2,:]*correction,'uint8')
-                amr_utils.fill_child_mask(fl,
+                child_mask = na.zeros(props[2,:],'uint8')
+                amr_utils.fill_child_mask(fl,props[0],
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    na.array(props*correction).astype('int64'), child_mask))
+                    props*na.array(correction).astype('int64')))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
         
@@ -368,11 +370,13 @@
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
-            np = self.pf.particle_position.shape[0]
+            np = lspecies[-1]
+            self.pf.particle_position   = self.pf.particle_position[:np]
             self.pf.particle_position  -= 1.0 #fortran indices start at 1
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
             pbar.update(3)
+            self.pf.particle_velocity   = self.pf.particle_velocity[:np]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
             self.pf.particle_species    = na.zeros(np,dtype='uint8')
@@ -395,33 +399,61 @@
                 a=b
             pbar.finish()
             
+            import pdb; pdb.set_trace()
+            
             self.pf.particle_star_index = lspecies[-2]
             
             if self.pf.file_star_data:
                 nstars, mass, imass, tbirth, metals1, metals2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
-                n=min(1e2,len(tbirth))
-                pbar = get_pbar("Stellar Ages        ",n)
-                self.pf.particle_star_ages  = b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                self.pf.particle_star_ages *= 1.0e9
+                nstars = nstars[0] 
+                if nstars > 0 :
+                    n=min(1e2,len(tbirth))
+                    pbar = get_pbar("Stellar Ages        ",n)
+                    self.pf.particle_star_ages  = \
+                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                    self.pf.particle_star_ages *= 1.0e9
+                    pbar.finish()
+                    self.pf.particle_star_metallicity1 = metals1/mass
+                    self.pf.particle_star_metallicity2 = metals2/mass
+                    self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
+                    self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
+            
+            if False:
+                left = self.pf.particle_position.shape[0]
+                pbar = get_pbar("Gridding  Particles ",left)
+                pos = self.pf.particle_position.copy()
+                pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T
+                for level in range(self.pf.max_level,self.pf.min_level-1,-1):
+                    lidx = self.grid_levels[:,0] == level
+                    for gi,gidx in enumerate(na.where(lidx)[0]): 
+                        g = grids[gidx]
+                        assert g is not None
+                        le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
+                        idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
+                                             na.all(re > pos[:,1:],axis=1))
+                        np = na.sum(idx)                     
+                        g.NumberOfParticles = np
+                        if np==0: 
+                            g.particle_indices = []
+                            #we have no particles in this grid
+                        else:
+                            fidx = pos[:,0][idx]
+                            g.particle_indices = fidx
+                            pos = pos[~idx] #throw out gridded particles from future gridding
+                        self.grids[gidx] = g
+                        left -= np
+                        pbar.update(left)
                 pbar.finish()
-                self.pf.particle_star_metallicity1 = metals1/mass
-                self.pf.particle_star_metallicity2 = metals2/mass
-                self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
-                self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
+            else:
+                pbar = get_pbar("Finalizing grids ",len(grids))
+                for gi, g in enumerate(grids): 
+                    self.grids[gi] = g
+                pbar.finish()
                 
-            pbar = get_pbar("Gridding  Particles ",len(grids))
-            for gi, g in enumerate(grids): 
-                le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
-                idx = na.logical_and(na.all(le < self.pf.particle_position,axis=1),
-                                     na.all(re > self.pf.particle_position,axis=1))
-                g.particle_indices = idx
-                g.NumberOfParticles = na.sum(idx)
-                self.grids[gi] = g
-                pbar.update(gi)
-            pbar.finish()
             
         else:
+            
             pbar = get_pbar("Finalizing grids ",len(grids))
             for gi, g in enumerate(grids): 
                 self.grids[gi] = g

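The (currently disabled) gridding block above assigns particles to grids with an axis-wise containment test against each grid's edges. The same test on made-up unitary positions (strict inequalities, as in the code, so particles exactly on an edge are not counted):

import numpy as na

# hypothetical: 5 particle positions in unitary [0,1) coordinates
pos = na.array([[0.10, 0.20, 0.30],
                [0.40, 0.45, 0.55],
                [0.90, 0.10, 0.70],
                [0.41, 0.46, 0.58],
                [0.05, 0.95, 0.50]])
le = na.array([0.375, 0.4375, 0.5])     # made-up grid left edge
re = na.array([0.5,   0.5,    0.625])   # made-up grid right edge

idx = na.logical_and(na.all(le < pos, axis=1),
                     na.all(re > pos, axis=1))
print idx.sum(), na.where(idx)[0]        # 2 particles inside, indices [1 3]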

diff -r 4433494b6bba04373e71867eb988303ab3a13cb9 -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -338,35 +338,7 @@
         metals2 = _read_frecord(fh,'>f')     
     return nstars, mass, imass, tbirth, metals1,metals2
 
-def read_child_mask_level(f,nLevel,nhydro_vars):
-    nchild = 8
-    read_struct(f,self.header_struct,deposit_obj=self)
-    
-    ss = read_record(f)
-    MinLev, MaxLevelNow = struct.unpack('>ii', ss)
-    self.MinLev, self.MaxLevelNow = MinLev, MaxLevelNow
-    
-    if verbose: print "MinLev", MinLev
-    if verbose: print "MaxLevelNow", MaxLevelNow
-    
-    tl = read_array(f, dtype='>d', count=MaxLevelNow+1)
-    dtl = read_array(f, dtype='>d', count=MaxLevelNow+1)
-    tlold = read_array(f, dtype='>d', count=MaxLevelNow+1)
-    dtlold = read_array(f, dtype='>d', count=MaxLevelNow+1)
-    iSO = read_array(f, dtype='>f', count=MaxLevelNow+1)
-    self.tl, self.dtl,self.tlold, self.dtlold, self.iSO = \
-        tl, dtl,tlold, dtlold, iSO
-    
-    ss = read_record(f)
-    ncell = struct.unpack('>l', ss)[0]
-    self.ncell = ncell
-    if verbose: print "NCELL", ncell
-    
-    iOctCh = read_array(f, dtype='>i', count=ncell)
-    
-    return idc,ioctch
-
-def read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
     nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
     ioctch = na.zeros(nLevel,dtype='uint8')
@@ -382,8 +354,9 @@
         arr = na.fromfile(f, dtype='>i', count=chunk*width)
         arr = arr.reshape((width, chunk), order="F")
         assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
-        idc[a:b]    = arr[1,:]
-        ioctch[a:b] = arr[2,:]>0 #we only care if its above zero
+        idc[a:b]    = arr[1,:]-1 #fix fortran indexing
+        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        #zero in the mask means there is refinement available
         a=b
         left -= chunk
     assert left==0


diff -r 4433494b6bba04373e71867eb988303ab3a13cb9 -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -1025,6 +1025,7 @@
         hilbert_indices[o] = h
     return hilbert_indices
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 


diff -r 4433494b6bba04373e71867eb988303ab3a13cb9 -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -348,9 +348,10 @@
     return to_fill
 
 @cython.cdivision(True)
-@cython.boundscheck(False)
+@cython.boundscheck(True)
 @cython.wraparound(False)
 def fill_child_mask(np.ndarray[np.int64_t, ndim=2] file_locations,
+                    np.ndarray[np.int64_t, ndim=1] grid_le,
                     np.ndarray[np.uint8_t, ndim=4] art_child_masks,
                     np.ndarray[np.uint8_t, ndim=3] child_mask):
 
@@ -361,10 +362,10 @@
     cdef int nocts = file_locations.shape[0]
     cdef int lex,ley,lez
     for i in range(nocts):
-        ioct = file_locations[i,1]
-        lex = file_locations[i,3] #the oct left edge
-        ley = file_locations[i,4]
-        lez = file_locations[i,5]
+        ioct = file_locations[i,1] #from fortran to python indexing?
+        lex = file_locations[i,3] - grid_le[0] #the oct left edge x
+        ley = file_locations[i,4] - grid_le[1]
+        lez = file_locations[i,5] - grid_le[2]
         for x in range(2):
             for y in range(2):
                 for z in range(2):


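The change to fill_child_mask above subtracts the grid's own left index from each oct's left edge, so the 2x2x2 block is written at grid-local rather than domain-global indices. A one-liner with made-up numbers:

import numpy as na

grid_le = na.array([128, 64, 32], dtype='int64')   # made-up grid left index on this level
oct_le  = na.array([130, 64, 38], dtype='int64')   # made-up oct left index (domain-global)
lex, ley, lez = oct_le - grid_le
print lex, ley, lez                                 # 2 0 6 -> fills child_mask[2:4, 0:2, 6:8]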

https://bitbucket.org/yt_analysis/yt/changeset/0834402f6cde/
changeset:   0834402f6cde
branch:      yt
user:        Christopher Moody
date:        2012-02-15 03:19:26
summary:     reorganized the hydro fields
affected #:  3 files

diff -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -150,12 +150,12 @@
 
     def _detect_fields(self):
         # This will need to be generalized to be used elsewhere.
-        self.field_list = ['Density','TotalEnergy',
-                           'x-momentum','y-momentum','z-momentum',
-                           'Pressure','Gamma','GasEnergy',
-                           'Metal_DensitySNII', 'Metal_DensitySNIa',
-                           'Potential_New','Potential_Old']
-        self.field_list += art_particle_field_names
+        self.field_list = [ 'Density','TotalEnergy',
+             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+             'Pressure','Gamma','GasEnergy',
+             'MetalDensitySNII', 'MetalDensitySNIa',
+             'PotentialNew','PotentialOld']
+        # self.field_list += art_particle_field_names
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -399,8 +399,6 @@
                 a=b
             pbar.finish()
             
-            import pdb; pdb.set_trace()
-            
             self.pf.particle_star_index = lspecies[-2]
             
             if self.pf.file_star_data:
@@ -413,6 +411,7 @@
                     self.pf.particle_star_ages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
                     self.pf.particle_star_ages *= 1.0e9
+                    self.pf.particle_star_ages *= 365*24*3600 #to seconds
                     pbar.finish()
                     self.pf.particle_star_metallicity1 = metals1/mass
                     self.pf.particle_star_metallicity2 = metals2/mass
@@ -635,14 +634,17 @@
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
         self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))      
-        self.conversion_factors["Density"] = \
-            self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = \
-            self.rho0*self.v0**2*(aexpn**-5.0)
+        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
         tr  = self.tr
+        
+        #factors to multiply the native code units to CGS
+        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
+        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
+        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
+        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
+        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
         self.conversion_factors["Temperature"] = tr
-        self.conversion_factors["Metal_Density"] = 1
+        self.conversion_factors["Potential"] = 1.0
         self.cosmological_simulation = True
         
         # Now our conversion factors
@@ -650,8 +652,10 @@
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
 
         #we were already in seconds, go back in to code units
         #self.current_time /= self.t0 
@@ -784,7 +788,7 @@
         #     [aM0] = [Msun]
         self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
         
-
+        #CGS for everything in the next block
     
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions

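The reworked time_units above are plain multiplicative factors applied to a time held in seconds (t_in_unit = t_seconds * time_units[unit]). A quick check of the convention with a made-up cosmic time:

seconds_per_year = 365 * 24 * 3600.0            # ~3.154e7, the convention used above
time_units = {'years': 1.0 / seconds_per_year,
              'myr':   1.0 / (1.0e6 * seconds_per_year),
              'gyr':   1.0 / (1.0e9 * seconds_per_year)}

t_seconds = 4.3e17                              # made-up current_time in seconds
print t_seconds * time_units['gyr']             # ~13.6 Gyr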

diff -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -37,102 +37,173 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
+
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-KnownARTFields = FieldInfoContainer()
-add_art_field = KnownARTFields.add_field
 
-translation_dict = {"Density":"density",
-                    "TotalEnergy":"total_energy",
-                    "x-velocity":"velocity_x",
-                    "y-velocity":"velocity_y",
-                    "z-velocity":"velocity_z",
-                    "Pressure":"pressure",
-                    "Metallicity":"metallicity",
-                    "GasEnergy":"gas_energy",
-                    "Temperature":'temperature',
-                    "ParticleMassMsun":"particle_mass",
-                    'creation_time':'particle_creation_time',
-                    'metallicity_fraction':'particle_metallicity_fraction'}
+#these are just the hydro fields
+known_art_fields = [ 'Density','TotalEnergy',
+                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+                     'Pressure','Gamma','GasEnergy',
+                     'MetalDensitySNII', 'MetalDensitySNIa',
+                     'PotentialNew','PotentialOld']
 
-for f,v in translation_dict.items():
-    pfield = v.lower().startswith("particle")
-    add_art_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    add_art_field(f, function=TranslationFunc(v), take_log=True,
-                  particle_type = pfield)
+#Add the fields, then later we'll individually defined units and names
+for f in known_art_fields:
+    if f not in ARTFieldInfo:
+        add_field(f, function=lambda a,b: None, take_log=True,
+                  validators = [ValidateDataField(f)])
 
-#Particle Fields
-def _get_convert(fname):
-    def _conv(data):
-        return 1.0
-    return _conv
+#Fields that are verified to be OK unit-wise:
+#Density
 
-add_art_field("particle_mass", function=NullFunc, take_log=False,
-              convert_function=_get_convert("particle_mass"),
-              units=r"\rm{g}", particle_type=True)
-    
+#Fields that need to be tested:
+#TotalEnergy
+#XYZMomentum
+#Pressure
+#Gamma
+#GasEnergy
+#MetalDensity SNII + SNia
+#Potentials
 
+#Derived fields that are OK
+#Temperature
+
+#Derived fields that are untested:
+
+#Individual definitions for native fields
 def _convertDensity(data):
     return data.convert("Density")
-KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
+ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["Density"]._convert_function=_convertDensity
 
-def _convertEnergy(data):
+def _convertTotalEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
+ARTFieldInfo["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["TotalEnergy"]._projected_units = r"\rm{K}"
+ARTFieldInfo["TotalEnergy"]._convert_function=_convertTotalEnergy
 
-def _Temperature(field, data):
+def _convertXMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+ARTFieldInfo["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["XMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+
+def _convertYMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+ARTFieldInfo["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["YMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+
+def _convertZMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
+    return tr
+ARTFieldInfo["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["ZMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+
+def _convertPressure(data):
+    return data.convert("Pressure")
+ARTFieldInfo["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+ARTFieldInfo["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
+ARTFieldInfo["Pressure"]._convert_function=_convertPressure
+
+def _convertGamma(data):
+    return 1.0
+ARTFieldInfo["Gamma"]._units = r""
+ARTFieldInfo["Gamma"]._projected_units = r""
+ARTFieldInfo["Gamma"]._convert_function=_convertGamma
+
+def _convertGasEnergy(data):
+    return data.convert("GasEnergy")
+ARTFieldInfo["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
+ARTFieldInfo["GasEnergy"]._projected_units = r""
+ARTFieldInfo["GasEnergy"]._convert_function=_convertGasEnergy
+
+def _convertMetalDensitySNII(data):
+    return data.convert("Density")
+ARTFieldInfo["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+
+def _convertMetalDensitySNIa(data):
+    return data.convert("Density")
+ARTFieldInfo["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+
+def _convertPotentialNew(data):
+    return data.convert("Potential")
+ARTFieldInfo["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["PotentialNew"]._convert_function=_convertPotentialNew
+
+def _convertPotentialOld(data):
+    return data.convert("Potential")
+ARTFieldInfo["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["PotentialOld"]._convert_function=_convertPotentialOld
+
+####### Derived fields (are lowercase)
+
+def _temperature(field, data):
     tr  = data["GasEnergy"] / data["Density"]
     tr /= data.pf.conversion_factors["GasEnergy"]
     tr *= data.pf.conversion_factors["Density"]
     return tr
-def _convertTemperature(data):
+def _converttemperature(data):
     return data.convert("Temperature")
-add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
-KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_convertTemperature
+add_field("temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["temperature"]._units = r"\mathrm{K}"
+ARTFieldInfo["temperature"]._projected_units = r"\mathrm{K}"
+ARTFieldInfo["temperature"]._convert_function=_converttemperature
 
-def _MetallicitySNII(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snII(field, data):
+    tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
+add_field("metallicity_snII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["metallicity_snII"]._units = r""
+ARTFieldInfo["metallicity_snII"]._projected_units = r""
 
-def _MetallicitySNIa(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _metallicity_snIa(field, data):
+    tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-    
-add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
+add_field("metallicity_snIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["metallicity_snIa"]._units = r""
+ARTFieldInfo["metallicity_snIa"]._projected_units = r""
 
-def _Metallicity(field, data):
-    #get the dimensionless mass fraction of the total metals
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr += data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _x_velocity(data):
+    tr  = data["XMomentumDensity"]/data["Density"]
     return tr
-    
-add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
+add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _Metal_Density(field,data):
-    return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
-def _convert_Metal_Density(data):
-    return data.convert("Metal_Density")
+def _y_velocity(data):
+    tr  = data["YMomentumDensity"]/data["Density"]
+    return tr
+add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
-KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
+def _z_velocity(data):
+    tr  = data["ZMomentumDensity"]/data["Density"]
+    return tr
+add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 
+#Particle fields
 
+#Derived particle fields
+

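The x/y/z velocity fields above are simply momentum density divided by density; both are in code units, so a single multiplicative factor (a stand-in value below, not the frontend's actual v0) takes the ratio to cm/s. With made-up cell values:

import numpy as na

momentum_density_x = na.array([0.5, -1.2, 3.0])   # rho*v in code units (made up)
density            = na.array([1.0,  2.0, 1.5])   # rho in code units (made up)
velocity_cgs_factor = 5.0e7                       # stand-in code-units -> cm/s factor

x_velocity = momentum_density_x / density * velocity_cgs_factor
print x_velocity                                  # [ 2.5e7 -3.0e7  1.0e8 ] cm/s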

diff -r 549c408579efac0d6ea1cc2f3d0c42149e240cf7 -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -180,7 +180,7 @@
             self.preload_level(0)
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
-            return tr.swapaxes(0, 2)
+            return tr.swapaxes(0, 2).astype("float64")
         tr = na.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
@@ -198,7 +198,7 @@
                 next_grids += g.Parent
             grids = next_grids
             l_delta += 1
-        return tr
+        return tr.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]


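The root-level read above reshapes the flat record in Fortran order, swaps the first and last axes, and now also promotes to float64. The axis swap is equivalent to a C-order reshape of the reversed shape, which is easy to verify on a small array (non-cubic here so the swap is visible):

import numpy as na

dims = (2, 3, 4)
flat = na.arange(24, dtype='float32')

a = flat.reshape(dims, order="F").swapaxes(0, 2)      # the pattern used above
b = flat.reshape(dims[::-1], order="C")               # equivalent indexing of the same data
print na.all(a == b), a.astype("float64").dtype       # True float64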

https://bitbucket.org/yt_analysis/yt/changeset/053ae2cf5a3a/
changeset:   053ae2cf5a3a
branch:      yt
user:        Christopher Moody
date:        2012-02-15 20:02:15
summary:     fixed temp field. commented out child mask.
affected #:  3 files

diff -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -235,9 +235,12 @@
     hdus.writeto(fn, clobber=True)
 
 def initialize_octree_list(pf, fields):
+    #import pdb; pdb.set_trace()
+    i=0
     o_length = r_length = 0
     grids = []
     levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
     for g in pf.h.grids:
         ff = na.array([g[f] for f in fields])
         grids.append(amr_utils.OctreeGrid(
@@ -250,6 +253,8 @@
         levels_all[g.Level] += g.ActiveDimensions.prod()
         levels_finest[g.Level] += g.child_mask.ravel().sum()
         g.clear_data()
+        i+=1
+        pbar.update(i)
     ogl = amr_utils.OctreeGridList(grids)
     return ogl, levels_finest, levels_all
 


diff -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -94,8 +94,8 @@
         self.LeftEdge = props[0]
         self.RightEdge = props[1]
         self.ActiveDimensions = props[2] 
-        if child_mask is not None:
-            self._set_child_mask(child_mask)
+        #if child_mask is not None:
+        #    self._set_child_mask(child_mask)
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume


diff -r 0834402f6cde0a7ff68e3eb2e520184a7818f3de -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -43,6 +43,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
+import numpy as na
 
 #these are just the hydro fields
 known_art_fields = [ 'Density','TotalEnergy',
@@ -73,6 +74,8 @@
 #Temperature
 
 #Derived fields that are untested:
+#metallicities
+#xyzvelocity
 
 #Individual definitions for native fields
 def _convertDensity(data):
@@ -153,33 +156,38 @@
 ARTFieldInfo["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
 ARTFieldInfo["PotentialOld"]._convert_function=_convertPotentialOld
 
-####### Derived fields (are lowercase)
+####### Derived fields
 
 def _temperature(field, data):
-    tr  = data["GasEnergy"] / data["Density"]
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
+    tr  = data["GasEnergy"].astype('float64') #~1
+    d = data["Density"].astype('float64')
+    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
+    tr /= d #
+    assert na.all(na.isfinite(tr)) #diagnosing some problem...
     return tr
 def _converttemperature(data):
-    return data.convert("Temperature")
-add_field("temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["temperature"]._units = r"\mathrm{K}"
-ARTFieldInfo["temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["temperature"]._convert_function=_converttemperature
+    x  = data.pf.conversion_factors["Density"]
+    x /= data.pf.conversion_factors["GasEnergy"]
+    x *= data.pf.conversion_factors["Temperature"]
+    return x
+add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_field("metallicity_snII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["metallicity_snII"]._units = r""
-ARTFieldInfo["metallicity_snII"]._projected_units = r""
+add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNII"]._units = r""
+ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
     tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_field("metallicity_snIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["metallicity_snIa"]._units = r""
-ARTFieldInfo["metallicity_snIa"]._projected_units = r""
+add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNIa"]._units = r""
+ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
 
 def _x_velocity(data):
     tr  = data["XMomentumDensity"]/data["Density"]
@@ -203,6 +211,15 @@
 ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 
+def _metal_density(field, data):
+    tr  = data["MetalDensitySNIa"]
+    tr += data["MetalDensitySNII"]
+    return tr
+add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r""
+ARTFieldInfo["Metal_Density"]._projected_units = r""
+
+
 #Particle fields
 
 #Derived particle fields

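The temperature fix above guards the GasEnergy/Density ratio against empty cells by writing a sentinel into zero densities before dividing, which keeps the result finite. In isolation:

import numpy as na

gas_energy = na.array([1.0, 2.0, 0.5])
density    = na.array([2.0, 0.0, 1.0])        # one empty cell would otherwise give inf

d = density.astype('float64')
d[d == 0.0] = -1.0                            # sentinel, as in _temperature above
tr = gas_energy / d
print tr, na.all(na.isfinite(tr))             # [ 0.5 -2.   0.5] True

The sentinel keeps the array finite but leaves negative values in the empty cells, so anything downstream that cares about those cells would still need to mask them.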


https://bitbucket.org/yt_analysis/yt/changeset/f42fa77f5077/
changeset:   f42fa77f5077
branch:      yt
user:        Christopher Moody
date:        2012-03-20 18:45:59
summary:     changed LE for ART. works!
affected #:  5 files

diff -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 -r f42fa77f507788b6ffd823c2166b73765291c8f0 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -246,7 +246,7 @@
 """
 
 class SpectrumBuilder(object):
-    def __init__(self, pf, bcdir="", model="chabrier"):
+    def __init__(self, pf, bcdir="", model="chabrier", time_now=None):
         r"""Initialize the data to build a summed flux spectrum for a
         collection of stars using the models of Bruzual & Charlot (2003).
         This function loads the necessary data tables into memory and
@@ -280,8 +280,12 @@
              OmegaLambdaNow = self._pf.omega_lambda,
              InitialRedshift = self._pf['CosmologyInitialRedshift'])
         # Find the time right now.
-        self.time_now = self.cosm.ComputeTimeFromRedshift(
-            self._pf.current_redshift) # seconds
+        
+        if time_now is None:
+            self.time_now = self.cosm.ComputeTimeFromRedshift(
+                self._pf.current_redshift) # seconds
+        else:
+            self.time_now = time_now
         
         # Read the tables.
         self.read_bclib()
@@ -404,7 +408,8 @@
         self.star_metal = self.star_metal[sort]
         
         # Interpolate the flux for each star, adding to the total by weight.
-        for star in itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass):
+        pbar = get_pbar("Calculating fluxes",len(self.star_mass))
+        for i,star in enumerate(itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass)):
             # Pick the right age bin for the right flux array.
             flux = self.flux[star[0]][star[1],:]
             # Get the one just before the one above.
@@ -413,6 +418,9 @@
             int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
             # Add this flux to the total, weighted by mass.
             self.final_spec += na.power(10., int_flux) * star[4]
+            pbar.update(i)
+        pbar.finish()    
+        
         # Normalize.
         self.total_mass = na.sum(self.star_mass)
         self.avg_mass = na.mean(self.star_mass)


diff -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 -r f42fa77f507788b6ffd823c2166b73765291c8f0 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -37,8 +37,13 @@
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
 
+from os import environ
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, ProcessorPool, Communicator
+
 def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None):
+    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None,
+    parallel=False):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -149,7 +154,8 @@
 
     output, refined = generate_flat_octree(pf,
             ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds)
+             "CellVolumeCode"], subregion_bounds = subregion_bounds,
+            parallel=parallel)
     cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
 
     # First the structure
@@ -234,31 +240,75 @@
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list(pf, fields):
+def initialize_octree_list_task(g,fields, grids = [], 
+        levels_finest = defaultdict(lambda: 0), 
+        levels_all = defaultdict(lambda: 0)):
+    ff = na.array([g[f] for f in fields])
+    grids.append(amr_utils.OctreeGrid(
+                    g.child_index_mask.astype('int32'),
+                    ff.astype("float64"),
+                    g.LeftEdge.astype('float64'),
+                    g.ActiveDimensions.astype('int32'),
+                    na.ones(1,dtype='float64') * g.dds[0], g.Level,
+                    g._id_offset))
+    levels_all[g.Level] += g.ActiveDimensions.prod()
+    levels_finest[g.Level] += g.child_mask.ravel().sum()
+    g.clear_data()
+    return grids,levels_finest,levels_all
+
+def initialize_octree_list(pf, fields,parallel=False):
     #import pdb; pdb.set_trace()
     i=0
     o_length = r_length = 0
     grids = []
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    
+    grids = []
     levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
-    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
-    for g in pf.h.grids:
-        ff = na.array([g[f] for f in fields])
-        grids.append(amr_utils.OctreeGrid(
-                        g.child_index_mask.astype('int32'),
-                        ff.astype("float64"),
-                        g.LeftEdge.astype('float64'),
-                        g.ActiveDimensions.astype('int32'),
-                        na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                        g._id_offset))
-        levels_all[g.Level] += g.ActiveDimensions.prod()
-        levels_finest[g.Level] += g.child_mask.ravel().sum()
-        g.clear_data()
-        i+=1
-        pbar.update(i)
+ 
+    import pdb; pdb.set_trace()
+    if not parallel:
+        for g in pf.h.grids:
+            i+=1
+            tgrids,tlevels_finest,tlevels_all = \
+                initialize_octree_list_task(g,fields,grids=grids,
+                        levels_finest=levels_finest,
+                        levels_all=levels_all)
+            pbar.update(i)
+    else:
+        import multiprocessing
+        nbr_chunks = multiprocessing.cpu_count()
+        chunk_size = len(pf.h.grids) / nbr_chunks
+        if chunk_size % nbr_chunks != 0:
+            # make sure we get the last few items of data when we have
+            # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3)
+            nbr_chunks += 1
+        chunks = [(pf.h.grids[x*chunk_size:(x+1)*chunk_size],fields) \
+            for x in xrange(nbr_chunks)]
+
+        p = multiprocessing.Pool()
+        # send out the work chunks to the Pool
+        # po is a multiprocessing.pool.MapResult
+        po = p.map_async(initialize_octree_list_task,chunks)
+        # we get a list of lists back, one per chunk, so we have to
+        # flatten them back together
+        # po.get() will block until results are ready and then
+        # return a list of lists of results
+        results = po.get()
+
+        for tgrids,tlevels_finest,tlevels_all in results:
+            grids += tgrids
+            for k,v in tlevels_finest.iteritems():
+                levels_finest[k] += v
+            for k,v in  tlevels_all.iteritems():
+                levels_all[k] += v
+
+
+    pbar.finish()
     ogl = amr_utils.OctreeGridList(grids)
     return ogl, levels_finest, levels_all
 
-def generate_flat_octree(pf, fields, subregion_bounds = None):
+def generate_flat_octree(pf, fields, subregion_bounds = None,parallel=False):
     """
     Generates two arrays, one of the actual values in a depth-first flat
     octree array, and the other of the values describing the refinement.
@@ -266,7 +316,7 @@
     field used in the data array.
     """
     fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
+    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields,parallel=parallel)
     o_length = na.sum(levels_finest.values())
     r_length = na.sum(levels_all.values())
     output = na.zeros((o_length,len(fields)), dtype='float64')

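The parallel path above splits the grid list into roughly cpu_count chunks and hands them to a multiprocessing.Pool. A generic, picklable version of that chunk-and-map pattern (stand-in work items; actual yt grid objects would not pickle this cheaply):

import multiprocessing

def process_chunk(args):
    # a Pool worker receives one argument, so the chunk and its parameters travel as a tuple
    items, scale = args
    return [x * scale for x in items]

if __name__ == "__main__":
    work = range(100)                                   # stand-ins for pf.h.grids
    nbr_chunks = multiprocessing.cpu_count()
    chunk_size = max(1, len(work) // nbr_chunks)
    chunks = [(work[x*chunk_size:(x+1)*chunk_size], 2.0)
              for x in xrange(nbr_chunks + 1)]          # the +1 chunk picks up any remainder
    pool = multiprocessing.Pool()
    results = pool.map(process_chunk, chunks)           # one result list per chunk
    flat = sum(results, [])
    print len(flat)                                     # 100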

diff -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 -r f42fa77f507788b6ffd823c2166b73765291c8f0 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -399,10 +399,10 @@
                 a=b
             pbar.finish()
             
-            self.pf.particle_star_index = lspecies[-2]
+            self.pf.particle_star_index = i
             
             if self.pf.file_star_data:
-                nstars, mass, imass, tbirth, metals1, metals2 \
+                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
                 if nstars > 0 :
@@ -412,9 +412,10 @@
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
                     self.pf.particle_star_ages *= 1.0e9
                     self.pf.particle_star_ages *= 365*24*3600 #to seconds
+                    self.pf.particle_star_ages = self.pf.current_time-self.pf.particle_star_ages
                     pbar.finish()
-                    self.pf.particle_star_metallicity1 = metals1/mass
-                    self.pf.particle_star_metallicity2 = metals2/mass
+                    self.pf.particle_star_metallicity1 = metallicity1
+                    self.pf.particle_star_metallicity2 = metallicity2
                     self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
                     self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
             
@@ -467,36 +468,48 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
-        mask = na.empty(self.grids.size, dtype='int32')
-        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            pb.update(gi)
-            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-                                self.grid_right_edge[gi,:],
-                                g.Level - 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            parents = self.grids[mask.astype("bool")]
+            parents = self._get_grid_parents(g,
+                            self.grid_left_edge[gi,:],
+                            self.grid_right_edge[gi,:])
             if len(parents) > 0:
-                g.Parent.extend((p for p in parents.tolist()
-                        if p.locations[0,0] == g.locations[0,0]))
+                g.Parent.extend(parents.tolist())
                 for p in parents: p.Children.append(g)
-            # Now we do overlapping siblings; note that one has to "win" with
-            # siblings, so we assume the lower ID one will "win"
-            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-                                self.grid_right_edge[gi,:],
-                                g.Level,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask, gi)
-            mask[gi] = False
-            siblings = self.grids[mask.astype("bool")]
-            if len(siblings) > 0:
-                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
-        pb.finish()
         self.max_level = self.grid_levels.max()
 
+    # def _populate_grid_objects(self):
+    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     pb = get_pbar("Populating grids", len(self.grids))
+    #     for gi,g in enumerate(self.grids):
+    #         pb.update(gi)
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level - 1,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask)
+    #         parents = self.grids[mask.astype("bool")]
+    #         if len(parents) > 0:
+    #             g.Parent.extend((p for p in parents.tolist()
+    #                     if p.locations[0,0] == g.locations[0,0]))
+    #             for p in parents: p.Children.append(g)
+    #         # Now we do overlapping siblings; note that one has to "win" with
+    #         # siblings, so we assume the lower ID one will "win"
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask, gi)
+    #         mask[gi] = False
+    #         siblings = self.grids[mask.astype("bool")]
+    #         if len(siblings) > 0:
+    #             g.OverlappingSiblings = siblings.tolist()
+    #         g._prepare_grid()
+    #         g._setup_dx()
+    #     pb.finish()
+    #     self.max_level = self.grid_levels.max()
+
     def _setup_field_list(self):
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
@@ -652,11 +665,12 @@
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        self.time_units['gyr']   = 1.0/(1.0e9*365*3600*24.0)
-        self.time_units['myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
         self.time_units['years'] = 1.0/(365*3600*24.0)
         self.time_units['days']  = 1.0 / (3600*24.0)
 
+
         #we were already in seconds, go back in to code units
         #self.current_time /= self.t0 
         #self.current_time = b2t(self.current_time,n=1)
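
As an aside on the star-particle hunk above: a minimal sketch, not part of the changeset, of the birth-time handling it introduces. b2t() yields birth times in Gyr; these are scaled to seconds and subtracted from the current time (assumed here to already be in seconds) to give ages. Names below are illustrative.

    import numpy as np

    def birth_times_to_ages(tbirth_gyr, current_time_s):
        # Mirrors the hunk above: convert Gyr -> seconds, then take
        # age = current time - birth time (both in seconds).
        seconds_per_gyr = 1.0e9 * 365 * 24 * 3600
        birth_s = np.asarray(tbirth_gyr, dtype='float64') * seconds_per_gyr
        return current_time_s - birth_s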


diff -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 -r f42fa77f507788b6ffd823c2166b73765291c8f0 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -291,7 +291,9 @@
     #just make sure they appear in the right order, skipping
     #the empty space in between
     idx = na.argsort(iocts)
-
+    
+    import pdb; pdb.set_trace()
+    
     #now rearrange le & fl in order of the ioct
     le = le[idx]
     fl = fl[idx]
@@ -299,8 +301,11 @@
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
-    le = le/2**(root_level-1-level)-1
-    
+    #le = le/2**(root_level-1-level)-1
+
+    #try without the -1
+    le = le/2**(root_level-2-level)
+
     #now read the hvars and vars arrays
     #we are looking for iOctCh
     #we record if iOctCh is >0, in which case it is subdivided
@@ -333,10 +338,11 @@
     imass   = _read_frecord(fh,'>f') 
     tbirth  = _read_frecord(fh,'>f') 
     if fh.tell() < os.path.getsize(file):
-        metals1 = _read_frecord(fh,'>f') 
+        metallicity1 = _read_frecord(fh,'>f') 
     if fh.tell() < os.path.getsize(file):
-        metals2 = _read_frecord(fh,'>f')     
-    return nstars, mass, imass, tbirth, metals1,metals2
+        metallicity2 = _read_frecord(fh,'>f')     
+    assert fh.tell() == os.path.getsize(file)
+    return nstars, mass, imass, tbirth, metallicity1, metallicity2
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
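
A rough sketch, not from the changeset, of the left-edge rescaling being tuned above: oct left edges are stored as integers at a fixed maximum depth (root_level), so mapping them to cell indices at a given level divides by a power of two. The exponent offset and the trailing -1 are exactly what these commits experiment with, so both are left as parameters here.

    import numpy as np

    def rescale_left_edges(le, root_level, level, offset=2, shift=1):
        # le: integer left edges expressed at depth root_level; dividing by
        # 2**(root_level - offset - level) brings them to cell indices at
        # `level`.  offset (1 vs 2) and shift (0 vs 1) are the knobs being
        # adjusted in these commits.
        le = np.asarray(le, dtype='int64')
        return le / 2 ** (root_level - offset - level) - shift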


diff -r 053ae2cf5a3a7eb6a49debdfe2082227273ea9b5 -r f42fa77f507788b6ffd823c2166b73765291c8f0 yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -66,6 +66,7 @@
                             np.ndarray[np.float64_t, ndim=2] output,
                             np.ndarray[np.int32_t, ndim=1] refined,
                             OctreeGridList grids):
+    #cdef int s = curpos
     cdef int i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
@@ -103,9 +104,9 @@
                     child_i = int((cx - child_leftedges[0])/child_dx)
                     child_j = int((cy - child_leftedges[1])/child_dx)
                     child_k = int((cz - child_leftedges[2])/child_dx)
-                    s = RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
+                    # s = Recurs.....
+                    RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
                                         curpos, ci - grid.offset, output, refined, grids)
-    return s
 
 @cython.boundscheck(False)
 def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
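
The rewritten _populate_grid_objects earlier in this changeset delegates to a _get_grid_parents helper that does not appear in the diff. The sketch below is only a guess at the kind of containment test such a helper might perform; it is not taken from the yt source.

    import numpy as np

    def get_grid_parents_sketch(child_level, child_left, child_right,
                                grid_left_edge, grid_right_edge, grid_levels):
        # Hypothetical: treat as a parent any grid one level coarser whose
        # bounding box fully contains the child's left and right edges.
        mask = ((grid_levels[:, 0] == child_level - 1) &
                np.all(grid_left_edge <= child_left, axis=1) &
                np.all(grid_right_edge >= child_right, axis=1))
        return mask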



https://bitbucket.org/yt_analysis/yt/changeset/a3bc652df01e/
changeset:   a3bc652df01e
branch:      yt
user:        Christopher Moody
date:        2012-04-14 02:10:34
summary:     changed le to le-1
affected #:  3 files

diff -r f42fa77f507788b6ffd823c2166b73765291c8f0 -r a3bc652df01effa16b1476210b0d9cf00626cf0e yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -423,7 +423,8 @@
                 left = self.pf.particle_position.shape[0]
                 pbar = get_pbar("Gridding  Particles ",left)
                 pos = self.pf.particle_position.copy()
-                pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T
+                #particle indices travel with the particle positions
+                pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
                 for level in range(self.pf.max_level,self.pf.min_level-1,-1):
                     lidx = self.grid_levels[:,0] == level
                     for gi,gidx in enumerate(na.where(lidx)[0]): 
@@ -525,7 +526,7 @@
                 # 2D and 3D fields.
                 self.pf.field_info.add_field(field, NullFunc,
                                              convert_function=cf,
-                                             take_log=False, particle_type=True)
+                                             take_log=True, particle_type=True)
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
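
A small sketch, not part of the commit, of the "particle indices travel with the particle positions" trick noted above: the index is stacked on as an extra column so it survives later masking and reordering. Names are illustrative.

    import numpy as np

    pos = np.random.random((5, 3))                          # particle positions
    tagged = np.vstack((np.arange(pos.shape[0]), pos.T)).T  # shape (5, 4)
    # column 0 carries the original particle index through any selection
    subset = tagged[tagged[:, 1] > 0.5]
    original_indices = subset[:, 0].astype('int64')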


diff -r f42fa77f507788b6ffd823c2166b73765291c8f0 -r a3bc652df01effa16b1476210b0d9cf00626cf0e yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -78,6 +78,7 @@
 #xyzvelocity
 
 #Individual definitions for native fields
+
 def _convertDensity(data):
     return data.convert("Density")
 ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"


diff -r f42fa77f507788b6ffd823c2166b73765291c8f0 -r a3bc652df01effa16b1476210b0d9cf00626cf0e yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -292,8 +292,6 @@
     #the empty space in between
     idx = na.argsort(iocts)
     
-    import pdb; pdb.set_trace()
-    
     #now rearrange le & fl in order of the ioct
     le = le[idx]
     fl = fl[idx]
@@ -304,7 +302,7 @@
     #le = le/2**(root_level-1-level)-1
 
     #try without the -1
-    le = le/2**(root_level-2-level)
+    le = le/2**(root_level-2-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh



https://bitbucket.org/yt_analysis/yt/changeset/91a9d9d9e7d4/
changeset:   91a9d9d9e7d4
branch:      yt
user:        Christopher Moody
date:        2012-04-14 02:44:13
summary:     merged with tip
affected #:  147 files

diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,9 @@
                                 JC Passy (jcpassy at gmail.com)
                                 Eve Lee (elee at cita.utoronto.ca)
                                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                                Kacper Kowalik (xarthisius.kk at gmail.com)
+                                Nathan Goldbaum (goldbaum at ucolick.org)
+                                Anna Rosen (rosen at ucolick.org)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -15,7 +15,7 @@
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
-DEST_SUFFIX="yt-`uname -p`"
+DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
 
@@ -40,9 +40,9 @@
 INST_FTYPE=1    # Install FreeType2 locally?
 INST_ENZO=0     # Clone a copy of Enzo?
 INST_SQLITE3=1  # Install a local version of SQLite3?
-INST_FORTHON=1  # Install Forthon?
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
+INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -160,6 +160,18 @@
     then
         echo "Looks like you're running on Mac OSX."
         echo
+        echo "NOTE: you must have the Xcode command line tools installed."
+        echo
+        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
+        echo "website"
+        echo
+        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
+        echo "website"
+        echo
+        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
+        echo "alternatively download the Xcode command line tools from"
+        echo "the mac developer tools website"
+        echo
         echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
         echo "Leopard) or newer.  If you do, please set the following"
         echo "environment variables, remove any broken installation tree, and"
@@ -169,6 +181,17 @@
         echo "$ export CXX=g++-4.2"
         echo
     fi
+    if [ ! -z "${CFLAGS}" ]
+    then
+        echo "******************************************"
+        echo "******************************************"
+        echo "**                                      **"
+        echo "**    Your CFLAGS is not empty.         **"
+        echo "**    This can beak h5py compilation.   **"
+        echo "**                                      **"
+        echo "******************************************"
+        echo "******************************************"
+    fi
 }
 
 
@@ -204,10 +227,6 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
-printf "%-15s = %s so I " "INST_FORTHON" "${INST_FORTHON}"
-get_willwont ${INST_FORTHON}
-echo "be installing Forthon (for Halo Finding, etc)"
-
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -220,6 +239,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
+get_willwont ${INST_0MQ}
+echo "be installing ZeroMQ"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -250,7 +273,15 @@
 
 function do_exit
 {
-    echo "Failure.  Check ${LOG_FILE}."
+    echo "********************************************"
+    echo "        FAILURE REPORT:"
+    echo "********************************************"
+    echo
+    tail -n 10 ${LOG_FILE}
+    echo
+    echo "********************************************"
+    echo "********************************************"
+    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
     exit 1
 }
 
@@ -283,14 +314,30 @@
     export GETFILE="curl -sSO"
 fi
 
+if type -P sha512sum &> /dev/null
+then
+    echo "Using sha512sum"
+    export SHASUM="sha512sum"
+elif type -P shasum &> /dev/null
+then
+    echo "Using shasum -a 512"
+    export SHASUM="shasum -a 512"
+else
+    echo
+    echo "I am unable to locate any shasum-like utility."
+    echo "ALL FILE INTEGRITY IS NOT VERIFIABLE."
+    echo "THIS IS PROBABLY A BIG DEAL."
+    echo
+    echo "(I'll hang out for a minute for you to consider this.)"
+    sleep 60
+fi
+
 function get_enzotools
 {
     echo "Downloading $1 from yt-project.org"
     [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
-    ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
-    ( which md5sum &> /dev/null ) || return # return if we don't have md5sum
-    ( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
 ORIG_PWD=`pwd`
@@ -304,27 +351,47 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+# Now we dump all our SHA512 files out.
+
+echo '8da1b0af98203254a1cf776d73d09433f15b5090871f9fd6d712cea32bcd44446b7323ae1069b28907d2728e77944a642825c61bc3b54ceb46c91897cc4f6051  Cython-0.15.1.tar.gz' > Cython-0.15.1.tar.gz.sha512
+echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
+echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
+echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo 'de73b14727c2a6623c19896d4c034ad0f705bf5ccbb8501c786a9d074cce97a7760db9246ae7da3db47dd2de29a1707a8a0ee17ab41a6d9140f2a7dbf455af0f  ext-3.3.2.zip' > ext-3.3.2.zip.sha512
+echo '6d65dcbb77978d4f4a9711062f11ae9d61133ca086f9207a8c1ecea8807dc9612cc8c3b2428157d2fb00dea8e0958f61e35cce4e07987c80bc808bbda3608a6c  ext-slate-110328.zip' > ext-slate-110328.zip.sha512
+echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
+echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
+echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c  ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
+echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f  libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
+echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
+echo '78715bb2bd7ed3291089948530a59d5eff146a64179eae87904a2c328716f26749abb0c5417d6001cadfeebabb4e24985d5a59ceaae4d98c4762163970f83975  mercurial-2.0.tar.gz' > mercurial-2.0.tar.gz.sha512
+echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
+echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
+echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
+echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
+echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
+echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
+
 # Individual processes
-if [ -z "$HDF5_DIR" ]
-then
-    echo "Downloading HDF5"
-    get_enzotools hdf5-1.8.7.tar.gz
-fi
-
+[ -z "$HDF5_DIR" ] && get_enzotools hdf5-1.8.7.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_enzotools zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_enzotools bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_enzotools libpng-1.2.43.tar.gz
 [ $INST_FTYPE -eq 1 ] && get_enzotools freetype-2.4.4.tar.gz
 [ $INST_SQLITE3 -eq 1 ] && get_enzotools sqlite-autoconf-3070500.tar.gz
 [ $INST_PYX -eq 1 ] && get_enzotools PyX-0.11.1.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools zeromq-2.2.0.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools pyzmq-2.1.11.tar.gz
+[ $INST_0MQ -eq 1 ] && get_enzotools tornado-2.2.tar.gz
 get_enzotools Python-2.7.2.tgz
 get_enzotools numpy-1.6.1.tar.gz
 get_enzotools matplotlib-1.1.0.tar.gz
 get_enzotools mercurial-2.0.tar.gz
-get_enzotools ipython-0.10.tar.gz
+get_enzotools ipython-0.12.tar.gz
 get_enzotools h5py-2.0.1.tar.gz
 get_enzotools Cython-0.15.1.tar.gz
-get_enzotools Forthon-0.8.5.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
 get_enzotools PhiloGL-1.4.2.zip
@@ -365,6 +432,7 @@
         cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -382,6 +450,7 @@
         cd libpng-1.2.43
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -399,6 +468,7 @@
         cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -416,6 +486,7 @@
         cd hdf5-1.8.7
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -434,6 +505,7 @@
         cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -449,6 +521,7 @@
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+    ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 fi
@@ -537,10 +610,28 @@
 [ -n "${OLD_LDFLAGS}" ] && export LDFLAGS=${OLD_LDFLAGS}
 [ -n "${OLD_CXXFLAGS}" ] && export CXXFLAGS=${OLD_CXXFLAGS}
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
-do_setup_py ipython-0.10
+
+# Now we do our IPython installation, which has two optional dependencies.
+if [ $INST_0MQ -eq 1 ]
+then
+    if [ ! -e zeromq-2.2.0/done ]
+    then
+        [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+        echo "Installing ZeroMQ"
+        cd zeromq-2.2.0
+        ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+    do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
+    do_setup_py tornado-2.2
+fi
+
+do_setup_py ipython-0.12
 do_setup_py h5py-2.0.1
 do_setup_py Cython-0.15.1
-[ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.5
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
@@ -552,15 +643,14 @@
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
 [ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-[ $INST_FORTHON -eq 1 ] && ( ( cd yt/utilities/kdtree && FORTHON_EXE=${DEST_DIR}/bin/Forthon make 2>&1 ) 1>> ${LOG_FILE} )
 ( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline")
+if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline
+    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]
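
The installer now ships SHA-512 digests alongside each tarball and checks downloads with sha512sum/shasum. For readers who want to repeat the check by hand, a hedged Python equivalent using hashlib; the file name and (truncated) digest in the usage comment are taken from the list above.

    import hashlib

    def verify_sha512(path, expected_hexdigest):
        # Equivalent in spirit to `shasum -a 512 -c <file>.sha512`: hash the
        # file in chunks and compare against the recorded digest.
        h = hashlib.sha512()
        with open(path, 'rb') as fh:
            for chunk in iter(lambda: fh.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected_hexdigest

    # e.g. verify_sha512("zeromq-2.2.0.tar.gz", "fb3cf421b2dc48c31956b3e3ee4ab6eb...")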


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -24,7 +24,8 @@
 
 if IPython.__version__.startswith("0.10"):
     api_version = '0.10'
-elif IPython.__version__.startswith("0.11"):
+elif IPython.__version__.startswith("0.11") or \
+     IPython.__version__.startswith("0.12"):
     api_version = '0.11'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,6 @@
-import os, os.path, glob
+import os
+import os.path
+import glob
 import sys
 import time
 import subprocess
@@ -9,12 +11,12 @@
 from numpy.distutils import log
 
 DATA_FILES_HTML = glob.glob('yt/gui/reason/html/*.html')
-DATA_FILES_JS   = glob.glob('yt/gui/reason/html/js/*.js')
-DATA_FILES_PNG  = glob.glob('yt/gui/reason/html/images/*.png') \
+DATA_FILES_JS = glob.glob('yt/gui/reason/html/js/*.js')
+DATA_FILES_PNG = glob.glob('yt/gui/reason/html/images/*.png') \
                 + glob.glob('yt/gui/reason/html/images/*.ico')
-DATA_FILES_LL   = glob.glob('yt/gui/reason/html/leaflet/*.js') \
+DATA_FILES_LL = glob.glob('yt/gui/reason/html/leaflet/*.js') \
                 + glob.glob('yt/gui/reason/html/leaflet/*.css')
-DATA_FILES_LLI  = glob.glob('yt/gui/reason/html/leaflet/images/*.png')
+DATA_FILES_LLI = glob.glob('yt/gui/reason/html/leaflet/images/*.png')
 
 # Verify that we have Cython installed
 try:
@@ -59,7 +61,7 @@
         options = Cython.Compiler.Main.CompilationOptions(
             defaults=Cython.Compiler.Main.default_options,
             include_path=extension.include_dirs,
-            language=extension.language, cplus = cplus,
+            language=extension.language, cplus=cplus,
             output_file=target_file)
         cython_result = Cython.Compiler.Main.compile(source,
                                                    options=options)
@@ -80,7 +82,8 @@
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
 
     config = Configuration(None, parent_package, top_path)
@@ -88,47 +91,49 @@
                        assume_default_configuration=True,
                        delegate_options_to_subpackages=True,
                        quiet=True)
-    
+
     config.make_config_py()
     #config.make_svn_version_py()
-    config.add_subpackage('yt','yt')
+    config.add_subpackage('yt', 'yt')
     config.add_scripts("scripts/*")
 
     return config
 
+
 def setup_package():
 
     from numpy.distutils.core import setup
 
     setup(
-        name = "yt",
-        version = VERSION,
-        description = "An analysis and visualization toolkit for Astrophysical "
+        name="yt",
+        version=VERSION,
+        description="An analysis and visualization toolkit for Astrophysical "
                     + "simulations, focusing on Adaptive Mesh Refinement data "
                       "from Enzo, Orion, FLASH, and others.",
-        classifiers = [ "Development Status :: 5 - Production/Stable",
-                        "Environment :: Console",
-                        "Intended Audience :: Science/Research",
-                        "License :: OSI Approved :: GNU General Public License (GPL)",
-                        "Operating System :: MacOS :: MacOS X",
-                        "Operating System :: POSIX :: AIX",
-                        "Operating System :: POSIX :: Linux",
-                        "Programming Language :: C",
-                        "Programming Language :: Python",
-                        "Topic :: Scientific/Engineering :: Astronomy",
-                        "Topic :: Scientific/Engineering :: Physics",
-                        "Topic :: Scientific/Engineering :: Visualization", ],
-        keywords='astronomy astrophysics visualization amr adaptivemeshrefinement',
-        entry_points = { 'console_scripts' : [
+        classifiers=["Development Status :: 5 - Production/Stable",
+            "Environment :: Console",
+            "Intended Audience :: Science/Research",
+            "License :: OSI Approved :: GNU General Public License (GPL)",
+            "Operating System :: MacOS :: MacOS X",
+            "Operating System :: POSIX :: AIX",
+            "Operating System :: POSIX :: Linux",
+            "Programming Language :: C",
+            "Programming Language :: Python",
+            "Topic :: Scientific/Engineering :: Astronomy",
+            "Topic :: Scientific/Engineering :: Physics",
+            "Topic :: Scientific/Engineering :: Visualization"],
+        keywords='astronomy astrophysics visualization ' + \
+            'amr adaptivemeshrefinement',
+        entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
                        ]},
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
-        url = "http://yt-project.org/",
+        url="http://yt-project.org/",
         license="GPL-3",
         configuration=configuration,
         zip_safe=False,
-        data_files = [('yt/gui/reason/html/', DATA_FILES_HTML),
+        data_files=[('yt/gui/reason/html/', DATA_FILES_HTML),
                       ('yt/gui/reason/html/js/', DATA_FILES_JS),
                       ('yt/gui/reason/html/images/', DATA_FILES_PNG),
                       ('yt/gui/reason/html/leaflet/', DATA_FILES_LL),


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/boolean_regions.py
--- a/tests/boolean_regions.py
+++ b/tests/boolean_regions.py
@@ -15,4 +15,4 @@
 
 create_test(TestBooleanORParticleQuantity, "BooleanORParticle")
 
-create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")
\ No newline at end of file
+create_test(TestBooleanNOTParticleQuantity, "BooleanNOTParticle")


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/fields_to_test.py
--- a/tests/fields_to_test.py
+++ b/tests/fields_to_test.py
@@ -1,9 +1,10 @@
-# We want to test several things.  We need to be able to run the 
+# We want to test several things.  We need to be able to run the
 
-field_list = ["Density", "Temperature", "x-velocity", "y-velocity", "z-velocity",
-                  # Now some derived fields
-                  "Pressure", "SoundSpeed", "particle_density", "Entropy",
-                  # Ghost zones
-                  "AveragedDensity", "DivV"]
+field_list = ["Density", "Temperature", "x-velocity", "y-velocity",
+    "z-velocity",
+    # Now some derived fields
+    "Pressure", "SoundSpeed", "particle_density", "Entropy",
+    # Ghost zones
+    "AveragedDensity", "DivV"]
 
 particle_field_list = ["particle_position_x", "ParticleMassMsun"]


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/halos.py
--- a/tests/halos.py
+++ b/tests/halos.py
@@ -1,7 +1,7 @@
 from yt.utilities.answer_testing.output_tests import \
     SingleOutputTest, create_test
 from yt.utilities.answer_testing.halo_tests import \
-    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP 
+    TestHaloCountHOP, TestHaloCountFOF, TestHaloCountPHOP
 
 create_test(TestHaloCountHOP, "halo_count_HOP", threshold=80.0)
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/hierarchy_consistency.py
--- a/tests/hierarchy_consistency.py
+++ b/tests/hierarchy_consistency.py
@@ -4,50 +4,60 @@
     YTStaticOutputTest, RegressionTestException
 from yt.funcs import ensure_list
 
+
 class HierarchyInconsistent(RegressionTestException):
     pass
 
+
 class HierarchyConsistency(YTStaticOutputTest):
     name = "hierarchy_consistency"
+
     def run(self):
         self.result = \
-            all( g in ensure_list(c.Parent) for g in self.pf.h.grids
-                                            for c in g.Children )
+            all(g in ensure_list(c.Parent) for g in self.pf.h.grids
+                                            for c in g.Children)
 
     def compare(self, old_result):
         if not(old_result and self.result): raise HierarchyInconsistent()
 
+
 class GridLocationsProperties(YTStaticOutputTest):
     name = "level_consistency"
+
     def run(self):
-        self.result = dict(grid_left_edge = self.pf.h.grid_left_edge,
-                           grid_right_edge = self.pf.h.grid_right_edge,
-                           grid_levels = self.pf.h.grid_levels,
-                           grid_particle_count = self.pf.h.grid_particle_count,
-                           grid_dimensions = self.pf.h.grid_dimensions)
+        self.result = dict(grid_left_edge=self.pf.h.grid_left_edge,
+                           grid_right_edge=self.pf.h.grid_right_edge,
+                           grid_levels=self.pf.h.grid_levels,
+                           grid_particle_count=self.pf.h.grid_particle_count,
+                           grid_dimensions=self.pf.h.grid_dimensions)
 
     def compare(self, old_result):
         # We allow no difference between these values
         self.compare_data_arrays(self.result, old_result, 0.0)
 
+
 class GridRelationshipsChanged(RegressionTestException):
     pass
 
+
 class GridRelationships(YTStaticOutputTest):
 
     name = "grid_relationships"
+
     def run(self):
-        self.result = [ [p.id for p in ensure_list(g.Parent) if g.Parent is not None]
-                        for g in self.pf.h.grids ]
+        self.result = [[p.id for p in ensure_list(g.Parent) \
+            if g.Parent is not None]
+            for g in self.pf.h.grids]
 
     def compare(self, old_result):
         if len(old_result) != len(self.result):
             raise GridRelationshipsChanged()
         for plist1, plist2 in zip(old_result, self.result):
             if len(plist1) != len(plist2): raise GridRelationshipsChanged()
-            if not all( (p1 == p2 for p1, p2 in zip(plist1, plist2) ) ):
+            if not all((p1 == p2 for p1, p2 in zip(plist1, plist2))):
                 raise GridRelationshipsChanged()
 
+
 class GridGlobalIndices(YTStaticOutputTest):
     name = "global_startindex"
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -6,48 +6,57 @@
 from yt.funcs import ensure_list, iterable
 from fields_to_test import field_list, particle_field_list
 
+
 class FieldHashesDontMatch(RegressionTestException):
     pass
 
 known_objects = {}
 
+
 def register_object(func):
     known_objects[func.func_name] = func
     return func
 
+
 @register_object
 def centered_sphere(tobj):
-    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    center = 0.5 * (tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(center, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(center, width / 0.25)
+
 
 @register_object
 def off_centered_sphere(tobj):
-    center = 0.5*(tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
+    center = 0.5 * (tobj.pf.domain_right_edge + tobj.pf.domain_left_edge)
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(center - 0.25 * width, width / 0.25)
+
 
 @register_object
 def corner_sphere(tobj):
     width = (tobj.pf.domain_right_edge - tobj.pf.domain_left_edge).max()
-    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width/0.25)
+    tobj.data_object = tobj.pf.h.sphere(tobj.pf.domain_left_edge, width / 0.25)
+
 
 @register_object
 def disk(self):
-    center = (self.pf.domain_right_edge + self.pf.domain_left_edge)/2.
-    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
-    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max()/10.
-    normal = na.array([1.]*3)
+    center = (self.pf.domain_right_edge + self.pf.domain_left_edge) / 2.
+    radius = (self.pf.domain_right_edge - self.pf.domain_left_edge).max() / 10.
+    height = (self.pf.domain_right_edge - self.pf.domain_left_edge).max() / 10.
+    normal = na.array([1.] * 3)
     self.data_object = self.pf.h.disk(center, normal, radius, height)
-    
+
+
 @register_object
 def all_data(self):
     self.data_object = self.pf.h.all_data()
 
 _new_known_objects = {}
-for field in ["Density"]:#field_list:
+for field in ["Density"]:  # field_list:
     for object_name in known_objects:
+
         def _rfunc(oname, fname):
+
             def func(tobj):
                 known_objects[oname](tobj)
                 tobj.orig_data_object = tobj.data_object
@@ -60,7 +69,9 @@
                 _rfunc(object_name, field)
 known_objects.update(_new_known_objects)
 
+
 class YTFieldValuesTest(YTStaticOutputTest):
+
     def run(self):
         vals = self.data_object[self.field].copy()
         vals.sort()
@@ -73,12 +84,14 @@
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
+
 class YTExtractIsocontoursTest(YTFieldValuesTest):
+
     def run(self):
         val = self.data_object.quantities["WeightedAverageQuantity"](
             "Density", "Density")
         rset = self.data_object.extract_isocontours("Density",
-            val, rescale = False, sample_values = "Temperature")
+            val, rescale=False, sample_values="Temperature")
         self.result = rset
 
     def compare(self, old_result):
@@ -88,7 +101,9 @@
                                  old_result[0].ravel(), 1e-7)
         self.compare_array_delta(self.result[1], old_result[1], 1e-7)
 
+
 class YTIsocontourFluxTest(YTFieldValuesTest):
+
     def run(self):
         val = self.data_object.quantities["WeightedAverageQuantity"](
             "Density", "Density")
@@ -104,13 +119,15 @@
         if "cut_region" in object_name and field in particle_field_list:
             continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
-                    field = field, object_name = object_name)
+                    field=field, object_name=object_name)
     create_test(YTExtractIsocontoursTest, "%s" % (object_name),
-                object_name = object_name)
+                object_name=object_name)
     create_test(YTIsocontourFluxTest, "%s" % (object_name),
-                object_name = object_name)
-    
+                object_name=object_name)
+
+
 class YTDerivedQuantityTest(YTStaticOutputTest):
+
     def setup(self):
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
@@ -144,9 +161,11 @@
             "TotalMass" in dq):
             continue
         create_test(YTDerivedQuantityTest, "%s_%s" % (object_name, dq),
-                    dq_name = dq, object_name = object_name)
+                    dq_name=dq, object_name=object_name)
+
 
 class YTDerivedQuantityTestField(YTDerivedQuantityTest):
+
     def run(self):
         self.result = self.data_object.quantities[self.dq_name](
             self.field_name)
@@ -156,10 +175,12 @@
         for dq in ["Extrema", "TotalQuantity", "MaxLocation", "MinLocation"]:
             create_test(YTDerivedQuantityTestField,
                         "%s_%s" % (object_name, field),
-                        field_name = field, dq_name = dq,
-                        object_name = object_name)
+                        field_name=field, dq_name=dq,
+                        object_name=object_name)
+
 
 class YTDerivedQuantityTest_WeightedAverageQuantity(YTDerivedQuantityTest):
+
     def run(self):
         self.result = self.data_object.quantities["WeightedAverageQuantity"](
             self.field_name, weight="CellMassMsun")
@@ -168,5 +189,5 @@
     for field in field_list:
         create_test(YTDerivedQuantityTest_WeightedAverageQuantity,
                     "%s_%s" % (object_name, field),
-                    field_name = field, 
-                    object_name = object_name)
+                    field_name=field,
+                    object_name=object_name)


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -7,29 +7,31 @@
 from fields_to_test import field_list
 
 for field in field_list:
-    create_test(TestRay, "%s" % field, field = field)
+    create_test(TestRay, "%s" % field, field=field)
 
 for axis in range(3):
     for field in field_list:
         create_test(TestSlice, "%s_%s" % (axis, field),
-                    field = field, axis = axis)
+                    field=field, axis=axis)
 
 for axis in range(3):
     for field in field_list:
         create_test(TestProjection, "%s_%s" % (axis, field),
-                    field = field, axis = axis)
+                    field=field, axis=axis)
         create_test(TestProjection, "%s_%s_Density" % (axis, field),
-                    field = field, axis = axis, weight_field = "Density")
+                    field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
     create_test(TestOffAxisProjection, "%s_%s" % (axis, field),
-                field = field, axis = axis)
+                field=field, axis=axis)
     create_test(TestOffAxisProjection, "%s_%s_Density" % (axis, field),
-                field = field, axis = axis, weight_field = "Density")
+                field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "density_%s" % field,
-                field_x = "Density", field_y = field)
-    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                field_x = "Density", field_y = "x-velocity", field_z = field, 
-                weight = "CellMassMsun")
+    if field != "Density":
+        create_test(TestGasDistribution, "density_%s" % field,
+                    field_x="Density", field_y=field)
+    if field not in ("x-velocity", "Density"):
+        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                    field_x="Density", field_y="x-velocity", field_z=field,
+                    weight="CellMassMsun")


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,4 +1,5 @@
-import matplotlib; matplotlib.use('Agg')
+import matplotlib
+matplotlib.use('Agg')
 from yt.config import ytcfg
 ytcfg["yt", "loglevel"] = "50"
 ytcfg["yt", "serialize"] = "False"
@@ -29,14 +30,16 @@
 
 cwd = os.path.dirname(globals().get("__file__", os.getcwd()))
 
+
 def load_tests(iname, idir):
     f, filename, desc = imp.find_module(iname, [idir])
     tmod = imp.load_module(iname, f, filename, desc)
     return tmod
 
+
 def find_and_initialize_tests():
     mapping = {}
-    for f in glob.glob(os.path.join(cwd,"*.py")):
+    for f in glob.glob(os.path.join(cwd, "*.py")):
         clear_registry()
         iname = os.path.basename(f[:-3])
         try:
@@ -51,28 +54,28 @@
 if __name__ == "__main__":
     clear_registry()
     mapping = find_and_initialize_tests()
-    test_storage_directory = ytcfg.get("yt","test_storage_dir")
+    test_storage_directory = ytcfg.get("yt", "test_storage_dir")
     try:
         my_hash = get_yt_version()
     except:
         my_hash = "UNKNOWN%s" % (time.time())
     parser = optparse.OptionParser()
     parser.add_option("-f", "--parameter-file", dest="parameter_file",
-                      default=os.path.join(cwd, "DD0010/moving7_0010"),
-                      help="The parameter file value to feed to 'load' to test against")
+        default=os.path.join(cwd, "DD0010/moving7_0010"),
+        help="The parameter file value to feed to 'load' to test against")
     parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-                      default=False, help="List all tests and then exit")
+        default=False, help="List all tests and then exit")
     parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-                      help="The test name pattern to match.  Can include wildcards.")
+        help="The test name pattern to match.  Can include wildcards.")
     parser.add_option("-o", "--output", dest="storage_dir",
-                      default=test_storage_directory,
-                      help="Base directory for storing test output.")
+        default=test_storage_directory,
+        help="Base directory for storing test output.")
     parser.add_option("-c", "--compare", dest="compare_name",
-                      default=None,
-                      help="The name against which we will compare")
+        default=None,
+        help="The name against which we will compare")
     parser.add_option("-n", "--name", dest="this_name",
-                      default=my_hash,
-                      help="The name we'll call this set of tests")
+        default=my_hash,
+        help="The name we'll call this set of tests")
     opts, args = parser.parse_args()
 
     if opts.list_tests:


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 tests/volume_rendering.py
--- a/tests/volume_rendering.py
+++ b/tests/volume_rendering.py
@@ -5,34 +5,38 @@
     YTStaticOutputTest, RegressionTestException
 from yt.funcs import ensure_list
 
+
 class VolumeRenderingInconsistent(RegressionTestException):
     pass
 
+
 class VolumeRenderingConsistency(YTStaticOutputTest):
     name = "volume_rendering_consistency"
+
     def run(self):
-        c = (self.pf.domain_right_edge+self.pf.domain_left_edge)/2.
-        W = na.sqrt(3.)*(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        c = (self.pf.domain_right_edge + self.pf.domain_left_edge) / 2.
+        W = na.sqrt(3.) * (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
         N = 512
-        n_contours=5
+        n_contours = 5
         cmap = 'algae'
         field = 'Density'
         mi, ma = self.pf.h.all_data().quantities['Extrema'](field)[0]
         mi, ma = na.log10(mi), na.log10(ma)
-        contour_width=(ma-mi)/100.
-        L = na.array([1.]*3)
-        tf = ColorTransferFunction((mi-2, ma+2))
-        tf.add_layers(n_contours,w=contour_width,
-                      col_bounds = (mi*1.001,ma*0.999), 
-                      colormap=cmap,alpha=na.logspace(-1,0,n_contours))
-        cam = self.pf.h.camera(c, L, W, (N,N), transfer_function = tf, no_ghost=True)
+        contour_width = (ma - mi) / 100.
+        L = na.array([1.] * 3)
+        tf = ColorTransferFunction((mi - 2, ma + 2))
+        tf.add_layers(n_contours, w=contour_width,
+                      col_bounds=(mi * 1.001, ma * 0.999),
+                      colormap=cmap, alpha=na.logspace(-1, 0, n_contours))
+        cam = self.pf.h.camera(c, L, W, (N, N), transfer_function=tf,
+            no_ghost=True)
         image = cam.snapshot()
         # image = cam.snapshot('test_rendering_%s.png'%field)
         self.result = image
 
     def compare(self, old_result):
         # Compare the deltas; give a leeway of 1e-8
-        delta = na.nanmax( na.abs(self.result - old_result) /
-                                 (self.result + old_result) )
+        delta = na.nanmax(na.abs(self.result - old_result) /
+                                 (self.result + old_result))
         if delta > 1e-9: raise VolumeRenderingInconsistent()
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/absorption_spectrum/__init__.py
--- a/yt/analysis_modules/absorption_spectrum/__init__.py
+++ b/yt/analysis_modules/absorption_spectrum/__init__.py
@@ -22,4 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/coordinate_transformation/setup.py
--- a/yt/analysis_modules/coordinate_transformation/setup.py
+++ b/yt/analysis_modules/coordinate_transformation/setup.py
@@ -1,12 +1,16 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('coordinate_transformation',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('coordinate_transformation',
+        parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/fof/setup.py
--- a/yt/analysis_modules/halo_finding/fof/setup.py
+++ b/yt/analysis_modules/halo_finding/fof/setup.py
@@ -1,16 +1,16 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('fof',parent_package,top_path)
-    config.add_extension("EnzoFOF", sources=
-                                    ["EnzoFOF.c",
+    config = Configuration('fof', parent_package, top_path)
+    config.add_extension("EnzoFOF", sources=["EnzoFOF.c",
                                      "kd.c"],
                                     libraries=["m"])
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -41,6 +41,7 @@
 from yt.utilities.performance_counters import \
     yt_counters, time_function
 from yt.utilities.math_utils import periodic_dist
+from yt.utilities.physical_constants import rho_crit_now, mass_sun_cgs
 
 from .hop.EnzoHop import RunHOP
 from .fof.EnzoFOF import RunFOF
@@ -57,12 +58,13 @@
 
 TINY = 1.e-40
 
+
 class Halo(object):
     """
     A data source that returns particle information about the members of a
     HOP-identified halo.
     """
-    __metaclass__ = ParallelDummy # This will proxy up our methods
+    __metaclass__ = ParallelDummy  # This will proxy up our methods
     _distributed = False
     _processing = False
     _owner = 0
@@ -70,9 +72,9 @@
     dont_wrap = ["get_sphere", "write_particle_list"]
     extra_wrap = ["__getitem__"]
 
-    def __init__(self, halo_list, id, indices = None, size=None, CoM=None,
-        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
-        tasks=None, rms_vel=None):
+    def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
+        max_dens_point=None, group_total_mass=None, max_radius=None,
+        bulk_vel=None, tasks=None, rms_vel=None):
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -98,18 +100,26 @@
         r"""Calculate and return the center of mass.
 
         The center of mass of the halo is directly calculated and returned.
-        
+
         Examples
         --------
         >>> com = halos[0].center_of_mass()
         """
-        c_vec = self.maximum_density_location() - na.array([0.5,0.5,0.5])
+        if self.CoM is not None:
+            return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = (self["particle_position_x"] - c_vec[0])
-        cy = (self["particle_position_y"] - c_vec[1])
-        cz = (self["particle_position_z"] - c_vec[2])
-        com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
-        return (com*pm).sum(axis=1)/pm.sum() + c_vec
+        cx = self["particle_position_x"]
+        cy = self["particle_position_y"]
+        cz = self["particle_position_z"]
+        if isinstance(self, FOFHalo):
+            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+        else:
+            c_vec = self.maximum_density_location() - self.pf.domain_center
+        cx = (cx - c_vec[0])
+        cy = (cy - c_vec[1])
+        cz = (cz - c_vec[2])
+        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
+        return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -121,18 +131,22 @@
         --------
         >>> max_dens = halos[0].maximum_density()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[0]
         return self._max_dens[self.id][0]
 
     def maximum_density_location(self):
         r"""Return the location HOP identified as maximally dense. Not
         applicable to FOF halos.
 
-        Return the location HOP identified as maximally dense.  
+        Return the location HOP identified as maximally dense.
 
         Examples
         --------
         >>> max_dens_loc = halos[0].maximum_density_location()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[1:]
         return na.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
@@ -140,7 +154,7 @@
 
     def total_mass(self):
         r"""Returns the total mass in solar masses of the halo.
-        
+
         Returns the total mass in solar masses of just the particles in the
         halo.
 
@@ -148,6 +162,8 @@
         --------
         >>> halos[0].total_mass()
         """
+        if self.group_total_mass is not None:
+            return self.group_total_mass
         return self["ParticleMassMsun"].sum()
 
     def bulk_velocity(self):
@@ -155,16 +171,18 @@
 
         This calculates and returns the mass-weighted average velocity of just
         the particles in the halo in cm/s.
-        
+
         Examples
         --------
         >>> bv = halos[0].bulk_velocity()
         """
+        if self.bulk_vel is not None:
+            return self.bulk_vel
         pm = self["ParticleMassMsun"]
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return na.array([vx,vy,vz])/pm.sum()
+        return na.array([vx, vy, vz]) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -173,18 +191,20 @@
         Calculate and return the mass-weighted RMS velocity for just the
         particles in the halo.  The bulk velocity of the halo is subtracted
         before computation.
-        
+
         Examples
         --------
         >>> rms_vel = halos[0].rms_velocity()
         """
+        if self.rms_vel is not None:
+            return self.rms_vel
         bv = self.bulk_velocity()
         pm = self["ParticleMassMsun"]
         sm = pm.sum()
-        vx = (self["particle_velocity_x"] - bv[0]) * pm/sm
-        vy = (self["particle_velocity_y"] - bv[1]) * pm/sm
-        vz = (self["particle_velocity_z"] - bv[2]) * pm/sm
-        s = vx**2. + vy**2. + vz**2.
+        vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
+        vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
+        vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
+        s = vx ** 2. + vy ** 2. + vz ** 2.
         ms = na.mean(s)
         return na.sqrt(ms) * pm.size
 
@@ -195,32 +215,37 @@
 
         The maximum radius from the most dense point is calculated.  This
         accounts for periodicity.
-        
+
         Parameters
         ----------
         center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Examples
         --------
         >>> radius = halos[0].maximum_radius()
         """
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
-        rx = na.abs(self["particle_position_x"]-center[0])
-        ry = na.abs(self["particle_position_y"]-center[1])
-        rz = na.abs(self["particle_position_z"]-center[2])
+        if self.max_radius is not None:
+            return self.max_radius
+        if center_of_mass:
+            center = self.center_of_mass()
+        else:
+            center = self.maximum_density_location()
+        rx = na.abs(self["particle_position_x"] - center[0])
+        ry = na.abs(self["particle_position_y"] - center[1])
+        rz = na.abs(self["particle_position_z"] - center[2])
         DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        r = na.sqrt(na.minimum(rx, DW[0]-rx)**2.0
-                +   na.minimum(ry, DW[1]-ry)**2.0
-                +   na.minimum(rz, DW[2]-rz)**2.0)
+        r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
+                + na.minimum(ry, DW[1] - ry) ** 2.0
+                + na.minimum(rz, DW[2] - rz) ** 2.0)
         return r.max()
 
     def __getitem__(self, key):
-        if ytcfg.getboolean("yt","inline") == False:
+        if ytcfg.getboolean("yt", "inline") == False:
             return self.data[key][self.indices]
         else:
             return self.data[key][self.indices]
@@ -231,15 +256,16 @@
         This will generate a new, empty sphere source centered on this halo,
         with the maximum radius of the halo. This can be used like any other
         data container in yt.
-        
+
         Parameters
         ----------
         center_of_mass : bool, optional
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Returns
         -------
         sphere : `yt.data_objects.api.AMRSphereBase`
@@ -249,8 +275,10 @@
         --------
         >>> sp = halos[0].get_sphere()
         """
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
+        if center_of_mass:
+            center = self.center_of_mass()
+        else:
+            center = self.maximum_density_location()
         radius = self.maximum_radius()
         # A bit of a long-reach here...
         sphere = self.data.hierarchy.sphere(
@@ -258,6 +286,8 @@
         return sphere
 
     def get_size(self):
+        if self.size is not None:
+            return self.size
         return self.indices.size
 
     def write_particle_list(self, handle):
@@ -276,12 +306,13 @@
         self._processing = False
 
     def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo in Msun, using only the particles
-        in the halo (no baryonic information used). 
+        r"""Return the virial mass of the halo in Msun,
+        using only the particles
+        in the halo (no baryonic information used).
 
         The virial mass is calculated, using the built in `Halo.virial_info`
         functionality.  The mass is then returned.
-        
+
         Parameters
         ----------
         virial_overdensity : float
@@ -296,22 +327,22 @@
         mass : float
             The virial mass in solar masses of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vm = halos[0].virial_mass()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.mass_bins[vir_bin]
         else:
             return -1
-        
-    
+
     def virial_radius(self, virial_overdensity=200., bins=300):
         r"""Return the virial radius of the halo in code units.
-        
+
         The virial radius of the halo is calculated, using only the particles
         in the halo (no baryonic information used). Returns -1 if the halo is
         not virialized.
@@ -330,13 +361,14 @@
         radius : float
             The virial radius in code units of the particles in the halo.  -1
             if not virialized.
-        
+
         Examples
         --------
         >>> vr = halos[0].virial_radius()
         """
         self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
+        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
+            bins=bins)
         if vir_bin != -1:
             return self.radial_bins[vir_bin]
         else:
@@ -350,11 +382,11 @@
         self.virial_info(bins=bins)
         over = (self.overdensity > virial_overdensity)
         if (over == True).any():
-            vir_bin = max(na.arange(bins+1)[over])
+            vir_bin = max(na.arange(bins + 1)[over])
             return vir_bin
         else:
             return -1
-    
+
     def virial_info(self, bins=300):
         r"""Calculates the virial information for the halo. Generally, it is
         better to call virial_radius or virial_mass instead, which calls this
@@ -372,11 +404,11 @@
             self.pf.domain_left_edge
         cm = self.pf["cm"]
         thissize = max(self.size, self.indices.size)
-        rho_crit_now = 1.8788e-29 * h**2.0 * Om_matter # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + z)**3.0)
+        rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
+        Msun2g = mass_sun_cgs
+        rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # Get some pertinent information about the halo.
-        self.mass_bins = na.zeros(self.bin_count+1, dtype='float64')
+        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
         dist = na.empty(thissize, dtype='float64')
         cen = self.center_of_mass()
         mark = 0
@@ -389,314 +421,36 @@
             mark += 1
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(min(dist)*.99 + TINY), 
-            math.log10(max(dist)*1.01 + 2*TINY), num=self.bin_count+1)
+        self.radial_bins = na.logspace(math.log10(min(dist) * .99 + TINY),
+            math.log10(max(dist) * 1.01 + 2 * TINY), num=self.bin_count + 1)
         # Find out which bin each particle goes into, and add the particle
         # mass to that bin.
         inds = na.digitize(dist, self.radial_bins) - 1
         if self["particle_position_x"].size > 1:
             for index in na.unique(inds):
-                self.mass_bins[index] += sum(self["ParticleMassMsun"][inds==index])
+                self.mass_bins[index] += \
+                na.sum(self["ParticleMassMsun"][inds == index])
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
-            self.mass_bins[i+1] += self.mass_bins[i]
+            self.mass_bins[i + 1] += self.mass_bins[i]
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
-        (4./3. * math.pi * rho_crit * \
-        (self.radial_bins * cm)**3.0)
-        
+        (4. / 3. * math.pi * rho_crit * \
+        (self.radial_bins * cm) ** 3.0)
+
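The profile built here is the cumulative mass in logarithmic radial bins divided by the critical mass enclosed at each radius. A minimal standalone sketch, assuming plain numpy, an approximate solar mass, and hypothetical argument names:

    import numpy as np

    def overdensity_profile(dist, mass_msun, rho_crit_cgs, cm_per_code, nbins=300):
        # dist: particle radii from the halo center (code units)
        # mass_msun: particle masses in Msun
        # rho_crit_cgs: rho_crit(z) in g/cm^3; cm_per_code: unit conversion
        msun2g = 1.989e33  # approximate; the commit switches to mass_sun_cgs
        bins = np.logspace(np.log10(dist.min() * 0.99),
                           np.log10(dist.max() * 1.01), nbins + 1)
        shell_mass = np.bincount(np.digitize(dist, bins) - 1,
                                 weights=mass_msun, minlength=nbins + 1)
        enclosed = np.cumsum(shell_mass)             # forward-summed mass
        r_cm = bins * cm_per_code
        return enclosed * msun2g / (4. / 3. * np.pi * rho_crit_cgs * r_cm ** 3)

    # The virial bin is then the outermost bin still above the threshold,
    # mirroring virial_bin above:
    #     over = overdensity_profile(...) > 200.
    #     vir_bin = np.nonzero(over)[0].max() if over.any() else -1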
 
 class HOPHalo(Halo):
+    _name = "HOPHalo"
     pass
 
-class parallelHOPHalo(Halo,ParallelAnalysisInterface):
-    dont_wrap = ["maximum_density","maximum_density_location",
-        "center_of_mass","total_mass","bulk_velocity","maximum_radius",
-        "get_size","get_sphere", "write_particle_list","__getitem__", 
+
+class parallelHOPHalo(Halo, ParallelAnalysisInterface):
+    dont_wrap = ["maximum_density", "maximum_density_location",
+        "center_of_mass", "total_mass", "bulk_velocity", "maximum_radius",
+        "get_size", "get_sphere", "write_particle_list", "__getitem__",
         "virial_info", "virial_bin", "virial_mass", "virial_radius",
         "rms_velocity"]
 
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[0]
-        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
-        return max
-
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-        
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[1:]
-        # If I own the maximum density, my location is globally correct.
-        max_dens = self.maximum_density()
-        if self._max_dens[self.id][0] == max_dens:
-            value = na.array([
-                self._max_dens[self.id][1],
-                self._max_dens[self.id][2],
-                self._max_dens[self.id][3]])
-        else:
-            value = na.array([0,0,0])
-        # This works, and isn't appropriate but for now will be fine...
-        value = self.comm.mpi_allreduce(value, op='sum')
-        return value
-
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-        
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        # If it's precomputed, we save time!
-        if self.CoM is not None:
-            return self.CoM
-        # This need to be called by all tasks, but not all will end up using
-        # it.
-        c_vec = self.maximum_density_location() - na.array([0.5,0.5,0.5])
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            cx = (self["particle_position_x"] - c_vec[0])
-            cy = (self["particle_position_y"] - c_vec[1])
-            cz = (self["particle_position_z"] - c_vec[2])
-            com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
-            my_mass = pm.sum()
-            my_com = ((com*pm).sum(axis=1)/my_mass + c_vec) * my_mass
-        else:
-            my_mass = 0.
-            my_com = na.array([0.,0.,0.])
-        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
-        global_com = self.comm.mpi_allreduce(my_com, op='sum')
-        return global_com / global_mass
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-        
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        if self.group_total_mass is not None:
-            return self.group_total_mass
-        if self.indices is not None:
-            my_mass = self["ParticleMassMsun"].sum()
-        else:
-            my_mass = 0.
-        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
-        return global_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-        
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        if self.bulk_vel is not None:
-            return self.bulk_vel
-        # Unf. this cannot be reasonably computed inside of parallelHOP because
-        # we don't pass velocities in.
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            vx = (self["particle_velocity_x"] * pm).sum()
-            vy = (self["particle_velocity_y"] * pm).sum()
-            vz = (self["particle_velocity_z"] * pm).sum()
-            pm = pm.sum()
-        else:
-            pm = 0.
-            vx = 0.
-            vy = 0.
-            vz = 0.
-        bv = na.array([vx,vy,vz,pm])
-        global_bv = self.comm.mpi_allreduce(bv, op='sum')
-        return global_bv[:3]/global_bv[3]
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-        
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        if self.rms_vel is not None:
-            return self.rms_vel
-        bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
-        sm = pm.sum()
-        if self.indices is not None:
-            vx = (self["particle_velocity_x"] - bv[0]) * pm/sm
-            vy = (self["particle_velocity_y"] - bv[1]) * pm/sm
-            vz = (self["particle_velocity_z"] - bv[2]) * pm/sm
-            s = vx**2 + vy**2 + vz**2
-            s = na.sum(s)
-            size = vx.size
-            ss = na.array([s, float(size)])
-        else:
-            ss = na.array([0.,0.])
-        global_ss = self.comm.mpi_allreduce(ss, op='sum')
-        ms = global_ss[0] / global_ss[1]
-        return na.sqrt(ms) * global_ss[1]
-
-    def maximum_radius(self, center_of_mass=True):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-        
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-        
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        if self.max_radius is not None:
-            return self.max_radius
-        if center_of_mass: center = self.center_of_mass()
-        else: center = self.maximum_density_location()
-        DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        if self.indices is not None:
-            rx = na.abs(self["particle_position_x"]-center[0])
-            ry = na.abs(self["particle_position_y"]-center[1])
-            rz = na.abs(self["particle_position_z"]-center[2])
-            r = na.sqrt(na.minimum(rx, DW[0]-rx)**2.0
-                    +   na.minimum(ry, DW[1]-ry)**2.0
-                    +   na.minimum(rz, DW[2]-rz)**2.0)
-            my_max = r.max()
-            
-        else:
-            my_max = 0.
-        return self.comm.mpi_allreduce(my_max, op='max')
-
-    def get_size(self):
-        if self.size is not None:
-            return self.size
-        if self.indices is not None:
-            my_size = self.indices.size
-        else:
-            my_size = 0
-        global_size = self.comm.mpi_allreduce(my_size, op='sum')
-        return global_size
-
-    def __getitem__(self, key):
-        if ytcfg.getboolean("yt","inline") == False:
-            return self.data[key][self.indices]
-        else:
-            return self.data[key][self.indices]
-
-    def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo in Msun, using only the particles
-        in the halo (no baryonic information used). 
-
-        The virial mass is calculated, using the built in `Halo.virial_info`
-        functionality.  The mass is then returned.
-        
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial mass. Default = 200.
-        bins : int
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        mass : float
-            The virial mass in solar masses of the particles in the halo.  -1
-            if not virialized.
-        
-        Examples
-        --------
-        >>> vm = halos[0].virial_mass()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
-        if vir_bin != -1:
-            return self.mass_bins[vir_bin]
-        else:
-            return -1
-        
-    
-    def virial_radius(self, virial_overdensity=200., bins=300):
-        r"""Return the virial radius of the halo in code units.
-        
-        The virial radius of the halo is calculated, using only the particles
-        in the halo (no baryonic information used). Returns -1 if the halo is
-        not virialized.
-
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial radius. Default = 200.
-        bins : integer
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        radius : float
-            The virial raius in code units of the particles in the halo.  -1
-            if not virialized.
-        
-        Examples
-        --------
-        >>> vr = halos[0].virial_radius()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity, bins=bins)
-        if vir_bin != -1:
-            return self.radial_bins[vir_bin]
-        else:
-            return -1
-
-    def virial_bin(self, virial_overdensity=200., bins=300):
-        r"""Returns the bin index of the virial radius of the halo. Generally,
-        it is better to call virial_radius instead, which calls this function
-        automatically.
-        """
-        self.virial_info(bins=bins)
-        over = (self.overdensity > virial_overdensity)
-        if (over == True).any():
-            vir_bin = max(na.arange(bins+1)[over])
-            return vir_bin
-        else:
-            return -1
-
     def virial_info(self, bins=300):
         r"""Calculates the virial information for the halo. Generally, it is
         better to call virial_radius or virial_mass instead, which calls this
@@ -709,23 +463,24 @@
         self.bin_count = bins
         period = self.data.pf.domain_right_edge - \
             self.data.pf.domain_left_edge
-        self.mass_bins = na.zeros(self.bin_count+1, dtype='float64')
+        self.mass_bins = na.zeros(self.bin_count + 1, dtype='float64')
         cen = self.center_of_mass()
         # Cosmology
         h = self.data.pf.hubble_constant
         Om_matter = self.data.pf.omega_matter
         z = self.data.pf.current_redshift
-        rho_crit_now = 1.8788e-29 * h**2.0 * Om_matter # g cm^-3
-        Msun2g = 1.989e33
-        rho_crit = rho_crit_now * ((1.0 + z)**3.0)
+        rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
+        Msun2g = mass_sun_cgs
+        rho_crit = rho_crit * ((1.0 + z) ** 3.0)
         # If I own some of this halo operate on the particles.
         if self.indices is not None:
             # Get some pertinent information about the halo.
             dist = na.empty(self.indices.size, dtype='float64')
             mark = 0
-            # Find the distances to the particles. I don't like this much, but I
+            # Find the distances to the particles.
+            # I don't like this much, but I
             # can't see a way to eliminate a loop like this, either here or in
-            # yt.math.
+            # yt.math_utils.
             for pos in itertools.izip(self["particle_position_x"],
                     self["particle_position_y"], self["particle_position_z"]):
                 dist[mark] = periodic_dist(cen, pos, period)
@@ -741,48 +496,28 @@
         dist_max = self.comm.mpi_allreduce(dist_max, op='max')
         # Set up the radial bins.
         # Multiply min and max to prevent issues with digitize below.
-        self.radial_bins = na.logspace(math.log10(dist_min*.99 + TINY), 
-            math.log10(dist_max*1.01 + 2*TINY), num=self.bin_count+1)
+        self.radial_bins = na.logspace(math.log10(dist_min * .99 + TINY),
+            math.log10(dist_max * 1.01 + 2 * TINY), num=self.bin_count + 1)
         if self.indices is not None and self.indices.size > 1:
             # Find out which bin each particle goes into, and add the particle
             # mass to that bin.
             inds = na.digitize(dist, self.radial_bins) - 1
             for index in na.unique(inds):
-                self.mass_bins[index] += sum(self["ParticleMassMsun"][inds==index])
+                self.mass_bins[index] += \
+                    na.sum(self["ParticleMassMsun"][inds == index])
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
-                self.mass_bins[i+1] += self.mass_bins[i]
+                self.mass_bins[i + 1] += self.mass_bins[i]
         # Sum up the mass_bins globally
         self.mass_bins = self.comm.mpi_allreduce(self.mass_bins, op='sum')
         # Calculate the over densities in the bins.
         self.overdensity = self.mass_bins * Msun2g / \
-        (4./3. * math.pi * rho_crit * \
-        (self.radial_bins * self.data.pf["cm"])**3.0)
+        (4. / 3. * math.pi * rho_crit * \
+        (self.radial_bins * self.data.pf["cm"]) ** 3.0)
 
 
 class FOFHalo(Halo):
 
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-        
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        c_vec = na.array([cx[0],cy[0],cz[0]]) - na.array([0.5,0.5,0.5])
-        cx = cx - c_vec[0]
-        cy = cy - c_vec[1]
-        cz = cz - c_vec[2]
-        com = na.array([v-na.floor(v) for v in [cx,cy,cz]])
-        com = (pm * com).sum(axis=1)/pm.sum() + c_vec
-        return com
-
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -791,9 +526,11 @@
         r"""Not implemented."""
         return self.center_of_mass()
 
+
 class LoadedHalo(Halo):
     def __init__(self, pf, id, size=None, CoM=None,
-        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        max_dens_point=None, group_total_mass=None,
+        max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None):
         self.pf = pf
         self.id = id
@@ -811,7 +548,7 @@
         self.saved_fields = {}
         self.particle_mask = None
         self.ds_sort = None
-        self.indices = na.array([]) # Never used for a LoadedHalo.
+        self.indices = na.array([])  # Never used for a LoadedHalo.
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -861,8 +598,9 @@
         f = h5py.File(fnames[0], 'r')
         fields = f["Halo%08d" % halo].keys()
         # If we dont have this field, we can give up right now.
-        if field not in fields: return None
-        if field == 'particle_index' or field == 'particle_type':
+        if field not in fields:
+            return None
+        elif field == 'particle_index' or field == 'particle_type':
             # the only integer field
             field_data = na.empty(size, dtype='int64')
         else:
@@ -877,104 +615,11 @@
             f = h5py.File(fname, 'r')
             this = f["Halo%08d" % halo][field][:]
             s = this.size
-            field_data[offset:offset+s] = this
+            field_data[offset:offset + s] = this
             offset += s
             f.close()
             del f
         return field_data
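The read pattern in _get_particle_data above, condensed into an illustrative sketch: a halo's particles may span several per-task HDF5 files, with one "Halo%08d" group per halo and one dataset per field, concatenated in file order (the naming follows write_particle_lists shown elsewhere in this diff):

    import h5py
    import numpy as np

    def read_halo_field(fnames, halo_id, field):
        pieces = []
        for fname in fnames:
            f = h5py.File(fname, 'r')
            grp = f["Halo%08d" % halo_id]
            if field in grp:
                pieces.append(grp[field][:])
            f.close()
        return np.concatenate(pieces) if pieces else None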
-        
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-        
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        return self.CoM
-    
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-        
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        return self.max_dens_point[1:]
-
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        return self.max_dens_point[0]
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-        
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        return self.group_total_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-        
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        return self.bulk_vel
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-        
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        return self.rms_vel
-
-    def maximum_radius(self):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-        
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-        
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        return self.max_radius
 
     def get_sphere(self):
         r"""Returns a sphere source.
@@ -982,15 +627,16 @@
         This will generate a new, empty sphere source centered on this halo,
         with the maximum radius of the halo. This can be used like any other
         data container in yt.
-        
+
         Parameters
         ----------
         center_of_mass : bool, optional
-            True chooses the center of mass when calculating the maximum radius.
+            True chooses the center of mass when
+            calculating the maximum radius.
             False chooses from the maximum density location for HOP halos
             (it has no effect for FOF halos).
             Default = True.
-        
+
         Returns
         -------
         sphere : `yt.data_objects.api.AMRSphereBase`
@@ -1004,11 +650,12 @@
         r = self.maximum_radius()
         return self.pf.h.sphere(cen, r)
 
+
 class HaloList(object):
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only = True):
+    def __init__(self, data_source, dm_only=True):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1025,15 +672,19 @@
         mylog.debug("Finished. (%s)", len(self))
 
     def __obtain_particles(self):
-        if self.dm_only: ii = self._get_dm_indices()
-        else: ii = slice(None)
+        if self.dm_only:
+            ii = self._get_dm_indices()
+        else:
+            ii = slice(None)
         self.particle_fields = {}
         for field in self._fields:
             tot_part = self._data_source[field].size
             if field == "particle_index":
-                self.particle_fields[field] = self._data_source[field][ii].astype('int64')
+                self.particle_fields[field] = \
+                    self._data_source[field][ii].astype('int64')
             else:
-                self.particle_fields[field] = self._data_source[field][ii].astype('float64')
+                self.particle_fields[field] = \
+                    self._data_source[field][ii].astype('float64')
             del self._data_source[field]
         self._base_indices = na.arange(tot_part)[ii]
         gc.collect()
@@ -1048,44 +699,46 @@
         else:
             mylog.warning("No particle_type, no creation_time, so not distinguishing.")
             return slice(None)
-    
 
     def _parse_output(self):
         unique_ids = na.unique(self.tags)
-        counts = na.bincount(self.tags+1)
+        counts = na.bincount(self.tags + 1)
         sort_indices = na.argsort(self.tags)
         grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
         dens = self.densities[sort_indices]
         cp = 0
         for i in unique_ids:
-            cp_c = cp + counts[i+1]
+            cp_c = cp + counts[i + 1]
             if i == -1:
-                cp += counts[i+1]
+                cp += counts[i + 1]
                 continue
             group_indices = grab_indices[cp:cp_c]
             self._groups.append(self._halo_class(self, i, group_indices))
             md_i = na.argmax(dens[cp:cp_c])
-            px, py, pz = [self.particle_fields['particle_position_%s'%ax][group_indices]
+            px, py, pz = \
+                [self.particle_fields['particle_position_%s' % ax][group_indices]
                                             for ax in 'xyz']
-            self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i], py[md_i], pz[md_i])
-            cp += counts[i+1]
+            self._max_dens[i] = (dens[cp:cp_c][md_i], px[md_i],
+                py[md_i], pz[md_i])
+            cp += counts[i + 1]
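_parse_output groups particles by halo tag with a sort-and-slice pass: argsort the tags, count members per tag with bincount (shifted by one so the unbound tag -1 lands in bin 0), then take consecutive slices of the sorted index array. A toy illustration with made-up tags:

    import numpy as np

    tags = np.array([2, -1, 0, 2, 0, 1, -1, 2])   # halo id per particle, -1 = unbound
    order = np.argsort(tags)
    counts = np.bincount(tags + 1)
    groups, cp = {}, 0
    for tag in np.unique(tags):
        members = order[cp:cp + counts[tag + 1]]
        cp += counts[tag + 1]
        if tag == -1:
            continue
        groups[tag] = members
    # groups -> {0: array([2, 4]), 1: array([5]), 2: array([0, 3, 7])}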
 
     def __len__(self):
         return len(self._groups)
- 
+
     def __iter__(self):
-        for i in self._groups: yield i
+        for i in self._groups:
+            yield i
 
     def __getitem__(self, key):
         return self._groups[key]
 
     def nearest_neighbors_3D(self, haloID, num_neighbors=7, search_radius=.2):
         r"""For a halo its nearest neighbors in 3D using the kd tree.
-        
+
         This will calculate the nearest neighbors of a halo, using the kD tree.
         Returns a list of the neighbors distances and ID with format
         [distance,haloID].
-        
+
         Parameters
         ----------
         haloID : integer
@@ -1094,7 +747,7 @@
             How many neighbors to search for. Default = 7.
         search_radius : float
             How far away to look for neighbors in code units. Default = 0.2.
-        
+
         Examples
         --------
         >>> neighbors = halos.nearest_neighbors_3D(0)
@@ -1108,19 +761,20 @@
             p.haloID = group.id
             dataset.append(p)
         mylog.info('Building kd tree...')
-        kd = buildKdHyperRectTree(dataset[:],2*num_neighbors)
+        kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
         # make the neighbors object
         neighbors = Neighbors()
         neighbors.k = num_neighbors
         neighbors.points = []
         neighbors.minDistanceSquared = search_radius * search_radius
         mylog.info('Finding nearest neighbors...')
-        getKNN(self[haloID].center_of_mass().tolist(), kd, neighbors,0., period.tolist())
+        getKNN(self[haloID].center_of_mass().tolist(), kd, neighbors, 0.,
+            period.tolist())
         # convert the data in order to return something less perverse than a
         # Neighbors object, also root the distances
         n_points = []
         for n in neighbors.points:
-            n_points.append([math.sqrt(n[0]),n[1].haloID])
+            n_points.append([math.sqrt(n[0]), n[1].haloID])
         return n_points
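A hypothetical use of the [distance, haloID] pairs returned above, assuming `halos` is an already-built halo list:

    neighbors = halos.nearest_neighbors_3D(0, num_neighbors=5, search_radius=0.1)
    neighbors.sort()                      # ascending by distance
    closest_dist, closest_id = neighbors[0]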
 
     def nearest_neighbors_2D(self, haloID, num_neighbors=7, search_radius=.2,
@@ -1130,7 +784,7 @@
         This will strip a dimension from consideration in the kD-tree, and then
         calculate all the nearest projected neighbors of a halo.  Returns a
         list of the neighbors distances and ID with format [distance,haloID].
-        
+
         Parameters
         ----------
         haloID : int
@@ -1142,13 +796,14 @@
         proj_dim : int
             Which dimension (0, 1, or 2) to project the halos into 2D.
             Default = 0.
-        
+
         Examples
         --------
         >>> neighbors = halos.nearest_neighbors_2D(0)
         """
-        # Set up a vector to multiply other vectors by to project along proj_dim
-        vec = na.array([1.,1.,1.])
+        # Set up a vector to multiply other
+        # vectors by to project along proj_dim
+        vec = na.array([1., 1., 1.])
         vec[proj_dim] = 0.
         period = self.pf.domain_right_edge - self.pf.domain_left_edge
         period = period * vec
@@ -1161,7 +816,7 @@
             p.haloID = group.id
             dataset.append(p)
         mylog.info('Building kd tree...')
-        kd = buildKdHyperRectTree(dataset[:],2*num_neighbors)
+        kd = buildKdHyperRectTree(dataset[:], 2 * num_neighbors)
         # make the neighbors object
         neighbors = Neighbors()
         neighbors.k = num_neighbors
@@ -1169,22 +824,22 @@
         neighbors.minDistanceSquared = search_radius * search_radius
         mylog.info('Finding nearest neighbors...')
         cm = self[haloID].center_of_mass() * vec
-        getKNN(cm.tolist(), kd, neighbors,0., period.tolist())
+        getKNN(cm.tolist(), kd, neighbors, 0., period.tolist())
         # convert the data in order to return something less perverse than a
         # Neighbors object, also root the distances
         n_points = []
         for n in neighbors.points:
-            n_points.append([math.sqrt(n[0]),n[1].haloID])
+            n_points.append([math.sqrt(n[0]), n[1].haloID])
         return n_points
 
     def write_out(self, filename):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1192,18 +847,19 @@
         if hasattr(filename, 'write'):
             f = filename
         else:
-            f = open(filename,"w")
+            f = open(filename, "w")
         f.write("# HALOS FOUND WITH %s\n" % (self._name))
-        f.write("\t".join(["# Group","Mass","# part","max dens"
-                           "x","y","z", "center-of-mass",
-                           "x","y","z",
-                           "vx","vy","vz","max_r","rms_v","\n"]))
+        f.write("\t".join(["# Group", "Mass", "# part", "max dens"
+                           "x", "y", "z", "center-of-mass",
+                           "x", "y", "z",
+                           "vx", "vy", "vz", "max_r", "rms_v", "\n"]))
         for group in self:
             f.write("%10i\t" % group.id)
             f.write("%0.9e\t" % group.total_mass())
             f.write("%10i\t" % group.get_size())
             f.write("%0.9e\t" % group.maximum_density())
-            f.write("\t".join(["%0.9e" % v for v in group.maximum_density_location()]))
+            f.write("\t".join(["%0.9e" % v for v in \
+                group.maximum_density_location()]))
             f.write("\t")
             f.write("\t".join(["%0.9e" % v for v in group.center_of_mass()]))
             f.write("\t")
@@ -1218,12 +874,12 @@
     def write_particle_lists_txt(self, prefix, fp=None):
         r"""Write out the names of the HDF5 files containing halo particle data
         to a text file. Needed in particular for parallel analysis output.
-        
+
         Parameters
         ----------
         prefix : String
             The prefix for the name of the file.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
@@ -1231,14 +887,15 @@
         if hasattr(fp, 'write'):
             f = fp
         else:
-            f = open("%s.txt" % prefix,"w")
+            f = open("%s.txt" % prefix, "w")
         for group in self:
             if group.tasks is not None:
                 fn = ""
                 for task in group.tasks:
                     fn += "%s.h5 " % self.comm.get_filename(prefix, rank=task)
             elif self._distributed:
-                fn = "%s.h5" % self.comm.get_filename(prefix, rank=group._owner)
+                fn = "%s.h5" % self.comm.get_filename(prefix,
+                    rank=group._owner)
             else:
                 fn = "%s.h5" % self.comm.get_filename(prefix)
             gn = "Halo%08i" % (group.id)
@@ -1246,6 +903,7 @@
             f.flush()
         f.close()
 
+
 class HOPHaloList(HaloList):
 
     _name = "HOP"
@@ -1275,18 +933,19 @@
 
     def write_out(self, filename="HopAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "HopAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class FOFHaloList(HaloList):
     _name = "FOF"
     _halo_class = FOFHalo
@@ -1298,38 +957,39 @@
 
     def _run_finder(self):
         self.tags = \
-            RunFOF(self.particle_fields["particle_position_x"] / self.period[0],
-                   self.particle_fields["particle_position_y"] / self.period[1],
-                   self.particle_fields["particle_position_z"] / self.period[2],
-                   self.link)
+        RunFOF(self.particle_fields["particle_position_x"] / self.period[0],
+               self.particle_fields["particle_position_y"] / self.period[1],
+               self.particle_fields["particle_position_z"] / self.period[2],
+               self.link)
         self.densities = na.ones(self.tags.size, dtype='float64') * -1
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
 
     def write_out(self, filename="FOFAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to. Default = "FOFAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("FOFAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class LoadedHaloList(HaloList):
     _name = "Loaded"
-    
+
     def __init__(self, pf, basename):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._groups = []
         self.basename = basename
         self._retrieve_halos()
-    
+
     def _retrieve_halos(self):
         # First get the halo particulars.
         lines = file("%s.out" % self.basename)
@@ -1356,7 +1016,7 @@
                 max_dens_point, group_total_mass, max_radius, bulk_vel,
                 rms_vel, fnames))
             halo += 1
-    
+
     def _collect_halo_data_locations(self):
         # The halos are listed in order in the file.
         lines = file("%s.txt" % self.basename)
@@ -1375,7 +1035,8 @@
         lines.close()
         return locations
 
-class parallelHOPHaloList(HaloList,ParallelAnalysisInterface):
+
+class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
@@ -1383,7 +1044,7 @@
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
-        tree = 'F'):
+        tree='F'):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise
@@ -1397,7 +1058,7 @@
         self.rearrange = rearrange
         self.period = period
         self.old_period = period.copy()
-        self.period = na.array([1.]*3)
+        self.period = na.array([1.] * 3)
         self._data_source = data_source
         self.premerge = premerge
         self.tree = tree
@@ -1430,7 +1091,7 @@
             self.num_neighbors, self.bounds,
             self.particle_fields,
             self.threshold, rearrange=self.rearrange, premerge=self.premerge,
-            tree = self.tree)
+            tree=self.tree)
         self.densities, self.tags = obj.density, obj.chainID
         # I'm going to go ahead and delete self.densities because it's not
         # actually being used. I'm not going to remove it altogether because
@@ -1447,7 +1108,7 @@
         self.max_radius = obj.max_radius
         for dd in range(3):
             self.CoM[:, dd] *= self.old_period[dd]
-            self.max_dens_point[:, dd+1] *= self.old_period[dd]
+            self.max_dens_point[:, dd + 1] *= self.old_period[dd]
         # This is wrong, below, with uneven boundaries. We'll cross that bridge
         # when we get there.
         self.max_radius *= self.old_period[0]
@@ -1469,9 +1130,9 @@
         if calc:
             vel = na.empty((calc, 3), dtype='float64')
             ms = pm[select]
-            vel[:,0] = xv[select] * ms
-            vel[:,1] = yv[select] * ms
-            vel[:,2] = zv[select] * ms
+            vel[:, 0] = xv[select] * ms
+            vel[:, 1] = yv[select] * ms
+            vel[:, 2] = zv[select] * ms
             subchain = self.tags[select]
             sort = subchain.argsort()
             vel = vel[sort]
@@ -1482,31 +1143,32 @@
             marks = na.arange(calc)[marks] + 1
             marks = na.concatenate(([0], marks, [calc]))
             for i, u in enumerate(uniq_subchain):
-                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i+1]], axis=0)
+                self.bulk_vel[u] = na.sum(vel[marks[i]:marks[i + 1]], axis=0)
             del vel, subchain, sort_subchain
             del diff_subchain
         # Bring it together, and divide by the previously computed total mass
         # of each halo.
         self.bulk_vel = self.comm.mpi_allreduce(self.bulk_vel, op='sum')
         for groupID in xrange(self.group_count):
-            self.bulk_vel[groupID] = self.bulk_vel[groupID] / self.Tot_M[groupID]
+            self.bulk_vel[groupID] = \
+                self.bulk_vel[groupID] / self.Tot_M[groupID]
         yt_counters("bulk vel. computing")
         # Now calculate the RMS velocity of the groups in parallel, very
         # similarly to the bulk velocity and re-using some of the arrays.
         yt_counters("rms vel computing")
-        rms_vel_temp = na.zeros((self.group_count,2), dtype='float64')
+        rms_vel_temp = na.zeros((self.group_count, 2), dtype='float64')
         if calc:
             vel = na.empty((calc, 3), dtype='float64')
-            vel[:,0] = xv[select] * ms
-            vel[:,1] = yv[select] * ms
-            vel[:,2] = zv[select] * ms
+            vel[:, 0] = xv[select] * ms
+            vel[:, 1] = yv[select] * ms
+            vel[:, 2] = zv[select] * ms
             vel = vel[sort]
             for i, u in enumerate(uniq_subchain):
                 # This finds the sum locally.
-                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i+1]] - \
-                    self.bulk_vel[u]) / self.Tot_M[u])**2.)
+                rms_vel_temp[u][0] = na.sum(((vel[marks[i]:marks[i + 1]] - \
+                    self.bulk_vel[u]) / self.Tot_M[u]) ** 2.)
                 # I could use self.group_sizes...
-                rms_vel_temp[u][1] = marks[i+1] - marks[i]
+                rms_vel_temp[u][1] = marks[i + 1] - marks[i]
             del vel, marks, uniq_subchain
         # Bring it together.
         rms_vel_temp = self.comm.mpi_allreduce(rms_vel_temp, op='sum')
@@ -1519,7 +1181,7 @@
         del rms_vel_temp
         yt_counters("rms vel computing")
         self.taskID = obj.mine
-        self.halo_taskmap = obj.halo_taskmap # A defaultdict.
+        self.halo_taskmap = obj.halo_taskmap  # A defaultdict.
         del obj
         gc.collect()
         yt_counters("Precomp bulk vel.")
@@ -1530,7 +1192,7 @@
         Each task will make an entry for all groups, but it may be empty.
         """
         unique_ids = na.unique(self.tags)
-        counts = na.bincount((self.tags+1).tolist())
+        counts = na.bincount((self.tags + 1).tolist())
         sort_indices = na.argsort(self.tags)
         grab_indices = na.indices(self.tags.shape).ravel()[sort_indices]
         del sort_indices
@@ -1544,23 +1206,27 @@
             return
         for i in unique_ids:
             if i == -1:
-                cp += counts[i+1]
+                cp += counts[i + 1]
                 continue
-            # If there is a gap in the unique_ids, make empty groups to 
+            # If there is a gap in the unique_ids, make empty groups to
             # fill it in.
             while index < i:
                 self._groups[index] = self._halo_class(self, index, \
                     size=self.group_sizes[index], CoM=self.CoM[index], \
                     max_dens_point=self.max_dens_point[index], \
-                    group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
-                    bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
+                    group_total_mass=self.Tot_M[index],
+                    max_radius=self.max_radius[index],
+                    bulk_vel=self.bulk_vel[index],
+                    tasks=self.halo_taskmap[index],
                     rms_vel=self.rms_vel[index])
                 # I don't own this halo
                 self.comm.do_not_claim_object(self._groups[index])
-                self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
-                    self.max_dens_point[index][2], self.max_dens_point[index][3]]
+                self._max_dens[index] = [self.max_dens_point[index][0],
+                    self.max_dens_point[index][1], \
+                    self.max_dens_point[index][2],
+                    self.max_dens_point[index][3]]
                 index += 1
-            cp_c = cp + counts[i+1]
+            cp_c = cp + counts[i + 1]
             group_indices = grab_indices[cp:cp_c]
             self._groups[index] = self._halo_class(self, i, group_indices, \
                 size=self.group_sizes[i], CoM=self.CoM[i], \
@@ -1570,20 +1236,23 @@
                 rms_vel=self.rms_vel[i])
             # This halo may be owned by many, including this task
             self.comm.claim_object(self._groups[index])
-            self._max_dens[index] = [self.max_dens_point[i][0], self.max_dens_point[i][1], \
+            self._max_dens[index] = [self.max_dens_point[i][0],
+                self.max_dens_point[i][1], \
                 self.max_dens_point[i][2], self.max_dens_point[i][3]]
-            cp += counts[i+1]
+            cp += counts[i + 1]
             index += 1
         # If there are missing groups at the end, add them.
         while index < self.group_count:
             self._groups[index] = self._halo_class(self, index, \
                 size=self.group_sizes[index], CoM=self.CoM[index], \
                 max_dens_point=self.max_dens_point[i], \
-                group_total_mass=self.Tot_M[index], max_radius=self.max_radius[index],
+                group_total_mass=self.Tot_M[index],
+                max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
                 rms_vel=self.rms_vel[index])
             self.comm.do_not_claim_object(self._groups[index])
-            self._max_dens[index] = [self.max_dens_point[index][0], self.max_dens_point[index][1], \
+            self._max_dens[index] = [self.max_dens_point[index][0],
+                self.max_dens_point[index][1], \
                 self.max_dens_point[index][2], self.max_dens_point[index][3]]
             index += 1
         # Clean up
@@ -1600,28 +1269,31 @@
 
     def write_out(self, filename="parallelHopAnalysis.out"):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
             Default = "parallelHopAnalysis.out".
-        
+
         Examples
         --------
         >>> halos.write_out("parallelHopAnalysis.out")
         """
         HaloList.write_out(self, filename)
 
+
 class GenericHaloFinder(HaloList, ParallelAnalysisInterface):
     def __init__(self, pf, ds, dm_only=True, padding=0.0):
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.hierarchy = pf.h
-        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge))/2.0
+        self.center = (na.array(ds.right_edge) + na.array(ds.left_edge)) / 2.0
 
     def _parse_halolist(self, threshold_adjustment):
-        groups, max_dens, hi  = [], {}, 0
+        groups = []
+        max_dens = {}
+        hi = 0
         LE, RE = self.bounds
         for halo in self._groups:
             this_max_dens = halo.maximum_density_location()
@@ -1629,15 +1301,17 @@
             if na.all((this_max_dens >= LE) & (this_max_dens <= RE)):
                 # Now we add the halo information to OURSELVES, taken from the
                 # self.hop_list
-                # We need to mock up the HOPHaloList thingie, so we need to set:
-                #     self._max_dens
-                max_dens_temp = list(self._max_dens[halo.id])[0] / threshold_adjustment
-                max_dens[hi] = [max_dens_temp] + list(self._max_dens[halo.id])[1:4]
+                # We need to mock up the HOPHaloList thingie, so we need to
+                #     set self._max_dens
+                max_dens_temp = list(self._max_dens[halo.id])[0] / \
+                    threshold_adjustment
+                max_dens[hi] = [max_dens_temp] + \
+                    list(self._max_dens[halo.id])[1:4]
                 groups.append(self._halo_class(self, hi))
                 groups[-1].indices = halo.indices
                 self.comm.claim_object(groups[-1])
                 hi += 1
-        del self._groups, self._max_dens # explicit >> implicit
+        del self._groups, self._max_dens  # explicit >> implicit
         self._groups = groups
         self._max_dens = max_dens
 
@@ -1651,10 +1325,11 @@
         mine, halo_info = self.comm.mpi_info_dict(len(self))
         nhalos = sum(halo_info.values())
         # Figure out our offset
-        my_first_id = sum([v for k,v in halo_info.items() if k < mine])
+        my_first_id = sum([v for k, v in halo_info.items() if k < mine])
         # Fix our max_dens
         max_dens = {}
-        for i,m in self._max_dens.items(): max_dens[i+my_first_id] = m
+        for i, m in self._max_dens.items():
+            max_dens[i + my_first_id] = m
         self._max_dens = max_dens
         for halo in self._groups:
             halo._max_dens = self._max_dens
@@ -1668,17 +1343,18 @@
                        [self._halo_class(self, i) for i in range(after, nhalos)]
         id = 0
         for proc in sorted(halo_info.keys()):
-            for halo in self._groups[id:id+halo_info[proc]]:
+            for halo in self._groups[id:id + halo_info[proc]]:
                 halo.id = id
                 halo._distributed = self._distributed
                 halo._owner = proc
                 id += 1
-        def haloCmp(h1,h2):
-            c = cmp(h1.total_mass(),h2.total_mass())
+
+        def haloCmp(h1, h2):
+            c = cmp(h1.total_mass(), h2.total_mass())
             if c != 0:
                 return -1 * c
             if c == 0:
-                return cmp(h1.center_of_mass()[0],h2.center_of_mass()[0])
+                return cmp(h1.center_of_mass()[0], h2.center_of_mass()[0])
         self._groups.sort(haloCmp)
         sorted_max_dens = {}
         for i, halo in enumerate(self._groups):
@@ -1688,25 +1364,25 @@
         self._max_dens = sorted_max_dens
         for i, halo in enumerate(self._groups):
             halo._max_dens = self._max_dens
-        
+
     def _reposition_particles(self, bounds):
         # This only does periodicity.  We do NOT want to deal with anything
-        # else.  The only reason we even do periodicity is the 
+        # else.  The only reason we even do periodicity is the
         LE, RE = bounds
         dw = self.pf.domain_right_edge - self.pf.domain_left_edge
         for i, ax in enumerate('xyz'):
             arr = self._data_source["particle_position_%s" % ax]
-            arr[arr < LE[i]-self.padding] += dw[i]
-            arr[arr > RE[i]+self.padding] -= dw[i]
+            arr[arr < LE[i] - self.padding] += dw[i]
+            arr[arr > RE[i] + self.padding] -= dw[i]
 
     def write_out(self, filename):
         r"""Write out standard halo information to a text file.
-        
+
         Parameters
         ----------
         filename : String
             The name of the file to write to.
-        
+
         Examples
         --------
         >>> halos.write_out("HopAnalysis.out")
@@ -1725,7 +1401,7 @@
         ----------
         prefix : String
             The prefix for the name of the file.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists_txt("halo-parts")
@@ -1743,12 +1419,12 @@
         is run in parallel, halos will only be written out on the processors to
         which they belong.  See `Halo.write_particle_lists_txt` for how to
         track these halos globally across files.
-        
+
         Parameters
         ----------
         prefix : String
             The prefix for the name(s) of the HDF5 files.
-        
+
         Examples
         --------
         >>> halos.write_particle_lists("halo-parts")
@@ -1762,22 +1438,22 @@
 
     def dump(self, basename="HopAnalysis"):
         r"""Save the full halo data to disk.
-        
+
         This function will save the halo data in such a manner that it can be
         easily re-loaded later using `GenericHaloFinder.load`.
         This is similar in concept to
         pickling the data, but outputs the data in the already-established
         data formats. The simple halo data is written to a text file
-        (e.g. "HopAnalysis.out") using
-        write_out(), and the particle data to hdf5 files (e.g. "HopAnalysis.h5")
+        (e.g. "HopAnalysis.out") using write_out(), and the particle data
+        to hdf5 files (e.g. "HopAnalysis.h5")
         using write_particle_lists().
-        
+
         Parameters
         ----------
         basename : String
-            The base name for the files the data will be written to. Default = 
+            The base name for the files the data will be written to. Default =
             "HopAnalysis".
-        
+
         Examples
         --------
         >>> halos.dump("MyHalos")
@@ -1786,29 +1462,30 @@
         self.write_particle_lists(basename)
         self.write_particle_lists_txt(basename)
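A hypothetical round trip using the pieces shown in this diff: dump() writes "<basename>.out", "<basename>.txt" and per-task "<basename>.h5" files, which LoadedHaloList can read back without re-running the finder:

    halos.dump("MyHalos")
    reloaded = LoadedHaloList(pf, "MyHalos")
    com = reloaded[0].center_of_mass()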
 
+
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
-    def __init__(self, pf, subvolume=None,threshold=160, dm_only=True, \
+    def __init__(self, pf, subvolume=None, threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
         fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
-        total_mass=None, num_particles=None, tree = 'F'):
+        total_mass=None, num_particles=None, tree='F'):
         r"""Parallel HOP halo finder.
-        
+
         Halos are built by:
         1. Calculating a density for each particle based on a smoothing kernel.
         2. Recursively linking particles to other particles from lower density
         particles to higher.
         3. Geometrically proximate chains are identified and
         4. merged into final halos following merging rules.
-        
+
         Lower thresholds generally produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         This is very similar to HOP, but it does not produce precisely the
         same halos due to unavoidable numerical differences.
-        
+
         Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
         Cosmological Data Sets." arXiv (2010) 1001.3411
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -1863,7 +1540,7 @@
             fancy_padding.
             Default = None, which means the number of particles is
             automatically calculated.
-        
+
         Examples
         -------
         >>> pf = load("RedshiftOutput0000")
@@ -1892,8 +1569,9 @@
         yt_counters("Reading Data")
         # Adaptive subregions by bisection. We do not load balance if we are
         # analyzing a subvolume.
-        ds_names = ["particle_position_x","particle_position_y","particle_position_z"]
-        if ytcfg.getboolean("yt","inline") == False and \
+        ds_names = ["particle_position_x", "particle_position_y",
+            "particle_position_z"]
+        if ytcfg.getboolean("yt", "inline") == False and \
             resize and self.comm.size != 1 and subvolume is None:
             random.seed(self.comm.rank)
             cut_list = self.partition_hierarchy_3d_bisection_list()
@@ -1901,16 +1579,18 @@
             self.bucket_bounds = []
             if self.comm.rank == 0:
                 self._recursive_divide(root_points, topbounds, 0, cut_list)
-            self.bucket_bounds = self.comm.mpi_bcast_pickled(self.bucket_bounds)
+            self.bucket_bounds = \
+                self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.]*3, LE, RE)
+            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5]*3, LE, RE)
+            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+                LE, RE)
         # get the average spacing between particles for this region
-        # The except is for the serial case, where the full box is what we want.
+        # The except is for the serial case where the full box is what we want.
         if num_particles is None:
             data = self._data_source["particle_position_x"]
         try:
@@ -1920,33 +1600,42 @@
         vol = l[0] * l[1] * l[2]
         full_vol = vol
         # We will use symmetric padding when a subvolume is being used.
-        if not fancy_padding or subvolume is not None or num_particles is not None:
+        if not fancy_padding or subvolume is not None or \
+                num_particles is not None:
             if num_particles is None:
                 num_particles = data.size
-            avg_spacing = (float(vol) / num_particles)**(1./3.)
+            avg_spacing = (float(vol) / num_particles) ** (1. / 3.)
             # padding is a function of inter-particle spacing, this is an
             # approximation, but it's OK with the safety factor
-            padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
-            self.padding = (na.ones(3,dtype='float64')*padding, na.ones(3,dtype='float64')*padding)
+            padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
+                avg_spacing
+            self.padding = (na.ones(3, dtype='float64') * padding,
+                na.ones(3, dtype='float64') * padding)
             mylog.info('padding %s avg_spacing %f vol %f local_parts %d' % \
                 (str(self.padding), avg_spacing, vol, num_particles))
         # Another approach to padding, perhaps more accurate.
         elif fancy_padding and self._distributed:
-            LE_padding, RE_padding = na.empty(3,dtype='float64'), na.empty(3,dtype='float64')
-            avg_spacing = (float(vol) / data.size)**(1./3.)
-            base_padding = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+            LE_padding = na.empty(3, dtype='float64')
+            RE_padding = na.empty(3, dtype='float64')
+            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
+                avg_spacing
             for dim in xrange(3):
-                if ytcfg.getboolean("yt","inline") == False:
+                if ytcfg.getboolean("yt", "inline") == False:
                     data = self._data_source[ds_names[dim]]
                 else:
                     data = self._data_source[ds_names[dim]]
                 num_bins = 1000
-                width = self._data_source.right_edge[dim] - self._data_source.left_edge[dim]
-                area = (self._data_source.right_edge[(dim+1)%3] - self._data_source.left_edge[(dim+1)%3]) * \
-                    (self._data_source.right_edge[(dim+2)%3] - self._data_source.left_edge[(dim+2)%3])
+                width = self._data_source.right_edge[dim] - \
+                    self._data_source.left_edge[dim]
+                area = (self._data_source.right_edge[(dim + 1) % 3] - \
+                    self._data_source.left_edge[(dim + 1) % 3]) * \
+                    (self._data_source.right_edge[(dim + 2) % 3] - \
+                    self._data_source.left_edge[(dim + 2) % 3])
                 bin_width = base_padding
                 num_bins = int(math.ceil(width / bin_width))
-                bins = na.arange(num_bins+1, dtype='float64') * bin_width + self._data_source.left_edge[dim]
+                bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+                    self._data_source.left_edge[dim]
                 counts, bins = na.histogram(data, bins)
                 # left side.
                 start = 0
@@ -1955,32 +1644,37 @@
                     start += 1
                     count += counts[start]
                 # Get the avg spacing in just this boundary.
-                vol = area * (bins[start+1] - bins[0])
-                avg_spacing = (float(vol) / count)**(1./3.)
-                LE_padding[dim] = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+                vol = area * (bins[start + 1] - bins[0])
+                avg_spacing = (float(vol) / count) ** (1. / 3.)
+                LE_padding[dim] = (self.num_neighbors) ** (1. / 3.) * \
+                    self.safety * avg_spacing
                 # right side.
                 start = -1
                 count = counts[-1]
                 while count < self.num_neighbors:
                     start -= 1
                     count += counts[start]
-                vol = area * (bins[-1] - bins[start-1])
-                avg_spacing = (float(vol) / count)**(1./3.)
-                RE_padding[dim] = (self.num_neighbors)**(1./3.) * self.safety * avg_spacing
+                vol = area * (bins[-1] - bins[start - 1])
+                avg_spacing = (float(vol) / count) ** (1. / 3.)
+                RE_padding[dim] = (self.num_neighbors) ** (1. / 3.) * \
+                    self.safety * avg_spacing
             self.padding = (LE_padding, RE_padding)
             del bins, counts
             mylog.info('fancy_padding %s avg_spacing %f full_vol %f local_parts %d %s' % \
-                (str(self.padding), avg_spacing, full_vol, data.size, str(self._data_source)))
+                (str(self.padding), avg_spacing, full_vol,
+                data.size, str(self._data_source)))
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(), 
+            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
-            self.padding = (na.zeros(3,dtype='float64'), na.zeros(3,dtype='float64'))
+            self.padding = (na.zeros(3, dtype='float64'),
+                na.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+                ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -1989,8 +1683,8 @@
         (LE_padding, RE_padding) = self.padding
         parallelHOPHaloList.__init__(self, self._data_source, self.padding, \
         self.num_neighbors, self.bounds, total_mass, period, \
-        threshold=threshold, dm_only=dm_only, rearrange=rearrange, premerge=premerge,
-        tree = self.tree)
+        threshold=threshold, dm_only=dm_only, rearrange=rearrange,
+            premerge=premerge, tree=self.tree)
         self._join_halolists()
         yt_counters("Final Grouping")
 
@@ -2003,19 +1697,19 @@
         random_points = int(self.sample * n_parts)
         # We want to get a representative selection of random particles in
         # each subvolume.
-        adjust = float(local_parts) / ( float(n_parts) / self.comm.size)
+        adjust = float(local_parts) / (float(n_parts) / self.comm.size)
         n_random = int(adjust * float(random_points) / self.comm.size)
         mylog.info("Reading in %d random particles." % n_random)
         # Get unique random particles.
         my_points = na.empty((n_random, 3), dtype='float64')
         uni = na.array(random.sample(xrange(xp.size), n_random))
         uni = uni[uni.argsort()]
-        my_points[:,0] = xp[uni]
+        my_points[:, 0] = xp[uni]
         del xp
         self._data_source.clear_data()
-        my_points[:,1] = self._data_source["particle_position_y"][uni]
+        my_points[:, 1] = self._data_source["particle_position_y"][uni]
         self._data_source.clear_data()
-        my_points[:,2] = self._data_source["particle_position_z"][uni]
+        my_points[:, 2] = self._data_source["particle_position_z"][uni]
         self._data_source.clear_data()
         del uni
         # Collect them on the root task.
@@ -2023,10 +1717,10 @@
         if mine == 0:
             tot_random = sum(sizes.values())
             root_points = na.empty((tot_random, 3), dtype='float64')
-            root_points.shape = (1, 3*tot_random)
+            root_points.shape = (1, 3 * tot_random)
         else:
             root_points = na.empty([])
-        my_points.shape = (1, n_random*3)
+        my_points.shape = (1, n_random * 3)
         root_points = self.comm.par_combine_object(my_points[0],
                 datatype="array", op="cat")
         del my_points
@@ -2040,31 +1734,34 @@
         num_bins = 1000
         width = bounds[1][dim] - bounds[0][dim]
         bin_width = width / num_bins
-        bins = na.arange(num_bins+1, dtype='float64') * bin_width + bounds[0][dim]
-        counts, bins = na.histogram(points[:,dim], bins)
+        bins = na.arange(num_bins + 1, dtype='float64') * bin_width + \
+            bounds[0][dim]
+        counts, bins = na.histogram(points[:, dim], bins)
         # Find the bin that passes the cut points.
         midpoints = [bounds[0][dim]]
         sum = 0
         bin = 0
-        for step in xrange(1,cut_list[level][1]):
-            while sum < ((parts*step)/cut_list[level][1]):
+        for step in xrange(1, cut_list[level][1]):
+            while sum < ((parts * step) / cut_list[level][1]):
                 lastsum = sum
                 sum += counts[bin]
                 bin += 1
             # Bin edges
-            left_edge = bins[bin-1]
+            left_edge = bins[bin - 1]
             right_edge = bins[bin]
-            # Find a better approx of the midpoint cut line using a linear approx.
+            # Find a better approx of the midpoint cut
+            # line using a linear approx.
             a = float(sum - lastsum) / (right_edge - left_edge)
-            midpoints.append(left_edge + (0.5 - (float(lastsum) / parts / 2)) / a)
+            midpoints.append(left_edge + (0.5 - \
+                (float(lastsum) / parts / 2)) / a)
         midpoints.append(bounds[1][dim])
 
         # Split the points & update the bounds.
         subpoints = []
         subbounds = []
-        for pair in zip(midpoints[:-1],midpoints[1:]):
-            select = na.bitwise_and(points[:,dim] >= pair[0],
-                points[:,dim] < pair[1])
+        for pair in zip(midpoints[:-1], midpoints[1:]):
+            select = na.bitwise_and(points[:, dim] >= pair[0],
+                points[:, dim] < pair[1])
             subpoints.append(points[select])
             nb = bounds.copy()
             nb[0][dim] = pair[0]
@@ -2076,7 +1773,7 @@
             if level == maxlevel:
                 self.bucket_bounds.append(pair[1])
             else:
-                self._recursive_divide(pair[0], pair[1], level+1, cut_list)
+                self._recursive_divide(pair[0], pair[1], level + 1, cut_list)
 
     def _join_halolists(self):
         if self.group_count == 0:
@@ -2084,7 +1781,7 @@
             return
         ms = -self.Tot_M.copy()
         del self.Tot_M
-        Cx = self.CoM[:,0].copy()
+        Cx = self.CoM[:, 0].copy()
         sorted = na.lexsort([Cx, ms])
         del Cx, ms
         self._groups = self._groups[sorted]
@@ -2098,20 +1795,20 @@
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
             padding=0.02, total_mass=None):
         r"""HOP halo finder.
-        
+
         Halos are built by:
         1. Calculating a density for each particle based on a smoothing kernel.
         2. Recursively linking particles to other particles from lower density
         particles to higher.
         3. Geometrically proximate chains are identified and
         4. merged into final halos following merging rules.
-        
+
         Lower thresholds generally produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
         Simulations." ApJ (1998) vol. 498 pp. 137-142
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -2142,7 +1839,6 @@
             Default = None, which means the total mass is automatically
             calculated.
 
-        
         Examples
         --------
         >>> pf = load("RedshiftOutput0000")
@@ -2153,13 +1849,15 @@
             ds_RE = na.array(subvolume.right_edge)
         self.period = pf.domain_right_edge - pf.domain_left_edge
         self._data_source = pf.h.all_data()
-        GenericHaloFinder.__init__(self, pf, self._data_source, dm_only, padding)
+        GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
+            padding)
         # do it once with no padding so the total_mass is correct
         # (no duplicated particles), and on the entire volume, even if only
         # a small part is actually going to be used.
         self.padding = 0.0
         padded, LE, RE, self._data_source = \
-            self.partition_hierarchy_3d(ds = self._data_source, padding=self.padding)
+            self.partition_hierarchy_3d(ds=self._data_source,
+                padding=self.padding)
         # For scaling the threshold, note that it's a passthrough
         if total_mass is None:
             if dm_only:
@@ -2167,44 +1865,52 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source["ParticleMassMsun"].sum(dtype='float64'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
-        self.padding = padding #* pf["unitary"] # This should be clevererer
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+        else:
+            self._data_source = pf.h.all_data()
+        self.padding = padding  # * pf["unitary"] # This should be clevererer
         padded, LE, RE, self._data_source = \
-            self.partition_hierarchy_3d(ds = self._data_source,
+            self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
         self.bounds = (LE, RE)
-        # reflect particles around the periodic boundary
-        #self._reposition_particles((LE, RE))
-        if dm_only:
+        # sub_mass can be skipped if subvolume is not used and this is not
+        # parallel.
+        if subvolume is None and \
+                ytcfg.getint("yt", "__topcomm_parallel_size") == 1:
+            sub_mass = total_mass
+        elif dm_only:
             select = self._get_dm_indices()
             sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
         else:
-            sub_mass = self._data_source["ParticleMassMsun"].sum(dtype='float64')
+            sub_mass = \
+                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
         HOPHaloList.__init__(self, self._data_source,
-            threshold*total_mass/sub_mass, dm_only)
-        self._parse_halolist(total_mass/sub_mass)
+            threshold * total_mass / sub_mass, dm_only)
+        self._parse_halolist(total_mass / sub_mass)
         self._join_halolists()
 
+
 class FOFHaloFinder(GenericHaloFinder, FOFHaloList):
-    def __init__(self, pf, subvolume=None, link=0.2, dm_only=True, padding=0.02):
+    def __init__(self, pf, subvolume=None, link=0.2, dm_only=True,
+        padding=0.02):
         r"""Friends-of-friends halo finder.
-        
+
         Halos are found by linking together all pairs of particles closer than
         some distance from each other. Particles may have multiple links,
         and halos are found by recursively linking together all such pairs.
-        
+
         Larger linking lengths produce more halos, and the largest halos
         become larger. Also, halos become more filamentary and over-connected.
-        
+
         Davis et al. "The evolution of large-scale structure in a universe
         dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
-        
+
         Parameters
         ----------
         pf : `StaticOutput`
@@ -2217,7 +1923,7 @@
             If positive, the interparticle distance (compared to the overall
             average) used to build the halos. If negative, this is taken to be
             the *actual* linking length, and no other calculations will be
-            applied.  Default = 0.2.  
+            applied.  Default = 0.2.
         dm_only : bool
             If True, only dark matter particles are used when building halos.
             Default = True.
@@ -2226,7 +1932,7 @@
             with duplicated particles for halo finding to work. This number
             must be no smaller than the radius of the largest halo in the box
             in code units. Default = 0.02.
-        
+
         Examples
         --------
         >>> pf = load("RedshiftOutput0000")
@@ -2241,7 +1947,7 @@
         self._data_source = pf.h.all_data()
         GenericHaloFinder.__init__(self, pf, self._data_source, dm_only,
             padding)
-        self.padding = 0.0 #* pf["unitary"] # This should be clevererer
+        self.padding = 0.0  # * pf["unitary"] # This should be clevererer
         # get the total number of particles across all procs, with no padding
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
@@ -2254,13 +1960,16 @@
             # Because we are now allowing for datasets with non 1-periodicity,
             # but symmetric, vol is always 1.
             vol = 1.
-            avg_spacing = (float(vol) / n_parts)**(1./3.)
+            avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
             linking_length = link * avg_spacing
         else:
             linking_length = na.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.]*3, ds_LE, ds_RE)
+            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+                ds_RE)
+        else:
+            self._data_source = pf.h.all_data()
         padded, LE, RE, self._data_source = \
             self.partition_hierarchy_3d(ds=self._data_source,
             padding=self.padding)
@@ -2275,24 +1984,25 @@
 
 HaloFinder = HOPHaloFinder
 
+
 class LoadHaloes(GenericHaloFinder, LoadedHaloList):
     def __init__(self, pf, basename):
         r"""Load the full halo data into memory.
-        
+
         This function takes the output of `GenericHaloFinder.dump` and
         re-establishes the list of halos in memory. This enables the full set
         of halo analysis features without running the halo finder again. To
         be precise, the particle data for each halo is only read in when
         necessary, so examining a single halo will not require as much memory
         as is required for halo finding.
-        
+
         Parameters
         ----------
         basename : String
             The base name of the files that will be read in. This should match
             what was used when `GenericHaloFinder.dump` was called. Default =
             "HopAnalysis".
-        
+
         Examples
         --------
         >>> pf = load("data0005")
@@ -2300,6 +2010,3 @@
         """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
-
-
-        

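For reference, the padding used above is derived from the mean interparticle spacing: avg_spacing = (vol / N)**(1/3), scaled by num_neighbors**(1/3) and the safety factor. A minimal standalone sketch of the symmetric-padding branch (the function name and the num_neighbors/safety defaults here are illustrative, not the class attributes themselves):

    import numpy as na  # yt's historical numpy alias

    def symmetric_padding(vol, n_particles, num_neighbors=65, safety=1.5):
        # Mean interparticle spacing for a roughly uniform particle distribution.
        avg_spacing = (float(vol) / n_particles) ** (1. / 3.)
        # A cube holding ~num_neighbors particles spans roughly
        # num_neighbors**(1/3) spacings; pad by that, times a safety factor.
        pad = num_neighbors ** (1. / 3.) * safety * avg_spacing
        return (na.ones(3, dtype='float64') * pad,
                na.ones(3, dtype='float64') * pad)

    # e.g. a unit box with 128**3 particles:
    # LE_pad, RE_pad = symmetric_padding(1.0, 128 ** 3)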

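The FOFHaloFinder changes above keep the same convention for the linking length: a positive link is a fraction of the mean interparticle spacing (with vol fixed at 1 for the symmetric periodic box), while a negative link is taken as the absolute linking length. A small sketch under those assumptions:

    def linking_length(link, n_parts, vol=1.0):
        # Positive: a fraction of the mean interparticle spacing.
        if link > 0:
            avg_spacing = (float(vol) / n_parts) ** (1. / 3.)
            return link * avg_spacing
        # Negative: the magnitude is the linking length itself, in code units.
        return abs(link)

    # e.g. linking_length(0.2, 128 ** 3) is approximately 0.2 / 128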
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/hop/setup.py
--- a/yt/analysis_modules/halo_finding/hop/setup.py
+++ b/yt/analysis_modules/halo_finding/hop/setup.py
@@ -1,19 +1,19 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('hop',parent_package,top_path)
-    config.add_extension("EnzoHop", sources=
-                                    ["EnzoHop.c",
+    config = Configuration('hop', parent_package, top_path)
+    config.add_extension("EnzoHop", sources=["EnzoHop.c",
                                      "hop_hop.c",
                                      "hop_kd.c",
                                      "hop_regroup.c",
                                      "hop_slice.c",
-                                     "hop_smooth.c",])
-    config.make_config_py() # installs __config__.py
+                                     "hop_smooth.c"])
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


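All of the setup.py files touched in this changeset follow the same numpy.distutils pattern: each subpackage exposes a configuration() function, a parent configuration picks it up via add_subpackage(), and the top-level build consumes it. A hedged sketch of that pattern (the package and subpackage names are illustrative):

    #!/usr/bin/env python
    from numpy.distutils.core import setup
    from numpy.distutils.misc_util import Configuration


    def configuration(parent_package='', top_path=None):
        config = Configuration('my_module', parent_package, top_path)
        config.add_subpackage("hop")  # picks up hop/setup.py:configuration()
        config.make_config_py()       # installs __config__.py
        return config

    if __name__ == '__main__':
        setup(configuration=configuration)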
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -388,7 +388,7 @@
             self.pos[self.psize:, 2] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            self.kdtree = cKDTree(self.pos, leafsize = 32)
+            self.kdtree = cKDTree(self.pos, leafsize = 64)
         self.__max_memory()
         yt_counters("init kd tree")
 
@@ -613,66 +613,73 @@
         chain_map = defaultdict(set)
         for i in xrange(max(self.chainID)+1):
             chain_map[i].add(i)
-        if self.tree == 'F':
+        yt_counters("preconnect kd tree search.")
+        if self.tree == 'C':
+            nn = self.nMerge + 2
+            rv = self.kdtree.chainHOP_preconnect(
+                self.chainID, self.density, self.densest_in_chain,
+                self.is_inside, self.search_again,
+                self.peakthresh, self.saddlethresh, nn, self.nMerge,
+                chain_map)
+            self.search_again = rv.astype("bool")
+            yt_counters("preconnect kd tree search.")
+        elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
             fKD.dist = na.empty(self.nMerge+2, dtype='float64')
             fKD.tags = na.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
-        elif self.tree == 'C':
-            nn = self.nMerge + 2
-        yt_counters("preconnect kd tree search.")
-        for i in xrange(self.size):
-            # Don't consider this particle if it's not part of a chain.
-            if self.chainID[i] < 0: continue
-            chainID_i = self.chainID[i]
-            # If this particle is in the padding, don't make a connection.
-            if not self.is_inside[i]: continue
-            # Find this particle's chain max_dens.
-            part_max_dens = self.densest_in_chain[chainID_i]
-            # We're only connecting >= peakthresh chains now.
-            if part_max_dens < self.peakthresh: continue
-            # Loop over nMerge closest nearest neighbors.
-            if self.tree == 'F':
-                fKD.qv = fKD.pos[:, i]
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
-            elif self.tree == 'C':
-                qv = self.pos[i, :]
-                NNtags = self.kdtree.query(qv, nn)[1]
-            same_count = 0
-            for j in xrange(int(self.nMerge+1)):
-                thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
-                thisNN_chainID = self.chainID[thisNN]
-                # If our neighbor is in the same chain, move on.
-                # Move on if these chains are already connected:
-                if chainID_i == thisNN_chainID or \
-                        thisNN_chainID in chain_map[chainID_i]:
-                    same_count += 1
-                    continue
-                # Everything immediately below is for
-                # neighboring particles with a chainID. 
-                if thisNN_chainID >= 0:
-                    # Find thisNN's chain's max_dens.
-                    thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
-                    # We're only linking peakthresh chains
-                    if thisNN_max_dens < self.peakthresh: continue
-                    # Calculate the two groups boundary density.
-                    boundary_density = (self.density[thisNN] + self.density[i]) / 2.
-                    # Don't connect if the boundary is too low.
-                    if boundary_density < self.saddlethresh: continue
-                    # Mark these chains as related.
-                    chain_map[thisNN_chainID].add(chainID_i)
-                    chain_map[chainID_i].add(thisNN_chainID)
-            if same_count == self.nMerge + 1:
-                # All our neighbors are in the same chain already, so 
-                # we don't need to search again.
-                self.search_again[i] = False
-        try:
-            del NNtags
-        except UnboundLocalError:
-            pass
+            for i in xrange(self.size):
+                # Don't consider this particle if it's not part of a chain.
+                if self.chainID[i] < 0: continue
+                chainID_i = self.chainID[i]
+                # If this particle is in the padding, don't make a connection.
+                if not self.is_inside[i]: continue
+                # Find this particle's chain max_dens.
+                part_max_dens = self.densest_in_chain[chainID_i]
+                # We're only connecting >= peakthresh chains now.
+                if part_max_dens < self.peakthresh: continue
+                # Loop over nMerge closest nearest neighbors.
+                if self.tree == 'F':
+                    fKD.qv = fKD.pos[:, i]
+                    find_nn_nearest_neighbors()
+                    NNtags = fKD.tags[:] - 1
+                elif self.tree == 'C':
+                    qv = self.pos[i, :]
+                    NNtags = self.kdtree.query(qv, nn)[1]
+                same_count = 0
+                for j in xrange(int(self.nMerge+1)):
+                    thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
+                    thisNN_chainID = self.chainID[thisNN]
+                    # If our neighbor is in the same chain, move on.
+                    # Move on if these chains are already connected:
+                    if chainID_i == thisNN_chainID or \
+                            thisNN_chainID in chain_map[chainID_i]:
+                        same_count += 1
+                        continue
+                    # Everything immediately below is for
+                    # neighboring particles with a chainID. 
+                    if thisNN_chainID >= 0:
+                        # Find thisNN's chain's max_dens.
+                        thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
+                        # We're only linking peakthresh chains
+                        if thisNN_max_dens < self.peakthresh: continue
+                        # Calculate the two groups boundary density.
+                        boundary_density = (self.density[thisNN] + self.density[i]) / 2.
+                        # Don't connect if the boundary is too low.
+                        if boundary_density < self.saddlethresh: continue
+                        # Mark these chains as related.
+                        chain_map[thisNN_chainID].add(chainID_i)
+                        chain_map[chainID_i].add(thisNN_chainID)
+                if same_count == self.nMerge + 1:
+                    # All our neighbors are in the same chain already, so 
+                    # we don't need to search again.
+                    self.search_again[i] = False
+            try:
+                del NNtags
+            except UnboundLocalError:
+                pass
         yt_counters("preconnect kd tree search.")
         # Recursively jump links until we get to a chain whose densest
         # link is to itself. At that point we've found the densest chain
@@ -680,7 +687,7 @@
         yt_counters("preconnect pregrouping.")
         final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
-        for i in xrange(max(self.chainID)+1):
+        for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
             densest_link = self._recurse_preconnected_links(chain_map, j)
             final_chain_map[j] = densest_link


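The preconnect rework above pushes the per-particle loop inside the tree == 'F' branch and, for tree == 'C', replaces it entirely with a single chainHOP_preconnect call on the Cython kd-tree. The underlying per-particle operation is an ordinary k-nearest-neighbor query; a rough sketch of that query using SciPy's cKDTree as a stand-in (not the forthcoming kdtree module itself):

    import numpy as np
    from scipy.spatial import cKDTree

    pos = np.random.random((10000, 3))    # particle positions, illustrative
    kdtree = cKDTree(pos, leafsize=64)

    nMerge = 4
    nn = nMerge + 2                        # self plus nMerge + 1 neighbors
    qv = pos[0, :]
    dists, NNtags = kdtree.query(qv, nn)   # NNtags[0] is the query particle itself
    neighbors = NNtags[1:]                 # skip ourselves, as the loop above does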
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/parallel_hop/setup.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/setup.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('parallel_hop',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('parallel_hop', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config




diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/rockstar/api.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/api.py
@@ -0,0 +1,27 @@
+"""
+API for Rockstar halo finding
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .rockstar import RockstarHaloFinder


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -0,0 +1,105 @@
+"""
+Operations to get Rockstar loaded up
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.mods import *
+from os import environ
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, ProcessorPool, Communicator
+
+import rockstar_interface
+import socket
+import time
+
+class DomainDecomposer(ParallelAnalysisInterface):
+    def __init__(self, pf, comm):
+        ParallelAnalysisInterface.__init__(self, comm=comm)
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.center = (pf.domain_left_edge + pf.domain_right_edge)/2.0
+
+    def decompose(self):
+        dd = self.pf.h.all_data()
+        check, LE, RE, data_source = self.partition_hierarchy_3d(dd)
+        return data_source
+
+class RockstarHaloFinder(ParallelAnalysisInterface):
+    def __init__(self, pf, num_readers = 0, num_writers = 0):
+        ParallelAnalysisInterface.__init__(self)
+        # No subvolume support
+        self.pf = pf
+        self.hierarchy = pf.h
+        self.num_readers = num_readers
+        self.num_writers = num_writers
+        if self.num_readers + self.num_writers + 1 != self.comm.size:
+            raise RuntimeError
+        self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
+        data_source = None
+        if self.comm.size > 1:
+            self.pool = ProcessorPool()
+            self.pool.add_workgroup(1, name = "server")
+            self.pool.add_workgroup(num_readers, name = "readers")
+            self.pool.add_workgroup(num_writers, name = "writers")
+            for wg in self.pool.workgroups:
+                if self.comm.rank in wg.ranks: self.workgroup = wg
+        data_source = self.pf.h.all_data()
+        self.handler = rockstar_interface.RockstarInterface(
+                self.pf, data_source)
+
+    def _get_hosts(self):
+        if self.comm.size == 1 or self.workgroup.name == "server":
+            server_address = socket.gethostname()
+            sock = socket.socket()
+            sock.bind(('', 0))
+            port = sock.getsockname()[-1]
+            del sock
+        else:
+            server_address, port = None, None
+        self.server_address, self.port = self.comm.mpi_bcast(
+            (server_address, port))
+        self.port = str(self.port)
+
+    def run(self, block_ratio = 1):
+        if block_ratio != 1:
+            raise NotImplementedError
+        self._get_hosts()
+        self.handler.setup_rockstar(self.server_address, self.port,
+                    parallel = self.comm.size > 1,
+                    num_readers = self.num_readers,
+                    num_writers = self.num_writers,
+                    writing_port = -1,
+                    block_ratio = block_ratio)
+        if self.comm.size == 1:
+            self.handler.call_rockstar()
+        else:
+            self.comm.barrier()
+            if self.workgroup.name == "server":
+                self.handler.start_server()
+            elif self.workgroup.name == "readers":
+                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+            elif self.workgroup.name == "writers":
+                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                self.handler.start_client()
+        self.comm.barrier()


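A hedged usage sketch for the new RockstarHaloFinder, based only on the constructor and run() shown above: under MPI the total task count must equal num_readers + num_writers + 1 (one task becomes the server), so four tasks could split into one server, one reader and two writers. The dataset name below is illustrative:

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

    # launch with, e.g., 4 MPI tasks: 1 server + 1 reader + 2 writers
    pf = load("RedshiftOutput0005")
    rh = RockstarHaloFinder(pf, num_readers=1, num_writers=2)
    rh.run()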
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -0,0 +1,349 @@
+"""
+Particle operations for Lagrangian Volume
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import os, sys
+cimport numpy as np
+cimport cython
+from stdlib cimport malloc
+
+cdef import from "particle.h":
+    struct particle:
+        np.int64_t id
+        float pos[6]
+
+cdef import from "io_generic.h":
+    ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p)
+    void set_load_particles_generic(LPG func)
+
+cdef import from "rockstar.h":
+    void rockstar(float *bounds, np.int64_t manual_subs)
+
+cdef import from "config.h":
+    void setup_config()
+
+cdef import from "server.h":
+    int server()
+
+cdef import from "client.h":
+    void client()
+
+cdef import from "meta_io.h":
+    void read_particles(char *filename)
+    void output_and_free_halos(np.int64_t id_offset, np.int64_t snap, 
+			   np.int64_t chunk, float *bounds)
+
+cdef import from "config_vars.h":
+    # Rockstar cleverly puts all of the config variables inside a templated
+    # definition of their variables.
+    char *FILE_FORMAT
+    np.float64_t PARTICLE_MASS
+
+    char *MASS_DEFINITION
+    np.int64_t MIN_HALO_OUTPUT_SIZE
+    np.float64_t FORCE_RES
+
+    np.float64_t SCALE_NOW
+    np.float64_t h0
+    np.float64_t Ol
+    np.float64_t Om
+
+    np.int64_t GADGET_ID_BYTES
+    np.float64_t GADGET_MASS_CONVERSION
+    np.float64_t GADGET_LENGTH_CONVERSION
+    np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+    np.int64_t RESCALE_PARTICLE_MASS
+
+    np.int64_t PARALLEL_IO
+    char *PARALLEL_IO_SERVER_ADDRESS
+    char *PARALLEL_IO_SERVER_PORT
+    np.int64_t PARALLEL_IO_WRITER_PORT
+    char *PARALLEL_IO_SERVER_INTERFACE
+    char *RUN_ON_SUCCESS
+
+    char *INBASE
+    char *FILENAME
+    np.int64_t STARTING_SNAP
+    np.int64_t NUM_SNAPS
+    np.int64_t NUM_BLOCKS
+    np.int64_t NUM_READERS
+    np.int64_t PRELOAD_PARTICLES
+    char *SNAPSHOT_NAMES
+    char *LIGHTCONE_ALT_SNAPS
+    char *BLOCK_NAMES
+
+    char *OUTBASE
+    np.float64_t OVERLAP_LENGTH
+    np.int64_t NUM_WRITERS
+    np.int64_t FORK_READERS_FROM_WRITERS
+    np.int64_t FORK_PROCESSORS_PER_MACHINE
+
+    char *OUTPUT_FORMAT
+    np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    np.int64_t FULL_PARTICLE_CHUNKS
+    char *BGC2_SNAPNAMES
+
+    np.int64_t BOUND_PROPS
+    np.int64_t BOUND_OUT_TO_HALO_EDGE
+    np.int64_t DO_MERGER_TREE_ONLY
+    np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t TRIM_OVERLAP
+    np.float64_t ROUND_AFTER_TRIM
+    np.int64_t LIGHTCONE
+    np.int64_t PERIODIC
+
+    np.float64_t LIGHTCONE_ORIGIN[3]
+    np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+
+    np.float64_t LIMIT_CENTER[3]
+    np.float64_t LIMIT_RADIUS
+
+    np.int64_t SWAP_ENDIANNESS
+    np.int64_t GADGET_VARIANT
+
+    np.float64_t FOF_FRACTION
+    np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    np.int64_t TEMPORAL_HALO_FINDING
+    np.int64_t MIN_HALO_PARTICLES
+    np.float64_t UNBOUND_THRESHOLD
+    np.int64_t ALT_NFW_METRIC
+
+    np.int64_t TOTAL_PARTICLES
+    np.float64_t BOX_SIZE
+    np.int64_t OUTPUT_HMAD
+    np.int64_t OUTPUT_PARTICLES
+    np.int64_t OUTPUT_LEVELS
+    np.float64_t DUMP_PARTICLES[3]
+
+    np.float64_t AVG_PARTICLE_SPACING
+    np.int64_t SINGLE_SNAP
+
+def print_rockstar_settings():
+    # We have to do the config
+    print "FILE_FORMAT =", FILE_FORMAT
+    print "PARTICLE_MASS =", PARTICLE_MASS
+
+    print "MASS_DEFINITION =", MASS_DEFINITION
+    print "MIN_HALO_OUTPUT_SIZE =", MIN_HALO_OUTPUT_SIZE
+    print "FORCE_RES =", FORCE_RES
+
+    print "SCALE_NOW =", SCALE_NOW
+    print "h0 =", h0
+    print "Ol =", Ol
+    print "Om =", Om
+
+    print "GADGET_ID_BYTES =", GADGET_ID_BYTES
+    print "GADGET_MASS_CONVERSION =", GADGET_MASS_CONVERSION
+    print "GADGET_LENGTH_CONVERSION =", GADGET_LENGTH_CONVERSION
+    print "GADGET_SKIP_NON_HALO_PARTICLES =", GADGET_SKIP_NON_HALO_PARTICLES
+    print "RESCALE_PARTICLE_MASS =", RESCALE_PARTICLE_MASS
+
+    print "PARALLEL_IO =", PARALLEL_IO
+    print "PARALLEL_IO_SERVER_ADDRESS =", PARALLEL_IO_SERVER_ADDRESS
+    print "PARALLEL_IO_SERVER_PORT =", PARALLEL_IO_SERVER_PORT
+    print "PARALLEL_IO_WRITER_PORT =", PARALLEL_IO_WRITER_PORT
+    print "PARALLEL_IO_SERVER_INTERFACE =", PARALLEL_IO_SERVER_INTERFACE
+    print "RUN_ON_SUCCESS =", RUN_ON_SUCCESS
+
+    print "INBASE =", INBASE
+    print "FILENAME =", FILENAME
+    print "STARTING_SNAP =", STARTING_SNAP
+    print "NUM_SNAPS =", NUM_SNAPS
+    print "NUM_BLOCKS =", NUM_BLOCKS
+    print "NUM_READERS =", NUM_READERS
+    print "PRELOAD_PARTICLES =", PRELOAD_PARTICLES
+    print "SNAPSHOT_NAMES =", SNAPSHOT_NAMES
+    print "LIGHTCONE_ALT_SNAPS =", LIGHTCONE_ALT_SNAPS
+    print "BLOCK_NAMES =", BLOCK_NAMES
+
+    print "OUTBASE =", OUTBASE
+    print "OVERLAP_LENGTH =", OVERLAP_LENGTH
+    print "NUM_WRITERS =", NUM_WRITERS
+    print "FORK_READERS_FROM_WRITERS =", FORK_READERS_FROM_WRITERS
+    print "FORK_PROCESSORS_PER_MACHINE =", FORK_PROCESSORS_PER_MACHINE
+
+    print "OUTPUT_FORMAT =", OUTPUT_FORMAT
+    print "DELETE_BINARY_OUTPUT_AFTER_FINISHED =", DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    print "FULL_PARTICLE_CHUNKS =", FULL_PARTICLE_CHUNKS
+    print "BGC2_SNAPNAMES =", BGC2_SNAPNAMES
+
+    print "BOUND_PROPS =", BOUND_PROPS
+    print "BOUND_OUT_TO_HALO_EDGE =", BOUND_OUT_TO_HALO_EDGE
+    print "DO_MERGER_TREE_ONLY =", DO_MERGER_TREE_ONLY
+    print "IGNORE_PARTICLE_IDS =", IGNORE_PARTICLE_IDS
+    print "TRIM_OVERLAP =", TRIM_OVERLAP
+    print "ROUND_AFTER_TRIM =", ROUND_AFTER_TRIM
+    print "LIGHTCONE =", LIGHTCONE
+    print "PERIODIC =", PERIODIC
+
+    print "LIGHTCONE_ORIGIN =", LIGHTCONE_ORIGIN[0]
+    print "LIGHTCONE_ORIGIN[1] =", LIGHTCONE_ORIGIN[1]
+    print "LIGHTCONE_ORIGIN[2] =", LIGHTCONE_ORIGIN[2]
+    print "LIGHTCONE_ALT_ORIGIN =", LIGHTCONE_ALT_ORIGIN[0]
+    print "LIGHTCONE_ALT_ORIGIN[1] =", LIGHTCONE_ALT_ORIGIN[1]
+    print "LIGHTCONE_ALT_ORIGIN[2] =", LIGHTCONE_ALT_ORIGIN[2]
+
+    print "LIMIT_CENTER =", LIMIT_CENTER[0]
+    print "LIMIT_CENTER[1] =", LIMIT_CENTER[1]
+    print "LIMIT_CENTER[2] =", LIMIT_CENTER[2]
+    print "LIMIT_RADIUS =", LIMIT_RADIUS
+
+    print "SWAP_ENDIANNESS =", SWAP_ENDIANNESS
+    print "GADGET_VARIANT =", GADGET_VARIANT
+
+    print "FOF_FRACTION =", FOF_FRACTION
+    print "FOF_LINKING_LENGTH =", FOF_LINKING_LENGTH
+    print "INCLUDE_HOST_POTENTIAL_RATIO =", INCLUDE_HOST_POTENTIAL_RATIO
+    print "DOUBLE_COUNT_SUBHALO_MASS_RATIO =", DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    print "TEMPORAL_HALO_FINDING =", TEMPORAL_HALO_FINDING
+    print "MIN_HALO_PARTICLES =", MIN_HALO_PARTICLES
+    print "UNBOUND_THRESHOLD =", UNBOUND_THRESHOLD
+    print "ALT_NFW_METRIC =", ALT_NFW_METRIC
+
+    print "TOTAL_PARTICLES =", TOTAL_PARTICLES
+    print "BOX_SIZE =", BOX_SIZE
+    print "OUTPUT_HMAD =", OUTPUT_HMAD
+    print "OUTPUT_PARTICLES =", OUTPUT_PARTICLES
+    print "OUTPUT_LEVELS =", OUTPUT_LEVELS
+    print "DUMP_PARTICLES =", DUMP_PARTICLES[0]
+    print "DUMP_PARTICLES[1] =", DUMP_PARTICLES[1]
+    print "DUMP_PARTICLES[2] =", DUMP_PARTICLES[2]
+
+    print "AVG_PARTICLE_SPACING =", AVG_PARTICLE_SPACING
+    print "SINGLE_SNAP =", SINGLE_SNAP
+
+cdef class RockstarInterface
+
+cdef RockstarInterface rh
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
+    cdef int i, fi, npart, tnpart
+    cdef np.float64_t conv[6], left_edge[6]
+    dd = rh.data_source
+    cdef np.ndarray[np.int64_t, ndim=1] arri
+    cdef np.ndarray[np.float64_t, ndim=1] arr
+    block = int(str(filename).rsplit(".")[-1])
+
+    # Now we want to grab data from only a subset of the grids.
+    n = rh.block_ratio
+    grids = np.array_split(dd._grids, NUM_BLOCKS)[block]
+    tnpart = 0
+    for g in grids:
+        tnpart += dd._get_data_from_grid(g, "particle_index").size
+    p[0] = <particle *> malloc(sizeof(particle) * tnpart)
+    #print "Loading indices: size = ", tnpart
+    conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
+    conv[3] = conv[4] = conv[5] = 1e-5
+    left_edge[0] = rh.pf.domain_left_edge[0]
+    left_edge[1] = rh.pf.domain_left_edge[1]
+    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[3] = left_edge[4] = left_edge[5] = 0.0
+    pi = 0
+    for g in grids:
+        arri = dd._get_data_from_grid(g, "particle_index").astype("int64")
+        npart = arri.size
+        for i in range(npart):
+            p[0][i+pi].id = arri[i]
+        fi = 0
+        for field in ["particle_position_x", "particle_position_y",
+                      "particle_position_z",
+                      "particle_velocity_x", "particle_velocity_y",
+                      "particle_velocity_z"]:
+            arr = dd._get_data_from_grid(g, field).astype("float64")
+            for i in range(npart):
+                p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
+            fi += 1
+        pi += npart
+    num_p[0] = tnpart
+    print "TOTAL", block, pi, tnpart, len(grids)
+
+cdef class RockstarInterface:
+
+    cdef public object pf
+    cdef public object data_source
+    cdef int rank
+    cdef int size
+    cdef int block_ratio
+
+    def __cinit__(self, pf, data_source):
+        self.pf = pf
+        self.data_source = data_source
+
+    def setup_rockstar(self, char *server_address, char *server_port,
+                       np.float64_t particle_mass = -1.0,
+                       int parallel = False, int num_readers = 1,
+                       int num_writers = 1,
+                       int writing_port = -1, int block_ratio = 1):
+        global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
+        global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+        global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+        global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+        global rh
+        if parallel:
+            PARALLEL_IO = 1
+            PARALLEL_IO_SERVER_ADDRESS = server_address
+            PARALLEL_IO_SERVER_PORT = server_port
+            if writing_port > 0:
+                PARALLEL_IO_WRITER_PORT = writing_port
+        else:
+            PARALLEL_IO = 0
+            PARALLEL_IO_SERVER_ADDRESS = server_address
+            PARALLEL_IO_SERVER_PORT = server_port
+        FILENAME = "inline.<block>"
+        FILE_FORMAT = "GENERIC"
+        OUTPUT_FORMAT = "ASCII"
+        NUM_SNAPS = 1
+        NUM_READERS = num_readers
+        NUM_BLOCKS = num_readers * block_ratio
+        NUM_WRITERS = num_writers
+        self.block_ratio = block_ratio
+
+        h0 = self.pf.hubble_constant
+        Ol = self.pf.omega_lambda
+        Om = self.pf.omega_matter
+
+        if particle_mass < 0:
+            print "Assuming single-mass particle."
+            particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
+        PARTICLE_MASS = particle_mass
+        PERIODIC = 1
+        BOX_SIZE = (self.pf.domain_right_edge[0] -
+                    self.pf.domain_left_edge[0]) * self.pf['mpchcm']
+        setup_config()
+        rh = self
+        cdef LPG func = rh_read_particles
+        set_load_particles_generic(func)
+
+    def call_rockstar(self):
+        read_particles("generic")
+        rockstar(NULL, 0)
+        output_and_free_halos(0, 0, 0, NULL)
+
+    def start_server(self):
+        server()
+
+    def start_client(self):
+        client()


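The rh_read_particles reader above hands Rockstar positions shifted to the domain left edge and scaled by pf['mpchcm'] (comoving Mpc/h), and velocities scaled by 1e-5 (presumably cm/s to km/s). A plain-Python sketch of the same packing arithmetic for one grid's worth of particles (the helper name and arguments are illustrative):

    import numpy as np

    def pack_for_rockstar(pos, vel, left_edge, mpchcm):
        # Positions: shift to the domain origin, then scale to comoving Mpc/h,
        # mirroring conv[0:3] = pf["mpchcm"] above.
        pos_out = (np.asarray(pos) - left_edge) * mpchcm
        # Velocities: scale by 1e-5, mirroring conv[3:6] = 1e-5 above.
        vel_out = np.asarray(vel) * 1e-5
        return pos_out, vel_out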
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/rockstar/setup.py
--- /dev/null
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import setuptools
+import os, sys, os.path
+
+import os.path
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('rockstar',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
+    #config.make_svn_version_py()
+    rd = os.environ["ROCKSTAR_DIR"]
+    config.add_extension("rockstar_interface",
+                         "yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx",
+                         library_dirs=[rd],
+                         libraries=["rockstar"],
+                         include_dirs=[rd,
+                                       os.path.join(rd, "io"),
+                                       os.path.join(rd, "util")])
+    return config
+


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,15 +1,17 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_finding',parent_package,top_path)
+    config = Configuration('halo_finding', parent_package, top_path)
     config.add_subpackage("fof")
     config.add_subpackage("hop")
     config.add_subpackage("parallel_hop")
+    if "ROCKSTAR_DIR" in os.environ:
+        config.add_subpackage("rockstar")
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_mass_function/setup.py
--- a/yt/analysis_modules/halo_mass_function/setup.py
+++ b/yt/analysis_modules/halo_mass_function/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_mass_function',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_mass_function', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -295,6 +295,7 @@
             Maximum number of child halos each leaf can have.
         """
         self.halonum = halonum
+        self.max_children = max_children
         self.output_numbers = sorted(self.relationships, reverse=True)
         self.levels = {}
         trunk = self.output_numbers[0]
@@ -376,7 +377,7 @@
                 print "--> Most massive progenitor == Halo %d" % \
                       (br.progenitor)
                 for i,c in enumerate(br.children):
-                    if i > max_child: break
+                    if i > self.max_children: break
                     print "-->    Halo %8.8d :: fraction = %g" % (c[0], c[1])
 
     def write_dot(self, filename=None):


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -86,9 +86,28 @@
 "ChildHaloID3", "ChildHaloFrac3",
 "ChildHaloID4", "ChildHaloFrac4"]
 
+# Below we make the SQL command that creates the table "Halos" in the
+# database. This table is where all the data is stored.
+# Each column of data is named and its datatype is specified.
+# The GlobalHaloID is given the PRIMARY KEY property, which means that
+# the SQLite machinery assigns a consecutive and unique integer value
+# to that field automatically as each new entry is entered (that is,
+# if GlobalHaloID isn't specified already).
+create_db_line = "CREATE TABLE Halos ("
+for i, col in enumerate(columns):
+    if i == 0:
+        create_db_line += "%s %s PRIMARY KEY," % (col, column_types[col])
+    else:
+        create_db_line += " %s %s," % (col, column_types[col])
+# Clean off the trailing comma and add the closing parenthesis.
+create_db_line = create_db_line[:-1] + ");"
+
 NumNeighbors = 15
 NumDB = 5
 
+def minus_one():
+    return -1
+
 class DatabaseFunctions(object):
     # Common database functions so it doesn't have to be repeated.
     def _open_database(self):
@@ -109,7 +128,7 @@
 class MergerTree(DatabaseFunctions, ParallelAnalysisInterface):
     def __init__(self, restart_files=[], database='halos.db',
             halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
-            FOF_link_length=0.2, dm_only=False, refresh=False, sleep=1,
+            FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
         r"""Build a merger tree of halos over a time-ordered set of snapshots.
         This will run a halo finder to find the halos first if it hasn't already
@@ -140,12 +159,6 @@
         refresh : Boolean
             True forces the halo finder to run even if the halo data has been
             detected on disk. Default = False.
-        sleep : Float
-            Due to the nature of the SQLite database and network file systems,
-            it is crucial that all tasks see the database in the same state at
-            all times. This parameter specifies how long in seconds the merger
-            tree waits between checks to ensure the database is synched across
-            all tasks. Default = 1.
         index : Boolean
             SQLite databases can have added to them an index which greatly
             speeds up future queries of the database,
@@ -168,29 +181,32 @@
         self.FOF_link_length= FOF_link_length # For FOF
         self.dm_only = dm_only
         self.refresh = refresh
-        self.sleep = sleep # How long to wait between db sync checks.
-        if self.sleep <= 0.:
-            self.sleep = 5
+        self.index = index
+        self.zs = {}
         # MPI stuff
-        self.mine = self.comm.rank
-        if self.mine is None:
-            self.mine = 0
-        self.size = self.comm.size
-        if self.size is None:
-            self.size = 1
+        if self.comm.rank is None:
+            self.comm.rank = 0
+        if self.comm.size is None:
+            self.comm.size = 1
         # Get to work.
-        if self.refresh and self.mine == 0:
+        if self.refresh and self.comm.rank == 0:
             try:
                 os.unlink(self.database)
             except:
                 pass
-        self.comm.barrier()
-        self._open_create_database()
-        self._create_halo_table()
+        if self.comm.rank == 0:
+            self._open_create_database()
+            self._create_halo_table()
         self._run_halo_finder_add_to_db()
         # Find the h5 file names for all the halos.
         for snap in self.restart_files:
             self._build_h5_refs(snap)
+        # Find out how much work is already stored in the database.
+        if self.comm.rank == 0:
+            z_progress = self._find_progress()
+        else:
+            z_progress = None
+        z_progress = self.comm.mpi_bcast(z_progress)
         # Loop over the pairs of snapshots to locate likely neighbors, and
         # then use those likely neighbors to compute fractional contributions.
         last = None
@@ -199,14 +215,22 @@
         for snap, pair in enumerate(zip(self.restart_files[:-1], self.restart_files[1:])):
             if not self.with_halos[snap] or not self.with_halos[snap+1]:
                 continue
+            if self.zs[pair[0]] > z_progress:
+                continue
             self._find_likely_children(pair[0], pair[1])
             # last is the data for the parent dataset, which can be supplied
             # as the child from the previous round for all but the first loop.
             last = self._compute_child_fraction(pair[0], pair[1], last)
+            if self.comm.rank == 0:
+                mylog.info("Updating database with parent-child relationships.")
+                self._copy_and_update_db()
+                # This has to happen because we delete the old database above.
+                self._open_create_database()
         del last
-        # Now update the database with all the writes.
-        mylog.info("Updating database with parent-child relationships.")
-        self._copy_and_update_db()
+        if self.comm.rank == 0:
+            if self.index:
+                self._write_index()
+            self._close_database()
         self.comm.barrier()
         mylog.info("Done!")
         
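
A small sketch of the resume logic above, with made-up file names and redshifts:
snapshot pairs whose parent lies at a higher redshift than the last completed one
are skipped, so an interrupted merger tree can pick up where it left off.

    restart_files = ["snap_000", "snap_001", "snap_002"]
    zs = {"snap_000": 3.0, "snap_001": 2.0, "snap_002": 1.0}
    z_progress = 2.0  # everything at z > 2 was finished in a previous run

    for parent, child in zip(restart_files[:-1], restart_files[1:]):
        if zs[parent] > z_progress:
            continue  # already in the database
        print("linking %s -> %s" % (parent, child))
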
@@ -220,6 +244,7 @@
         for cycle, file in enumerate(self.restart_files):
             gc.collect()
             pf = load(file)
+            self.zs[file] = pf.current_redshift
             self.period = pf.domain_right_edge - pf.domain_left_edge
             # If the halos are already found, skip this data step, unless
             # refresh is True.
@@ -247,12 +272,17 @@
                 del halos
             # Now add halo data to the db if it isn't already there by
             # checking the first halo.
-            currt = pf.unique_identifier
-            line = "SELECT GlobalHaloID from Halos where SnapHaloID=0\
-            and SnapCurrentTimeIdentifier=%d;" % currt
-            self.cursor.execute(line)
-            result = self.cursor.fetchone()
-            if result != None:
+            continue_check = False
+            if self.comm.rank == 0:
+                currt = pf.unique_identifier
+                line = "SELECT GlobalHaloID from Halos where SnapHaloID=0\
+                and SnapCurrentTimeIdentifier=%d;" % currt
+                self.cursor.execute(line)
+                result = self.cursor.fetchone()
+                if result != None:
+                    continue_check = True
+            continue_check = self.comm.mpi_bcast(continue_check)
+            if continue_check:
                 continue
             red = pf.current_redshift
             # Read the halos off the disk using the Halo Profiler tools.
@@ -261,9 +291,10 @@
             if len(hp.all_halos) == 0:
                 mylog.info("Dataset %s has no halos." % file)
                 self.with_halos[cycle] = False
+                del hp
                 continue
             mylog.info("Entering halos into database for z=%f" % red)
-            if self.mine == 0:
+            if self.comm.rank == 0:
                 for ID,halo in enumerate(hp.all_halos):
                     numpart = int(halo['numpart'])
                     values = (None, currt, red, ID, halo['mass'], numpart,
@@ -284,134 +315,100 @@
     
     def _open_create_database(self):
         # open the database. This creates the database file on disk if it
-        # doesn't already exist. Open it first on root, and then on the others.
-        if self.mine == 0:
-            self.conn = sql.connect(self.database)
-        self.comm.barrier()
-        self._ensure_db_sync()
-        if self.mine != 0:
-            self.conn = sql.connect(self.database)
+        # doesn't already exist. Open it on root only.
+        self.conn = sql.connect(self.database)
         self.cursor = self.conn.cursor()
 
-    def _ensure_db_sync(self):
-        # If the database becomes out of sync for each task, ostensibly due to
-        # parallel file system funniness, things will go bad very quickly.
-        # Therefore, just to be very, very careful, we will ensure that the
-        # md5 hash of the file is identical across all tasks before proceeding.
-        self.comm.barrier()
-        for i in range(5):
-            try:
-                file = open(self.database)
-            except IOError:
-                # This is to give a little bit of time for the database creation
-                # to replicate across the file system.
-                time.sleep(self.sleep)
-                file = open(self.database)
-            hash = md5.md5(file.read()).hexdigest()
-            file.close()
-            ignore, hashes = self.comm.mpi_info_dict(hash)
-            hashes = set(hashes.values())
-            if len(hashes) == 1:
-                break
-            else:
-                # Wait a little bit for the file system to (hopefully) sync up.
-                time.sleep(self.sleep)
-        if len(hashes) == 1:
-            return
-        else:
-            mylog.error("The file system is not properly synchronizing the database.")
-            raise RunTimeError("Fatal error. Exiting.")
-
     def _create_halo_table(self):
-        if self.mine == 0:
-            # Handle the error if it already exists.
-            try:
-                # Create the table that will store the halo data.
-                line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
-                    SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
-                    HaloMass FLOAT,\
-                    NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
-                    CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
-                    MaxRad FLOAT,\
-                    ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
-                    ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
-                    ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
-                    ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
-                    ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
-                self.cursor.execute(line)
-                self.conn.commit()
-            except sql.OperationalError:
-                pass
-        self.comm.barrier()
+        # Handle the error if the table already exists by doing nothing.
+        try:
+            self.cursor.execute(create_db_line)
+            self.conn.commit()
+        except sql.OperationalError:
+            pass
     
     def _find_likely_children(self, parentfile, childfile):
         # For each halo in the parent list, identify likely children in the 
         # list of children.
-        
+
         # First, read in the locations of the child halos.
         child_pf = load(childfile)
         child_t = child_pf.unique_identifier
-        line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
-        Halos WHERE SnapCurrentTimeIdentifier = %d" % child_t
-        self.cursor.execute(line)
-        
-        mylog.info("Finding likely parents for z=%1.5f child halos." % \
-            child_pf.current_redshift)
-        
-        # Build the kdtree for the children by looping over the fetched rows.
-        # Normalize the points for use only within the kdtree.
-        child_points = []
-        for row in self.cursor:
-            child_points.append([row[1] / self.period[0],
-            row[2] / self.period[1],
-            row[3] / self.period[2]])
-        # Turn it into fortran.
-        child_points = na.array(child_points)
-        fKD.pos = na.asfortranarray(child_points.T)
-        fKD.qv = na.empty(3, dtype='float64')
-        fKD.dist = na.empty(NumNeighbors, dtype='float64')
-        fKD.tags = na.empty(NumNeighbors, dtype='int64')
-        fKD.nn = NumNeighbors
-        fKD.sort = True
-        fKD.rearrange = True
-        create_tree(0)
-
+        if self.comm.rank == 0:
+            line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
+            Halos WHERE SnapCurrentTimeIdentifier = %d" % child_t
+            self.cursor.execute(line)
+            
+            mylog.info("Finding likely parents for z=%1.5f child halos." % \
+                child_pf.current_redshift)
+            
+            # Build the kdtree for the children by looping over the fetched rows.
+            # Normalize the points for use only within the kdtree.
+            child_points = []
+            for row in self.cursor:
+                child_points.append([row[1] / self.period[0],
+                row[2] / self.period[1],
+                row[3] / self.period[2]])
+            # Turn it into fortran.
+            child_points = na.array(child_points)
+            fKD.pos = na.asfortranarray(child_points.T)
+            fKD.qv = na.empty(3, dtype='float64')
+            fKD.dist = na.empty(NumNeighbors, dtype='float64')
+            fKD.tags = na.empty(NumNeighbors, dtype='int64')
+            fKD.nn = NumNeighbors
+            fKD.sort = True
+            fKD.rearrange = True
+            create_tree(0)
+    
         # Find the parent points from the database.
         parent_pf = load(parentfile)
         parent_t = parent_pf.unique_identifier
-        line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
-        Halos WHERE SnapCurrentTimeIdentifier = %d" % parent_t
-        self.cursor.execute(line)
+        if self.comm.rank == 0:
+            line = "SELECT SnapHaloID, CenMassX, CenMassY, CenMassZ FROM \
+            Halos WHERE SnapCurrentTimeIdentifier = %d" % parent_t
+            self.cursor.execute(line)
+    
+            # Loop over the returned rows, and find the likely neighbors for the
+            # parents.
+            candidates = {}
+            for row in self.cursor:
+                # Normalize positions for use within the kdtree.
+                fKD.qv = na.array([row[1] / self.period[0],
+                row[2] / self.period[1],
+                row[3] / self.period[2]])
+                find_nn_nearest_neighbors()
+                NNtags = fKD.tags[:] - 1
+                nIDs = []
+                for n in NNtags:
+                    nIDs.append(n)
+                # We need to fill in fake halos if there aren't enough halos,
+                # which can happen at high redshifts.
+                while len(nIDs) < NumNeighbors:
+                    nIDs.append(-1)
+                candidates[row[0]] = nIDs
+            
+            del fKD.pos, fKD.tags, fKD.dist
+            free_tree(0) # Frees the kdtree object.
+        else:
+            candidates = None
 
-        # Loop over the returned rows, and find the likely neighbors for the
-        # parents.
-        candidates = {}
-        for row in self.cursor:
-            # Normalize positions for use within the kdtree.
-            fKD.qv = na.array([row[1] / self.period[0],
-            row[2] / self.period[1],
-            row[3] / self.period[2]])
-            find_nn_nearest_neighbors()
-            NNtags = fKD.tags[:] - 1
-            nIDs = []
-            for n in NNtags:
-                nIDs.append(n)
-            # We need to fill in fake halos if there aren't enough halos,
-            # which can happen at high redshifts.
-            while len(nIDs) < NumNeighbors:
-                nIDs.append(-1)
-            candidates[row[0]] = nIDs
-        
-        del fKD.pos, fKD.tags, fKD.dist
-        free_tree(0) # Frees the kdtree object.
-        
+        # Sync across tasks.
+        candidates = self.comm.mpi_bcast(candidates)
         self.candidates = candidates
         
         # This stores the masses contributed to each child candidate.
-        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors, dtype='float64')
+        # The +1 is an extra element in the array that collects garbage
+        # values. This allows us to eliminate a try/except later.
+        # This extra array element will be cut off eventually.
+        self.child_mass_arr = na.zeros(len(candidates)*NumNeighbors + 1,
+            dtype='float64')
         # Records where to put the entries in the above array.
         self.child_mass_loc = defaultdict(dict)
+        # Fill it out with sub-nested default dicts that point to the
+        # garbage slot, and then fill it with correct values for (possibly)
+        # related parent/child halo pairs.
         for i,halo in enumerate(sorted(candidates)):
+            self.child_mass_loc[halo] = defaultdict(minus_one)
             for j, child in enumerate(candidates[halo]):
                 self.child_mass_loc[halo][child] = i*NumNeighbors + j
 
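
The garbage-slot bookkeeping above can be illustrated with a couple of made-up
candidate lists: any parent/child pair that was never flagged as a likely relation
resolves to index -1, i.e. the trailing element of child_mass_arr, which is
trimmed off before the masses are used.

    from collections import defaultdict
    import numpy as np

    def minus_one():
        return -1

    NumNeighbors = 5
    candidates = {0: [3, 7, -1, -1, -1], 1: [2, 5, 9, -1, -1]}

    child_mass_arr = np.zeros(len(candidates) * NumNeighbors + 1, dtype='float64')
    child_mass_loc = defaultdict(dict)
    for i, halo in enumerate(sorted(candidates)):
        child_mass_loc[halo] = defaultdict(minus_one)
        for j, child in enumerate(candidates[halo]):
            child_mass_loc[halo][child] = i * NumNeighbors + j

    # A pair that was never identified as a likely relation lands in the last slot,
    child_mass_arr[child_mass_loc[0][42]] += 1.0
    # which is cut off before the fractional contributions are computed.
    child_mass_arr = child_mass_arr[:-1]
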
@@ -457,7 +454,7 @@
             parent_masses = na.array([], dtype='float64')
             parent_halos = na.array([], dtype='int32')
             for i,pname in enumerate(parent_names):
-                if i>=self.mine and i%self.size==self.mine:
+                if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                     h5fp = h5py.File(pname)
                     for group in h5fp:
                         gID = int(group[4:])
@@ -489,7 +486,7 @@
         child_masses = na.array([], dtype='float64')
         child_halos = na.array([], dtype='int32')
         for i,cname in enumerate(child_names):
-            if i>=self.mine and i%self.size==self.mine:
+            if i>=self.comm.rank and i%self.comm.size==self.comm.rank:
                 h5fp = h5py.File(cname)
                 for group in h5fp:
                     gID = int(group[4:])
@@ -510,39 +507,9 @@
         child_send = na.ones(child_IDs.size, dtype='bool')
         del sort
         
-        # Parent IDs on the left, child IDs on the right. We skip down both
-        # columns matching IDs. If they are out of synch, the index(es) is/are
-        # advanced until they match up again.
-        left = 0
-        right = 0
-        while left < parent_IDs.size and right < child_IDs.size:
-            if parent_IDs[left] == child_IDs[right]:
-                # They match up, add this relationship.
-                try:
-                    loc = self.child_mass_loc[parent_halos[left]][child_halos[right]]
-                except KeyError:
-                    # This happens when a child halo contains a particle from
-                    # a parent halo, but the child is not identified as a 
-                    # candidate child halo. So we do nothing and move on with
-                    # our lives.
-                    left += 1
-                    right += 1
-                    continue
-                self.child_mass_arr[loc] += parent_masses[left]
-                # Mark this pair so we don't send them later.
-                parent_send[left] = False
-                child_send[right] = False
-                left += 1
-                right += 1
-                continue
-            if parent_IDs[left] < child_IDs[right]:
-                # The left is too small, so we need to increase it.
-                left += 1
-                continue
-            if parent_IDs[left] > child_IDs[right]:
-                # Right too small.
-                right += 1
-                continue
+        # Match particles in halos.
+        self._match(parent_IDs, child_IDs, parent_halos, child_halos,
+            parent_masses, parent_send, child_send)
 
         # Now we send all the un-matched particles to the root task for one more
         # pass. This depends on the assumption that most of the particles do
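
The _match helper introduced later in this changeset replaces the explicit
two-pointer walk above with numpy set operations; roughly, the matching step looks
like this (particle IDs below are made up):

    import numpy as np

    parent_IDs = np.array([11, 42, 57, 90])
    child_IDs = np.array([8, 42, 90, 101])

    parent_in_child = np.in1d(parent_IDs, child_IDs, assume_unique=True)
    child_in_parent = np.in1d(child_IDs, parent_IDs, assume_unique=True)
    # parent_in_child -> [False, True, False, True]
    # child_in_parent -> [False, True, True, False]
    # Particles 42 and 90 are shared between the parent and child halos.
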
@@ -576,61 +543,42 @@
         child_halos_tosend = child_halos_tosend[Csort]
         del Psort, Csort
 
-        # Now Again.
-        if self.mine == 0:
-            matched = 0
-            left = 0
-            right = 0
-            while left < parent_IDs_tosend.size and right < child_IDs_tosend.size:
-                if parent_IDs_tosend[left] == child_IDs_tosend[right]:
-                    # They match up, add this relationship.
-                    try:
-                        loc = self.child_mass_loc[parent_halos_tosend[left]][child_halos_tosend[right]]
-                    except KeyError:
-                        # This happens when a child halo contains a particle from
-                        # a parent halo, but the child is not identified as a 
-                        # candidate child halo. So we do nothing and move on with
-                        # our lives.
-                        left += 1
-                        right += 1
-                        continue
-                    self.child_mass_arr[loc] += parent_masses_tosend[left]
-                    matched += 1
-                    left += 1
-                    right += 1
-                    continue
-                if parent_IDs_tosend[left] < child_IDs_tosend[right]:
-                    # The left is too small, so we need to increase it.
-                    left += 1
-                    continue
-                if parent_IDs_tosend[left] > child_IDs_tosend[right]:
-                    # Right too small.
-                    right += 1
-                    continue
-            mylog.info("Clean-up round matched %d of %d parents and %d children." % \
-            (matched, parent_IDs_tosend.size, child_IDs_tosend.size))
+        # Now again, but only on the root task.
+        if self.comm.rank == 0:
+            self._match(parent_IDs_tosend, child_IDs_tosend,
+            parent_halos_tosend, child_halos_tosend, parent_masses_tosend)
 
         # Now we sum up the contributions globally.
         self.child_mass_arr = self.comm.mpi_allreduce(self.child_mass_arr)
         
-        # Turn these Msol masses into percentages of the parent.
-        line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
-        ORDER BY SnapHaloID ASC;" % parent_currt
-        self.cursor.execute(line)
-        mark = 0
-        result = self.cursor.fetchone()
-        while result:
-            mass = result[0]
-            self.child_mass_arr[mark:mark+NumNeighbors] /= mass
-            mark += NumNeighbors
+        # Trim off the garbage collection.
+        self.child_mass_arr = self.child_mass_arr[:-1]
+        
+        if self.comm.rank == 0:
+            # Turn these Msol masses into percentages of the parent.
+            line = "SELECT HaloMass FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
+            ORDER BY SnapHaloID ASC;" % parent_currt
+            self.cursor.execute(line)
+            mark = 0
             result = self.cursor.fetchone()
+            while result:
+                mass = result[0]
+                self.child_mass_arr[mark:mark+NumNeighbors] /= mass
+                mark += NumNeighbors
+                result = self.cursor.fetchone()
+            
+            # Get the global ID for the SnapHaloID=0 from the child, this will
+            # be used to prevent unnecessary SQL reads.
+            line = "SELECT GlobalHaloID FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
+            AND SnapHaloID=0;" % child_currt
+            self.cursor.execute(line)
+            baseChildID = self.cursor.fetchone()[0]
+        else:
+            baseChildID = None
         
-        # Get the global ID for the SnapHaloID=0 from the child, this will
-        # be used to prevent unnecessary SQL reads.
-        line = "SELECT GlobalHaloID FROM Halos WHERE SnapCurrentTimeIdentifier=%d \
-        AND SnapHaloID=0;" % child_currt
-        self.cursor.execute(line)
-        baseChildID = self.cursor.fetchone()[0]
+        # Sync up data on all tasks.
+        self.child_mass_arr = self.comm.mpi_bcast(self.child_mass_arr)
+        baseChildID = self.comm.mpi_bcast(baseChildID)
         
         # Now we prepare a big list of writes to put in the database.
         for i,parent_halo in enumerate(sorted(self.candidates)):
@@ -663,76 +611,117 @@
         del parent_IDs, parent_masses, parent_halos
         del parent_IDs_tosend, parent_masses_tosend
         del parent_halos_tosend, child_IDs_tosend, child_halos_tosend
+        gc.collect()
         
         return (child_IDs, child_masses, child_halos)
 
+    def _match(self, parent_IDs, child_IDs, parent_halos, child_halos,
+            parent_masses, parent_send = None, child_send = None):
+        # Pick out IDs that are in both arrays.
+        parent_in_child = na.in1d(parent_IDs, child_IDs, assume_unique = True)
+        child_in_parent = na.in1d(child_IDs, parent_IDs, assume_unique = True)
+        # Pare down the arrays to just matched particle IDs.
+        parent_halos_cut = parent_halos[parent_in_child]
+        child_halos_cut = child_halos[child_in_parent]
+        parent_masses_cut = parent_masses[parent_in_child]
+        # Mark the IDs that have matches so they're not sent later.
+        if parent_send is not None:
+            parent_send[parent_in_child] = False
+            child_send[child_in_parent] = False
+        # For matching pairs of particles, add the contribution of the mass.
+        # Occasionally, there are matches of particle IDs where the parent
+        # and child halos have not been identified as likely relations,
+        # and in that case loc will be returned as -1, which is the 'garbage'
+        # position in child_mass_arr. This will be trimmed off later.
+        for i,pair in enumerate(zip(parent_halos_cut, child_halos_cut)):
+            loc = self.child_mass_loc[pair[0]][pair[1]]
+            self.child_mass_arr[loc] += parent_masses_cut[i]
+        if parent_send is None:
+            mylog.info("Clean-up round matched %d of %d parents and %d children." % \
+            (parent_in_child.sum(), parent_IDs.size, child_IDs.size))
+
     def _copy_and_update_db(self):
         """
         Because doing an UPDATE of a SQLite database is really slow, what we'll
         do here is basically read in lines from the database, and then insert
         the parent-child relationships, writing to a new DB.
         """
+        # All of this happens only on the root task!
         temp_name = self.database + '-tmp'
-        if self.mine == 0:
-            to_write = []
-            # Open the temporary database.
+        to_write = []
+        # Open the temporary database.
+        try:
+            os.remove(temp_name)
+        except OSError:
+            pass
+        temp_conn = sql.connect(temp_name)
+        temp_cursor = temp_conn.cursor()
+        line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
+                SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
+                HaloMass FLOAT,\
+                NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
+                CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
+                MaxRad FLOAT,\
+                ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
+                ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
+                ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
+                ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
+                ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
+        temp_cursor.execute(line)
+        temp_conn.commit()
+        # Get all the data!
+        self.cursor.execute("SELECT * FROM Halos;")
+        results = self.cursor.fetchone()
+        while results:
+            results = list(results)
+            currt = results[1]
+            hid = results[3]
+            # If for some reason this halo doesn't have relationships,
+            # we'll just keep the old results the same.
             try:
-                os.remove(temp_name)
-            except OSError:
-                pass
-            temp_conn = sql.connect(temp_name)
-            temp_cursor = temp_conn.cursor()
-            line = "CREATE TABLE Halos (GlobalHaloID INTEGER PRIMARY KEY,\
-                    SnapCurrentTimeIdentifier INTEGER, SnapZ FLOAT, SnapHaloID INTEGER, \
-                    HaloMass FLOAT,\
-                    NumPart INTEGER, CenMassX FLOAT, CenMassY FLOAT,\
-                    CenMassZ FLOAT, BulkVelX FLOAT, BulkVelY FLOAT, BulkVelZ FLOAT,\
-                    MaxRad FLOAT,\
-                    ChildHaloID0 INTEGER, ChildHaloFrac0 FLOAT, \
-                    ChildHaloID1 INTEGER, ChildHaloFrac1 FLOAT, \
-                    ChildHaloID2 INTEGER, ChildHaloFrac2 FLOAT, \
-                    ChildHaloID3 INTEGER, ChildHaloFrac3 FLOAT, \
-                    ChildHaloID4 INTEGER, ChildHaloFrac4 FLOAT);"
-            temp_cursor.execute(line)
-            temp_conn.commit()
-            # Get all the data!
-            self.cursor.execute("SELECT * FROM Halos;")
+                lookup = self.write_values_dict[currt][hid]
+                new = tuple(results[:-10] + lookup)
+            except KeyError:
+                new = tuple(results)
+            to_write.append(new)
             results = self.cursor.fetchone()
-            while results:
-                results = list(results)
-                currt = results[1]
-                hid = results[3]
-                # If for some reason this halo doesn't have relationships,
-                # we'll just keep the old results the same.
-                try:
-                    lookup = self.write_values_dict[currt][hid]
-                    new = tuple(results[:-10] + lookup)
-                except KeyError:
-                    new = tuple(results)
-                to_write.append(new)
-                results = self.cursor.fetchone()
-            # Now write to the temp database.
-            # 23 question marks for 23 data columns.
-            line = ''
-            for i in range(23):
-                line += '?,'
-            # Pull off the last comma.
-            line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
-            for insert in to_write:
-                temp_cursor.execute(line, insert)
-            temp_conn.commit()
-            mylog.info("Creating database index.")
-            line = "CREATE INDEX IF NOT EXISTS HalosIndex ON Halos ("
-            for name in columns:
-                line += name +","
-            line = line[:-1] + ");"
-            temp_cursor.execute(line)
-            temp_cursor.close()
-            temp_conn.close()
+        # Now write to the temp database.
+        # 23 question marks for 23 data columns.
+        line = ''
+        for i in range(23):
+            line += '?,'
+        # Pull off the last comma.
+        line = 'INSERT into Halos VALUES (' + line[:-1] + ')'
+        for insert in to_write:
+            temp_cursor.execute(line, insert)
+        temp_conn.commit()
+        temp_cursor.close()
+        temp_conn.close()
         self._close_database()
-        self.comm.barrier()
-        if self.mine == 0:
-            os.rename(temp_name, self.database)
+        os.rename(temp_name, self.database)
+
+    def _write_index(self):
+        mylog.info("Creating database index.")
+        line = "CREATE INDEX IF NOT EXISTS HalosIndex ON Halos ("
+        for name in columns:
+            line += name +","
+        line = line[:-1] + ");"
+        self.cursor.execute(line)
+
+    def _find_progress(self):
+        # This queries the database to see how far along work has already come
+        # to identify parent->child relationships.
+        line = """SELECT ChildHaloID0, SnapZ from halos WHERE SnapHaloID = 0
+        ORDER BY SnapZ DESC;"""
+        self.cursor.execute(line)
+        results = self.cursor.fetchone()
+        while results:
+            results = list(results)
+            if results[0] == -1:
+                # We've hit a dump that does not have relationships. Save this.
+                return results[1] # the SnapZ.
+            results = self.cursor.fetchone()
+        return 0.
 
 class MergerTreeConnect(DatabaseFunctions):
     def __init__(self, database='halos.db'):


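The rewrite-instead-of-UPDATE approach described in _copy_and_update_db boils down
to the pattern below; the table and columns are simplified stand-ins for the real
Halos schema, and the "updated" value inserted into the new file is a placeholder.

    import os
    import sqlite3 as sql

    database = "halos.db"
    temp_name = database + "-tmp"

    # Stand-in for a database left over from an earlier run.
    conn = sql.connect(database)
    conn.execute("CREATE TABLE IF NOT EXISTS Halos "
                 "(GlobalHaloID INTEGER PRIMARY KEY, ChildHaloID0 INTEGER)")
    conn.execute("INSERT OR REPLACE INTO Halos VALUES (0, -1)")
    conn.commit()

    # Copy every row into a fresh temporary database, inserting the new
    # parent-child information as we go, rather than UPDATE-ing in place.
    if os.path.exists(temp_name):
        os.remove(temp_name)
    new = sql.connect(temp_name)
    new.execute("CREATE TABLE Halos "
                "(GlobalHaloID INTEGER PRIMARY KEY, ChildHaloID0 INTEGER)")
    for gid, child in conn.execute("SELECT GlobalHaloID, ChildHaloID0 FROM Halos"):
        new.execute("INSERT INTO Halos VALUES (?, ?)", (gid, 5))  # 5 is a placeholder
    new.commit()
    conn.close()
    new.close()

    # Swap the rewritten database into place.
    os.rename(temp_name, database)
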
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_merger_tree/setup.py
--- a/yt/analysis_modules/halo_merger_tree/setup.py
+++ b/yt/analysis_modules/halo_merger_tree/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_merger_tree',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_merger_tree', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/halo_profiler/setup.py
--- a/yt/analysis_modules/halo_profiler/setup.py
+++ b/yt/analysis_modules/halo_profiler/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('halo_profiler',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('halo_profiler', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ b/yt/analysis_modules/hierarchy_subset/api.py
@@ -33,4 +33,3 @@
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \
     ExtractedParameterFile
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('hierarchy_subset',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('hierarchy_subset', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/level_sets/setup.py
--- a/yt/analysis_modules/level_sets/setup.py
+++ b/yt/analysis_modules/level_sets/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('level_sets',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('level_sets', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/light_cone/__init__.py
--- a/yt/analysis_modules/light_cone/__init__.py
+++ b/yt/analysis_modules/light_cone/__init__.py
@@ -22,4 +22,3 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/light_cone/setup.py
--- a/yt/analysis_modules/light_cone/setup.py
+++ b/yt/analysis_modules/light_cone/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_cone',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('light_cone', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/light_ray/setup.py
--- a/yt/analysis_modules/light_ray/setup.py
+++ b/yt/analysis_modules/light_ray/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('light_ray',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('light_ray', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/radial_column_density/api.py
--- a/yt/analysis_modules/radial_column_density/api.py
+++ b/yt/analysis_modules/radial_column_density/api.py
@@ -25,4 +25,3 @@
 """
 
 from .radial_column_density import RadialColumnDensity
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('analysis_modules',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('analysis_modules', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_subpackage("absorption_spectrum")
     config.add_subpackage("coordinate_transformation")


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/simulation_handler/setup.py
--- a/yt/analysis_modules/simulation_handler/setup.py
+++ b/yt/analysis_modules/simulation_handler/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('simulation_handler',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('simulation_handler', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/spectral_integrator/setup.py
--- a/yt/analysis_modules/spectral_integrator/setup.py
+++ b/yt/analysis_modules/spectral_integrator/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('spectral_integrator',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('spectral_integrator', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/star_analysis/api.py
--- a/yt/analysis_modules/star_analysis/api.py
+++ b/yt/analysis_modules/star_analysis/api.py
@@ -32,4 +32,3 @@
     StarFormationRate, \
     SpectrumBuilder, \
     Zsun
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/star_analysis/setup.py
--- a/yt/analysis_modules/star_analysis/setup.py
+++ b/yt/analysis_modules/star_analysis/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('star_analysis',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('star_analysis', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -66,8 +66,8 @@
         """
         self._pf = pf
         self._data_source = data_source
-        self.star_mass = star_mass
-        self.star_creation_time = star_creation_time
+        self.star_mass = na.array(star_mass)
+        self.star_creation_time = na.array(star_creation_time)
         self.volume = volume
         self.bin_count = bins
         # Check to make sure we have the right set of informations.


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -345,21 +345,21 @@
 
 def generate_levels_octree(pf, fields):
     fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(fields)
+    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
     o_length = na.sum(levels_finest.values())
     r_length = na.sum(levels_all.values())
     output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int32') - 1 # init to -1
+    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
     corners = na.zeros((r_length, 3), dtype='float64')
     position = na.add.accumulate(
                 na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int32'))
+                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
     pp = position.copy()
     amr_utils.RecurseOctreeByLevels(0, 0, 0,
                ogl[0].dimensions[0],
                ogl[0].dimensions[1],
                ogl[0].dimensions[2],
-               position.astype('int32'), 1,
+               position.astype('int64'), 1,
                output, genealogy, corners, ogl)
     return output, genealogy, levels_all, levels_finest, pp, corners
 


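The switch to int64 here guards against silent 32-bit overflow once the
accumulated cell counts grow large; for example:

    import numpy as np

    counts = np.array([2000000000, 2000000000])
    np.add.accumulate(counts.astype('int32'))   # second entry wraps around
    np.add.accumulate(counts, dtype='int64')    # array([2000000000, 4000000000])
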
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/two_point_functions/setup.py
--- a/yt/analysis_modules/two_point_functions/setup.py
+++ b/yt/analysis_modules/two_point_functions/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('two_point_functions',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('two_point_functions', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -23,6 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
 from yt.mods import *
 #from yt.utilities.math_utils import *
 from yt.utilities.performance_counters import yt_counters, time_function
@@ -403,7 +404,7 @@
             status = 0
         # Broadcast the status from root - we stop only if root thinks we should
         # stop.
-        status = self.comm.mpi_bcast_pickled(status)
+        status = self.comm.mpi_bcast(status)
         if status == 0: return True
         if self.comm_cycle_count < status:
             return True


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/astro_objects/api.py
--- a/yt/astro_objects/api.py
+++ b/yt/astro_objects/api.py
@@ -26,9 +26,9 @@
 
 from .astrophysical_object import \
     AstrophysicalObject, identification_method, correlation_method
-    
+
 from .simulation_volume import \
     SimulationVolume
-    
+
 from .clumped_region import \
     ClumpedRegion


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/astro_objects/setup.py
--- a/yt/astro_objects/setup.py
+++ b/yt/astro_objects/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('astro_objects',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('astro_objects', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,6 +52,8 @@
     pasteboard_repo = '',
     test_storage_dir = '/does/not/exist',
     enzo_db = '',
+    hub_url = 'https://data.yt-project.org/upload',
+    hub_api_key = '',
     )
 # Here is the upgrade.  We're actually going to parse the file in its entirety
 # here.  Then, if it has any of the Forbidden Sections, it will be rewritten


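The two new options live in the [yt] section of the yt configuration file
(typically ~/.yt/config); the key below is, of course, a placeholder.

    [yt]
    hub_url = https://data.yt-project.org/upload
    hub_api_key = 0123456789abcdef
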
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -86,6 +86,12 @@
     candidates = []
     args = [os.path.expanduser(arg) if isinstance(arg, types.StringTypes)
             else arg for arg in args]
+    valid_file = [os.path.isfile(arg) if isinstance(arg, types.StringTypes) 
+            else False for arg in args]
+    if not any(valid_file):
+        mylog.error("None of the arguments provided to load() is a valid file")
+        mylog.error("Please check that you have used a correct path")
+        return None
     for n, c in output_type_registry.items():
         if n is None: continue
         if c._is_valid(*args, **kwargs): candidates.append(n)


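With this check, a nonexistent path now produces a logged error and a None return
value instead of a confusing failure further down; calling code can guard on it
(the path below is made up).

    from yt.mods import load

    pf = load("/no/such/path/DD0000/DD0000")
    if pf is None:
        raise SystemExit("could not load the dataset; check the path")
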
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -82,3 +82,6 @@
     ValidateGridType, \
     add_field, \
     derived_field
+
+from particle_trajectories import \
+    ParticleTrajectoryCollection


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -54,7 +54,7 @@
 from yt.utilities.parameter_file_storage import \
     ParameterFileStore
 from yt.utilities.minimal_representation import \
-    MinimalProjectionData
+    MinimalProjectionData, MinimalSliceData
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -849,7 +849,7 @@
         for field in temp_data.keys():
             self[field] = temp_data[field]
 
-    def to_frb(self, width, resolution, center = None):
+    def to_frb(self, width, resolution, center=None, height=None):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -864,6 +864,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
         center : array-like of floats, optional
@@ -890,13 +892,18 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import FixedResolutionBuffer
         xax = x_dict[self.axis]
         yax = y_dict[self.axis]
-        bounds = (center[xax] - width/2.0, center[xax] + width/2.0,
-                  center[yax] - width/2.0, center[yax] + width/2.0)
+        bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
+                  center[yax] - height*0.5, center[yax] + height*0.5)
         frb = FixedResolutionBuffer(self, bounds, resolution)
         return frb
 
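
A hypothetical use of the new height argument (dataset and field names are made
up); when height is omitted the buffer stays square with side width, as before.

    from yt.mods import load

    pf = load("galaxy0030/galaxy0030")
    sl = pf.h.slice(2, 0.5, "Density")
    # 400 kpc wide, 100 kpc tall, sampled onto 1024 x 256 pixels.
    frb = sl.to_frb((400.0, 'kpc'), (1024, 256), height=(100.0, 'kpc'))
    image = frb["Density"]
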
@@ -1140,6 +1147,10 @@
     __quantities = None
     quantities = property(__get_quantities)
 
+    @property
+    def _mrep(self):
+        return MinimalSliceData(self)
+
 class AMRCuttingPlaneBase(AMR2DData):
     _plane = None
     _top_node = "/CuttingPlanes"
@@ -1307,7 +1318,7 @@
         return "%s/c%s_L%s" % \
             (self._top_node, cen_name, L_name)
 
-    def to_frb(self, width, resolution):
+    def to_frb(self, width, resolution, height=None):
         r"""This function returns an ObliqueFixedResolutionBuffer generated
         from this object.
 
@@ -1325,6 +1336,8 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
+        height : height specifier
+            This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
 
@@ -1346,10 +1359,15 @@
         if iterable(width):
             w, u = width
             width = w/self.pf[u]
+        if height is None:
+            height = width
+        elif iterable(height):
+            h, u = height
+            height = h/self.pf[u]
         if not iterable(resolution):
             resolution = (resolution, resolution)
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        bounds = (-width/2.0, width/2.0, -width/2.0, width/2.0)
+        bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
         frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
         return frb
 
@@ -1564,7 +1582,8 @@
     def __init__(self, axis, field, weight_field = None,
                  max_level = None, center = None, pf = None,
                  source=None, node_name = None, field_cuts = None,
-                 preload_style='level', serialize=True,**kwargs):
+                 preload_style='level', serialize=True,
+                 style = "integrate", **kwargs):
         """
         This is a data object corresponding to a line integral through the
         simulation domain.
@@ -1628,6 +1647,13 @@
         >>> print qproj["Density"]
         """
         AMR2DData.__init__(self, axis, field, pf, node_name = None, **kwargs)
+        self.proj_style = style
+        if style == "mip":
+            self.func = na.max
+        elif style == "integrate":
+            self.func = na.sum # for the future
+        else:
+            raise NotImplementedError(style)
         self.weight_field = weight_field
         self._field_cuts = field_cuts
         self.serialize = serialize
@@ -1635,7 +1661,6 @@
         if center is not None: self.set_field_parameter('center',center)
         self._node_name = node_name
         self._initialize_source(source)
-        self.func = na.sum # for the future
         self._grids = self.source._grids
         if max_level == None:
             max_level = self.hierarchy.max_level
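
A hedged usage sketch of the new style keyword, assuming the quad-tree projection
is reachable as pf.h.proj (dataset and field names are made up); "integrate" keeps
the previous summing behavior, while "mip" takes the maximum value along the line
of sight.

    from yt.mods import load

    pf = load("galaxy0030/galaxy0030")
    proj = pf.h.proj(0, "Density", style="mip")
    frb = proj.to_frb((1.0, 'unitary'), 512)
    image = frb["Density"]
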
@@ -1678,7 +1703,8 @@
     def _get_tree(self, nvals):
         xd = self.pf.domain_dimensions[x_dict[self.axis]]
         yd = self.pf.domain_dimensions[y_dict[self.axis]]
-        return QuadTree(na.array([xd,yd], dtype='int64'), nvals)
+        return QuadTree(na.array([xd,yd], dtype='int64'), nvals,
+                        style = self.proj_style)
 
     def _get_dls(self, grid, fields):
         # Place holder for a time when maybe we will not be doing just
@@ -1689,7 +1715,12 @@
             if field is None: continue
             dls.append(just_one(grid['d%s' % axis_names[self.axis]]))
             convs.append(self.pf.units[self.pf.field_info[field].projection_conversion])
-        return na.array(dls), na.array(convs)
+        dls = na.array(dls)
+        convs = na.array(convs)
+        if self.proj_style == "mip":
+            dls[:] = 1.0
+            convs[:] = 1.0
+        return dls, convs
 
     def get_data(self, fields = None):
         if fields is None: fields = ensure_list(self.fields)[:]
@@ -1723,7 +1754,13 @@
             mylog.debug("End of projecting level level %s, memory usage %0.3e", 
                         level, get_memory_usage()/1024.)
         # Note that this will briefly double RAM usage
-        tree = self.comm.merge_quadtree_buffers(tree)
+        if self.proj_style == "mip":
+            merge_style = -1
+        elif self.proj_style == "integrate":
+            merge_style = 1
+        else:
+            raise NotImplementedError
+        tree = self.comm.merge_quadtree_buffers(tree, merge_style=merge_style)
         coord_data, field_data, weight_data, dxs = [], [], [], []
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
@@ -2413,9 +2450,6 @@
         for field in fields_to_get:
             if self.field_data.has_key(field):
                 continue
-            if field not in self.hierarchy.field_list and not in_grids:
-                if self._generate_field(field):
-                    continue # True means we already assigned it
             # There are a lot of 'ands' here, but I think they are all
             # necessary.
             if force_particle_read == False and \
@@ -2426,6 +2460,10 @@
                 self.particles.get_data(field)
                 if field not in self.field_data:
                     if self._generate_field(field): continue
+                continue
+            if field not in self.hierarchy.field_list and not in_grids:
+                if self._generate_field(field):
+                    continue # True means we already assigned it
             mylog.info("Getting field %s from %s", field, len(self._grids))
             self[field] = na.concatenate(
                 [self._get_data_from_grid(grid, field)
@@ -2612,7 +2650,7 @@
     def _extract_isocontours_from_grid(self, grid, field, value,
                                        sample_values = None):
         mask = self._get_cut_mask(grid) * grid.child_mask
-        vals = grid.get_vertex_centered_data(field)
+        vals = grid.get_vertex_centered_data(field, no_ghost = False)
         if sample_values is not None:
             svals = grid.get_vertex_centered_data(sample_values)
         else:
@@ -3483,16 +3521,62 @@
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
+        self.global_endindex = None
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
         if self._grids is not None: return
-        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
-                 / self.pf.domain_dimensions).max()
-        AMRCoveringGridBase._get_list_of_grids(self, buffer)
-        # We reverse the order to ensure that coarse grids are first
-        self._grids = self._grids[::-1]
+        # Check for ill-behaved AMR schemes (Enzo) where we may have
+        # root-tile-boundary issues.  This is specific to the root tiles not
+        # allowing grids to cross them and also allowing > 1 level of
+        # difference between neighboring areas.
+        nz = 0
+        buf = 0.0
+        self.min_level = 0
+        dl = ((self.global_startindex.astype("float64") + 1)
+           / (self.pf.refine_by**self.level))
+        dr = ((self.global_startindex.astype("float64")
+              + self.ActiveDimensions - 1)
+           / (self.pf.refine_by**self.level))
+        if na.any(dl == na.rint(dl)) or na.any(dr == na.rint(dr)):
+            nz = 2 * self.pf.refine_by**self.level
+            buf = self._base_dx
+        if nz <= self.pf.refine_by**3: # delta level of 3
+            last_buf = [None,None,None]
+            count = 0
+            # Repeat until no more grids are covered (up to a delta level of 3)
+            while na.any(buf != last_buf) or count == 3:
+                cg = self.pf.h.covering_grid(self.level,
+                     self.left_edge - buf, self.ActiveDimensions + nz)
+                cg._use_pbar = False
+                count = cg.ActiveDimensions.prod()
+                for g in cg._grids:
+                    count -= cg._get_data_from_grid(g, [])
+                    if count <= 0:
+                        self.min_level = g.Level
+                        break
+                last_buf = buf
+                # Increase box by 2 cell widths at the min covering level
+                buf = 2*self._base_dx / self.pf.refine_by**self.min_level
+                nz += 4 * self.pf.refine_by**(self.level-self.min_level)
+                count += 1
+        else:
+            nz = buf = 0
+            self.min_level = 0
+        # This should not cost substantial additional time.
+        BLE = self.left_edge - buf
+        BRE = self.right_edge + buf
+        if na.any(BLE < self.pf.domain_left_edge) or \
+           na.any(BRE > self.pf.domain_right_edge):
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
+                            BLE, BRE, self.level, self.min_level)
+        else:
+            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
+                BLE, BRE, self.level,
+                min(self.level, self.min_level))
+        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)]
 
     def get_data(self, field=None):
         self._get_list_of_grids()
@@ -3508,11 +3592,11 @@
         # We jump-start our task here
         mylog.debug("Getting fields %s from %s possible grids",
                    fields_to_get, len(self._grids))
-        self._update_level_state(0, fields_to_get)
+        self._update_level_state(self.min_level, fields_to_get, initialize=True)
         if self._use_pbar: pbar = \
                 get_pbar('Searching grids for values ', len(self._grids))
         # The grids are assumed to be pre-sorted
-        last_level = 0
+        last_level = self.min_level
         for gi, grid in enumerate(self._grids):
             if self._use_pbar: pbar.update(gi)
             if grid.Level > last_level and grid.Level <= self.level:
@@ -3530,27 +3614,31 @@
                     raise KeyError(n_bad)
         if self._use_pbar: pbar.finish()
 
-    def _update_level_state(self, level, fields = None):
+    def _update_level_state(self, level, fields = None, initialize=False):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
         self.field_data['cdz'] = dx[2]
         LL = self.left_edge - self.pf.domain_left_edge
+        RL = self.right_edge - self.pf.domain_left_edge
         self._old_global_startindex = self.global_startindex
-        self.global_startindex = na.rint(LL / dx).astype('int64') - 1
+        self._old_global_endindex = self.global_endindex
+        # We use one grid cell at LEAST, plus one buffer on all sides
+        self.global_startindex = na.floor(LL / dx).astype('int64') - 1
+        self.global_endindex = na.ceil(RL / dx).astype('int64') + 1
         self.domain_width = na.rint((self.pf.domain_right_edge -
                     self.pf.domain_left_edge)/dx).astype('int64')
-        if level == 0 and self.level > 0:
-            # We use one grid cell at LEAST, plus one buffer on all sides
-            idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
+        if (level == 0 or initialize) and self.level > 0:
+            idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
-        elif level == 0 and self.level == 0:
+        elif (level == 0 or initialize) and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
+            #idims = self.global_endindex - self.global_startindex
             fields = ensure_list(fields)
             for field in fields:
                 self.field_data[field] = na.zeros(idims,dtype='float64')-999
@@ -3559,15 +3647,16 @@
     def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
-        input_left = (self._old_global_startindex + 0.5) * rf 
-        dx = na.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
-        output_dims = na.rint((self.right_edge-self.left_edge)/dx).astype('int32') + 2
+        input_left = (self._old_global_startindex + 0.5) * rf
+        input_right = (self._old_global_endindex - 0.5) * rf
+        output_left = self.global_startindex + 0.5
+        output_right = self.global_endindex - 0.5
+        output_dims = (output_right - output_left + 1).astype('int32')
 
         self._cur_dims = output_dims
 
         for field in fields:
             output_field = na.zeros(output_dims, dtype="float64")
-            output_left = self.global_startindex + 0.5
             ghost_zone_interpolate(rf, self[field], input_left,
                                    output_field, output_left)
             self.field_data[field] = output_field
@@ -3641,7 +3730,8 @@
     def _make_overlaps(self):
         # Using the processed cut_masks, we'll figure out what grids
         # are left in the hybrid region.
-        for region in self._all_regions:
+        pbar = get_pbar("Building boolean", len(self._all_regions))
+        for i, region in enumerate(self._all_regions):
             try:
                 region._get_list_of_grids()
                 alias = region
@@ -3668,6 +3758,8 @@
                     # Some of local is in overall
                     self._some_overlap.append(grid)
                     continue
+            pbar.update(i)
+        pbar.finish()
     
     def __repr__(self):
         # We'll do this the slow way to be clear what's going on


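For reference, the new start/end index bookkeeping above keeps one buffer cell on each side of the region at every level. A minimal numpy sketch of that arithmetic, outside of yt and with illustrative names only:

    import numpy as na  # numpy, under the alias used throughout these diffs

    def level_indices(left_edge, right_edge, domain_left_edge, base_dx,
                      refine_by, level):
        # Cell width at this level, then floor/ceil the region edges and pad
        # by one cell on each side, as _update_level_state now does.
        dx = base_dx / refine_by**level
        LL = left_edge - domain_left_edge
        RL = right_edge - domain_left_edge
        start = na.floor(LL / dx).astype('int64') - 1
        end = na.ceil(RL / dx).astype('int64') + 1
        return start, end, end - start  # (global start, global end, dims)

    # e.g. a centered half-width region on a 32^3 root grid, two levels down:
    start, end, dims = level_indices(na.array([0.25]*3), na.array([0.75]*3),
                                     na.zeros(3), na.ones(3)/32, 2, level=2)
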
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -340,18 +340,20 @@
     bv_x,bv_y,bv_z = data.quantities["BulkVelocity"]()
     # One-cell objects are NOT BOUND.
     if data["CellMass"].size == 1: return [0.0]
-    """
-    Changing data["CellMass"] to mass_to_use
-    Add the mass contribution of particles if include_particles = True
-    """
+
+    kinetic = 0.5 * (data["CellMass"] * 
+                     ((data["x-velocity"] - bv_x)**2 + 
+                      (data["y-velocity"] - bv_y)**2 +
+                      (data["z-velocity"] - bv_z)**2)).sum()
+
     if (include_particles):
 	mass_to_use = data["TotalMass"]
+        kinetic += 0.5 * (data["Dark_Matter_Mass"] *
+                          ((data["cic_particle_velocity_x"] - bv_x)**2 +
+                           (data["cic_particle_velocity_y"] - bv_y)**2 +
+                           (data["cic_particle_velocity_z"] - bv_z)**2)).sum()
     else:
 	mass_to_use = data["CellMass"]
-    kinetic = 0.5 * (mass_to_use * (
-                       (data["x-velocity"] - bv_x)**2
-                     + (data["y-velocity"] - bv_y)**2
-                     + (data["z-velocity"] - bv_z)**2 )).sum()
     # Add thermal energy to kinetic energy
     if (include_thermal_energy):
         thermal = (data["ThermalEnergy"] * mass_to_use).sum()


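The change above splits the kinetic term so that, when include_particles is set, the dark-matter contribution uses the cloud-in-cell particle velocity fields rather than the gas velocities. A hedged stand-alone sketch of that sum (names illustrative, not the yt quantity itself):

    import numpy as na  # numpy

    def bulk_kinetic_energy(cell_mass, gas_vel, bulk_vel,
                            dm_mass=None, particle_vel=None):
        # Gas kinetic energy about the bulk velocity ...
        ke = 0.5 * (cell_mass *
                    sum((v - bv)**2 for v, bv in zip(gas_vel, bulk_vel))).sum()
        # ... plus an optional particle term, as the quantity now adds.
        if dm_mass is not None:
            ke += 0.5 * (dm_mass *
                         sum((v - bv)**2
                             for v, bv in zip(particle_vel, bulk_vel))).sum()
        return ke
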
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -367,7 +367,10 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by
+        try:
+            rf = self.pf.refine_by[child.Level-1]
+        except TypeError:
+            rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi / rf - gi)
         endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,


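The try/except above lets refine_by be either a scalar or a per-level sequence (the Chombo frontend below now supplies a list). The lookup reduces to a small helper like this sketch:

    def refinement_factor(refine_by, level):
        # Per-level sequence: factor between level-1 and level;
        # otherwise a single scalar factor applies everywhere.
        try:
            return refine_by[level - 1]
        except TypeError:
            return refine_by

    refinement_factor([2, 4, 4], 2)  # -> 4
    refinement_factor(2, 2)          # -> 2
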
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -326,9 +326,9 @@
             return None
 
         full_name = "%s/%s" % (node, name)
-        try:
+        if len(self._data_file[full_name].shape) > 0:
             return self._data_file[full_name][:]
-        except TypeError:
+        else:
             return self._data_file[full_name]
 
     def _close_data_file(self):
@@ -337,18 +337,6 @@
             del self._data_file
             self._data_file = None
 
-    def _deserialize_hierarchy(self, harray):
-        # THIS IS BROKEN AND NEEDS TO BE FIXED
-        mylog.debug("Cached entry found.")
-        self.gridDimensions[:] = harray[:,0:3]
-        self.gridStartIndices[:] = harray[:,3:6]
-        self.gridEndIndices[:] = harray[:,6:9]
-        self.gridLeftEdge[:] = harray[:,9:12]
-        self.gridRightEdge[:] = harray[:,12:15]
-        self.gridLevels[:] = harray[:,15:16]
-        self.gridTimes[:] = harray[:,16:17]
-        self.gridNumberOfParticles[:] = harray[:,17:18]
-
     def get_smallest_dx(self):
         """
         Returns (in code units) the smallest cell size in the simulation.


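The hunk above replaces a try/except around slicing with a shape check, since scalar HDF5 datasets have an empty shape and cannot be sliced with [:]. A minimal h5py sketch of the same test (the yt code returns the dataset object itself for scalars; here the scalar value is read out):

    import h5py

    def read_node(handle, full_name):
        dset = handle[full_name]
        if len(dset.shape) > 0:
            return dset[:]    # array dataset: read it all
        return dset[()]       # scalar dataset: slicing would raise
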
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -212,7 +212,7 @@
         get_box_grids_below_level(left_edge, right_edge,
                             level,
                             self.grid_left_edge, self.grid_right_edge,
-                            self.grid_levels, mask, min_level)
+                            self.grid_levels.astype("int32"), mask, min_level)
         mask = mask.astype("bool")
         return self.grids[mask], na.where(mask)
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/particle_io.py
--- a/yt/data_objects/particle_io.py
+++ b/yt/data_objects/particle_io.py
@@ -29,6 +29,17 @@
 
 particle_handler_registry = defaultdict()
 
+def particle_converter(func):
+    from .data_containers import YTFieldData
+    def save_state(grid):
+        old_params = grid.field_parameters
+        old_keys = grid.field_data.keys()
+        tr = func(grid)
+        grid.field_parameters = old_params
+        grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
+        return tr
+    return save_state
+
 class ParticleIOHandler(object):
     class __metaclass__(type):
         def __init__(cls, name, b, d):
@@ -82,6 +93,7 @@
                 func = f._convert_function
             else:
                 func = f.particle_convert
+            func = particle_converter(func)
             conv_factors.append(
               na.fromiter((func(g) for g in grid_list),
                           count=len(grid_list), dtype='float64'))
@@ -90,7 +102,7 @@
         rvs = self.pf.h.io._read_particles(
             fields_to_read, rtype, args, grid_list, count_list,
             conv_factors)
-        for [n, v] in zip(fields_to_read, rvs):
+        for [n, v] in zip(fields, rvs):
             self.source.field_data[n] = v
 
 class ParticleIOHandlerRegion(ParticleIOHandlerImplemented):


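The particle_converter decorator above snapshots a grid's field parameters and cached field data around each conversion call, so the conversion cannot leave temporary fields behind in the cache. A generic sketch of that pattern, using a plain dict instead of YTFieldData:

    def preserve_grid_state(func):
        def save_state(grid):
            old_params = grid.field_parameters
            old_keys = list(grid.field_data.keys())
            result = func(grid)
            # Restore exactly what was cached before the conversion ran.
            grid.field_parameters = old_params
            grid.field_data = dict((k, grid.field_data[k]) for k in old_keys)
            return result
        return save_state
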
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/particle_trajectories.py
--- /dev/null
+++ b/yt/data_objects/particle_trajectories.py
@@ -0,0 +1,387 @@
+"""
+Author: John ZuHone <jzuhone at gmail.com>
+Affiliation: NASA/GSFC
+License:
+  Copyright (C) 2012 John ZuHone All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+  
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+  
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.amr_utils import sample_field_at_positions
+from yt.funcs import *
+
+import numpy as na
+import h5py
+
+class ParticleTrajectoryCollection(object) :
+
+    r"""A collection of particle trajectories in time over a series of
+    parameter files. 
+
+    The ParticleTrajectoryCollection object contains a collection of
+    particle trajectories for a specified set of particle indices. 
+    
+    Parameters
+    ----------
+    filenames : list of strings
+        A time-sorted list of filenames to construct the TimeSeriesData
+        object.
+    indices : array_like
+        An integer array of particle indices whose trajectories we
+        want to track. If they are not sorted they will be sorted.
+    fields : list of strings, optional
+        A set of fields that is retrieved when the trajectory
+        collection is instantiated.
+        Default : None (will default to the fields 'particle_position_x',
+        'particle_position_y', 'particle_position_z')
+
+    Examples
+    --------
+    >>> from yt.mods import *
+    >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+    >>> my_fns.sort()
+    >>> fields = ["particle_position_x", "particle_position_y",
+    >>>           "particle_position_z", "particle_velocity_x",
+    >>>           "particle_velocity_y", "particle_velocity_z"]
+    >>> pf = load(my_fns[0])
+    >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+    >>> indices = init_sphere["particle_index"].astype("int")
+    >>> trajs = ParticleTrajectoryCollection(my_fns, indices, fields=fields)
+    >>> for t in trajs :
+    >>>     print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+    Notes
+    -----
+    As of this time only particle trajectories that are complete over the
+    set of specified parameter files are supported. If any particle's history
+    ends for some reason (e.g. leaving the simulation domain or being actively
+    destroyed), the whole trajectory collection of which it is a member must end
+    at or before the particle's last timestep. This is a limitation we hope to
+    lift at some point in the future.     
+    """
+    def __init__(self, filenames, indices, fields = None) :
+
+        indices.sort() # Just in case the caller wasn't careful
+        
+        self.field_data = YTFieldData()
+        self.pfs = TimeSeriesData.from_filenames(filenames)
+        self.masks = []
+        self.sorts = []
+        self.indices = indices
+        self.num_indices = len(indices)
+        self.num_steps = len(filenames)
+        self.times = []
+
+        # Default fields 
+        
+        if fields is None : fields = []
+
+        # Must ALWAYS have these fields
+        
+        fields = fields + ["particle_position_x",
+                           "particle_position_y",
+                           "particle_position_z"]
+
+        """
+        The following loops through the parameter files
+        and performs two tasks. The first is to isolate
+        the particles with the correct indices, and the
+        second is to create a sorted list of these particles.
+        We also make a list of the current time from each file. 
+        Right now, the code assumes (and checks for) the
+        particle indices existing in each file, a limitation I
+        would like to lift at some point since some codes
+        (e.g., FLASH) destroy particles leaving the domain.
+        """
+        
+        for pf in self.pfs :
+            dd = pf.h.all_data()
+            newtags = dd["particle_index"].astype("int")
+            if not na.all(na.in1d(indices, newtags, assume_unique=True)) :
+                print "Not all requested particle ids contained in this file!"
+                raise IndexError
+            mask = na.in1d(newtags, indices, assume_unique=True)
+            sorts = na.argsort(newtags[mask])
+            self.masks.append(mask)            
+            self.sorts.append(sorts)
+            self.times.append(pf.current_time)
+
+        self.times = na.array(self.times)
+
+        # Set up the derived field list and the particle field list
+        # so that if the requested field is a particle field, we'll
+        # just copy the field over, but if the field is a grid field,
+        # we will first copy the field over to the particle positions
+        # and then return the field. 
+
+        self.derived_field_list = self.pfs[0].h.derived_field_list
+        self.particle_fields = [field for field in self.derived_field_list
+                                if self.pfs[0].field_info[field].particle_type]
+
+        # Now instantiate the requested fields 
+        for field in fields :
+
+            self._get_data(field)
+            
+    def has_key(self, key) :
+
+        return (key in self.field_data)
+    
+    def keys(self) :
+
+        return self.field_data.keys()
+
+    def __getitem__(self, key) :
+        """
+        Get the field associated with key,
+        checking to make sure it is a particle field.
+        """
+
+        if not self.field_data.has_key(key) :
+
+            self._get_data(key)
+
+        return self.field_data[key]
+    
+    def __setitem__(self, key, val):
+        """
+        Sets a field to be some other value.
+        """
+        self.field_data[key] = val
+                        
+    def __delitem__(self, key) :
+        """
+        Delete the field from the trajectory
+        """
+        del self.field_data[key]
+
+    def __iter__(self) :
+
+        """
+        This iterates over the trajectories for
+        the different particles, returning dicts
+        of fields for each trajectory
+        """
+        for idx in xrange(self.num_indices) :
+            traj = {}
+            traj["particle_index"] = self.indices[idx]
+            traj["particle_time"] = self.times
+            for field in self.field_data.keys() :
+                traj[field] = self[field][idx,:]
+            yield traj
+            
+    def __len__(self) :
+
+        """
+        The number of individual trajectories
+        """
+        return self.num_indices
+
+    def add_fields(self, fields) :
+
+        """
+        Add a list of fields to an existing trajectory
+
+        Parameters
+        ----------
+        fields : list of strings
+            A list of fields to be added to the current trajectory
+            collection.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+        """
+        
+        for field in fields :
+
+            if not self.field_data.has_key(field):
+
+                self._get_data(field)
+                
+    def _get_data(self, field) :
+
+        """
+        Get a field to include in the trajectory collection.
+        The trajectory collection itself is a dict of 2D numpy arrays,
+        with shape (num_indices, num_steps)
+        """
+        
+        if not self.field_data.has_key(field):
+            
+            particles = na.empty((0))
+
+            step = int(0)
+                
+            for pf, mask, sort in zip(self.pfs, self.masks, self.sorts) :
+                                    
+                if field in self.particle_fields :
+
+                    # This is easy... just get the particle fields
+
+                    dd = pf.h.all_data()
+                    pfield = dd[field][mask]
+                    particles = na.append(particles, pfield[sort])
+
+                else :
+
+                    # This is hard... must loop over grids
+
+                    pfield = na.zeros((self.num_indices))
+                    x = self["particle_position_x"][:,step]
+                    y = self["particle_position_y"][:,step]
+                    z = self["particle_position_z"][:,step]
+
+                    leaf_grids = [g for g in pf.h.grids if len(g.Children) == 0]
+                        
+                    for grid in leaf_grids :
+
+                        pfield += sample_field_at_positions(grid[field],
+                                                            grid.LeftEdge,
+                                                            grid.RightEdge,
+                                                            x, y, z)
+
+                    particles = na.append(particles, pfield)
+
+                step += 1
+                
+            self[field] = particles.reshape(self.num_steps,
+                                            self.num_indices).transpose()
+
+        return self.field_data[field]
+
+    def trajectory_from_index(self, index) :
+
+        """
+        Retrieve a single trajectory corresponding to a specific particle
+        index
+
+        Parameters
+        ----------
+        index : int
+            This defines which particle trajectory from the
+            ParticleTrajectoryCollection object will be returned.
+
+        Returns
+        -------
+        A dictionary corresponding to the particle's trajectory and the
+        fields along that trajectory
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> import matplotlib.pylab as pl
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> traj = trajs.trajectory_from_index(indices[0])
+        >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+        >>> pl.savefig("orbit")
+        """
+        
+        mask = na.in1d(self.indices, (index,), assume_unique=True)
+
+        if not na.any(mask) :
+            print "The particle index %d is not in the list!" % (index)
+            raise IndexError
+
+        fields = [field for field in sorted(self.field_data.keys())]
+                                
+        traj = {}
+
+        traj["particle_time"] = self.times
+        traj["particle_index"] = index
+        
+        for field in fields :
+
+            traj[field] = self[field][mask,:][0]
+
+        return traj
+
+    def write_out(self, filename_base) :
+
+        """
+        Write out particle trajectories to tab-separated ASCII files (one
+        for each trajectory) with the field names in the file header. Each
+        file is named with a basename and the index number.
+
+        Parameters
+        ----------
+        filename_base : string
+            The prefix for the output ASCII files.
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out("orbit_trajectory")       
+        """
+        
+        fields = [field for field in sorted(self.field_data.keys())]
+
+        num_fields = len(fields)
+
+        first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+        
+        template_str = "%g\t"*num_fields+"%g\n"
+        
+        for ix in xrange(self.num_indices) :
+
+            outlines = [first_str]
+
+            for it in xrange(self.num_steps) :
+                outlines.append(template_str %
+                                tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+            
+            fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+            fid.writelines(outlines)
+            fid.close()
+            del fid
+            
+    def write_out_h5(self, filename) :
+
+        """
+        Write out all the particle trajectories to a single HDF5 file
+        that contains the indices, the times, and the 2D array for each
+        field individually
+
+        Parameters
+        ----------
+        filename : string
+            The output filename for the HDF5 file
+
+        Examples
+        --------
+        >>> from yt.mods import *
+        >>> trajs = ParticleTrajectoryCollection(my_fns, indices)
+        >>> trajs.write_out_h5("orbit_trajectories")                
+        """
+        
+        fid = h5py.File(filename, "w")
+
+        fields = [field for field in sorted(self.field_data.keys())]
+        
+        fid.create_dataset("particle_indices", dtype=na.int32,
+                           data=self.indices)
+        fid.create_dataset("particle_time", data=self.times)
+        
+        for field in fields :
+
+            fid.create_dataset("%s" % field, data=self[field])
+                        
+        fid.close()


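The masks/sorts bookkeeping in the new ParticleTrajectoryCollection selects the tracked particle indices in each output and puts them into a common sorted order, so each field ends up as a (num_indices, num_steps) array. A small numpy sketch of that reshuffling, independent of yt:

    import numpy as na  # numpy

    def collect(per_file_tags, per_file_values, indices):
        indices = na.sort(indices)
        columns = []
        for tags, vals in zip(per_file_tags, per_file_values):
            mask = na.in1d(tags, indices, assume_unique=True)
            order = na.argsort(tags[mask])
            columns.append(vals[mask][order])
        # one row per particle, one column per output
        return na.array(columns).transpose()

    tags0, vals0 = na.array([5, 3, 9]), na.array([0.1, 0.2, 0.3])
    tags1, vals1 = na.array([9, 5, 3]), na.array([1.1, 1.2, 1.3])
    collect([tags0, tags1], [vals0, vals1], na.array([3, 9]))
    # -> [[0.2, 1.3], [0.3, 1.1]]  (particles 3 and 9 across both steps)
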
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -31,8 +31,7 @@
 from yt.funcs import *
 
 from yt.data_objects.data_containers import YTFieldData
-from yt.utilities.data_point_utilities import \
-    Bin1DProfile, Bin2DProfile, Bin3DProfile
+from yt.utilities.amr_utils import bin_profile1d, bin_profile2d, bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 
@@ -88,9 +87,11 @@
         self._ngrids = 0
         self.__data = {}         # final results will go here
         self.__weight_data = {}  # we need to track the weights as we go
+        self.__std_data = {}
         for field in fields:
             self.__data[field] = self._get_empty_field()
             self.__weight_data[field] = self._get_empty_field()
+            self.__std_data[field] = self._get_empty_field()
         self.__used = self._get_empty_field().astype('bool')
         #pbar = get_pbar('Binning grids', len(self._data_source._grids))
         for gi,grid in enumerate(self._get_grids(fields)):
@@ -103,10 +104,13 @@
                 continue
             for field in fields:
                 # We get back field values, weight values, used bins
-                f, w, u = self._bin_field(grid, field, weight, accumulation,
+                f, w, q, u = self._bin_field(grid, field, weight, accumulation,
                                           args=args, check_cut=True)
                 self.__data[field] += f        # running total
                 self.__weight_data[field] += w # running total
+                self.__std_data[field][u] += w[u] * (q[u]/w[u] + \
+                    (f[u]/w[u] -
+                     self.__data[field][u]/self.__weight_data[field][u])**2) # running total
                 self.__used = (self.__used | u)       # running 'or'
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
@@ -115,24 +119,41 @@
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
+                self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
+            self["%s_std" % field] = na.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
-        del self.__data, self.__weight_data, self.__used
+        del self.__data, self.__std_data, self.__weight_data, self.__used
 
     def _finalize_parallel(self):
+        my_mean = {}
+        my_weight = {}
+        for key in self.__data:
+            my_mean[key] = self._get_empty_field()
+            my_weight[key] = self._get_empty_field()
+        ub = na.where(self.__used)
+        for key in self.__data:
+            my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
+            my_weight[key][ub] = self.__weight_data[key][ub]
         for key in self.__data:
             self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
             self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        for key in self.__std_data:
+            self.__std_data[key][ub] = my_weight[key][ub] * (self.__std_data[key][ub] / my_weight[key][ub] + \
+                (my_mean[key][ub] - self.__data[key][ub]/self.__weight_data[key][ub])**2)
+            self.__std_data[key] = self.comm.mpi_allreduce(self.__std_data[key], op='sum')
         self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
-            f, w, u = self._bin_field(self._data_source, field, weight,
-                                      accumulation, self._args, check_cut = False)
+            f, w, q, u = self._bin_field(self._data_source, field, weight,
+                                         accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
+                q[u] = na.sqrt(q[u] / w[u])
             self[field] = f
+            self["%s_std" % field] = q
         self["UsedBins"] = u
 
     def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):
@@ -246,20 +267,24 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin1DProfile(bin_indices_x, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile1d(bin_indices_x, weight_data, source_data,
+                      weight_field, binned_field,
+                      m_field, q_field, used_field)
         # Fix for laziness, because at the *end* we will be
         # summing up all of the histograms and dividing by the
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
             binned_field = na.add.accumulate(binned_field)
-        return binned_field, weight_field, used_field.astype("bool")
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
@@ -415,6 +440,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -423,8 +450,8 @@
         weight_data = weight_data[mi]
         nx = bin_indices_x.size
         #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+                      weight_field, binned_field, m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -432,7 +459,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
                 binned_field = na.add.accumulate(binned_field, axis=1)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
@@ -667,6 +695,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -674,10 +704,9 @@
         bin_indices_z = args[3].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin3DProfile(
-            bin_indices_x, bin_indices_y, bin_indices_z,
-            weight_data, source_data,
-            weight_field, binned_field, used_field)
+        bin_profile3d(bin_indices_x, bin_indices_y, bin_indices_z,
+                      weight_data, source_data, weight_field, binned_field,
+                      m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -687,7 +716,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
                 binned_field = na.add.accumulate(binned_field, axis=2)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):


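The new *_std profile fields accumulate, per grid, the weight sum, the weighted field sum, and the weighted squared deviation about that grid's own mean, then pool them; the same combination is applied again across MPI tasks in _finalize_parallel. A self-contained sketch of the pooling formula those hunks implement:

    import numpy as na  # numpy

    def combine_weighted_stats(chunks):
        # chunks: (sum of w, sum of w*x, sum of w*(x - chunk_mean)**2) per chunk
        W = sum(w for w, s, q in chunks)
        mean = sum(s for w, s, q in chunks) / W
        var = sum(q + w * (s / w - mean)**2 for w, s, q in chunks) / W
        return mean, na.sqrt(var)

    x1, x2 = na.array([1., 2., 3.]), na.array([4., 5.])
    chunks = [(x.size, x.sum(), ((x - x.mean())**2).sum()) for x in (x1, x2)]
    combine_weighted_stats(chunks)  # equals the mean/std of the concatenation
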
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('data_objects',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('data_objects', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -91,6 +91,7 @@
 
         self._parse_parameter_file()
         self._set_units()
+        self._set_derived_attrs()
 
         # Because we need an instantiated class to check the pf's existence in
         # the cache, we move that check to here from __new__.  This avoids
@@ -103,6 +104,10 @@
 
         self.create_field_info()
 
+    def _set_derived_attrs(self):
+        self.domain_center = 0.5 * (self.domain_right_edge + self.domain_left_edge)
+        self.domain_width = self.domain_right_edge - self.domain_left_edge
+
     def __reduce__(self):
         args = (self._hash(),)
         return (_reconstruct_pf, args)


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -29,9 +29,11 @@
 from yt.convenience import load
 from .data_containers import data_object_registry
 from .analyzer_objects import create_quantity_proxy, \
-    analysis_task_registry
+    analysis_task_registry, AnalysisTask
 from .derived_quantities import quantity_info
 from yt.utilities.exceptions import YTException
+from yt.utilities.parallel_tools.parallel_analysis_interface \
+    import parallel_objects
 
 class AnalysisTaskProxy(object):
     def __init__(self, time_series):
@@ -51,14 +53,38 @@
     def __contains__(self, key):
         return key in analysis_task_registry
 
+def get_pf_prop(propname):
+    def _eval(params, pf):
+        return getattr(pf, propname)
+    cls = type(propname, (AnalysisTask,),
+                dict(eval = _eval, _params = tuple()))
+    return cls
+
+attrs = ("refine_by", "dimensionality", "current_time",
+         "domain_dimensions", "domain_left_edge",
+         "domain_right_edge", "unique_identifier",
+         "current_redshift", "cosmological_simulation",
+         "omega_matter", "omega_lambda", "hubble_constant")
+
+class TimeSeriesParametersContainer(object):
+    def __init__(self, data_object):
+        self.data_object = data_object
+
+    def __getattr__(self, attr):
+        if attr in attrs:
+            return self.data_object.eval(get_pf_prop(attr)())
+        raise AttributeError(attr)
+
 class TimeSeriesData(object):
-    def __init__(self, outputs = None):
+    def __init__(self, outputs = None, parallel = True):
         if outputs is None: outputs = []
         self.outputs = outputs
         self.tasks = AnalysisTaskProxy(self)
+        self.params = TimeSeriesParametersContainer(self)
         for type_name in data_object_registry:
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
+        self.parallel = parallel
 
     def __iter__(self):
         # We can make this fancier, but this works
@@ -77,17 +103,23 @@
         self.outputs.append(pf)
         
     def eval(self, tasks, obj=None):
-        if obj == None: obj = TimeSeriesDataObject(self, "all_data")
         tasks = ensure_list(tasks)
-        return_values = []
-        for pf in self:
-            return_values.append([])
+        return_values = {}
+        if self.parallel == False:
+            njobs = 1
+        else:
+            if self.parallel == True: njobs = -1
+            else: njobs = self.parallel
+        for store, pf in parallel_objects(self.outputs, njobs, return_values):
+            store.result = []
             for task in tasks:
                 try:
                     style = inspect.getargspec(task.eval)[0][1]
                     if style == 'pf':
                         arg = pf
                     elif style == 'data_object':
+                        if obj == None:
+                            obj = TimeSeriesDataObject(self, "all_data")
                         arg = obj.get(pf)
                     rv = task.eval(arg)
                 # We catch and store YT-originating exceptions
@@ -95,27 +127,28 @@
                 # small.
                 except YTException as rv:
                     pass
-                return_values[-1].append(rv)
-        return return_values
+                store.result.append(rv)
+        return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filename_list):
+    def from_filenames(cls, filename_list, parallel = True):
         outputs = []
         for fn in filename_list:
             outputs.append(load(fn))
-        obj = cls(outputs)
+        obj = cls(outputs, parallel = parallel)
         return obj
 
     @classmethod
     def from_output_log(cls, output_log,
-                        line_prefix = "DATASET WRITTEN"):
+                        line_prefix = "DATASET WRITTEN",
+                        parallel = True):
         outputs = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
             cut_line = line[len(line_prefix):].strip()
             fn = cut_line.split()[0]
             outputs.append(load(fn))
-        obj = cls(outputs)
+        obj = cls(outputs, parallel = parallel)
         return obj
 
 class TimeSeriesQuantitiesContainer(object):


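TimeSeriesData.eval now hands each output to parallel_objects, stores per-output results on a small store object, and returns them in output order. A serial stand-in for that store/result pattern (names here are hypothetical, not the yt parallel API):

    class _Store(object):
        def __init__(self):
            self.result = None

    def serial_objects(outputs, return_values):
        # Yield (store, output); after the caller fills store.result,
        # record it keyed by the output's position.
        for i, obj in enumerate(outputs):
            store = _Store()
            yield store, obj
            return_values[i] = store.result

    return_values = {}
    for store, pf in serial_objects(["out0001", "out0002"], return_values):
        store.result = [len(pf)]                 # stand-in for task results
    ordered = [v for k, v in sorted(return_values.items())]
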
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -701,7 +701,7 @@
     return data.convert("kpch")
 add_field("ParticleRadiuskpch", function=_ParticleRadius,
           validators=[ValidateParameter("center")],
-          convert_function = _ConvertRadiuskpc, units=r"\rm{kpc}/\rm{h}",
+          convert_function = _ConvertRadiuskpch, units=r"\rm{kpc}/\rm{h}",
           particle_type=True,
           display_name = "Particle Radius")
 add_field("Radiuskpch", function=_Radius,




diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,10 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('art', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -715,6 +715,8 @@
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365 * 3600 * 24.0)
         self.time_units['days']  = seconds / (3600 * 24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
         for key in yt2castroFieldsDict:
             self.conversion_factors[key] = 1.0
         for key in castro_particle_field_names:


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/castro/setup.py
--- a/yt/frontends/castro/setup.py
+++ b/yt/frontends/castro/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-import os.path
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('castro', parent_package, top_path)
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -68,16 +68,32 @@
         self.Parent = []
         self.Children = []
         self.Level = level
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
         self.ActiveDimensions = stop - start + 1
 
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+        
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return na.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by[self.Level-1]).astype('int64').ravel()
+        return self.start_index
+
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
+            self.dds = self.Parent[0].dds / self.pf.refine_by[self.Level-1]
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
@@ -91,8 +107,8 @@
     grid = ChomboGrid
     
     def __init__(self,pf,data_style='chombo_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
-        self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
@@ -100,12 +116,11 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = [fn for fn in self._fhandle if fn != "Chombo_global"]
+        self._levels = self._fhandle.keys()[1:]
         AMRHierarchy.__init__(self,pf,data_style)
-
         self._fhandle.close()
 
     def _initialize_data_storage(self):
@@ -113,7 +128,7 @@
 
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+        self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -130,8 +145,8 @@
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
-        levels = [fn for fn in f if fn != "Chombo_global"]
-        self.grids = []
+        levels = f.keys()[1:]
+        grids = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
@@ -140,17 +155,18 @@
             for level_id, box in enumerate(boxes):
                 si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(self.grids),self,level=level_number,
+                pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
-                self.grids.append(pg)
-                self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*si.astype(self.float_type)
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
         self.grids = na.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
+#        self.grids = na.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -179,8 +195,8 @@
     
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
-        # hardcoded for now 
-        self.current_time = 0.0
+        fileh = h5py.File(filename,'r')
+        self.current_time = fileh.attrs['time']
         self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
@@ -201,6 +217,8 @@
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
         for key in yt2plutoFieldsDict:
             self.conversion_factors[key] = 1.0
 
@@ -228,15 +246,18 @@
         """
         if os.path.isfile('pluto.ini'):
             self._parse_pluto_file('pluto.ini')
-        elif os.path.isfile('orion2.ini'):
-            self._parse_pluto_file('orion2.ini')
         else:
+            if os.path.isfile('orion2.ini'): self._parse_pluto_file('orion2.ini')
             self.unique_identifier = \
-                                   int(os.stat(self.parameter_filename)[ST_CTIME])
-            self.domain_left_edge = na.array([0.,0.,0.])
+                int(os.stat(self.parameter_filename)[ST_CTIME])
+            self.domain_left_edge = self.__calc_left_edge()
             self.domain_right_edge = self.__calc_right_edge()
+            self.domain_dimensions = self.__calc_domain_dimensions()
             self.dimensionality = 3
-            self.refine_by = 2
+            self.refine_by = []
+            fileh = h5py.File(self.parameter_filename,'r')
+            for level in range(0,fileh.attrs['num_levels']):
+                self.refine_by.append(fileh['/level_'+str(level)].attrs['ref_ratio'])
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -266,36 +287,26 @@
                     else:
                         self.parameters[paramName] = t
 
-            # assumes 3D for now
-            elif param.startswith("X1-grid"):
-                t = vals.split()
-                low1 = float(t[1])
-                high1 = float(t[4])
-                N1 = int(t[2])
-            elif param.startswith("X2-grid"):
-                t = vals.split()
-                low2 = float(t[1])
-                high2 = float(t[4])
-                N2 = int(t[2])
-            elif param.startswith("X3-grid"):
-                t = vals.split()
-                low3 = float(t[1])
-                high3 = float(t[4])
-                N3 = int(t[2])
+    def __calc_left_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        fileh.close()
+        return LE
 
-        self.dimensionality = 3
-        self.domain_left_edge = na.array([low1,low2,low3])
-        self.domain_right_edge = na.array([high1,high2,high3])
-        self.domain_dimensions = na.array([N1,N2,N3])
-        self.refine_by = self.parameters["RefineBy"]
-            
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(fileh['/level_0'].attrs['prob_domain']))[3:] + 1)
+        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                   
+                  
+    def __calc_domain_dimensions(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return R_index - L_index
+ 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -307,7 +318,6 @@
             pass
         return False
 
-
     @parallel_root_only
     def print_key_parameters(self):
         for a in ["current_time", "domain_dimensions", "domain_left_edge",


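With Chombo now reporting a ref_ratio per level, a child grid's global start index is its parent's index plus the cell offset of the child within the parent, scaled by that level's ratio, which is what the new get_global_startindex does. In sketch form (illustrative names):

    import numpy as na  # numpy

    def child_startindex(child_left, parent_left, parent_start, parent_dds,
                         ref_ratio):
        # Offset of the child inside the parent, in parent cells, then refine.
        offset = na.rint((child_left - parent_left) / parent_dds)
        return ((parent_start + offset) * ref_ratio).astype('int64')

    child_startindex(na.array([0.25]*3), na.zeros(3),
                     na.zeros(3, dtype='int64'), na.ones(3)/16, 2)
    # -> array([8, 8, 8])
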
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -38,45 +38,64 @@
 add_chombo_field = KnownChomboFields.add_field
 
 ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_chombo_field = ChomboFieldInfo.add_field
+add_field = ChomboFieldInfo.add_field
 
-add_field = add_chombo_field
+add_chombo_field("density", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("density")],
+                 units=r"\rm{g}/\rm{cm}^3")
 
-add_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
+KnownChomboFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_chombo_field("X-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Momentum")],
+                 units=r"",display_name=r"M_x")
+KnownChomboFields["X-momentum"]._projected_units=r""
 
-add_field("X-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Momentum")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_chombo_field("Y-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Momentum")],
+                 units=r"",display_name=r"M_y")
+KnownChomboFields["Y-momentum"]._projected_units=r""
 
-add_field("Y-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Momentum")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_chombo_field("Z-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Z-Momentum")],
+                 units=r"",display_name=r"M_z")
+KnownChomboFields["Z-momentum"]._projected_units=r""
 
-add_field("Z-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Momentum")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_chombo_field("X-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Magnfield")],
+                 units=r"",display_name=r"B_x")
+KnownChomboFields["X-magnfield"]._projected_units=r""
 
-add_field("X-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Magnfield")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_chombo_field("Y-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Magnfield")],
+                 units=r"",display_name=r"B_y")
+KnownChomboFields["Y-magnfield"]._projected_units=r""
 
-add_field("Y-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Magnfield")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_chombo_field("Z-magnfield", function=NullFunc, take_log=False,
+                  validators = [ValidateDataField("Z-Magnfield")],
+                  units=r"",display_name=r"B_z")
+KnownChomboFields["Z-magnfield"]._projected_units=r""
 
-add_field("Z-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Magnfield")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["energy-density"]._projected_units =r""
+
+add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("radiation-energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["radiation-energy-density"]._projected_units =r""
+
+def _Density(field,data):
+    """A duplicate of the density field. This is needed because when you try 
+    to instantiate a PlotCollection without passing in a center, the code
+    will try to generate one for you using the "Density" field, which gives an error 
+    if it isn't defined.
+
+    """
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
 
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -36,7 +36,7 @@
 
     def _field_dict(self,fhandle):
         ncomp = int(fhandle['/'].attrs['num_components'])
-        temp =  fhandle['/'].attrs.listitems()[-ncomp:]
+        temp =  fhandle['/'].attrs.items()[-ncomp:]
         val, keys = zip(*temp)
         val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
@@ -45,7 +45,7 @@
         fhandle = h5py.File(grid.filename,'r')
         ncomp = int(fhandle['/'].attrs['num_components'])
 
-        fns = [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
         fhandle.close()
     
     def _read_data_set(self,grid,field):
@@ -64,7 +64,6 @@
 
         fhandle.close()
         return data.reshape(dims, order='F')
-                                          
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/chombo/setup.py
--- a/yt/frontends/chombo/setup.py
+++ b/yt/frontends/chombo/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('chombo',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('chombo', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -354,6 +354,11 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays")
         except:
             return False
+        hash = f["/"].attrs.get("hash", None)
+        if hash != self.parameter_file._hash():
+            mylog.info("Binary hierarchy does not match: recreating")
+            f.close()
+            return False
         self.grid_dimensions[:] = f["/ActiveDimensions"][:]
         self.grid_left_edge[:] = f["/LeftEdges"][:]
         self.grid_right_edge[:] = f["/RightEdges"][:]
@@ -390,6 +395,7 @@
             f = h5py.File(self.hierarchy_filename[:-9] + "harrays", "w")
         except IOError:
             return
+        f["/"].attrs["hash"] = self.parameter_file._hash()
         f.create_dataset("/LeftEdges", data=self.grid_left_edge)
         f.create_dataset("/RightEdges", data=self.grid_right_edge)
         parents, procs, levels = [], [], []
@@ -462,7 +468,7 @@
                     field_list = field_list.union(gf)
         else:
             field_list = None
-        field_list = self.comm.mpi_bcast_pickled(field_list)
+        field_list = self.comm.mpi_bcast(field_list)
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 
@@ -481,7 +487,7 @@
                 mylog.debug("Added additional grid %s", first_grid)
             mylog.debug("Checking grids: %s", random_sample.tolist())
         else:
-            random_sample = na.mgrid[0:max(len(self.grids)-1,1)].astype("int32")
+            random_sample = na.mgrid[0:max(len(self.grids),1)].astype("int32")
         return self.grids[(random_sample,)]
 
     def find_particles_by_type(self, ptype, max_num=None, additional_fields=None):
@@ -886,6 +892,8 @@
         seconds = self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
 
     def _setup_comoving_units(self):
         z = self["CosmologyCurrentRedshift"]
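
The hash check added above is a cache-invalidation pattern: the binary hierarchy file carries a fingerprint of the parameter file and is discarded when it no longer matches. A minimal sketch of the same idea, assuming nothing about yt's actual _hash() beyond "some digest of the parameters"; the file name, md5 choice and dataset name below are illustrative only:

    import hashlib
    import h5py
    import numpy as np

    def write_cache(fn, params, left_edges):
        with h5py.File(fn, "w") as f:
            f["/"].attrs["hash"] = hashlib.md5(params).hexdigest()
            f.create_dataset("/LeftEdges", data=left_edges)

    def read_cache(fn, params):
        with h5py.File(fn, "r") as f:
            if f["/"].attrs.get("hash", None) != hashlib.md5(params).hexdigest():
                return None                      # stale cache: caller rebuilds it
            return f["/LeftEdges"][:]

    write_cache("hier_cache.h5", b"OmegaMatter = 0.27", np.zeros((4, 3)))
    assert read_cache("hier_cache.h5", b"OmegaMatter = 0.30") is None
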


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -263,6 +263,16 @@
           display_name = "Dark\ Matter\ Density",
           not_in_all = True)
 
+def _Dark_Matter_Mass(field, data):
+    return data['Dark_Matter_Density'] * data["CellVolume"]
+add_field("Dark_Matter_Mass", function=_Dark_Matter_Mass,
+          validators=ValidateDataField("Dark_Matter_Density"),
+          display_name="Dark\ Matter\ Mass", units=r"\rm{g}")
+add_field("Dark_Matter_MassMsun", function=_Dark_Matter_Mass,
+          convert_function=_convertCellMassMsun,
+          validators=ValidateDataField("Dark_Matter_Density"),
+          display_name="Dark\ Matter\ Mass", units=r"M_{\odot}")
+
 KnownEnzoFields["Temperature"]._units = r"\rm{K}"
 KnownEnzoFields["Temperature"].units = r"K"
 KnownEnzoFields["Dust_Temperature"]._units = r"\rm{K}"
@@ -314,6 +324,46 @@
 add_field("dm_density", function=_dmpdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
+def _cic_particle_field(field, data):
+    """
+    Create a grid field for particle quantities weighted by particle mass, 
+    using cloud-in-cell deposit.
+    """
+    particle_field = field.name[4:]
+    top = na.zeros(data.ActiveDimensions, dtype='float32')
+    if data.NumberOfParticles == 0: return top
+    particle_field_data = data[particle_field] * data['particle_mass']
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
+                           data["particle_position_y"].astype(na.float64),
+                           data["particle_position_z"].astype(na.float64),
+                           particle_field_data.astype(na.float32),
+                           na.int64(data.NumberOfParticles),
+                           top, na.array(data.LeftEdge).astype(na.float64),
+                           na.array(data.ActiveDimensions).astype(na.int32), 
+                           na.float64(data['dx']))
+    del particle_field_data
+
+    bottom = na.zeros(data.ActiveDimensions, dtype='float32')
+    amr_utils.CICDeposit_3(data["particle_position_x"].astype(na.float64),
+                           data["particle_position_y"].astype(na.float64),
+                           data["particle_position_z"].astype(na.float64),
+                           data["particle_mass"].astype(na.float32),
+                           na.int64(data.NumberOfParticles),
+                           bottom, na.array(data.LeftEdge).astype(na.float64),
+                           na.array(data.ActiveDimensions).astype(na.int32), 
+                           na.float64(data['dx']))
+    top[bottom == 0] = 0.0
+    bnz = bottom.nonzero()
+    top[bnz] /= bottom[bnz]
+    return top
+
+add_field('cic_particle_velocity_x', function=_cic_particle_field,
+          take_log=False, validators=[ValidateSpatial(0)])
+add_field('cic_particle_velocity_y', function=_cic_particle_field,
+          take_log=False, validators=[ValidateSpatial(0)])
+add_field('cic_particle_velocity_z', function=_cic_particle_field,
+          take_log=False, validators=[ValidateSpatial(0)])
+
 def _star_field(field, data):
     """
     Create a grid field for star quantities, weighted by star mass.
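
The _cic_particle_field helper above deposits quantity*mass and mass separately and then divides, giving a mass-weighted grid field. A rough NumPy-only illustration of that idea (nearest-grid-point binning via histogramdd stands in for the cloud-in-cell kernel in amr_utils.CICDeposit_3; all particle data below is random):

    import numpy as np

    np.random.seed(0)
    pos = np.random.random((1000, 3))            # particle positions in [0, 1)
    mass = np.random.random(1000)
    velx = np.random.normal(size=1000)           # quantity to be mass-weighted
    dims = (16, 16, 16)
    edges = [np.linspace(0.0, 1.0, n + 1) for n in dims]

    top, _ = np.histogramdd(pos, bins=edges, weights=velx * mass)
    bottom, _ = np.histogramdd(pos, bins=edges, weights=mass)
    field = np.zeros(dims)
    nz = bottom > 0
    field[nz] = top[nz] / bottom[nz]             # mass-weighted velocity per cell
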


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/enzo/setup.py
--- a/yt/frontends/enzo/setup.py
+++ b/yt/frontends/enzo/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('enzo',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('enzo', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -79,13 +79,6 @@
     def _detect_fields(self):
         ncomp = self._handle["/unknown names"].shape[0]
         self.field_list = [s for s in self._handle["/unknown names"][:].flat]
-        facevars = [s for s in self._handle
-                    if s.startswith(("fcx","fcy","fcz")) and s[-1].isdigit()]
-        nfacevars = len(facevars)
-        if (nfacevars > 0) :
-            ncomp += nfacevars
-            for facevar in facevars :
-                self.field_list.append(facevar)
         if ("/particle names" in self._handle) :
             self.field_list += ["particle_" + s[0].strip() for s
                                 in self._handle["/particle names"][:]]
@@ -106,14 +99,22 @@
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
         
-        self.grid_left_edge[:] = f["/bounding box"][:,:,0]
-        self.grid_right_edge[:] = f["/bounding box"][:,:,1]
+        # Initialize to the domain left / domain right
+        ND = self.parameter_file.dimensionality
+        DLE = self.parameter_file.domain_left_edge
+        DRE = self.parameter_file.domain_right_edge
+        for i in range(3):
+            self.grid_left_edge[:,i] = DLE[i]
+            self.grid_right_edge[:,i] = DRE[i]
+        # We only go up to ND for 2D datasets
+        self.grid_left_edge[:,:ND] = f["/bounding box"][:,:,0]
+        self.grid_right_edge[:,:ND] = f["/bounding box"][:,:,1]
         
         # Move this to the parameter file
         try:
-            nxb = pf._find_parameter("integer", "nxb", True)
-            nyb = pf._find_parameter("integer", "nyb", True)
-            nzb = pf._find_parameter("integer", "nzb", True)
+            nxb = pf.parameters['nxb']
+            nyb = pf.parameters['nyb']
+            nzb = pf.parameters['nzb']
         except KeyError:
             nxb, nyb, nzb = [int(f["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
@@ -226,8 +227,13 @@
         if len(self.parameters) == 0:
             self._parse_parameter_file()
         self.conversion_factors = defaultdict(lambda: 1.0)
+        if "EOSType" not in self.parameters:
+            self.parameters["EOSType"] = -1
         if self.cosmological_simulation == 1:
             self._setup_comoving_units()
+        if "pc_unitsbase" in self.parameters:
+            if self.parameters["pc_unitsbase"] == "CGS":
+                self._setup_cgs_units()
         else:
             self._setup_nounits_units()
         self.time_units['1'] = 1
@@ -237,6 +243,9 @@
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+
         for p, v in self._conversion_override.items():
             self.conversion_factors[p] = v
 
@@ -260,6 +269,22 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
+    def _setup_cgs_units(self):
+        self.conversion_factors['dens'] = 1.0
+        self.conversion_factors['pres'] = 1.0
+        self.conversion_factors['eint'] = 1.0
+        self.conversion_factors['ener'] = 1.0
+        self.conversion_factors['temp'] = 1.0
+        self.conversion_factors['velx'] = 1.0
+        self.conversion_factors['vely'] = 1.0
+        self.conversion_factors['velz'] = 1.0
+        self.conversion_factors['particle_velx'] = 1.0
+        self.conversion_factors['particle_vely'] = 1.0
+        self.conversion_factors['particle_velz'] = 1.0
+        self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
     def _setup_nounits_units(self):
         self.conversion_factors['dens'] = 1.0
         self.conversion_factors['pres'] = 1.0
@@ -272,7 +297,6 @@
         self.conversion_factors['particle_velx'] = 1.0
         self.conversion_factors['particle_vely'] = 1.0
         self.conversion_factors['particle_velz'] = 1.0
-        z = 0
         mylog.warning("Setting 1.0 in code units to be 1.0 cm")
         if not self.has_key("TimeUnits"):
             mylog.warning("No time units.  Setting 1.0 = 1 second.")
@@ -284,9 +308,13 @@
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
         if nn not in self._handle: raise KeyError(nn)
-        for tpname, pval in self._handle[nn][:]:
+        for tpname, pval in zip(self._handle[nn][:,'name'],
+                                self._handle[nn][:,'value']):
             if tpname.strip() == pname:
-                return pval
+                if ptype == "string" :
+                    return pval.strip()
+                else :
+                    return pval
         raise KeyError(pname)
 
     def _parse_parameter_file(self):
@@ -300,62 +328,89 @@
                 self._handle["sim info"][:]["file format version"])
         else:
             raise RuntimeError("Can't figure out FLASH file version.")
+        # First we load all of the parameters
+        hns = ["simulation parameters"]
+        # note the ordering here is important: runtime parameters should
+        # overwrite scalars with the same name.
+        for ptype in ['scalars', 'runtime parameters']:
+            for vtype in ['integer', 'real', 'logical', 'string']:
+                hns.append("%s %s" % (vtype, ptype))
+        if self._flash_version > 7:
+            for hn in hns:
+                if hn not in self._handle:
+                    continue
+                for varname, val in zip(self._handle[hn][:,'name'],
+                                        self._handle[hn][:,'value']):
+                    vn = varname.strip()
+                    if hn.startswith("string") :
+                        pval = val.strip()
+                    else :
+                        pval = val
+                    if vn in self.parameters and self.parameters[vn] != pval:
+                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn)) 
+                    self.parameters[vn] = pval
+        if self._flash_version == 7:
+            for hn in hns:
+                if hn not in self._handle:
+                    continue
+                if hn == 'simulation parameters':
+                    zipover = zip(self._handle[hn].dtype.names,self._handle[hn][0])
+                else:
+                    zipover = zip(self._handle[hn][:,'name'],self._handle[hn][:,'value'])
+                for varname, val in zipover:
+                    vn = varname.strip()
+                    if hn.startswith("string") :
+                        pval = val.strip()
+                    else :
+                        pval = val
+                    if vn in self.parameters and self.parameters[vn] != pval:
+                        mylog.warning("{0} {1} overwrites a simulation scalar of the same name".format(hn[:-1],vn))
+                    self.parameters[vn] = pval
         self.domain_left_edge = na.array(
-            [self._find_parameter("real", "%smin" % ax) for ax in 'xyz']).astype("float64")
+            [self.parameters["%smin" % ax] for ax in 'xyz']).astype("float64")
         self.domain_right_edge = na.array(
-            [self._find_parameter("real", "%smax" % ax) for ax in 'xyz']).astype("float64")
-        self.min_level = self._find_parameter(
-            "integer", "lrefine_min", scalar = False) - 1
+            [self.parameters["%smax" % ax] for ax in 'xyz']).astype("float64")
+        self.min_level = self.parameters["lrefine_min"] -1
 
         # Determine domain dimensions
         try:
-            nxb = self._find_parameter("integer", "nxb", scalar = True)
-            nyb = self._find_parameter("integer", "nyb", scalar = True)
-            nzb = self._find_parameter("integer", "nzb", scalar = True)
-            dimensionality = self._find_parameter("integer", "dimensionality",
-                                    scalar = True)
+            nxb = self.parameters["nxb"]
+            nyb = self.parameters["nyb"]
+            nzb = self.parameters["nzb"]
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
-                              for ax in 'xyz']
+                              for ax in 'xyz'] # FLASH2 only!
+        try:
+            dimensionality = self.parameters["dimensionality"]
+        except KeyError:
             dimensionality = 3
             if nzb == 1: dimensionality = 2
             if nyb == 1: dimensionality = 1
             if dimensionality < 3:
                 mylog.warning("Guessing dimensionality as %s", dimensionality)
-        nblockx = self._find_parameter("integer", "nblockx")
-        nblocky = self._find_parameter("integer", "nblocky")
-        nblockz = self._find_parameter("integer", "nblockz")
+
+        nblockx = self.parameters["nblockx"]
+        nblocky = self.parameters["nblocky"]
+        nblockz = self.parameters["nblockz"]
         self.dimensionality = dimensionality
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
+        try:
+            self.parameters["Gamma"] = self.parameters["gamma"]
+        except:
+            mylog.warning("Cannot find Gamma")
+            pass
 
-        if self._flash_version == 7:
-            self.current_time = float(
-                self._handle["simulation parameters"][:]["time"])
-        else:
-            self.current_time = \
-                float(self._find_parameter("real", "time", scalar=True))
+        self.current_time = self.parameters["time"]
 
-        if self._flash_version == 7:
-            self.parameters['timestep'] = float(
-                self._handle["simulation parameters"]["timestep"])
-        else:
-            self.parameters['timestep'] = \
-                float(self._find_parameter("real", "dt", scalar=True))
-
-        try:
-            use_cosmo = self._find_parameter("logical", "usecosmology") 
+        try: 
+            self.parameters["usecosmology"]
+            self.cosmological_simulation = 1
+            self.current_redshift = self.parameters['redshift']
+            self.omega_lambda = self.parameters['cosmologicalconstant']
+            self.omega_matter = self.parameters['omegamatter']
+            self.hubble_constant = self.parameters['hubbleconstant']
         except:
-            use_cosmo = 0
-
-        if use_cosmo == 1:
-            self.cosmological_simulation = 1
-            self.current_redshift = self._find_parameter("real", "redshift",
-                                        scalar = True)
-            self.omega_lambda = self._find_parameter("real", "cosmologicalconstant")
-            self.omega_matter = self._find_parameter("real", "omegamatter")
-            self.hubble_constant = self._find_parameter("real", "hubbleconstant")
-        else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -34,7 +34,8 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-
+from yt.utilities.physical_constants import \
+    kboltz
 KnownFLASHFields = FieldInfoContainer()
 add_flash_field = KnownFLASHFields.add_field
 
@@ -62,9 +63,8 @@
                     "y-velocity": "vely",
                     "z-velocity": "velz",
                     "Density": "dens",
-                    "TotalEnergy": "ener",
-                    "GasEnergy": "eint",
                     "Temperature": "temp",
+                    "Pressure" : "pres", 
                     "particle_position_x" : "particle_posx",
                     "particle_position_y" : "particle_posy",
                     "particle_position_z" : "particle_posz",
@@ -148,6 +148,9 @@
 add_flash_field("temp", function=NullFunc, take_log=True,
                 convert_function=_get_convert("temp"),
                 units=r"\rm{K}")
+add_flash_field("tele", function=NullFunc, take_log=True,
+                convert_function=_get_convert("tele"),
+                units = r"\rm{K}")
 add_flash_field("pres", function=NullFunc, take_log=True,
                 convert_function=_get_convert("pres"),
                 units=r"\rm{erg}\//\/\rm{cm}^{3}")
@@ -190,15 +193,16 @@
         add_flash_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)],
                   particle_type = pfield)
-    else:
-        if f.endswith("_Fraction") :
-            dname = "%s\/Fraction" % f.split("_")[0]
-        else :
-            dname = f                    
-        ff = KnownFLASHFields[v]
-        add_field(f, TranslationFunc(v),
-                  take_log=KnownFLASHFields[v].take_log,
-                  units = ff._units, display_name=dname)
+    if f.endswith("_Fraction") :
+        dname = "%s\/Fraction" % f.split("_")[0]
+    else :
+        dname = f                    
+    ff = KnownFLASHFields[v]
+    pfield = f.startswith("particle")
+    add_field(f, TranslationFunc(v),
+              take_log=KnownFLASHFields[v].take_log,
+              units = ff._units, display_name=dname,
+              particle_type = pfield)
 
 def _convertParticleMassMsun(data):
     return 1.0/1.989e33
@@ -208,3 +212,44 @@
           function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
           particle_type=True, convert_function=_convertParticleMassMsun,
           particle_convert_function=_ParticleMassMsun)
+
+def _ThermalEnergy(fields, data) :
+    try:
+        return data["eint"]
+    except:
+        pass
+    try:
+        return data["Pressure"] / (data.pf["Gamma"] - 1.0) / data["Density"]
+    except:
+        pass
+    if data.has_field_parameter("mu") :
+        mu = data.get_field_parameter("mu")
+    else:
+        mu = 0.6
+    return kboltz*data["Density"]*data["Temperature"]/(mu*mh) / (data.pf["Gamma"] - 1.0)
+    
+add_field("ThermalEnergy", function=_ThermalEnergy,
+          units=r"\rm{ergs}/\rm{g}")
+
+def _TotalEnergy(fields, data) :
+    try:
+        etot = data["ener"]
+    except:
+        etot = data["ThermalEnergy"] + 0.5 * (
+            data["x-velocity"]**2.0 +
+            data["y-velocity"]**2.0 +
+            data["z-velocity"]**2.0)
+    try:
+        etot += data['magp']/data["Density"]
+    except:
+        pass
+    return etot
+
+add_field("TotalEnergy", function=_TotalEnergy,
+          units=r"\rm{ergs}/\rm{g}")
+
+def _GasEnergy(fields, data) :
+    return data["ThermalEnergy"]
+
+add_field("GasEnergy", function=_GasEnergy, 
+          units=r"\rm{ergs}/\rm{g}")
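
The _ThermalEnergy fallback above reduces to the ideal-gas relation e = P / ((Gamma - 1) * rho) when "eint" is absent but a pressure field exists. A quick numerical check of that expression (the density and pressure values are arbitrary cgs numbers, not from any dataset):

    gamma = 5.0 / 3.0
    rho, pres = 1.0e-24, 1.0e-12                 # g/cm^3, erg/cm^3
    eint = pres / ((gamma - 1.0) * rho)          # specific energy, ~1.5e12 erg/g
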


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/flash/setup.py
--- a/yt/frontends/flash/setup.py
+++ b/yt/frontends/flash/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('flash',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('flash', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -162,6 +162,8 @@
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
 
     def _parse_parameter_file(self):
         fileh = h5py.File(self.filename)


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/gadget/setup.py
--- a/yt/frontends/gadget/setup.py
+++ b/yt/frontends/gadget/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gadget',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('gadget', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -40,5 +40,3 @@
 
 from .io import \
       IOHandlerGDFHDF5
-
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -43,6 +43,11 @@
     FieldInfoContainer, NullFunc
 import pdb
 
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
 class GDFGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, id, hierarchy, level, start, dimensions):
@@ -168,13 +173,24 @@
         seconds = 1
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+
         # This should be improved.
         self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
+            current_field = self._handle["/field_types/%s" % field_name]
             try:
-                self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+                self.units[field_name] = current_field.attrs['field_to_cgs']
             except:
                 self.units[field_name] = 1.0
+            try:
+                current_fields_unit = current_field.attrs['field_units'][0]
+            except:
+                current_fields_unit = ""
+            self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
+                   units=current_fields_unit, projected_units="", 
+                   convert_function=_get_convert(field_name))
 
         self._handle.close()
         del self._handle
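
_get_convert above is a small closure factory: every field discovered in /field_types gets its own conversion callback that defers the lookup to data.convert(field_name) at access time. A self-contained sketch of that pattern with a stand-in data object (FakeData and its units table are invented for illustration):

    def _get_convert(fname):
        def _conv(data):
            return data.convert(fname)
        return _conv

    class FakeData(object):
        units = {"density": 1.0e-24, "velocity_x": 1.0e5}
        def convert(self, fname):
            return self.units[fname]

    conv_dens = _get_convert("density")
    assert conv_dens(FakeData()) == 1.0e-24      # factor resolved per field
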


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/gdf/setup.py
--- a/yt/frontends/gdf/setup.py
+++ b/yt/frontends/gdf/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gdf',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('gdf', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -535,6 +535,8 @@
         seconds = 1
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
         for key in yt2maestroFieldsDict:
             self.conversion_factors[key] = 1.0
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/maestro/setup.py
--- a/yt/frontends/maestro/setup.py
+++ b/yt/frontends/maestro/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('maestro',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('maestro', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -726,6 +726,8 @@
         seconds = self.time_units["s"]
         self.time_units["days"] = seconds / (3600 * 24.0)
         self.time_units["years"] = seconds / (3600 * 24.0 * 365)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
 
         # not the most useful right now, but someday
         for key in nyx_particle_field_names:


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/nyx/setup.py
--- a/yt/frontends/nyx/setup.py
+++ b/yt/frontends/nyx/setup.py
@@ -1,8 +1,9 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-import os.path
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -88,7 +88,7 @@
         h.grid_left_edge[self.id,:] = self.LeftEdge[:]
         h.grid_right_edge[self.id,:] = self.RightEdge[:]
         #self.Time = h.gridTimes[self.id,0]
-        #self.NumberOfParticles = h.gridNumberOfParticles[self.id,0]
+        self.NumberOfParticles = 0 # these will be read in later
         self.field_indexes = h.field_indexes
         self.Children = h.gridTree[self.id]
         pIDs = h.gridReverseTree[self.id]
@@ -127,7 +127,54 @@
         self.__cache_endianness(self.levels[-1].grids[-1])
         AMRHierarchy.__init__(self,pf, self.data_style)
         self._populate_hierarchy()
-        
+        self._read_particles()
+
+    def _read_particles(self):
+        """
+        reads in particles and assigns them to grids. Will search for
+        Star particles, then sink particles if no star particle file
+        is found, and finally will simply note that no particles are
+        found if neither works. To add a new Orion particle type,
+        simply add it to the if/elif/else block.
+
+        """
+        self.grid_particle_count = na.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.fullplotdir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip())
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=na.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    na.choose(na.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    na.choose(na.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                    ind = na.where(mask == 1)
+                    selected_grids = self.grids[ind]
+                    # in orion, particles always live on the finest level.
+                    # so, we want to assign the particle to the finest of
+                    # the grids we just found
+                    if len(selected_grids) != 0:
+                        grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                        ind = na.where(self.grids == grid)[0][0]
+                        self.grid_particle_count[ind] += 1
+                        self.grids[ind].NumberOfParticles += 1
+        return True
+                
     def readGlobalHeader(self,filename,paranoid_read):
         """
         read the global header file for an Orion plotfile output.
@@ -455,7 +502,8 @@
         castro = any(("castro." in line for line in open(pfn)))
         nyx = any(("nyx." in line for line in open(pfn)))
         maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not castro) and (not maestro) and (not nyx)
+        really_orion = any(("geometry.prob_lo" in line for line in open(pfn)))
+        orion = (not castro) and (not maestro) and (not nyx) and really_orion
         return orion
         
     def _parse_parameter_file(self):
@@ -577,6 +625,8 @@
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
         for key in yt2orionFieldsDict:
             self.conversion_factors[key] = 1.0
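
The containment test in _read_particle_file above masks out grids whose edges exclude a particle and then assigns it to the finest surviving grid. A NumPy-only sketch of that selection (the two grids, their levels and the particle position are made-up values):

    import numpy as np

    grid_left = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right = np.array([[1.0, 1.0, 1.0], [0.75, 0.75, 0.75]])
    levels = np.array([0, 1])
    coord = [0.6, 0.6, 0.6]

    mask = np.ones(len(levels), dtype=bool)
    for i in range(3):
        mask &= (grid_left[:, i] <= coord[i]) & (coord[i] < grid_right[:, i])
    candidates = np.where(mask)[0]
    finest = candidates[np.argmax(levels[candidates])]
    assert finest == 1                           # the level-1 grid gets the particle
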
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -22,10 +22,15 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+import numpy as na
+
 from yt.utilities.physical_constants import \
     mh, kboltz
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
     FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
@@ -41,26 +46,26 @@
 OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = OrionFieldInfo.add_field
 
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-OrionFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_orion_field("density", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_field("eden", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("eden")],
-          units=r"\rm{erg}/\rm{cm}^3")
+add_orion_field("eden", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
 
-add_field("xmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("xmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("xmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_field("ymom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("ymom")],
-          units=r"\rm{gm}/\rm{cm^2\ s}")
+add_orion_field("ymom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
 
-add_field("zmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("zmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("zmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
 translation_dict = {"x-velocity": "xvel",
                     "y-velocity": "yvel",
@@ -73,23 +78,22 @@
                     "z-momentum": "zmom"
                    }
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
-
 for f,v in translation_dict.items():
-    if v not in OrionFieldInfo:
-        add_field(v, function=lambda a,b: None, take_log=False,
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units = ff._units, display_name=f)
 
 def _xVelocity(field, data):
     """generate x-velocity from x-momentum and density
-
+    
     """
     return data["xmom"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _yVelocity(field,data):
     """generate y-velocity from y-momentum and density
@@ -99,16 +103,16 @@
     #    return data["xvel"]
     #except KeyError:
     return data["ymom"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _zVelocity(field,data):
     """generate z-velocity from z-momentum and density
-
+    
     """
     return data["zmom"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _ThermalEnergy(field, data):
     """generate thermal (gas energy). Dual Energy Formalism was
@@ -122,16 +126,51 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
+add_orion_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
     """M{(Gamma-1.0)*e, where e is thermal energy density
        NB: this will need to be modified for radiation
     """
     return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+add_orion_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field,data):
     return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+add_orion_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+
+# particle fields
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return na.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+
+    return _Particles
+
+_particle_field_list = ["mass", 
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_orion_field("particle_%s" % pf, function=pfunc,
+                    validators = [ValidateSpatial(0)],
+                    particle_type=True)
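
With Orion's ThermalEnergy defined per unit volume, the Temperature field above is T = (Gamma - 1) * mu * m_H * ThermalEnergy / (k_B * Density). A quick order-of-magnitude check with hand-picked cgs values (none taken from a real dataset):

    mh, kboltz = 1.67e-24, 1.38e-16              # g, erg/K
    gamma, mu = 5.0 / 3.0, 0.6
    rho, eden = 1.0e-24, 1.0e-12                 # g/cm^3, erg/cm^3
    T = (gamma - 1.0) * mu * mh * eden / (kboltz * rho)   # ~4.8e3 K
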


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -39,11 +39,51 @@
     def modify(self, field):
         return field.swapaxes(0,2)
 
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return na.array(particles)
+
     def _read_data_set(self,grid,field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
 
         """
+
         filen = os.path.expanduser(grid.filename[field])
         off = grid._offset[field]
         inFile = open(filen,'rb')


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/orion/setup.py
--- a/yt/frontends/orion/setup.py
+++ b/yt/frontends/orion/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('orion',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('orion', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config




diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -321,6 +321,8 @@
         seconds = self.parameters['unit_t']
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
+        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
         self.conversion_factors["Density"] = self.parameters['unit_d']
         vel_u = self.parameters['unit_l'] / self.parameters['unit_t']
         self.conversion_factors["x-velocity"] = vel_u


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/ramses/setup.py
--- a/yt/frontends/ramses/setup.py
+++ b/yt/frontends/ramses/setup.py
@@ -1,10 +1,14 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('ramses',parent_package,top_path)
+    config = Configuration('ramses', parent_package, top_path)
     config.add_extension("_ramses_reader",
         ["yt/frontends/ramses/_ramses_reader.pyx"],
         language="c++",
@@ -12,6 +16,6 @@
         libraries=["stdc++"],
         depends=glob.glob("yt/frontends/ramses/ramses_headers/*.hh")
         )
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -1,10 +1,11 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('frontends',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('frontends', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_subpackage("gdf")
     config.add_subpackage("chombo")


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -58,7 +58,7 @@
         return self.fields[grid.id].keys()
 
     def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(0,-0), slice(0,-0), slice(0,-0)]
+        sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
         sl = tuple(reversed(sl))
         tr = self.fields[grid.id][field][sl].swapaxes(0,2)
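
The one-line fix above matters because -0 == 0 in Python, so the old slice(0, -0) selected nothing at all, while slice(None) keeps the full axis. A two-line demonstration:

    import numpy as np

    a = np.arange(10)
    assert a[slice(0, -0)].size == 0             # old form: empty selection
    assert a[slice(None)].size == 10             # new form: whole axis
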


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/stream/setup.py
--- a/yt/frontends/stream/setup.py
+++ b/yt/frontends/stream/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('stream',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('stream', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/frontends/tiger/setup.py
--- a/yt/frontends/tiger/setup.py
+++ b/yt/frontends/tiger/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('tiger',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('tiger', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -24,6 +24,7 @@
 """
 
 import time, types, signal, inspect, traceback, sys, pdb, os
+import contextlib
 import warnings, struct, subprocess
 from math import floor, ceil
 
@@ -93,6 +94,22 @@
 except ImportError:
     pass
 
+def __memory_fallback(pid):
+    """
+    Get process memory from a system call.
+    """
+    value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
+    if len(value) == 1: return float(value[0])
+    value.pop(0)
+    for line in value:
+        online = line.split()
+        if online[0] != pid: continue
+        try:
+            return float(online[2])
+        except:
+            return 0.0
+    return 0.0
+
 def get_memory_usage():
     """
     Returning resident size in megabytes
@@ -101,10 +118,10 @@
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
@@ -546,3 +563,15 @@
        isinstance(length[1], types.StringTypes):
        length = length[0]/pf[length[1]]
     return length
+
+ at contextlib.contextmanager
+def parallel_profile(prefix):
+    import cProfile
+    from yt.config import ytcfg
+    fn = "%s_%04i.cprof" % (prefix,
+                ytcfg.getint("yt", "__topcomm_parallel_rank"))
+    p = cProfile.Profile()
+    p.enable()
+    yield
+    p.disable()
+    p.dump_stats(fn)
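
parallel_profile above is a contextmanager that wraps a block in cProfile and writes one dump per task, named from the prefix and the topcomm rank. A hedged usage sketch, assuming yt is importable and that __topcomm_parallel_rank is set in the config (it defaults to 0 for a serial run), with a throwaway loop standing in for real work:

    from yt.funcs import parallel_profile

    with parallel_profile("projection"):
        total = sum(i * i for i in range(10 ** 6))
    # leaves projection_0000.cprof behind for inspection with pstats
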


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/opengl_widgets/setup.py
--- a/yt/gui/opengl_widgets/setup.py
+++ b/yt/gui/opengl_widgets/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('opengl_widgets',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('opengl_widgets', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -130,7 +130,6 @@
 
     def execute_one(self, code, hide):
         self.repl.executed_cell_texts.append(code)
-
         result = ProgrammaticREPL.execute(self.repl, code)
         if self.repl.debug:
             print "==================== Cell Execution ===================="
@@ -562,10 +561,12 @@
         _tfield_list = list(set(_tpf.h.field_list + _tpf.h.derived_field_list))
         _tfield_list.sort()
         _tcb = _tpw._get_cbar_image()
+        _ttrans = _tpw._field_transform[_tpw._current_field].name
         _twidget_data = {'fields': _tfield_list,
                          'initial_field': _tfield,
                          'title': "%%s Slice" %% (_tpf),
-                         'colorbar': _tcb}
+                         'colorbar': _tcb,
+                         'initial_transform' : _ttrans}
         """ % dict(pfname = pfname,
                    center_string = center_string,
                    axis = inv_axis_names[axis],


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/reason/html/js/functions.js
--- a/yt/gui/reason/html/js/functions.js
+++ b/yt/gui/reason/html/js/functions.js
@@ -65,7 +65,6 @@
                 repl_input.get("input_line").setValue("");
             }
             if (OutputContainer.items.length > 1) {
-                examine = cell;
                 OutputContainer.body.dom.scrollTop = 
                 OutputContainer.body.dom.scrollHeight -
                 cell.body.dom.scrollHeight - 20;
@@ -142,7 +141,6 @@
             iconCls: 'pf_icon'}));
         this_pf = treePanel.root.lastChild
         Ext.each(pf.objects, function(obj, obj_index) {
-            examine = this_pf;
             this_pf.appendChild(new Ext.tree.TreeNode(
                 {text: obj.name,
                  leaf: true,






diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -43,7 +43,78 @@
     }
 
     this.widget_keys = new Ext.KeyMap(document, [
-        {key: 'z', fn: function(){control_panel.get("zoom10x").handler();}}
+        {key: 'z',
+         shift: false,
+         fn: function(){
+               control_panel.get("zoom2x").handler();
+            }
+        },
+        {key: 'Z',
+         shift: true,
+         fn: function(){
+               control_panel.get("zoom10x").handler();
+            }
+        },
+        {key: 'x',
+         shift: false,
+         fn: function(){
+               control_panel.get("zoomout2x").handler();
+            }
+        },
+        {key: 'X',
+         shift: true,
+         fn: function(){
+               control_panel.get("zoomout10x").handler();
+            }
+        },
+        {key: 'k',
+         shift: false,
+         fn: function(){
+               control_panel.get("singleuparrow").handler();
+            }
+        },
+        {key: 'j',
+         shift: false,
+         fn: function(){
+               control_panel.get("singledownarrow").handler();
+            }
+        },
+        {key: 'h',
+         shift: false,
+         fn: function(){
+               control_panel.get("singleleftarrow").handler();
+            }
+        },
+        {key: 'l',
+         shift: false,
+         fn: function(){
+               control_panel.get("singlerightarrow").handler();
+            }
+        },
+        {key: 'K',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubleuparrow").handler();
+            }
+        },
+        {key: 'J',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubledownarrow").handler();
+            }
+        },
+        {key: 'H',
+         shift: true,
+         fn: function(){
+               control_panel.get("doubleleftarrow").handler();
+            }
+        },
+        {key: 'L',
+         shift: true,
+         fn: function(){
+               control_panel.get("doublerightarrow").handler();
+            }
+        },
     ]);
     var widget_keys = this.widget_keys;
     widget_keys.disable();
@@ -74,11 +145,13 @@
                         id: "img_" + this.id,
                         width: 400,
                         height: 400,
+                        draggable: false,
                     },
                     x: 100,
                     y: 10,
                     width: 400,
                     height: 400,
+                    draggable: false,
                     listeners: {
                         afterrender: function(c){
                             c.el.on('click', function(e){
@@ -92,6 +165,25 @@
                                 yt_rpc.ExtDirectREPL.execute(
                                 {code:cc, hide:true}, cell_finished); 
                             });
+                            c.el.on('mousedown', function(e){
+                                c.drag_start = true;
+                                c.drag_start_pos = e.getXY();
+                            });
+                            c.el.on('mouseup', function(e){
+                                c.drag_start = false;
+                                drag_stop = e.getXY();
+                                delta_x = drag_stop[0] - c.drag_start_pos[0];
+                                delta_y = drag_stop[1] - c.drag_start_pos[1];
+                                if (((delta_x < -10) || (delta_x > 10)) ||
+                                    ((delta_y < -10) || (delta_y > 10))) {
+                                    rel_x = -delta_x / 400;
+                                    rel_y = -delta_y / 400;
+                                    cc = python_varname + '.pan_rel((' + 
+                                        rel_x + ',' + rel_y + '))';
+                                    yt_rpc.ExtDirectREPL.execute(
+                                    {code:cc, hide:true}, cell_finished); 
+                                }
+                            });
                         }
                     }
                 }, {
@@ -159,6 +251,7 @@
                 /* the single buttons for 10% pan*/
                     xtype:'button',
                     iconCls: 'singleuparrow',
+                    id: 'singleuparrow',
                     //text: 'North',
                     x: 40,
                     y: 10,
@@ -170,6 +263,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'singlerightarrow',
+                    id: 'singlerightarrow',
                     //text:'East',
                     x : 60,
                     y : 30,
@@ -182,6 +276,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'singledownarrow',
+                    id: 'singledownarrow',
                     //text: 'South',
                     x: 40,
                     y: 50,
@@ -194,6 +289,7 @@
                 }, {
                     xtype: 'button',
                     iconCls: 'singleleftarrow',
+                    id: 'singleleftarrow',
                     //text: 'West',
                     x: 20,
                     y: 30,
@@ -208,6 +304,7 @@
                 {
                     xtype:'button',
                     iconCls: 'doubleuparrow',
+                    id:'doubleuparrow',
                     //text: 'North',
                     x: 40,
                     y: 80,
@@ -219,6 +316,7 @@
                 }, {
                     xtype:'button',
                     iconCls: 'doublerightarrow',
+                    id:'doublerightarrow',
                     //text:'East',
                     x : 60,
                     y : 100,
@@ -232,6 +330,7 @@
                     xtype:'button',
                     iconCls: 'doubledownarrow',
                     //text: 'South',
+                    id: 'doubledownarrow',
                     x: 40,
                     y: 120,
                     handler: function(b,e) {
@@ -243,6 +342,7 @@
                 }, {
                     xtype: 'button',
                     iconCls: 'doubleleftarrow',
+                    id: 'doubleleftarrow',
                     //text: 'West',
                     x: 20,
                     y: 100,
@@ -270,6 +370,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom In 2x',
+                    id: "zoom2x",
                     x: 10,
                     y: 185,
                     width: 80,
@@ -282,6 +383,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom Out 2x',
+                    id:'zoomout2x',
                     x: 10,
                     y: 210,
                     width: 80,
@@ -294,6 +396,7 @@
                 },{
                     xtype: 'button',
                     text: 'Zoom Out 10x',
+                    id:'zoomout10x',
                     x: 10,
                     y: 235,
                     width: 80,
@@ -365,11 +468,208 @@
                           html: 'Welcome to the Plot Window.',
                           height: 200,
                         }, {
+                          xtype: 'tabpanel',
+                          id: 'editor_panel',
+                          flex: 1,
+                          activeTab: 0,
+                          items: [
+                        {
                           xtype: 'panel',
                           title: 'Plot Editor',
                           id: 'plot_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
                           flex: 1,
-                        }]
+                          items : [
+                             {
+                               x: 10,
+                               y: 20,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Display',
+                             },
+                             {
+                               x: 80,
+                               y: 20,
+                               width : 80,
+                               xtype: 'combo',
+                               editable: false,
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               store: ['log10', 'linear'],
+                               value: widget_data['initial_transform'],
+                               listeners: {select: function(combo, record, index){ 
+                                   var newValue = '"' + record.data['field1'] + '"';
+                                   yt_rpc.ExtDirectREPL.execute(
+                                       {code:python_varname + '.set_transform('
+                                         + python_varname + '._current_field, '
+                                         + newValue + ')', hide:false},
+                                         cell_finished);
+                               }}
+                             },
+                             {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Colormap',
+                             },
+                             {
+                               x: 80,
+                               y: 60,
+                               width : 140,
+                               xtype: 'combo',
+                               editable: false,
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               store: ['algae', 'RdBu', 'gist_stern',  
+                                       'hot', 'jet', 'kamae', 
+                                        'B-W LINEAR', 'BLUE',
+                                        'GRN-RED-BLU-WHT', 'RED TEMPERATURE',
+                                        'BLUE', 'STD GAMMA-II', 'PRISM',
+                                        'RED-PURPLE', 'GREEN', 'GRN',
+                                        'GREEN-PINK', 'BLUE-RED', '16 LEVEL',
+                                        'RAINBOW', 'STEPS', 'STERN SPECIAL',
+                                        'Haze', 'Blue - Pastel - Red',
+                                        'Pastels', 'Hue Sat Lightness 1',
+                                        'Hue Sat Lightness 2', 'Hue Sat Value 1',
+                                        'Hue Sat Value 2', 'Purple-Red + Stripes',
+                                        'Beach', 'Mac Style', 'Eos A', 'Eos B',
+                                        'Hardcandy', 'Nature', 'Ocean', 'Peppermint',
+                                        'Plasma', 'Blue-Red', 'Rainbow', 'Blue Waves',
+                                        'Volcano', 'Waves', 'Rainbow18',
+                                        'Rainbow + white', 'Rainbow + black'],
+                               value: 'algae',
+                               listeners: {select: function(combo, record, index){ 
+                                   var newValue = '"' + record.data['field1'] + '"';
+                                   yt_rpc.ExtDirectREPL.execute(
+                                       {code:python_varname + '.set_cmap('
+                                         + python_varname + '._current_field, '
+                                         + newValue + ')', hide:false},
+                                         cell_finished);
+                               }}
+                             }
+                          ]
+                        }, {
+                          xtype: 'panel',
+                          title: 'Contours',
+                          id: 'contour_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
+                          flex: 1,
+                          items : [
+                             {
+                               x: 10,
+                               y: 20,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Field',
+                             }, {
+                               x: 80,
+                               y: 20,
+                               width : 160,
+                               xtype: 'combo',
+                               editable: false,
+                               id: 'field',
+                               triggerAction: 'all',
+                               validateOnBlur: false,
+                               value:widget_data['initial_field'],
+                               store: widget_data['fields'],
+                             }, {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Levels',
+                             }, {
+                               x: 80,
+                               y: 60,
+                               width : 160,
+                               xtype: 'slider',
+                               id: 'ncont',
+                               minValue: 0,
+                               maxValue: 10,
+                               value: 5,
+                               increment: 1,
+                               plugins: new Ext.slider.Tip(),
+                             }, {
+                               x: 10,
+                               y: 100,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Logspaced',
+                             }, {
+                               x: 80,
+                               y: 100,
+                               width : 160,
+                               xtype: 'checkbox',
+                               id: 'logit',
+                               checked: true,
+                             }, {
+                               x: 10,
+                               y: 180,
+                               width: 80,
+                               xtype: 'button',
+                               text: 'Apply',
+                               handler: function(b, e) {
+                                  field = contour_window.get('field').getValue();
+                                  ncont = contour_window.get('ncont').getValue();
+                                  logit = contour_window.get('logit').getValue();
+                                  if (logit == false) logit = 'False';
+                                  else if (logit == true) logit = 'True';
+                                  yt_rpc.ExtDirectREPL.execute(
+                                      {code:python_varname
+                                       + '.set_contour_info("' + field + '", '
+                                       + ncont + ', ' + logit + ')',
+                                        hide:false},
+                                      cell_finished);
+                               }
+                             }
+                          ]
+                        }, {
+                          xtype: 'panel',
+                          title: 'Velocity Vectors',
+                          id: 'vector_edit',
+                          style: {fontFamily: '"Inconsolata", monospace'},
+                          layout: 'absolute',
+                          flex: 1,
+                          items : [
+                             {
+                               x: 10,
+                               y: 60,
+                               width: 70,
+                               xtype: 'label',
+                               text: 'Skip Factor',
+                             }, {
+                               x: 80,
+                               y: 60,
+                               width : 160,
+                               xtype: 'slider',
+                               id: 'skip',
+                               minValue: 1,
+                               maxValue: 64,
+                               value: 32,
+                               increment: 1,
+                               plugins: new Ext.slider.Tip(),
+                             }, {
+                               x: 10,
+                               y: 180,
+                               width: 80,
+                               xtype: 'button',
+                               text: 'Apply',
+                               handler: function(b, e) {
+                                  skip = vector_window.get('skip').getValue();
+                                  yt_rpc.ExtDirectREPL.execute(
+                                      {code:python_varname
+                                       + '.set_vector_info('+skip+')',
+                                        hide:false},
+                                      cell_finished);
+                               }
+                             }
+                          ]
+                        }
+                        ] } /* tabpanel items and entry */
+                        ]
                 }
             ]
         }
@@ -384,8 +684,15 @@
     this.image_panel = this.panel.get("image_panel_"+python_varname);
     this.ticks = this.panel.get("ticks_"+python_varname);
     var ticks = this.ticks;
+    var colorbar = this.panel.get("colorbar_"+python_varname);
     this.metadata_panel = this.panel.get("rhs_panel_" + python_varname).get("metadata_" + python_varname);
     this.zoom_scroll = this.panel.get("slider_" + python_varname);
+    var contour_window = this.panel.get("rhs_panel_" + python_varname);
+    contour_window = contour_window.get("editor_panel");
+    contour_window = contour_window.get("contour_edit");
+    var vector_window = this.panel.get("rhs_panel_" + python_varname);
+    vector_window = vector_window.get("editor_panel");
+    vector_window = vector_window.get("vector_edit");
     var image_dom = this.image_panel.el.dom;
     var control_panel = this.panel;
     var metadata_string;
@@ -393,12 +700,10 @@
     this.accept_results = function(payload) {
         this.image_panel.el.dom.src = "data:image/png;base64," + payload['image_data'];
         this.zoom_scroll.setValue(0, payload['zoom'], true);
-        examine = this.metadata_panel;
         this.metadata_panel.update(payload['metadata_string']);
         metadata_string = payload['metadata_string'];
         ticks.removeAll();
         Ext.each(payload['ticks'], function(tick, index) {
-            console.log(tick);
             ticks.add({xtype:'panel',
                        width: 10, height:1,
                        style: 'background-color: #000000;',
@@ -411,9 +716,11 @@
                               'font-size: 12px;',
                        html: '' + tick[2] + '',
                        x:12, y: 4 + tick[0]});
-            examine = tick;
         });
-        examine = payload['ticks'];
+        if (payload['colorbar_image'] != null) {
+            colorbar.el.dom.src = "data:image/png;base64," +
+                payload['colorbar_image'];
+        }
         ticks.doLayout();
     }
 


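[Editor's note] For readers skimming the JavaScript above: the new mousedown/mouseup handlers turn a mouse drag on the plot image into a fractional pan, ignoring drags of 10 pixels or less and dividing the pixel offset by the 400-pixel image size before calling pan_rel. A minimal Python sketch of that arithmetic follows; the function name and keyword defaults are the editor's, not part of the commit.

def drag_to_pan_rel(start, stop, image_size=400, dead_zone=10):
    """Convert a pixel drag into the (rel_x, rel_y) pair passed to pan_rel."""
    delta_x = stop[0] - start[0]
    delta_y = stop[1] - start[1]
    # Drags within the dead zone are treated as ordinary clicks: no pan.
    if abs(delta_x) <= dead_zone and abs(delta_y) <= dead_zone:
        return None
    return (-delta_x / float(image_size), -delta_y / float(image_size))

# A 40-pixel drag to the right pans the view left by 10% of the window:
# drag_to_pan_rel((100, 100), (140, 100)) returns (-0.1, -0.0)
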
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/reason/setup.py
--- a/yt/gui/reason/setup.py
+++ b/yt/gui/reason/setup.py
@@ -1,10 +1,14 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('reason',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('reason', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/gui/setup.py
--- a/yt/gui/setup.py
+++ b/yt/gui/setup.py
@@ -1,12 +1,15 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
+import os
+import sys
+import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('gui',parent_package,top_path)
+    config = Configuration('gui', parent_package, top_path)
     config.add_subpackage('opengl_widgets')
     config.add_subpackage('reason')
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -60,7 +60,8 @@
     derived_field, add_field, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, \
     ValidateSpatial, ValidateGridType, \
-    TimeSeriesData, AnalysisTask, analysis_task
+    TimeSeriesData, AnalysisTask, analysis_task, \
+    ParticleTrajectoryCollection
 
 from yt.data_objects.derived_quantities import \
     add_quantity, quantity_info
@@ -115,7 +116,7 @@
     PlotCollection, PlotCollectionInteractive, \
     get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
     callback_registry, write_bitmap, write_image, annotate_image, \
-    apply_colormap, scale_image, write_projection
+    apply_colormap, scale_image, write_projection, write_fits
 
 from yt.visualization.volume_rendering.api import \
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/setup.py
--- a/yt/setup.py
+++ b/yt/setup.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys
+import os
+import sys
+
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -78,3 +78,32 @@
         field[i1  ,j1-1,k1  ] += mass[n] * dx2 * dy  * dz2
         field[i1-1,j1  ,k1  ] += mass[n] * dx  * dy2 * dz2
         field[i1  ,j1  ,k1  ] += mass[n] * dx2 * dy2 * dz2
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def sample_field_at_positions(np.ndarray[np.float64_t, ndim=3] arr,
+                              np.ndarray[np.float64_t, ndim=1] left_edge,
+                              np.ndarray[np.float64_t, ndim=1] right_edge,
+                              np.ndarray[np.float64_t, ndim=1] pos_x,
+                              np.ndarray[np.float64_t, ndim=1] pos_y,
+                              np.ndarray[np.float64_t, ndim=1] pos_z):
+    cdef np.float64_t idds[3], pp[3]
+    cdef int dims[3], npart, ind[3]
+    cdef int i, j
+    npart = pos_x.shape[0]
+    cdef np.ndarray[np.float64_t, ndim=1] sample 
+    sample = np.zeros(npart, dtype='float64')
+    for i in range(3):
+        dims[i] = arr.shape[i]
+        idds[i] = (<np.float64_t> dims[i]) / (right_edge[i] - left_edge[i])
+    for i in range(npart):
+        if not ((left_edge[0] <= pos_x[i] <= right_edge[0]) and 
+                (left_edge[1] <= pos_y[i] <= right_edge[1]) and
+                (left_edge[2] <= pos_z[i] <= right_edge[2])):
+            continue
+        ind[0] = <int> ((pos_x[i] - left_edge[0]) * idds[0])
+        ind[1] = <int> ((pos_y[i] - left_edge[1]) * idds[1])
+        ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
+        sample[i] = arr[ind[0], ind[1], ind[2]]
+    return sample

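[Editor's note] The Cython routine added above does nearest-cell sampling of a uniform 3D array at a set of particle positions. A rough NumPy restatement, offered only as a reading aid (it is not in the commit; the edges are assumed to be length-3 float arrays as in the Cython signature, and unlike the Cython loop this version clamps indices so positions sitting exactly on the right edge stay in bounds):

import numpy as np

def sample_field_at_positions_py(arr, left_edge, right_edge, pos_x, pos_y, pos_z):
    # Cells per unit length along each axis.
    idds = np.array(arr.shape, dtype="float64") / (right_edge - left_edge)
    sample = np.zeros(pos_x.shape[0], dtype="float64")
    pos = np.column_stack([pos_x, pos_y, pos_z])
    # Positions outside the box keep a sampled value of zero.
    inside = np.all((pos >= left_edge) & (pos <= right_edge), axis=1)
    ind = ((pos[inside] - left_edge) * idds).astype("int64")
    ind = np.minimum(ind, np.array(arr.shape) - 1)   # editor's addition
    sample[inside] = arr[ind[:, 0], ind[:, 1], ind[:, 2]]
    return sample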



diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/_amr_utils/FixedInterpolator.c
--- a/yt/utilities/_amr_utils/FixedInterpolator.c
+++ b/yt/utilities/_amr_utils/FixedInterpolator.c
@@ -96,6 +96,11 @@
          {0,0,1}, {1,0,1}, {1,1,1}, {0,1,1}};
 
     npy_float64 mu = ((isovalue - v1) / (v2 - v1));
+
+    if (fabs(1.0 - isovalue/v1) < 0.000001) mu = 0.0;
+    if (fabs(1.0 - isovalue/v2) < 0.000001) mu = 1.0;
+    if (fabs(v1/v2) < 0.000001) mu = 0.0;
+
     vl[0] = x; vl[1] = y; vl[2] = z;
     for (i=0;i<3;i++)
         vl[i] += dds[i] * cverts[vind1][i]


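[Editor's note] The FixedInterpolator.c change above guards the isosurface vertex-interpolation parameter against round-off when the isovalue coincides, to within one part in 10^6, with one of the two vertex values. The same logic restated in Python, purely as a sketch and not part of the commit:

def interp_parameter(isovalue, v1, v2, tol=1.0e-6):
    # Fractional distance of the isosurface crossing between vertex values v1
    # and v2; pinned to an endpoint when the isovalue effectively equals a
    # vertex value, or when v1 is negligible compared to v2.
    mu = (isovalue - v1) / (v2 - v1)
    if abs(1.0 - isovalue / v1) < tol:
        mu = 0.0
    if abs(1.0 - isovalue / v2) < tol:
        mu = 1.0
    if abs(v1 / v2) < tol:
        mu = 0.0
    return mu
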
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/_amr_utils/QuadTree.pyx
--- a/yt/utilities/_amr_utils/QuadTree.pyx
+++ b/yt/utilities/_amr_utils/QuadTree.pyx
@@ -43,6 +43,10 @@
     np.int64_t pos[2]
     QuadTreeNode *children[2][2]
 
+ctypedef void QTN_combine(QuadTreeNode *self,
+        np.float64_t *val, np.float64_t weight_val,
+        int nvals)
+
 cdef void QTN_add_value(QuadTreeNode *self,
         np.float64_t *val, np.float64_t weight_val,
         int nvals):
@@ -51,6 +55,14 @@
         self.val[i] += val[i]
     self.weight_val += weight_val
 
+cdef void QTN_max_value(QuadTreeNode *self,
+        np.float64_t *val, np.float64_t weight_val,
+        int nvals):
+    cdef int i
+    for i in range(nvals):
+        self.val[i] = fmax(val[i], self.val[i])
+    self.weight_val = 1.0
+
 cdef void QTN_refine(QuadTreeNode *self, int nvals):
     cdef int i, j, i1, j1
     cdef np.int64_t npos[2]
@@ -101,9 +113,16 @@
     cdef np.int64_t top_grid_dims[2]
     cdef int merged
     cdef int num_cells
+    cdef QTN_combine *combine
 
     def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
-                  int nvals):
+                  int nvals, style = "integrate"):
+        if style == "integrate":
+            self.combine = QTN_add_value
+        elif style == "mip":
+            self.combine = QTN_max_value
+        else:
+            raise NotImplementedError
         self.merged = 1
         cdef int i, j
         cdef QuadTreeNode *node
@@ -190,8 +209,12 @@
     @cython.wraparound(False)
     def frombuffer(self, np.ndarray[np.int32_t, ndim=1] refined,
                          np.ndarray[np.float64_t, ndim=2] values,
-                         np.ndarray[np.float64_t, ndim=1] wval):
-        self.merged = 1 # Just on the safe side
+                         np.ndarray[np.float64_t, ndim=1] wval,
+                         style):
+        if style == "mip" or style == -1:
+            self.merged = -1
+        elif style == "integrate" or style == 1:
+            self.merged = 1
         cdef int curpos = 0
         cdef QuadTreeNode *root
         self.num_cells = wval.shape[0]
@@ -241,7 +264,7 @@
             i = (pos[0] >= fac*(2*node.pos[0]+1))
             j = (pos[1] >= fac*(2*node.pos[1]+1))
             node = node.children[i][j]
-        QTN_add_value(node, val, weight_val, self.nvals)
+        self.combine(node, val, weight_val, self.nvals)
             
     @cython.cdivision(True)
     cdef QuadTreeNode *find_on_root_level(self, np.int64_t pos[2], int level):
@@ -335,12 +358,17 @@
                               np.float64_t *vtoadd,
                               np.float64_t wtoadd,
                               int cur_level):
-        cdef int i, j
+        cdef int i, j, n
         if cur_level == level:
             if node.children[0][0] != NULL: return 0
-            for i in range(self.nvals):
-                vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
-            wdata[curpos] = node.weight_val + wtoadd
+            if self.merged == -1:
+                for i in range(self.nvals):
+                    vdata[self.nvals * curpos + i] = fmax(node.val[i], vtoadd[i])
+                wdata[curpos] = 1.0
+            else:
+                for i in range(self.nvals):
+                    vdata[self.nvals * curpos + i] = node.val[i] + vtoadd[i]
+                wdata[curpos] = node.weight_val + wtoadd
             pdata[curpos * 2] = node.pos[0]
             pdata[curpos * 2 + 1] = node.pos[1]
             return 1
@@ -350,8 +378,14 @@
             for i in range(self.nvals):
                 vtoadd[i] += node.val[i]
             wtoadd += node.weight_val
+        elif self.merged == -1:
+            for i in range(self.nvals):
+                vtoadd[i] = node.val[i]
         for i in range(2):
             for j in range(2):
+                if self.merged == -1:
+                    for n in range(self.nvals):
+                        vtoadd[n] = node.val[n]
                 added += self.fill_from_level(node.children[i][j],
                         level, curpos + added, pdata, vdata, wdata,
                         vtoadd, wtoadd, cur_level + 1)
@@ -369,7 +403,8 @@
             free(self.root_nodes[i])
         free(self.root_nodes)
 
-cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2, int nvals):
+cdef void QTN_merge_nodes(QuadTreeNode *n1, QuadTreeNode *n2, int nvals,
+                          QTN_combine *func):
     # We have four choices when merging nodes.
     # 1. If both nodes have no refinement, then we add values of n2 to n1.
     # 2. If both have refinement, we call QTN_merge_nodes on all four children.
@@ -378,13 +413,13 @@
     # 4. If n1 has refinement and n2 does not, we add the value of n2 to n1.
     cdef int i, j
 
-    QTN_add_value(n1, n2.val, n2.weight_val, nvals)
+    func(n1, n2.val, n2.weight_val, nvals)
     if n1.children[0][0] == n2.children[0][0] == NULL:
         pass
     elif n1.children[0][0] != NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
-                QTN_merge_nodes(n1.children[i][j], n2.children[i][j], nvals)
+                QTN_merge_nodes(n1.children[i][j], n2.children[i][j], nvals, func)
     elif n1.children[0][0] == NULL and n2.children[0][0] != NULL:
         for i in range(2):
             for j in range(2):
@@ -395,14 +430,24 @@
     else:
         raise RuntimeError
 
-def merge_quadtrees(QuadTree qt1, QuadTree qt2):
+def merge_quadtrees(QuadTree qt1, QuadTree qt2, style = 1):
     cdef int i, j
     qt1.num_cells = 0
+    cdef QTN_combine *func
+    if style == 1:
+        qt1.merged = 1
+        func = QTN_add_value
+    elif style == -1:
+        qt1.merged = -1
+        func = QTN_max_value
+    else:
+        raise NotImplementedError
+    if qt1.merged != 0 or qt2.merged != 0:
+        assert(qt1.merged == qt2.merged)
     for i in range(qt1.top_grid_dims[0]):
         for j in range(qt1.top_grid_dims[1]):
             QTN_merge_nodes(qt1.root_nodes[i][j],
                             qt2.root_nodes[i][j],
-                            qt1.nvals)
+                            qt1.nvals, func)
             qt1.num_cells += qt1.count_total_cells(
                                 qt1.root_nodes[i][j])
-    qt1.merged = 1


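[Editor's note] The QuadTree changes above make the node-combine operation pluggable: the existing "integrate" style keeps summing values and weights (a weighted projection), while the new "mip" style keeps the running maximum and pins the weight to one (a maximum-intensity projection). Reduced to a toy Python form, with function names invented for illustration only:

def combine_integrate(node_val, node_weight, val, weight):
    # Accumulate weighted sums, as QTN_add_value does.
    return node_val + val, node_weight + weight

def combine_mip(node_val, node_weight, val, weight):
    # Keep the maximum seen so far, as QTN_max_value does.
    return max(node_val, val), 1.0
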
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -34,6 +34,89 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=1] wresult,
+                  np.ndarray[np.float64_t, ndim=1] bresult,
+                  np.ndarray[np.float64_t, ndim=1] mresult,
+                  np.ndarray[np.float64_t, ndim=1] qresult,
+                  np.ndarray[np.float64_t, ndim=1] used):
+    cdef int n, bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bin = bins_x[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bin] += (wresult[bin] * wval * (bval - mresult[bin])**2) / \
+            (wresult[bin] + wval)
+        wresult[bin] += wval
+        bresult[bin] += wval*bval
+        mresult[bin] += wval * (bval - mresult[bin]) / wresult[bin]
+        used[bin] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def bin_profile2d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=2] wresult,
+                  np.ndarray[np.float64_t, ndim=2] bresult,
+                  np.ndarray[np.float64_t, ndim=2] mresult,
+                  np.ndarray[np.float64_t, ndim=2] qresult,
+                  np.ndarray[np.float64_t, ndim=2] used):
+    cdef int n, bini, binj
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj] += (wresult[bini, binj] * wval * (bval - mresult[bini, binj])**2) / \
+            (wresult[bini, binj] + wval)
+        wresult[bini, binj] += wval
+        bresult[bini, binj] += wval*bval
+        mresult[bini, binj] += wval * (bval - mresult[bini, binj]) / wresult[bini, binj]
+        used[bini, binj] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def bin_profile3d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.int64_t, ndim=1] bins_z,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=3] wresult,
+                  np.ndarray[np.float64_t, ndim=3] bresult,
+                  np.ndarray[np.float64_t, ndim=3] mresult,
+                  np.ndarray[np.float64_t, ndim=3] qresult,
+                  np.ndarray[np.float64_t, ndim=3] used):
+    cdef int n, bini, binj, bink
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bink = bins_z[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj, bink] += (wresult[bini, binj, bink] * wval * (bval - mresult[bini, binj, bink])**2) / \
+            (wresult[bini, binj, bink] + wval)
+        wresult[bini, binj, bink] += wval
+        bresult[bini, binj, bink] += wval*bval
+        mresult[bini, binj, bink] += wval * (bval - mresult[bini, binj, bink]) / wresult[bini, binj, bink]
+        used[bini, binj, bink] = 1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_color_bounds(np.ndarray[np.float64_t, ndim=1] px,
                      np.ndarray[np.float64_t, ndim=1] py,
                      np.ndarray[np.float64_t, ndim=1] pdx,
@@ -228,7 +311,7 @@
         if n_unique > my_max:
             best_dim = dim
             my_max = n_unique
-            my_split = (n_unique-1)/2
+            my_split = (n_unique)/2
     # I recognize how lame this is.
     cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
     for i in range(my_max):


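[Editor's note] The bin_profile1d/2d/3d routines added above mirror the Bin*DProfile functions removed from data_point_utilities.c further down in this changeset, and additionally carry, per bin, a running weighted mean m and a running weighted sum of squared deviations q via the standard incremental update. One step of that update, restated in plain Python as a reading aid (not part of the commit; like the Cython, it assumes the accumulated weight is nonzero once a bin is touched):

def update_bin(w, b, m, q, wval, bval):
    # w: accumulated weight, b: accumulated weight*value,
    # m: running weighted mean, q: running weighted sum of squared deviations.
    q = q + (w * wval * (bval - m) ** 2) / (w + wval)
    w = w + wval
    b = b + wval * bval
    m = m + wval * (bval - m) / w
    return w, b, m, q
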
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/amr_kdtree/__init__.py
--- a/yt/utilities/amr_kdtree/__init__.py
+++ b/yt/utilities/amr_kdtree/__init__.py
@@ -1,4 +1,3 @@
 """
 Initialize amr_kdtree
 """
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/answer_testing/__init__.py
--- a/yt/utilities/answer_testing/__init__.py
+++ b/yt/utilities/answer_testing/__init__.py
@@ -23,7 +23,8 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import runner, output_tests
+import runner
+import output_tests
 from runner import RegressionTestRunner
 
 from output_tests import RegressionTest, SingleOutputTest, \


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/answer_testing/api.py
--- a/yt/utilities/answer_testing/api.py
+++ b/yt/utilities/answer_testing/api.py
@@ -57,4 +57,3 @@
     TestBooleanANDParticleQuantity, \
     TestBooleanORParticleQuantity, \
     TestBooleanNOTParticleQuantity
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/answer_testing/setup.py
--- a/yt/utilities/answer_testing/setup.py
+++ b/yt/utilities/answer_testing/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('answer_testing',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('answer_testing', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -47,10 +47,11 @@
 def _add_arg(sc, arg):
     if isinstance(arg, types.StringTypes):
         arg = _common_options[arg].copy()
+    argc = dict(arg.items())
     argnames = []
-    if "short" in arg: argnames.append(arg.pop('short'))
-    if "long" in arg: argnames.append(arg.pop('long'))
-    sc.add_argument(*argnames, **arg)
+    if "short" in argc: argnames.append(argc.pop('short'))
+    if "long" in argc: argnames.append(argc.pop('long'))
+    sc.add_argument(*argnames, **argc)
 
 class YTCommand(object):
     args = ()
@@ -63,12 +64,14 @@
         def __init__(cls, name, b, d):
             type.__init__(cls, name, b, d)
             if cls.name is not None:
-                sc = subparsers.add_parser(cls.name,
-                    description = cls.description,
-                    help = cls.description)
-                sc.set_defaults(func=cls.run)
-                for arg in cls.args:
-                    _add_arg(sc, arg)
+                names = ensure_list(cls.name)
+                for name in names:
+                    sc = subparsers.add_parser(name,
+                        description = cls.description,
+                        help = cls.description)
+                    sc.set_defaults(func=cls.run)
+                    for arg in cls.args:
+                        _add_arg(sc, arg)
 
     @classmethod
     def run(cls, args):
@@ -149,6 +152,10 @@
                    dest="center", default=None,
                    nargs=3,
                    help="Center, space separated (-1 -1 -1 for max)"),
+    max     = dict(short="-m", long="--max",
+                   action="store_true",
+                   dest="max",default=False,
+                   help="Center the plot on the density maximum"),
     bn      = dict(short="-b", long="--basename",
                    action="store", type=str,
                    dest="basename", default=None,
@@ -988,7 +995,8 @@
         import IPython
         if IPython.__version__.startswith("0.10"):
             api_version = '0.10'
-        elif IPython.__version__.startswith("0.11"):
+        elif IPython.__version__.startswith("0.11") or \
+             IPython.__version__.startswith("0.12"):
             api_version = '0.11'
 
         local_ns = yt.mods.__dict__.copy()
@@ -1003,11 +1011,7 @@
         else:
             from IPython.config.loader import Config
             cfg = Config()
-            cfg.InteractiveShellEmbed.local_ns = local_ns
-            IPython.embed(config=cfg)
-            from IPython.frontend.terminal.embed import InteractiveShellEmbed
-            ipshell = InteractiveShellEmbed(config=cfg)
-
+            IPython.embed(config=cfg,user_ns=local_ns)
 
 class YTMapserverCmd(YTCommand):
     args = ("proj", "field", "weight",
@@ -1102,7 +1106,9 @@
 class YTPlotCmd(YTCommand):
     args = ("width", "unit", "bn", "proj", "center",
             "zlim", "axis", "field", "weight", "skip",
-            "cmap", "output", "grids", "time", "pf")
+            "cmap", "output", "grids", "time", "pf",
+            "max")
+    
     name = "plot"
     
     description = \
@@ -1117,6 +1123,8 @@
         if args.center == (-1,-1,-1):
             mylog.info("No center fed in; seeking.")
             v, center = pf.h.find_max("Density")
+        if args.max:
+            v, center = pf.h.find_max("Density")
         elif args.center is None:
             center = 0.5*(pf.domain_left_edge + pf.domain_right_edge)
         center = na.array(center)
@@ -1140,76 +1148,6 @@
         if not os.path.isdir(args.output): os.makedirs(args.output)
         pc.save(os.path.join(args.output,"%s" % (pf)))
 
-class YTReasonCmd(YTCommand):
-    name = "reason"
-    args = (
-            dict(short="-o", long="--open-browser", action="store_true",
-                 default = False, dest='open_browser',
-                 help="Open a web browser."),
-            dict(short="-p", long="--port", action="store",
-                 default = 0, dest='port',
-                 help="Port to listen on"),
-            dict(short="-f", long="--find", action="store_true",
-                 default = False, dest="find",
-                 help="At startup, find all *.hierarchy files in the CWD"),
-            dict(short="-d", long="--debug", action="store_true",
-                 default = False, dest="debug",
-                 help="Add a debugging mode for cell execution")
-            )
-    description = \
-        """
-        Run the Web GUI Reason
-        """
-
-    def __call__(self, args):
-        # We have to do a couple things.
-        # First, we check that YT_DEST is set.
-        if "YT_DEST" not in os.environ:
-            print
-            print "*** You must set the environment variable YT_DEST ***"
-            print "*** to point to the installation location!        ***"
-            print
-            sys.exit(1)
-        if args.port == 0:
-            # This means, choose one at random.  We do this by binding to a
-            # socket and allowing the OS to choose the port for that socket.
-            import socket
-            sock = socket.socket()
-            sock.bind(('', 0))
-            args.port = sock.getsockname()[-1]
-            del sock
-        elif args.port == '-1':
-            port = raw_input("Desired yt port? ")
-            try:
-                args.port = int(port)
-            except ValueError:
-                print "Please try a number next time."
-                return 1
-        base_extjs_path = os.path.join(os.environ["YT_DEST"], "src")
-        if not os.path.isfile(os.path.join(base_extjs_path, "ext-resources", "ext-all.js")):
-            print
-            print "*** You are missing the ExtJS support files. You  ***"
-            print "*** You can get these by either rerunning the     ***"
-            print "*** install script installing, or downloading     ***"
-            print "*** them manually.                                ***"
-            print
-            sys.exit(1)
-        from yt.config import ytcfg;ytcfg["yt","__withinreason"]="True"
-        import yt.utilities.bottle as bottle
-        from yt.gui.reason.extdirect_repl import ExtDirectREPL
-        from yt.gui.reason.bottle_mods import uuid_serve_functions, PayloadHandler
-        hr = ExtDirectREPL(base_extjs_path)
-        hr.debug = PayloadHandler.debug = args.debug
-        if args.find:
-            # We just have to find them and store references to them.
-            command_line = ["pfs = []"]
-            for fn in sorted(glob.glob("*/*.hierarchy")):
-                command_line.append("pfs.append(load('%s'))" % fn[:-10])
-            hr.execute("\n".join(command_line))
-        bottle.debug()
-        uuid_serve_functions(open_browser=args.open_browser,
-                    port=int(args.port), repl=hr)
-
 class YTRenderCmd(YTCommand):
         
     args = ("width", "unit", "center","enhance",'outputfn',
@@ -1307,13 +1245,18 @@
         uncaught exceptions.
 
         """
+    args = (
+            dict(short="-t", long="--task", action="store",
+                 default = 0, dest='task',
+                 help="Open a web browser."),
+           )
 
     def __call__(self, args):
         import rpdb
-        rpdb.run_rpdb(int(task))
+        rpdb.run_rpdb(int(args.task))
 
-class YTServeCmd(YTCommand):
-    name = "serve"
+class YTGUICmd(YTCommand):
+    name = ["serve", "reason"]
     args = (
             dict(short="-o", long="--open-browser", action="store_true",
                  default = False, dest='open_browser',
@@ -1383,20 +1326,31 @@
                     port=int(args.port), repl=hr)
 
 class YTStatsCmd(YTCommand):
-    args = ('outputfn','bn','skip','pf')
+    args = ('outputfn','bn','skip','pf','field',
+            dict(long="--max", action='store_true', default=False,
+                 dest='max', help="Display maximum of field requested through -f option."),
+            dict(long="--min", action='store_true', default=False,
+                 dest='min', help="Display minimum of field requested through -f option."))
     name = "stats"
     description = \
         """
-        Print stats and max density for one or more datasets
+        Print stats and max/min value of a given field (if requested),
+        for one or more datasets
+
+        (default field is Density)
 
         """
 
     def __call__(self, args):
         pf = args.pf
         pf.h.print_stats()
-        if "Density" in pf.h.field_list:
-            v, c = pf.h.find_max("Density")
-            print "Maximum density: %0.5e at %s" % (v, c)
+        if args.field in pf.h.derived_field_list:
+            if args.max == True:
+                v, c = pf.h.find_max(args.field)
+                print "Maximum %s: %0.5e at %s" % (args.field, v, c)
+            if args.min == True:
+                v, c = pf.h.find_min(args.field)
+                print "Minimum %s: %0.5e at %s" % (args.field, v, c)
         if args.output is not None:
             t = pf.current_time * pf['years']
             open(args.output, "a").write(


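[Editor's note] One small but easy-to-miss fix in the command_line.py diff above: _add_arg now pops 'short'/'long' from a copy of the option dictionary, so a spec shared through _common_options is not mutated the first time a subcommand registers it. A toy demonstration of the failure mode, independent of yt and not part of the commit:

shared_opt = {"short": "-m", "long": "--max", "action": "store_true"}

def register(opt, copy_first):
    if copy_first:
        opt = dict(opt.items())   # what the commit now does
    # Flag names are popped out; everything left becomes add_argument kwargs.
    return [opt.pop(k) for k in ("short", "long") if k in opt]

print(register(shared_opt, copy_first=True))    # ['-m', '--max']
print(register(shared_opt, copy_first=False))   # ['-m', '--max'], but mutates...
print(register(shared_opt, copy_first=False))   # [] -- the shared dict lost its flags
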
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -273,359 +273,6 @@
 
 }
 
-static PyObject *_profile1DError;
-
-static PyObject *Py_Bin1DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOO",
-                &obins_x, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile1DError,
-                "Bin1DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR1(wresult, i) += wval;
-      *(npy_float64*)PyArray_GETPTR1(bresult, i) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR1(used, i) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile2DError;
-
-static PyObject *Py_Bin2DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *obins_y, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = bins_y = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOO",
-                &obins_x, &obins_y, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile2DError,
-                "Bin2DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR2(wresult, i, j) += wval;
-      *(npy_float64*)PyArray_GETPTR2(bresult, i, j) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR2(used, i, j) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile3DError;
-
-static PyObject *Py_Bin3DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j, k;
-    PyObject *obins_x, *obins_y, *obins_z, *owsource, *obsource, *owresult,
-             *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *bins_z, *wsource, *bsource, *wresult,
-                  *bresult, *used;
-    bins_x = bins_y = bins_z = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOOO",
-                &obins_x, &obins_y, &obins_z, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile3DError,
-                "Bin3DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bins_z = (PyArrayObject *) PyArray_FromAny(obins_z,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_z==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_z))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_z, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      k = *(npy_int64*)PyArray_GETPTR1(bins_z, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR3(wresult, i, j, k) += wval;
-      *(npy_float64*)PyArray_GETPTR3(bresult, i, j, k) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR3(used, i, j, k) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(bins_z); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(bins_z); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
 static PyObject *_dataCubeError;
 
 static PyObject *DataCubeGeneric(PyObject *obj, PyObject *args,
@@ -1430,7 +1077,7 @@
                     0, NULL);
     if(xi==NULL) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for xi.");
+             "FindContours: One dimension required for xi.");
     goto _fail;
     }
     
@@ -1439,7 +1086,7 @@
                     0, NULL);
     if((yi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(yi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for yi, same size as xi.");
+             "FindContours: One dimension required for yi, same size as xi.");
     goto _fail;
     }
     
@@ -1448,7 +1095,7 @@
                     0, NULL);
     if((zi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(zi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for zi, same size as xi.");
+             "FindContours: One dimension required for zi, same size as xi.");
     goto _fail;
     }
     
@@ -1789,9 +1436,6 @@
     {"Interpolate", Py_Interpolate, METH_VARARGS},
     {"DataCubeRefine", Py_DataCubeRefine, METH_VARARGS},
     {"DataCubeReplace", Py_DataCubeReplace, METH_VARARGS},
-    {"Bin1DProfile", Py_Bin1DProfile, METH_VARARGS},
-    {"Bin2DProfile", Py_Bin2DProfile, METH_VARARGS},
-    {"Bin3DProfile", Py_Bin3DProfile, METH_VARARGS},
     {"FindContours", Py_FindContours, METH_VARARGS},
     {"FindBindingEnergy", Py_FindBindingEnergy, METH_VARARGS},
     {"OutputFloatsToFile", Py_OutputFloatsToFile, METH_VARARGS},
@@ -1816,10 +1460,6 @@
     PyDict_SetItemString(d, "error", _interpolateError);
     _dataCubeError = PyErr_NewException("data_point_utilities.DataCubeError", NULL, NULL);
     PyDict_SetItemString(d, "error", _dataCubeError);
-    _profile2DError = PyErr_NewException("data_point_utilities.Profile2DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile2DError);
-    _profile3DError = PyErr_NewException("data_point_utilities.Profile3DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile3DError);
     _findContoursError = PyErr_NewException("data_point_utilities.FindContoursError", NULL, NULL);
     PyDict_SetItemString(d, "error", _findContoursError);
     _outputFloatsToFileError = PyErr_NewException("data_point_utilities.OutputFloatsToFileError", NULL, NULL);


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/delaunay/setup.py
--- a/yt/utilities/delaunay/setup.py
+++ b/yt/utilities/delaunay/setup.py
@@ -2,6 +2,7 @@
 from numpy.distutils.core import setup
 from numpy.distutils.misc_util import Configuration
 
+
 def configuration(parent_package='', top_path=None):
 
     config = Configuration('delaunay', parent_package, top_path)


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -48,3 +48,13 @@
 
     def __str__(self):
         return "The supplied axes are not orthogonal.  %s" % (self.axes)
+
+class YTNoDataInObjectError(YTException):
+    def __init__(self, obj):
+        self.obj_type = getattr(obj, "_type_name", "")
+
+    def __str__(self):
+        s = "The object requested has no data included in it."
+        if self.obj_type == "slice":
+            s += "  It may lie on a grid face.  Try offsetting slightly."
+        return s
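
[illustration, not part of the changeset] A minimal sketch of how the new
YTNoDataInObjectError reads for an empty slice; the _EmptySlice stand-in is
hypothetical and only supplies the _type_name attribute the exception inspects:

    from yt.utilities.exceptions import YTNoDataInObjectError

    class _EmptySlice(object):
        # hypothetical stand-in for a data object that intersected no cells
        _type_name = "slice"

    try:
        raise YTNoDataInObjectError(_EmptySlice())
    except YTNoDataInObjectError, err:
        # prints the base message plus the slice-specific grid-face hint
        print err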


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/minimal_representation.py
--- a/yt/utilities/minimal_representation.py
+++ b/yt/utilities/minimal_representation.py
@@ -23,7 +23,29 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import numpy as na
 import abc
+import json
+import urllib2
+from tempfile import TemporaryFile
+from yt.config import ytcfg
+from yt.funcs import *
+
+from .poster.streaminghttp import register_openers
+from .poster.encode import multipart_encode
+register_openers()
+
+class UploaderBar(object):
+    pbar = None
+    def __init__(self, my_name = ""):
+        self.my_name = my_name
+
+    def __call__(self, name, prog, total):
+        if self.pbar is None:
+            self.pbar = get_pbar("Uploading %s " % self.my_name, total)
+        self.pbar.update(prog)
+        if prog == total:
+            self.pbar.finish()
 
 class ContainerClass(object):
     pass
@@ -67,6 +89,45 @@
             setattr(cc, a, v)
         return cls(cc)
 
+    def upload(self):
+        api_key = ytcfg.get("yt","hub_api_key")
+        url = ytcfg.get("yt","hub_url")
+        metadata, (final_name, chunks) = self._generate_post()
+        for i in metadata:
+            if isinstance(metadata[i], na.ndarray):
+                metadata[i] = metadata[i].tolist()
+        metadata['obj_type'] = self.type
+        if len(chunks) == 0:
+            chunk_info = {'chunks': []}
+        else:
+            chunk_info = {'final_name' : final_name, 'chunks': []}
+            for cn, cv in chunks:
+                chunk_info['chunks'].append((cn, cv.size * cv.itemsize))
+        metadata = json.dumps(metadata)
+        chunk_info = json.dumps(chunk_info)
+        datagen, headers = multipart_encode({'metadata' : metadata,
+                                             'chunk_info' : chunk_info,
+                                             'api_key' : api_key})
+        request = urllib2.Request(url, datagen, headers)
+        # Actually do the request, and get the response
+        rv = urllib2.urlopen(request).read()
+        uploader_info = json.loads(rv)
+        new_url = url + "/handler/%s" % uploader_info['handler_uuid']
+        for i, (cn, cv) in enumerate(chunks):
+            remaining = cv.size * cv.itemsize
+            f = TemporaryFile()
+            na.save(f, cv)
+            f.seek(0)
+            pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i+1, len(chunks)))
+            datagen, headers = multipart_encode({'chunk_data' : f}, cb = pbar)
+            request = urllib2.Request(new_url, datagen, headers)
+            rv = urllib2.urlopen(request).read()
+
+        datagen, headers = multipart_encode({'status' : 'FINAL'})
+        request = urllib2.Request(new_url, datagen, headers)
+        rv = urllib2.urlopen(request).read()
+        return json.loads(rv)
+
 class FilteredRepresentation(MinimalRepresentation):
     def _generate_post(self):
         raise RuntimeError
@@ -77,6 +138,7 @@
                   "unique_identifier", "current_redshift", "output_hash",
                   "cosmological_simulation", "omega_matter", "omega_lambda",
                   "hubble_constant", "name")
+    type = 'simulation_output'
 
     def __init__(self, obj):
         super(MinimalStaticOutput, self).__init__(obj)
@@ -86,21 +148,35 @@
     def _generate_post(self):
         metadata = self._attrs
         chunks = []
-        return metadata, chunks
+        return (metadata, (None, chunks))
 
 class MinimalMappableData(MinimalRepresentation):
 
-    weight = "None"
-    _attr_list = ("field_data", "field", "weight", "axis", "output_hash")
+    _attr_list = ("field_data", "field", "weight_field", "axis", "output_hash",
+                  "vm_type")
 
     def _generate_post(self):
         nobj = self._return_filtered_object(("field_data",))
         metadata = nobj._attrs
         chunks = [(arr, self.field_data[arr]) for arr in self.field_data]
-        return (metadata, chunks)
+        return (metadata, ('field_data', chunks))
 
 class MinimalProjectionData(MinimalMappableData):
+    type = 'proj'
+    vm_type = "Projection"
 
-    def __init__(self, obj):
-        super(MinimalProjectionData, self).__init__(obj)
-        self.type = "proj"
+class MinimalSliceData(MinimalMappableData):
+    type = 'slice'
+    vm_type = "Slice"
+    weight_field = "None"
+
+class MinimalImageCollectionData(MinimalRepresentation):
+    type = "image_collection"
+    _attr_list = ("name", "output_hash", "images", "image_metadata")
+
+    def _generate_post(self):
+        nobj = self._return_filtered_object(("images",))
+        metadata = nobj._attrs
+        chunks = [(fn, d) for fn, d in self.images]
+        return (metadata, ('images', chunks))
+
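
[illustration, not part of the changeset] Rough sketch of driving the new
upload() path; `proj` is assumed to be an existing yt projection object, the
hub_api_key/hub_url values come from the user's yt configuration, and the
exact user-facing entry point may differ:

    from yt.utilities.minimal_representation import MinimalProjectionData

    # upload() JSON-encodes the metadata and chunk sizes, POSTs them with the
    # api_key to the hub URL, then streams each field_data array as its own
    # multipart request before sending a final 'status': 'FINAL' post.
    mrep = MinimalProjectionData(proj)
    reply = mrep.upload()   # decoded JSON response from the hub
    print reply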


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -154,6 +154,9 @@
     @wraps(func)
     def single_proc_results(self, *args, **kwargs):
         retval = None
+        if hasattr(self, "dont_wrap"):
+            if func.func_name in self.dont_wrap:
+                return func(self, *args, **kwargs)
         if self._processing or not self._distributed:
             return func(self, *args, **kwargs)
         comm = _get_comm((self,))
@@ -256,7 +259,7 @@
                 all_clear = 0
         else:
             all_clear = None
-        all_clear = comm.mpi_bcast_pickled(all_clear)
+        all_clear = comm.mpi_bcast(all_clear)
         if not all_clear: raise RuntimeError
     if parallel_capable: return root_only
     return func
@@ -349,7 +352,7 @@
         else:
             yield obj
     if parallel_capable:
-        communication_system.communicators.pop()
+        communication_system.pop()
     if storage is not None:
         # Now we have to broadcast it
         new_storage = my_communicator.par_combine_object(
@@ -500,9 +503,25 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def mpi_bcast_pickled(self, data):
-        data = self.comm.bcast(data, root=0)
-        return data
+    def mpi_bcast(self, data):
+        # The second check below makes sure that we know how to communicate
+        # this type of array. Otherwise, we'll pickle it.
+        if isinstance(data, na.ndarray) and \
+                get_mpi_type(data.dtype) is not None:
+            if self.comm.rank == 0:
+                info = (data.shape, data.dtype)
+            else:
+                info = ()
+            info = self.comm.bcast(info, root=0)
+            if self.comm.rank != 0:
+                data = na.empty(info[0], dtype=info[1])
+            mpi_type = get_mpi_type(info[1])
+            self.comm.Bcast([data, mpi_type], root = 0)
+            return data
+        else:
+            # Use pickled methods.
+            data = self.comm.bcast(data, root = 0)
+            return data
 
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
@@ -640,7 +659,7 @@
         return buf
 
     @parallel_passthrough
-    def merge_quadtree_buffers(self, qt):
+    def merge_quadtree_buffers(self, qt, merge_style):
         # This is a modified version of pairwise reduction from Lisandro Dalcin,
         # in the reductions demo of mpi4py
         size = self.comm.size
@@ -665,8 +684,8 @@
                     #print "RECEIVING FROM %02i on %02i" % (target, rank)
                     buf = self.recv_quadtree(target, tgd, args)
                     qto = QuadTree(tgd, args[2])
-                    qto.frombuffer(*buf)
-                    merge_quadtrees(qt, qto)
+                    qto.frombuffer(buf[0], buf[1], buf[2], merge_style)
+                    merge_quadtrees(qt, qto, style = merge_style)
                     del qto
                     #self.send_quadtree(target, qt, tgd, args)
             mask <<= 1
@@ -685,7 +704,7 @@
         self.refined = buf[0]
         if rank != 0:
             qt = QuadTree(tgd, args[2])
-            qt.frombuffer(*buf)
+            qt.frombuffer(buf[0], buf[1], buf[2], merge_style)
         return qt
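
[illustration, not part of the changeset] Calling convention for the renamed
mpi_bcast: NumPy arrays whose dtype maps to an MPI type are sent with a native
Bcast after broadcasting (shape, dtype); anything else falls back to the
pickled path, so all ranks must pass the same kind of object.  `comm` below
stands for a yt Communicator:

    import numpy as na

    if comm.rank == 0:
        data = na.arange(16, dtype="float64")
    else:
        # placeholder so every rank takes the ndarray branch; it is replaced
        # by an na.empty of the broadcast shape/dtype inside mpi_bcast
        data = na.empty(0, dtype="float64")
    data = comm.mpi_bcast(data)          # native comm.Bcast under the hood

    # non-array payloads take the pickled bcast path on every rank
    all_clear = comm.mpi_bcast(1 if comm.rank == 0 else None)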
 
 


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -3,23 +3,24 @@
 #
 
 # Masses
-mass_hydrogen_cgs = 1.67e-24 # g
-mass_electron_cgs = 9.11e-28 # g
-amu_cgs           = 1.66053886e-24 # g
+mass_hydrogen_cgs = 1.67e-24  # g
+mass_electron_cgs = 9.11e-28  # g
+amu_cgs           = 1.66053886e-24  # g
+mass_sun_cgs = 1.9891e33  # g
 # Velocities
-speed_of_light_cgs = 2.99792458e10 # cm/s, exact
+speed_of_light_cgs = 2.99792458e10  # cm/s, exact
 
 # Cross Sections
-cross_section_thompson_cgs = 6.65e-25 # cm^2
+cross_section_thompson_cgs = 6.65e-25  # cm^2
 
 # Charge
-charge_proton_cgs = 4.803e-10 # esu = 1.602e-19  Coulombs
+charge_proton_cgs = 4.803e-10  # esu = 1.602e-19  Coulombs
 
 # Physical Constants
-boltzmann_constant_cgs = 1.3806504e-16 # erg K^-1
-gravitational_constant_cgs  = 6.67428e-8 # cm^3 g^-1 s^-2
-planck_constant_cgs   = 6.62606896e-27 # erg s
-rho_crit_now = 1.8788e-29 # g times h^2 (critical mass for closure, Cosmology)
+boltzmann_constant_cgs = 1.3806504e-16  # erg K^-1
+gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
+planck_constant_cgs   = 6.62606896e-27  # erg s
+rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/poster/README
--- /dev/null
+++ b/yt/utilities/poster/README
@@ -0,0 +1,4 @@
+Poster is a module by Chris AtLee, licensed under the MIT License, included
+here.  For more information, see the poster home page:
+
+http://atlee.ca/software/poster


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/poster/__init__.py
--- /dev/null
+++ b/yt/utilities/poster/__init__.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2011 Chris AtLee
+# 
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# 
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# 
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+"""poster module
+
+Support for streaming HTTP uploads, and multipart/form-data encoding
+
+```poster.version``` is a 3-tuple of integers representing the version number.
+New releases of poster will always have a version number that compares greater
+than an older version of poster.
+New in version 0.6."""
+
+import streaminghttp
+import encode
+
+version = (0, 8, 1) # Thanks JP!


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/poster/encode.py
--- /dev/null
+++ b/yt/utilities/poster/encode.py
@@ -0,0 +1,414 @@
+"""multipart/form-data encoding module
+
+This module provides functions that facilitate encoding name/value pairs
+as multipart/form-data suitable for an HTTP POST or PUT request.
+
+multipart/form-data is the standard way to upload files over HTTP"""
+
+__all__ = ['gen_boundary', 'encode_and_quote', 'MultipartParam',
+        'encode_string', 'encode_file_header', 'get_body_size', 'get_headers',
+        'multipart_encode']
+
+try:
+    import uuid
+    def gen_boundary():
+        """Returns a random string to use as the boundary for a message"""
+        return uuid.uuid4().hex
+except ImportError:
+    import random, sha
+    def gen_boundary():
+        """Returns a random string to use as the boundary for a message"""
+        bits = random.getrandbits(160)
+        return sha.new(str(bits)).hexdigest()
+
+import urllib, re, os, mimetypes
+try:
+    from email.header import Header
+except ImportError:
+    # Python 2.4
+    from email.Header import Header
+
+def encode_and_quote(data):
+    """If ``data`` is unicode, return urllib.quote_plus(data.encode("utf-8"))
+    otherwise return urllib.quote_plus(data)"""
+    if data is None:
+        return None
+
+    if isinstance(data, unicode):
+        data = data.encode("utf-8")
+    return urllib.quote_plus(data)
+
+def _strify(s):
+    """If s is a unicode string, encode it to UTF-8 and return the results,
+    otherwise return str(s), or None if s is None"""
+    if s is None:
+        return None
+    if isinstance(s, unicode):
+        return s.encode("utf-8")
+    return str(s)
+
+class MultipartParam(object):
+    """Represents a single parameter in a multipart/form-data request
+
+    ``name`` is the name of this parameter.
+
+    If ``value`` is set, it must be a string or unicode object to use as the
+    data for this parameter.
+
+    If ``filename`` is set, it is what to say that this parameter's filename
+    is.  Note that this does not have to be the actual filename of any local file.
+
+    If ``filetype`` is set, it is used as the Content-Type for this parameter.
+    If unset it defaults to "text/plain; charset=utf8"
+
+    If ``filesize`` is set, it specifies the length of the file ``fileobj``
+
+    If ``fileobj`` is set, it must be a file-like object that supports
+    .read().
+
+    ``value`` and ``fileobj`` must not both be set; doing so will
+    raise a ValueError.
+
+    If ``fileobj`` is set, and ``filesize`` is not specified, then
+    the file's size will be determined first by stat'ing ``fileobj``'s
+    file descriptor, and if that fails, by seeking to the end of the file,
+    recording the current position as the size, and then by seeking back to the
+    beginning of the file.
+
+    ``cb`` is a callable which will be called from iter_encode with (self,
+    current, total), representing the current parameter, current amount
+    transferred, and the total size.
+    """
+    def __init__(self, name, value=None, filename=None, filetype=None,
+                        filesize=None, fileobj=None, cb=None):
+        self.name = Header(name).encode()
+        self.value = _strify(value)
+        if filename is None:
+            self.filename = None
+        else:
+            if isinstance(filename, unicode):
+                # Encode with XML entities
+                self.filename = filename.encode("ascii", "xmlcharrefreplace")
+            else:
+                self.filename = str(filename)
+            self.filename = self.filename.encode("string_escape").\
+                    replace('"', '\\"')
+        self.filetype = _strify(filetype)
+
+        self.filesize = filesize
+        self.fileobj = fileobj
+        self.cb = cb
+
+        if self.value is not None and self.fileobj is not None:
+            raise ValueError("Only one of value or fileobj may be specified")
+
+        if fileobj is not None and filesize is None:
+            # Try and determine the file size
+            try:
+                self.filesize = os.fstat(fileobj.fileno()).st_size
+            except (OSError, AttributeError):
+                try:
+                    fileobj.seek(0, 2)
+                    self.filesize = fileobj.tell()
+                    fileobj.seek(0)
+                except:
+                    raise ValueError("Could not determine filesize")
+
+    def __cmp__(self, other):
+        attrs = ['name', 'value', 'filename', 'filetype', 'filesize', 'fileobj']
+        myattrs = [getattr(self, a) for a in attrs]
+        oattrs = [getattr(other, a) for a in attrs]
+        return cmp(myattrs, oattrs)
+
+    def reset(self):
+        if self.fileobj is not None:
+            self.fileobj.seek(0)
+        elif self.value is None:
+            raise ValueError("Don't know how to reset this parameter")
+
+    @classmethod
+    def from_file(cls, paramname, filename):
+        """Returns a new MultipartParam object constructed from the local
+        file at ``filename``.
+
+        ``filesize`` is determined by os.path.getsize(``filename``)
+
+        ``filetype`` is determined by mimetypes.guess_type(``filename``)[0]
+
+        ``filename`` is set to os.path.basename(``filename``)
+        """
+
+        return cls(paramname, filename=os.path.basename(filename),
+                filetype=mimetypes.guess_type(filename)[0],
+                filesize=os.path.getsize(filename),
+                fileobj=open(filename, "rb"))
+
+    @classmethod
+    def from_params(cls, params):
+        """Returns a list of MultipartParam objects from a sequence of
+        name, value pairs, MultipartParam instances,
+        or from a mapping of names to values
+
+        The values may be strings or file objects, or MultipartParam objects.
+        MultipartParam object names must match the given names in the
+        name,value pairs or mapping, if applicable."""
+        if hasattr(params, 'items'):
+            params = params.items()
+
+        retval = []
+        for item in params:
+            if isinstance(item, cls):
+                retval.append(item)
+                continue
+            name, value = item
+            if isinstance(value, cls):
+                assert value.name == name
+                retval.append(value)
+                continue
+            if hasattr(value, 'read'):
+                # Looks like a file object
+                filename = getattr(value, 'name', None)
+                if filename is not None:
+                    filetype = mimetypes.guess_type(filename)[0]
+                else:
+                    filetype = None
+
+                retval.append(cls(name=name, filename=filename,
+                    filetype=filetype, fileobj=value))
+            else:
+                retval.append(cls(name, value))
+        return retval
+
+    def encode_hdr(self, boundary):
+        """Returns the header of the encoding of this parameter"""
+        boundary = encode_and_quote(boundary)
+
+        headers = ["--%s" % boundary]
+
+        if self.filename:
+            disposition = 'form-data; name="%s"; filename="%s"' % (self.name,
+                    self.filename)
+        else:
+            disposition = 'form-data; name="%s"' % self.name
+
+        headers.append("Content-Disposition: %s" % disposition)
+
+        if self.filetype:
+            filetype = self.filetype
+        else:
+            filetype = "text/plain; charset=utf-8"
+
+        headers.append("Content-Type: %s" % filetype)
+
+        headers.append("")
+        headers.append("")
+
+        return "\r\n".join(headers)
+
+    def encode(self, boundary):
+        """Returns the string encoding of this parameter"""
+        if self.value is None:
+            value = self.fileobj.read()
+        else:
+            value = self.value
+
+        if re.search("^--%s$" % re.escape(boundary), value, re.M):
+            raise ValueError("boundary found in encoded string")
+
+        return "%s%s\r\n" % (self.encode_hdr(boundary), value)
+
+    def iter_encode(self, boundary, blocksize=4096):
+        """Yields the encoding of this parameter
+        If self.fileobj is set, then blocks of ``blocksize`` bytes are read and
+        yielded."""
+        total = self.get_size(boundary)
+        current = 0
+        if self.value is not None:
+            block = self.encode(boundary)
+            current += len(block)
+            yield block
+            if self.cb:
+                self.cb(self, current, total)
+        else:
+            block = self.encode_hdr(boundary)
+            current += len(block)
+            yield block
+            if self.cb:
+                self.cb(self, current, total)
+            last_block = ""
+            encoded_boundary = "--%s" % encode_and_quote(boundary)
+            boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary),
+                    re.M)
+            while True:
+                block = self.fileobj.read(blocksize)
+                if not block:
+                    current += 2
+                    yield "\r\n"
+                    if self.cb:
+                        self.cb(self, current, total)
+                    break
+                last_block += block
+                if boundary_exp.search(last_block):
+                    raise ValueError("boundary found in file data")
+                last_block = last_block[-len(encoded_boundary)-2:]
+                current += len(block)
+                yield block
+                if self.cb:
+                    self.cb(self, current, total)
+
+    def get_size(self, boundary):
+        """Returns the size in bytes that this param will be when encoded
+        with the given boundary."""
+        if self.filesize is not None:
+            valuesize = self.filesize
+        else:
+            valuesize = len(self.value)
+
+        return len(self.encode_hdr(boundary)) + 2 + valuesize
+
+def encode_string(boundary, name, value):
+    """Returns ``name`` and ``value`` encoded as a multipart/form-data
+    variable.  ``boundary`` is the boundary string used throughout
+    a single request to separate variables."""
+
+    return MultipartParam(name, value).encode(boundary)
+
+def encode_file_header(boundary, paramname, filesize, filename=None,
+        filetype=None):
+    """Returns the leading data for a multipart/form-data field that contains
+    file data.
+
+    ``boundary`` is the boundary string used throughout a single request to
+    separate variables.
+
+    ``paramname`` is the name of the variable in this request.
+
+    ``filesize`` is the size of the file data.
+
+    ``filename`` if specified is the filename to give to this field.  This
+    field is only useful to the server for determining the original filename.
+
+    ``filetype`` if specified is the MIME type of this file.
+
+    The actual file data should be sent after this header has been sent.
+    """
+
+    return MultipartParam(paramname, filesize=filesize, filename=filename,
+            filetype=filetype).encode_hdr(boundary)
+
+def get_body_size(params, boundary):
+    """Returns the number of bytes that the multipart/form-data encoding
+    of ``params`` will be."""
+    size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params))
+    return size + len(boundary) + 6
+
+def get_headers(params, boundary):
+    """Returns a dictionary with Content-Type and Content-Length headers
+    for the multipart/form-data encoding of ``params``."""
+    headers = {}
+    boundary = urllib.quote_plus(boundary)
+    headers['Content-Type'] = "multipart/form-data; boundary=%s" % boundary
+    headers['Content-Length'] = str(get_body_size(params, boundary))
+    return headers
+
+class multipart_yielder:
+    def __init__(self, params, boundary, cb):
+        self.params = params
+        self.boundary = boundary
+        self.cb = cb
+
+        self.i = 0
+        self.p = None
+        self.param_iter = None
+        self.current = 0
+        self.total = get_body_size(params, boundary)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        """generator function to yield multipart/form-data representation
+        of parameters"""
+        if self.param_iter is not None:
+            try:
+                block = self.param_iter.next()
+                self.current += len(block)
+                if self.cb:
+                    self.cb(self.p, self.current, self.total)
+                return block
+            except StopIteration:
+                self.p = None
+                self.param_iter = None
+
+        if self.i is None:
+            raise StopIteration
+        elif self.i >= len(self.params):
+            self.param_iter = None
+            self.p = None
+            self.i = None
+            block = "--%s--\r\n" % self.boundary
+            self.current += len(block)
+            if self.cb:
+                self.cb(self.p, self.current, self.total)
+            return block
+
+        self.p = self.params[self.i]
+        self.param_iter = self.p.iter_encode(self.boundary)
+        self.i += 1
+        return self.next()
+
+    def reset(self):
+        self.i = 0
+        self.current = 0
+        for param in self.params:
+            param.reset()
+
+def multipart_encode(params, boundary=None, cb=None):
+    """Encode ``params`` as multipart/form-data.
+
+    ``params`` should be a sequence of (name, value) pairs or MultipartParam
+    objects, or a mapping of names to values.
+    Values are either string parameter values, or file-like objects to use as
+    the parameter value.  The file-like objects must support .read() and either
+    .fileno() or both .seek() and .tell().
+
+    If ``boundary`` is set, then it is used as the MIME boundary.  Otherwise
+    a randomly generated boundary will be used.  In either case, if the
+    boundary string appears in the parameter values a ValueError will be
+    raised.
+
+    If ``cb`` is set, it should be a callback which will get called as blocks
+    of data are encoded.  It will be called with (param, current, total),
+    indicating the current parameter being encoded, the current amount encoded,
+    and the total amount to encode.
+
+    Returns a tuple of `datagen`, `headers`, where `datagen` is a
+    generator that will yield blocks of data that make up the encoded
+    parameters, and `headers` is a dictionary with the associated
+    Content-Type and Content-Length headers.
+
+    Examples:
+
+    >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] )
+    >>> s = "".join(datagen)
+    >>> assert "value2" in s and "value1" in s
+
+    >>> p = MultipartParam("key", "value2")
+    >>> datagen, headers = multipart_encode( [("key", "value1"), p] )
+    >>> s = "".join(datagen)
+    >>> assert "value2" in s and "value1" in s
+
+    >>> datagen, headers = multipart_encode( {"key": "value1"} )
+    >>> s = "".join(datagen)
+    >>> assert "value2" not in s and "value1" in s
+
+    """
+    if boundary is None:
+        boundary = gen_boundary()
+    else:
+        boundary = urllib.quote_plus(boundary)
+
+    headers = get_headers(params, boundary)
+    params = MultipartParam.from_params(params)
+
+    return multipart_yielder(params, boundary, cb), headers
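
[illustration, not part of the changeset] The bundled poster pieces are used
together roughly like this; the URL, api key, and file name are placeholders
and the request is constructed but not sent:

    import urllib2
    from yt.utilities.poster.streaminghttp import register_openers
    from yt.utilities.poster.encode import multipart_encode, MultipartParam

    register_openers()   # install the streaming-capable urllib2 handlers

    # one plain field plus one file field; headers carries the multipart
    # Content-Type (with boundary) and an exact Content-Length
    params = [("api_key", "0123456789abcdef"),
              MultipartParam.from_file("chunk_data", "chunk.npy")]
    datagen, headers = multipart_encode(params)

    request = urllib2.Request("http://example.org/upload", datagen, headers)
    # urllib2.urlopen(request).read() would stream the encoded body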


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/poster/streaminghttp.py
--- /dev/null
+++ b/yt/utilities/poster/streaminghttp.py
@@ -0,0 +1,199 @@
+"""Streaming HTTP uploads module.
+
+This module extends the standard httplib and urllib2 objects so that
+iterable objects can be used in the body of HTTP requests.
+
+In most cases all one should have to do is call :func:`register_openers()`
+to register the new streaming http handlers which will take priority over
+the default handlers, and then you can use iterable objects in the body
+of HTTP requests.
+
+**N.B.** You must specify a Content-Length header if using an iterable object
+since there is no way to determine in advance the total size that will be
+yielded, and there is no way to reset an iterator.
+
+Example usage:
+
+>>> from StringIO import StringIO
+>>> import urllib2, poster.streaminghttp
+
+>>> opener = poster.streaminghttp.register_openers()
+
+>>> s = "Test file data"
+>>> f = StringIO(s)
+
+>>> req = urllib2.Request("http://localhost:5000", f,
+...                       {'Content-Length': str(len(s))})
+"""
+
+import httplib, urllib2, socket
+from httplib import NotConnected
+
+__all__ = ['StreamingHTTPConnection', 'StreamingHTTPRedirectHandler',
+        'StreamingHTTPHandler', 'register_openers']
+
+if hasattr(httplib, 'HTTPS'):
+    __all__.extend(['StreamingHTTPSHandler', 'StreamingHTTPSConnection'])
+
+class _StreamingHTTPMixin:
+    """Mixin class for HTTP and HTTPS connections that implements a streaming
+    send method."""
+    def send(self, value):
+        """Send ``value`` to the server.
+
+        ``value`` can be a string object, a file-like object that supports
+        a .read() method, or an iterable object that supports a .next()
+        method.
+        """
+        # Based on python 2.6's httplib.HTTPConnection.send()
+        if self.sock is None:
+            if self.auto_open:
+                self.connect()
+            else:
+                raise NotConnected()
+
+        # send the data to the server. if we get a broken pipe, then close
+        # the socket. we want to reconnect when somebody tries to send again.
+        #
+        # NOTE: we DO propagate the error, though, because we cannot simply
+        #       ignore the error... the caller will know if they can retry.
+        if self.debuglevel > 0:
+            print "send:", repr(value)
+        try:
+            blocksize = 8192
+            if hasattr(value, 'read') :
+                if hasattr(value, 'seek'):
+                    value.seek(0)
+                if self.debuglevel > 0:
+                    print "sendIng a read()able"
+                data = value.read(blocksize)
+                while data:
+                    self.sock.sendall(data)
+                    data = value.read(blocksize)
+            elif hasattr(value, 'next'):
+                if hasattr(value, 'reset'):
+                    value.reset()
+                if self.debuglevel > 0:
+                    print "sendIng an iterable"
+                for data in value:
+                    self.sock.sendall(data)
+            else:
+                self.sock.sendall(value)
+        except socket.error, v:
+            if v[0] == 32:      # Broken pipe
+                self.close()
+            raise
+
+class StreamingHTTPConnection(_StreamingHTTPMixin, httplib.HTTPConnection):
+    """Subclass of `httplib.HTTPConnection` that overrides the `send()` method
+    to support iterable body objects"""
+
+class StreamingHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
+    """Subclass of `urllib2.HTTPRedirectHandler` that overrides the
+    `redirect_request` method to properly handle redirected POST requests
+
+    This class is required because python 2.5's HTTPRedirectHandler does
+    not remove the Content-Type or Content-Length headers when requesting
+    the new resource, even though the body of the original request is not preserved.
+    """
+
+    handler_order = urllib2.HTTPRedirectHandler.handler_order - 1
+
+    # From python2.6 urllib2's HTTPRedirectHandler
+    def redirect_request(self, req, fp, code, msg, headers, newurl):
+        """Return a Request or None in response to a redirect.
+
+        This is called by the http_error_30x methods when a
+        redirection response is received.  If a redirection should
+        take place, return a new Request to allow http_error_30x to
+        perform the redirect.  Otherwise, raise HTTPError if no-one
+        else should try to handle this url.  Return None if you can't
+        but another Handler might.
+        """
+        m = req.get_method()
+        if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
+            or code in (301, 302, 303) and m == "POST"):
+            # Strictly (according to RFC 2616), 301 or 302 in response
+            # to a POST MUST NOT cause a redirection without confirmation
+            # from the user (of urllib2, in this case).  In practice,
+            # essentially all clients do redirect in this case, so we
+            # do the same.
+            # be conciliant with URIs containing a space
+            newurl = newurl.replace(' ', '%20')
+            newheaders = dict((k, v) for k, v in req.headers.items()
+                              if k.lower() not in (
+                                  "content-length", "content-type")
+                             )
+            return urllib2.Request(newurl,
+                           headers=newheaders,
+                           origin_req_host=req.get_origin_req_host(),
+                           unverifiable=True)
+        else:
+            raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)
+
+class StreamingHTTPHandler(urllib2.HTTPHandler):
+    """Subclass of `urllib2.HTTPHandler` that uses
+    StreamingHTTPConnection as its http connection class."""
+
+    handler_order = urllib2.HTTPHandler.handler_order - 1
+
+    def http_open(self, req):
+        """Open a StreamingHTTPConnection for the given request"""
+        return self.do_open(StreamingHTTPConnection, req)
+
+    def http_request(self, req):
+        """Handle a HTTP request.  Make sure that Content-Length is specified
+        if we're using an iterable value"""
+        # Make sure that if we're using an iterable object as the request
+        # body, that we've also specified Content-Length
+        if req.has_data():
+            data = req.get_data()
+            if hasattr(data, 'read') or hasattr(data, 'next'):
+                if not req.has_header('Content-length'):
+                    raise ValueError(
+                            "No Content-Length specified for iterable body")
+        return urllib2.HTTPHandler.do_request_(self, req)
+
+if hasattr(httplib, 'HTTPS'):
+    class StreamingHTTPSConnection(_StreamingHTTPMixin,
+            httplib.HTTPSConnection):
+        """Subclass of `httplib.HTTPSConnection` that overrides the `send()`
+        method to support iterable body objects"""
+
+    class StreamingHTTPSHandler(urllib2.HTTPSHandler):
+        """Subclass of `urllib2.HTTPSHandler` that uses
+        StreamingHTTPSConnection as its http connection class."""
+
+        handler_order = urllib2.HTTPSHandler.handler_order - 1
+
+        def https_open(self, req):
+            return self.do_open(StreamingHTTPSConnection, req)
+
+        def https_request(self, req):
+            # Make sure that if we're using an iterable object as the request
+            # body, that we've also specified Content-Length
+            if req.has_data():
+                data = req.get_data()
+                if hasattr(data, 'read') or hasattr(data, 'next'):
+                    if not req.has_header('Content-length'):
+                        raise ValueError(
+                                "No Content-Length specified for iterable body")
+            return urllib2.HTTPSHandler.do_request_(self, req)
+
+
+def get_handlers():
+    handlers = [StreamingHTTPHandler, StreamingHTTPRedirectHandler]
+    if hasattr(httplib, "HTTPS"):
+        handlers.append(StreamingHTTPSHandler)
+    return handlers
+    
+def register_openers():
+    """Register the streaming http handlers in the global urllib2 default
+    opener object.
+
+    Returns the created OpenerDirector object."""
+    opener = urllib2.build_opener(*get_handlers())
+
+    urllib2.install_opener(opener)
+
+    return opener
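
[illustration, not part of the changeset] The Content-Length requirement for
iterable bodies, with a placeholder URL and payload; the request is built but
not opened:

    import urllib2
    from yt.utilities.poster.streaminghttp import register_openers

    register_openers()   # urllib2 now streams read()-ables and iterables

    chunks = ["part one ", "part two"]
    body = iter(chunks)                   # has .next(), so it will stream
    length = sum(len(c) for c in chunks)  # must be supplied up front
    req = urllib2.Request("http://localhost:5000", body,
                          {'Content-Length': str(length)})
    # urllib2.urlopen(req) would send each chunk with sock.sendall()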


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,6 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
+
 
 def check_for_png():
     # First up: HDF5_DIR in environment
@@ -45,24 +49,28 @@
                 print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
                 return png_inc, png_lib
     print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
+    print "Please place the base directory of your png"
+    print "install in png.cfg and restart."
     print "(ex: \"echo '/usr/local/' > png.cfg\" )"
     sys.exit(1)
 
+
 def check_for_freetype():
     # First up: environment
     if "FTYPE_DIR" in os.environ:
         freetype_dir = os.environ["FTYPE_DIR"]
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
+        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc,
+            freetype_lib)
         return (freetype_inc, freetype_lib)
     # Next up, we try freetype.cfg
     elif os.path.exists("freetype.cfg"):
         freetype_dir = open("freetype.cfg").read().strip()
         freetype_inc = os.path.join(freetype_dir, "include")
         freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc,
+            freetype_lib)
         return (freetype_inc, freetype_lib)
     # Now we see if ctypes can help us:
     try:
@@ -74,10 +82,12 @@
             # better way to pull off two directory names.
             freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
             if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                os.path.isfile(os.path.join(freetype_dir, "include",
+                    "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % \
+                    (freetype_inc, freetype_lib)
                 return freetype_inc, freetype_lib
     except ImportError:
         pass
@@ -86,17 +96,21 @@
     for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
         if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
             if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
+                os.path.isfile(os.path.join(freetype_dir, "include",
+                    "ft2build.h")):
                 freetype_inc = os.path.join(freetype_dir, "include")
                 freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
+                print "FTYPE_LOCATION: freetype found in: %s, %s" % \
+                    (freetype_inc, freetype_lib)
                 return freetype_inc, freetype_lib
     print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
+    print "Please place the base directory of your freetype"
+    print "install in freetype.cfg and restart."
     print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
     print "You can locate this by looking for the file ft2build.h"
     sys.exit(1)
 
+
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
     if "HDF5_DIR" in os.environ:
@@ -125,46 +139,52 @@
                os.path.isfile(os.path.join(hdf5_dir, "include", "hdf5.h")):
                 hdf5_inc = os.path.join(hdf5_dir, "include")
                 hdf5_lib = os.path.join(hdf5_dir, "lib")
-                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc, hdf5_lib)
+                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
+                    hdf5_lib)
                 return hdf5_inc, hdf5_lib
     except ImportError:
         pass
     print "Reading HDF5 location from hdf5.cfg failed."
-    print "Please place the base directory of your HDF5 install in hdf5.cfg and restart."
+    print "Please place the base directory of your"
+    print "HDF5 install in hdf5.cfg and restart."
     print "(ex: \"echo '/usr/local/' > hdf5.cfg\" )"
     sys.exit(1)
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('utilities',parent_package,top_path)
+    config = Configuration('utilities', parent_package, top_path)
     png_inc, png_lib = check_for_png()
     freetype_inc, freetype_lib = check_for_freetype()
-    # Because setjmp.h is included by lots of things, and because libpng hasn't
+    # Because setjmp.h is included by lots of things,
+    # and because libpng hasn't
     # always properly checked its header files (see
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
     # support for setjmp.
     config.add_subpackage("amr_kdtree")
+    config.add_subpackage("poster")
     config.add_subpackage("answer_testing")
-    config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
+    config.add_subpackage("delaunay")  # From SciPy, written by Robert Kern
     config.add_subpackage("kdtree")
-    config.add_data_files(('kdtree', ['kdtree/fKDpy.so',]))
+    config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
     config.add_subpackage("parallel_tools")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
     hdf5_inc, hdf5_lib = check_for_hdf5()
-    include_dirs=[hdf5_inc]
-    library_dirs=[hdf5_lib]
-    config.add_extension("hdf5_light_reader", "yt/utilities/hdf5_light_reader.c",
-                         define_macros=[("H5_USE_16_API",True)],
-                         libraries=["m","hdf5"],
+    include_dirs = [hdf5_inc]
+    library_dirs = [hdf5_lib]
+    config.add_extension("hdf5_light_reader",
+                        "yt/utilities/hdf5_light_reader.c",
+                         define_macros=[("H5_USE_16_API", True)],
+                         libraries=["m", "hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
-    config.add_extension("amr_utils", 
+    config.add_extension("amr_utils",
         ["yt/utilities/amr_utils.pyx",
          "yt/utilities/_amr_utils/FixedInterpolator.c",
          "yt/utilities/_amr_utils/kdtree.c",
          "yt/utilities/_amr_utils/union_find.c"] +
-         glob.glob("yt/utilities/_amr_utils/healpix_*.c"), 
+         glob.glob("yt/utilities/_amr_utils/healpix_*.c"),
         define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
         include_dirs=["yt/utilities/_amr_utils/", png_inc,
                       freetype_inc, os.path.join(freetype_inc, "freetype2")],
@@ -178,12 +198,12 @@
     #    ["yt/utilities/voropp.pyx"],
     #    language="c++",
     #    include_dirs=["yt/utilities/voro++"])
-    config.add_extension("libconfig_wrapper", 
+    config.add_extension("libconfig_wrapper",
         ["yt/utilities/libconfig_wrapper.pyx"] +
-         glob.glob("yt/utilities/_libconfig/*.c"), 
-        include_dirs = ["yt/utilities/_libconfig/"],
-        define_macros = [("HAVE_XLOCALE_H", True)]
+         glob.glob("yt/utilities/_libconfig/*.c"),
+        include_dirs=["yt/utilities/_libconfig/"],
+        define_macros=[("HAVE_XLOCALE_H", True)]
         )
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/spatial/__init__.py
--- a/yt/utilities/spatial/__init__.py
+++ b/yt/utilities/spatial/__init__.py
@@ -26,7 +26,7 @@
 from ckdtree import *
 #from qhull import *
 
-__all__ = filter(lambda s:not s.startswith('_'),dir())
+__all__ = filter(lambda s: not s.startswith('_'), dir())
 __all__ += ['distance']
 
 import distance


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/spatial/ckdtree.pyx
--- a/yt/utilities/spatial/ckdtree.pyx
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -2,23 +2,26 @@
 # Released under the scipy license
 import numpy as np
 cimport numpy as np
-cimport stdlib
+cimport libc.stdlib as stdlib
 cimport cython
 
 import kdtree
 
-cdef double infinity = np.inf
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+cdef np.float64_t infinity = np.inf
 
 __all__ = ['cKDTree']
 
-
 # priority queue
 cdef union heapcontents:
     int intdata
     char* ptrdata
 
 cdef struct heapitem:
-    double priority
+    np.float64_t priority
     heapcontents contents
 
 cdef struct heap:
@@ -97,23 +100,23 @@
 
 
 # utility functions
-cdef inline double dmax(double x, double y):
+cdef inline np.float64_t dmax(np.float64_t x, np.float64_t y):
     if x>y:
         return x
     else:
         return y
-cdef inline double dabs(double x):
+cdef inline np.float64_t dabs(np.float64_t x):
     if x>0:
         return x
     else:
         return -x
-cdef inline double dmin(double x, double y):
+cdef inline np.float64_t dmin(np.float64_t x, np.float64_t y):
     if x<y:
         return x
     else:
         return y
-cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound,
-    double*period):
+cdef inline np.float64_t _distance_p(np.float64_t*x,np.float64_t*y,np.float64_t p,int k,np.float64_t upperbound,
+    np.float64_t*period):
     """Compute the distance between x and y
 
     Computes the Minkowski p-distance to the power p between two points.
@@ -123,7 +126,7 @@
     Periodicity added by S. Skory.
     """
     cdef int i
-    cdef double r, m
+    cdef np.float64_t r, m
     r = 0
     if p==infinity:
         for i in range(k):
@@ -137,6 +140,12 @@
             r += m
             if r>upperbound:
                 return r
+    elif p==2:
+        for i in range(k):
+            m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+            r += m*m
+            if r>upperbound:
+                return r
     else:
         for i in range(k):
             m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
@@ -151,9 +160,9 @@
 cdef struct innernode:
     int split_dim
     int n_points
-    double split
-    double* maxes
-    double* mins
+    np.float64_t split
+    np.float64_t* maxes
+    np.float64_t* mins
     innernode* less
     innernode* greater
 cdef struct leafnode:
@@ -161,14 +170,14 @@
     int n_points
     int start_idx
     int end_idx
-    double* maxes
-    double* mins
+    np.float64_t* maxes
+    np.float64_t* mins
 
 # this is the standard trick for variable-size arrays:
-# malloc sizeof(nodeinfo)+self.m*sizeof(double) bytes.
+# malloc sizeof(nodeinfo)+self.m*sizeof(np.float64_t) bytes.
 cdef struct nodeinfo:
     innernode* node
-    double side_distances[0]
+    np.float64_t side_distances[0]
 
 cdef class cKDTree:
     """kd-tree for quick nearest-neighbor lookup
@@ -201,7 +210,7 @@
     data : array-like, shape (n,m)
         The n data points of dimension m to be indexed. This array is 
         not copied unless this is necessary to produce a contiguous 
-        array of doubles, and so modifying this data will result in 
+        array of np.float64_ts, and so modifying this data will result in 
         bogus results.
     leafsize : positive integer
         The number of points at which the algorithm switches over to
@@ -211,21 +220,21 @@
 
     cdef innernode* tree 
     cdef readonly object data
-    cdef double* raw_data
+    cdef np.float64_t* raw_data
     cdef readonly int n, m
     cdef readonly int leafsize
     cdef readonly object maxes
-    cdef double* raw_maxes
+    cdef np.float64_t* raw_maxes
     cdef readonly object mins
-    cdef double* raw_mins
+    cdef np.float64_t* raw_mins
     cdef object indices
     cdef np.int64_t* raw_indices
     def __init__(cKDTree self, data, int leafsize=10):
-        cdef np.ndarray[double, ndim=2] inner_data
-        cdef np.ndarray[double, ndim=1] inner_maxes
-        cdef np.ndarray[double, ndim=1] inner_mins
+        cdef np.ndarray[np.float64_t, ndim=2] inner_data
+        cdef np.ndarray[np.float64_t, ndim=1] inner_maxes
+        cdef np.ndarray[np.float64_t, ndim=1] inner_mins
         cdef np.ndarray[np.int64_t, ndim=1] inner_indices
-        self.data = np.ascontiguousarray(data,dtype=np.double)
+        self.data = np.ascontiguousarray(data,dtype="float64")
         self.n, self.m = np.shape(self.data)
         self.leafsize = leafsize
         if self.leafsize<1:
@@ -235,27 +244,27 @@
         self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int64))
 
         inner_data = self.data
-        self.raw_data = <double*>inner_data.data
+        self.raw_data = <np.float64_t*>inner_data.data
         inner_maxes = self.maxes
-        self.raw_maxes = <double*>inner_maxes.data
+        self.raw_maxes = <np.float64_t*>inner_maxes.data
         inner_mins = self.mins
-        self.raw_mins = <double*>inner_mins.data
+        self.raw_mins = <np.float64_t*>inner_mins.data
         inner_indices = self.indices
         self.raw_indices = <np.int64_t*>inner_indices.data
 
         self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins)
 
-    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins):
+    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, np.float64_t* maxes, np.float64_t* mins):
         cdef leafnode* n
         cdef innernode* ni
         cdef int i, j, t, p, q, d
-        cdef double size, split, minval, maxval
-        cdef double*mids
+        cdef np.float64_t size, split, minval, maxval
+        cdef np.float64_t*mids
         if end_idx-start_idx<=self.leafsize:
             n = <leafnode*>stdlib.malloc(sizeof(leafnode))
             # Skory
-            n.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            n.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            n.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            n.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 n.maxes[i] = maxes[i]
                 n.mins[i] = mins[i]
@@ -327,7 +336,7 @@
             # construct new node representation
             ni = <innernode*>stdlib.malloc(sizeof(innernode))
 
-            mids = <double*>stdlib.malloc(sizeof(double)*self.m)
+            mids = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 mids[i] = maxes[i]
             mids[d] = split
@@ -343,8 +352,8 @@
             ni.split_dim = d
             ni.split = split
             # Skory
-            ni.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            ni.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            ni.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            ni.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 ni.maxes[i] = maxes[i]
                 ni.mins[i] = mins[i]
@@ -366,32 +375,35 @@
         self.__free_tree(self.tree)
 
     cdef void __query(cKDTree self, 
-            double*result_distances, 
-            long*result_indices, 
-            double*x, 
+            np.float64_t*result_distances, 
+            np.int64_t*result_indices, 
+            np.float64_t*x, 
             int k, 
-            double eps, 
-            double p, 
-            double distance_upper_bound,
-            double*period):
+            np.float64_t eps, 
+            np.float64_t p, 
+            np.float64_t distance_upper_bound,
+            np.float64_t*period):
+        assert(p == 2)
+        assert(eps == 0.0)
+        assert(distance_upper_bound == infinity)
         cdef heap q
         cdef heap neighbors
 
-        cdef int i, j
-        cdef double t
+        cdef int i, j, i2, j2
+        cdef np.float64_t t, y
         cdef nodeinfo* inf
         cdef nodeinfo* inf2
-        cdef double d
-        cdef double m_left, m_right, m
-        cdef double epsfac
-        cdef double min_distance
-        cdef double far_min_distance
+        cdef np.float64_t d, di
+        cdef np.float64_t m_left, m_right, m
+        cdef np.float64_t epsfac
+        cdef np.float64_t min_distance
+        cdef np.float64_t far_min_distance
         cdef heapitem it, it2, neighbor
         cdef leafnode* node
         cdef innernode* inode
         cdef innernode* near
         cdef innernode* far
-        cdef double* side_distances
+        cdef np.float64_t* side_distances
 
         # priority queue for chasing nodes
         # entries are:
@@ -406,7 +418,7 @@
         heapcreate(&neighbors,k)
 
         # set up first nodeinfo
-        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
         inf.node = self.tree
         for i in range(self.m):
             inf.side_distances[i] = 0
@@ -417,28 +429,15 @@
                 t = self.raw_mins[i]-x[i]
                 if t>inf.side_distances[i]:
                     inf.side_distances[i] = t
-            if p!=1 and p!=infinity:
-                inf.side_distances[i]=inf.side_distances[i]**p
+            inf.side_distances[i]=inf.side_distances[i]*inf.side_distances[i]
 
         # compute first distance
         min_distance = 0.
         for i in range(self.m):
-            if p==infinity:
-                min_distance = dmax(min_distance,inf.side_distances[i])
-            else:
-                min_distance += inf.side_distances[i]
+            min_distance += inf.side_distances[i]
 
         # fiddle approximation factor
-        if eps==0:
-            epsfac=1
-        elif p==infinity:
-            epsfac = 1/(1+eps)
-        else:
-            epsfac = 1/(1+eps)**p
-
-        # internally we represent all distances as distance**p
-        if p!=infinity and distance_upper_bound!=infinity:
-            distance_upper_bound = distance_upper_bound**p
+        epsfac=1
 
         while True:
             if inf.node.split_dim==-1:
@@ -446,10 +445,11 @@
 
                 # brute-force
                 for i in range(node.start_idx,node.end_idx):
-                    d = _distance_p(
-                            self.raw_data+self.raw_indices[i]*self.m,
-                            x,p,self.m,distance_upper_bound,period)
-                        
+                    d = 0.0
+                    for i2 in range(self.m):
+                        y = self.raw_data[self.raw_indices[i]*self.m + i2]
+                        di = dmin(dabs(x[i2] - y), period[i2] - dabs(x[i2] - y))
+                        d += di*di
                     if d<distance_upper_bound:
                         # replace furthest neighbor
                         if neighbors.n==k:
@@ -500,7 +500,7 @@
                 # far child is further by an amount depending only
                 # on the split value; compute its distance and side_distances
                 # and push it on the queue if it's near enough
-                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
                 it2.contents.ptrdata = <char*> inf2
                 inf2.node = far
 
@@ -517,17 +517,9 @@
 
                 # one side distance changes
                 # we can adjust the minimum distance without recomputing
-                if p == infinity:
-                    # we never use side_distances in the l_infinity case
-                    # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])
-                    far_min_distance = dmax(min_distance, m)
-                elif p == 1:
-                    inf2.side_distances[inode.split_dim] = m
-                    far_min_distance = dmax(min_distance, m)
-                else:
-                    inf2.side_distances[inode.split_dim] = m**p
-                    #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
-                    far_min_distance = m**p
+                inf2.side_distances[inode.split_dim] = m*m
+                #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
+                far_min_distance = m*m
 
                 it2.priority = far_min_distance
 
@@ -544,16 +536,13 @@
         for i in range(neighbors.n-1,-1,-1):
             neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced
             result_indices[i] = neighbor.contents.intdata
-            if p==1 or p==infinity:
-                result_distances[i] = -neighbor.priority
-            else:
-                result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
+            result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
 
         heapdestroy(&q)
         heapdestroy(&neighbors)
 
-    def query(cKDTree self, object x, int k=1, double eps=0, double p=2, 
-            double distance_upper_bound=infinity, object period=None):
+    def query(cKDTree self, object x, int k=1, np.float64_t eps=0, np.float64_t p=2, 
+            np.float64_t distance_upper_bound=infinity, object period=None):
         """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf,
            period=None)
         
@@ -592,16 +581,16 @@
             Missing neighbors are indicated with self.n.
 
         """
-        cdef np.ndarray[long, ndim=2] ii
-        cdef np.ndarray[double, ndim=2] dd
-        cdef np.ndarray[double, ndim=2] xx
-        cdef np.ndarray[double, ndim=1] cperiod
+        cdef np.ndarray[np.int64_t, ndim=2] ii
+        cdef np.ndarray[np.float64_t, ndim=2] dd
+        cdef np.ndarray[np.float64_t, ndim=2] xx
+        cdef np.ndarray[np.float64_t, ndim=1] cperiod
         cdef int c
-        x = np.asarray(x).astype(np.double)
+        x = np.asarray(x).astype("float64")
         if period is None:
             period = np.array([np.inf]*self.m)
         else:
-            period = np.asarray(period).astype(np.double)
+            period = np.asarray(period).astype("float64")
         cperiod = np.ascontiguousarray(period)
         if np.shape(x)[-1] != self.m:
             raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
@@ -616,20 +605,20 @@
         n = np.prod(retshape)
         xx = np.reshape(x,(n,self.m))
         xx = np.ascontiguousarray(xx)
-        dd = np.empty((n,k),dtype=np.double)
+        dd = np.empty((n,k),dtype="float64")
         dd.fill(infinity)
-        ii = np.empty((n,k),dtype=np.long)
+        ii = np.empty((n,k),dtype="int64")
         ii.fill(self.n)
         for c in range(n):
             self.__query(
-                    (<double*>dd.data)+c*k,
-                    (<long*>ii.data)+c*k,
-                    (<double*>xx.data)+c*self.m, 
+                    (<np.float64_t*>dd.data)+c*k,
+                    (<np.int64_t*>ii.data)+c*k,
+                    (<np.float64_t*>xx.data)+c*self.m, 
                     k, 
                     eps,
                     p, 
                     distance_upper_bound,
-                    <double*>cperiod.data)
+                    <np.float64_t*>cperiod.data)
         if single:
             if k==1:
                 return dd[0,0], ii[0,0]
@@ -641,7 +630,10 @@
             else:
                 return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,))
 
-    def chainHOP_get_dens(cKDTree self, object mass, int num_neighbors=65, \
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def chainHOP_get_dens(cKDTree self, object omass, int num_neighbors=65, \
             int nMerge=6):
         """ query the tree for the nearest neighbors, to get the density
             of particles for chainHOP.
@@ -669,38 +661,46 @@
         
         """
         
-        # We're no longer returning all the tags in this step.
+        # We're no longer returning all the tags in this step.
         # We do it chunked, in find_chunk_nearest_neighbors.
-        #cdef np.ndarray[long, ndim=2] tags
-        cdef np.ndarray[double, ndim=1] dens
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
+        #cdef np.ndarray[np.int64_t, ndim=2] tags
+        cdef np.ndarray[np.float64_t, ndim=1] dens
         cdef int i, pj, j
-        cdef double ih2, fNorm, r2, rs
+        cdef np.float64_t ih2, fNorm, r2, rs
         
-        #tags = np.empty((self.n, nMerge), dtype=np.long)
-        dens = np.empty(self.n, dtype=np.double)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        # Need to start out with zeros before we start adding to it.
-        dens.fill(0.0)
+        #tags = np.empty((self.n, nMerge), dtype="int64")
+        dens = np.zeros(self.n, dtype="float64")
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
 
-        mass = np.array(mass).astype(np.double)
-        mass = np.ascontiguousarray(mass)
+        cdef np.ndarray[np.float64_t, ndim=1] mass = np.array(omass).astype("float64")
+        cdef np.float64_t ipi = 1.0/np.pi
         
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(self.n):
-            query = self.data[i]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
+            for j in range(self.m):
+                query[j] = local_data[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
             
             #calculate the density for this particle
-            ih2 = 4.0/np.max(dist_temp)
-            fNorm = 0.5*np.sqrt(ih2)*ih2/np.pi
+            ih2 = -1
+            for j in range(num_neighbors):
+                ih2 = dmax(ih2, dist_temp[j])
+            ih2 = 4.0/ih2
+            fNorm = 0.5*(ih2**1.5)*ipi
             for j in range(num_neighbors):
                 pj = tags_temp[j]
                 r2 = dist_temp[j] * ih2
-                rs = 2.0 - np.sqrt(r2)
+                rs = 2.0 - (r2**0.5)
                 if (r2 < 1.0):
                     rs = (1.0 - 0.75*rs*r2)
                 else:
@@ -715,6 +715,8 @@
         #return (dens, tags)
         return dens
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def find_chunk_nearest_neighbors(cKDTree self, int start, int finish, \
         int num_neighbors=65):
         """ query the tree in chunks, between start and finish, recording the
@@ -738,21 +740,99 @@
         
         """
         
-        cdef np.ndarray[long, ndim=2] chunk_tags
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
-        cdef int i
+        cdef np.ndarray[np.int64_t, ndim=2] chunk_tags
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
+        cdef int i, j
         
-        chunk_tags = np.empty((finish-start, num_neighbors), dtype=np.long)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        
+        chunk_tags = np.empty((finish-start, num_neighbors), dtype="int64")
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(finish-start):
-            query = self.data[i+start]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
-            chunk_tags[i,:] = tags_temp[:]
+            for j in range(self.m):
+                query[j] = local_data[i+start,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
+            for j in range(num_neighbors):
+                chunk_tags[i,j] = tags_temp[j]
         
         return chunk_tags
 
+    def chainHOP_preconnect(self, np.ndarray[np.int64_t, ndim=1] chainID,
+                                  np.ndarray[np.float64_t, ndim=1] density,
+                                  np.ndarray[np.float64_t, ndim=1] densest_in_chain,
+                                  np.ndarray bis_inside,
+                                  np.ndarray bsearch_again,
+                                  np.float64_t peakthresh,
+                                  np.float64_t saddlethresh,
+                                  int nn, int nMerge,
+                                  object chain_map):
+        cdef np.ndarray[np.int32_t, ndim=1] is_inside
+        cdef np.ndarray[np.int32_t, ndim=1] search_again
+        cdef np.ndarray[np.float64_t, ndim=2] pos 
+        cdef np.int64_t thisNN, thisNN_chainID, same_count
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * nn)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * nn)
+        cdef np.float64_t period[3], thisNN_max_dens, boundary_density
+        cdef np.float64_t part_max_dens
+        cdef int i, j, npart, chainID_i
+        is_inside = bis_inside.astype("int32")
+        search_again = bsearch_again.astype("int32")
+        pos = self.data
+        npart = pos.shape[0]
+        for i in range(3): period[i] = 1.0
+        for i in xrange(npart):
+            # Don't consider this particle if it's not part of a chain.
+            if chainID[i] < 0: continue
+            chainID_i = chainID[i]
+            # If this particle is in the padding, don't make a connection.
+            if not is_inside[i]: continue
+            # Find this particle's chain max_dens.
+            part_max_dens = densest_in_chain[chainID_i]
+            # We're only connecting >= peakthresh chains now.
+            if part_max_dens < peakthresh: continue
+            # Loop over nMerge closest nearest neighbors.
+            for j in range(self.m):
+                query[j] = pos[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, nn, 0.0, 
+                         2, infinity, period)
+            same_count = 0
+            for j in xrange(int(nMerge+1)):
+                thisNN = tags_temp[j+1] # Don't consider ourselves at tags_temp[0]
+                thisNN_chainID = chainID[thisNN]
+                # If our neighbor is in the same chain, move on.
+                # Move on if these chains are already connected:
+                if chainID_i == thisNN_chainID or \
+                        thisNN_chainID in chain_map[chainID_i]:
+                    same_count += 1
+                    continue
+                # Everything immediately below is for
+                # neighboring particles with a chainID. 
+                if thisNN_chainID >= 0:
+                    # Find thisNN's chain's max_dens.
+                    thisNN_max_dens = densest_in_chain[thisNN_chainID]
+                    # We're only linking peakthresh chains
+                    if thisNN_max_dens < peakthresh: continue
+                    # Calculate the two groups' boundary density.
+                    boundary_density = (density[thisNN] + density[i]) / 2.
+                    # Don't connect if the boundary is too low.
+                    if boundary_density < saddlethresh: continue
+                    # Mark these chains as related.
+                    chain_map[thisNN_chainID].add(chainID_i)
+                    chain_map[chainID_i].add(thisNN_chainID)
+            if same_count == nMerge + 1:
+                # All our neighbors are in the same chain already, so 
+                # we don't need to search again.
+                search_again[i] = 0
+        return search_again


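For context on the ckdtree changes above: the Minkowski-p machinery is dropped and every
distance is carried internally as a squared Euclidean separation with minimum-image
periodic wrapping (the chainHOP helpers assume a unit box). A small pure-NumPy sketch of
that distance convention, for illustration only (the function name and values are
illustrative, not part of the module):

    import numpy as np

    def periodic_sq_dist(x, y, period):
        # squared Euclidean separation with minimum-image wrapping,
        # matching the inlined brute-force loop in __query (p is fixed at 2)
        d = np.abs(np.asarray(x, dtype="float64") - np.asarray(y, dtype="float64"))
        d = np.minimum(d, np.asarray(period, dtype="float64") - d)
        return float(np.dot(d, d))

    # Two points 0.1 apart across the boundary of a unit box:
    # periodic_sq_dist([0.05, 0.5, 0.5], [0.95, 0.5, 0.5], [1.0, 1.0, 1.0]) -> 0.01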
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/utilities/spatial/setup.py
--- a/yt/utilities/spatial/setup.py
+++ b/yt/utilities/spatial/setup.py
@@ -1,8 +1,8 @@
 #!/usr/bin/env python
-
 from os.path import join
 
-def configuration(parent_package = '', top_path = None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
     from numpy.distutils.system_info import get_info
     from distutils.sysconfig import get_python_inc
@@ -36,21 +36,21 @@
 
     config.add_extension('ckdtree', sources=['ckdtree.pyx'],
         libraries=["m"],
-        include_dirs = [get_numpy_include_dirs()])
+        include_dirs=[get_numpy_include_dirs()])
 
     config.add_extension('_distance_wrap',
         sources=[join('src', 'distance_wrap.c'), join('src', 'distance.c')],
-        include_dirs = [get_numpy_include_dirs()])
+        include_dirs=[get_numpy_include_dirs()])
 
     return config
 
 if __name__ == '__main__':
     from numpy.distutils.core import setup
-    setup(maintainer = "SciPy Developers",
-          author = "Anne Archibald",
-          maintainer_email = "scipy-dev at scipy.org",
-          description = "Spatial algorithms and data structures",
-          url = "http://www.scipy.org",
-          license = "SciPy License (BSD Style)",
+    setup(maintainer="SciPy Developers",
+          author="Anne Archibald",
+          maintainer_email="scipy-dev at scipy.org",
+          description="Spatial algorithms and data structures",
+          url="http://www.scipy.org",
+          license="SciPy License (BSD Style)",
           **configuration(top_path='').todict()
           )


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/_colormap_data.py
--- a/yt/visualization/_colormap_data.py
+++ b/yt/visualization/_colormap_data.py
@@ -7757,3 +7757,44 @@
          1.0, 1.0, 1.0, 1.0, 1.0, 1.0]),
    )
 
+color_map_luts['B-W LINEAR'] = color_map_luts['idl00']
+color_map_luts['BLUE'] = color_map_luts['idl01']
+color_map_luts['GRN-RED-BLU-WHT'] = color_map_luts['idl02']
+color_map_luts['RED TEMPERATURE'] = color_map_luts['idl03']
+color_map_luts['BLUE'] = color_map_luts['idl04']
+color_map_luts['STD GAMMA-II'] = color_map_luts['idl05']
+color_map_luts['PRISM'] = color_map_luts['idl06']
+color_map_luts['RED-PURPLE'] = color_map_luts['idl07']
+color_map_luts['GREEN'] = color_map_luts['idl08']
+color_map_luts['GRN'] = color_map_luts['idl09']
+color_map_luts['GREEN-PINK'] = color_map_luts['idl10']
+color_map_luts['BLUE-RED'] = color_map_luts['idl11']
+color_map_luts['16 LEVEL'] = color_map_luts['idl12']
+color_map_luts['RAINBOW'] = color_map_luts['idl13']
+color_map_luts['STEPS'] = color_map_luts['idl14']
+color_map_luts['STERN SPECIAL'] = color_map_luts['idl15']
+color_map_luts['Haze'] = color_map_luts['idl16']
+color_map_luts['Blue - Pastel - Red'] = color_map_luts['idl17']
+color_map_luts['Pastels'] = color_map_luts['idl18']
+color_map_luts['Hue Sat Lightness 1'] = color_map_luts['idl19']
+color_map_luts['Hue Sat Lightness 2'] = color_map_luts['idl20']
+color_map_luts['Hue Sat Value 1'] = color_map_luts['idl21']
+color_map_luts['Hue Sat Value 2'] = color_map_luts['idl22']
+color_map_luts['Purple-Red + Stripes'] = color_map_luts['idl23']
+color_map_luts['Beach'] = color_map_luts['idl24']
+color_map_luts['Mac Style'] = color_map_luts['idl25']
+color_map_luts['Eos A'] = color_map_luts['idl26']
+color_map_luts['Eos B'] = color_map_luts['idl27']
+color_map_luts['Hardcandy'] = color_map_luts['idl28']
+color_map_luts['Nature'] = color_map_luts['idl29']
+color_map_luts['Ocean'] = color_map_luts['idl30']
+color_map_luts['Peppermint'] = color_map_luts['idl31']
+color_map_luts['Plasma'] = color_map_luts['idl32']
+color_map_luts['Blue-Red'] = color_map_luts['idl33']
+color_map_luts['Rainbow'] = color_map_luts['idl34']
+color_map_luts['Blue Waves'] = color_map_luts['idl35']
+color_map_luts['Volcano'] = color_map_luts['idl36']
+color_map_luts['Waves'] = color_map_luts['idl37']
+color_map_luts['Rainbow18'] = color_map_luts['idl38']
+color_map_luts['Rainbow + white'] = color_map_luts['idl39']
+color_map_luts['Rainbow + black'] = color_map_luts['idl40']


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/api.py
--- a/yt/visualization/api.py
+++ b/yt/visualization/api.py
@@ -50,7 +50,8 @@
     annotate_image, \
     apply_colormap, \
     scale_image, \
-    write_projection
+    write_projection, \
+    write_fits
 
 from plot_modifications import \
     PlotCallback, \


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -251,7 +251,8 @@
 
 #=============================================================================
 
-    def axis_box_yt(self, plot, units=None, bare_axes=False, **kwargs):
+    def axis_box_yt(self, plot, units=None, bare_axes=False,
+                    tickcolor=None, **kwargs):
         r"""Wrapper around DualEPS.axis_box to automatically fill in the
         axis ranges and labels from a yt plot.
 
@@ -293,9 +294,11 @@
                 _xlabel = ""
                 _ylabel = ""
             else:
+                units = units.replace('mpc', 'Mpc')
                 _xlabel = '%s (%s)' % (x_names[plot.data.axis], units)
                 _ylabel = '%s (%s)' % (y_names[plot.data.axis], units)
-            _tickcolor = pyx.color.cmyk.white
+            if tickcolor == None:
+                _tickcolor = pyx.color.cmyk.white
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -307,7 +310,10 @@
             else:
                 _xlabel = plot._x_label
                 _ylabel = plot._y_label
-            _tickcolor = None
+            if tickcolor == None:
+                _tickcolor = None
+        if tickcolor != None:
+            _tickcolor = tickcolor
         self.axis_box(xrange=_xrange, yrange=_yrange, xlabel=_xlabel,
                       ylabel=_ylabel, tickcolor=_tickcolor, xlog=_xlog,
                       ylog=_ylog, bare_axes=bare_axes, **kwargs)
@@ -349,7 +355,7 @@
 
 #=============================================================================
 
-    def insert_image_yt(self, plot, pos=(0,0)):
+    def insert_image_yt(self, plot, pos=(0,0), scale=1.0):
         r"""Inserts a bitmap taken from a yt plot.
 
         Parameters
@@ -397,8 +403,8 @@
                                  figure_canvas.tostring_rgb())
         #figure_canvas.print_png('test.png')
         self.canvas.insert(pyx.bitmap.bitmap(pos[0], pos[1], image,
-                                             width=self.figsize[0],
-                                             height=self.figsize[1]))
+                                             width=scale*self.figsize[0],
+                                             height=scale*self.figsize[1]))
 
 #=============================================================================
 
@@ -469,7 +475,7 @@
 
         cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string)
         if orientation == "top" or orientation == "bottom":
-            imorigin = (imorigin[0] - shift[0], imorigin[1] - shift[1])
+            imorigin = (imorigin[0] - shift[0], imorigin[1] + shift[1])
             self.canvas.insert(pyx.bitmap.bitmap(imorigin[0], imorigin[1], cmap_im,
                                                  width=-size[0], height=size[1]))
         else:
@@ -870,44 +876,43 @@
                 if cb_flags != None:
                     if cb_flags[index] == False:
                         continue
-                if _yt or colorbars[index] != None:
-                    if ncol == 1:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif i == 0:
-                        orientation = "left"
-                        xpos = bbox[0]
-                        ypos = ypos0
-                    elif i+1 == ncol:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif j == 0:
-                        orientation = "bottom"
-                        ypos = bbox[2]
-                        xpos = xpos0
-                    elif j+1 == nrow:
-                        orientation = "top"
-                        ypos = bbox[3]
-                        xpos = xpos0
+                if ncol == 1:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j == 0:
+                    orientation = "bottom"
+                    ypos = bbox[2]
+                    xpos = xpos0
+                elif i == 0:
+                    orientation = "left"
+                    xpos = bbox[0]
+                    ypos = ypos0
+                elif i+1 == ncol:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j+1 == nrow:
+                    orientation = "top"
+                    ypos = bbox[3]
+                    xpos = xpos0
+                else:
+                    orientation = None  # Marker for interior plot
+
+                if orientation != None:
+                    if _yt:
+                        d.colorbar_yt(yt_plots[index],
+                                      pos=[xpos,ypos],
+                                      shrink=shrink_cb,
+                                      orientation=orientation)
                     else:
-                        orientation = None  # Marker for interior plot
-
-                    if orientation != None:
-                        if _yt:
-                            d.colorbar_yt(yt_plots[index],
-                                          pos=[xpos,ypos],
-                                          shrink=shrink_cb,
-                                          orientation=orientation)
-                        else:
-                            d.colorbar(colorbars[index]["cmap"],
-                                       zrange=colorbars[index]["range"],
-                                       label=colorbars[index]["name"],
-                                       log=colorbars[index]["log"],
-                                       orientation=orientation,
-                                       pos=[xpos,ypos],
-                                       shrink=shrink_cb)
+                        d.colorbar(colorbars[index]["cmap"],
+                                   zrange=colorbars[index]["range"],
+                                   label=colorbars[index]["name"],
+                                   log=colorbars[index]["log"],
+                                   orientation=orientation,
+                                   pos=[xpos,ypos],
+                                   shrink=shrink_cb)
 
     if savefig != None:
         d.save_fig(savefig, format=format)
@@ -957,7 +962,7 @@
 #=============================================================================
 
 def single_plot(plot, figsize=(12,12), cb_orient="right", bare_axes=False,
-                savefig=None, file_format='eps'):
+                savefig=None, colorbar=True, file_format='eps', **kwargs):
     r"""Wrapper for DualEPS routines to create a figure directy from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
@@ -974,6 +979,8 @@
         Set to true to have no annotations or tick marks on all of the axes.
     savefig : string
         Name of the saved file without the extension.
+    colorbar : boolean
+        Set to true to include a colorbar
     file_format : string
         Format type.  Can be "eps" or "pdf"
 
@@ -985,8 +992,9 @@
     """
     d = DualEPS(figsize=figsize)
     d.insert_image_yt(plot)
-    d.axis_box_yt(plot, bare_axes=bare_axes)
-    d.colorbar_yt(plot, orientation=cb_orient)
+    d.axis_box_yt(plot, bare_axes=bare_axes, **kwargs)
+    if colorbar:
+        d.colorbar_yt(plot, orientation=cb_orient)
     if savefig != None:
         d.save_fig(savefig, format=file_format)
     return d


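A quick usage sketch of the new single_plot keywords (the plot object p below stands in
for any existing yt plot, e.g. one taken from a PlotCollection, and is not defined here;
extra keywords such as tickcolor are forwarded to axis_box_yt via **kwargs):

    from yt.visualization.eps_writer import single_plot

    # p is an existing yt plot object (placeholder).
    d = single_plot(p, cb_orient="right", colorbar=False,
                    savefig="density_slice", file_format="eps")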
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -214,21 +214,23 @@
             output.create_dataset(field,data=self[field])
         output.close()
 
-    def export_fits(self, filename_prefix, fields = None, clobber=False):
+    def export_fits(self, filename_prefix, fields = None, clobber=False,
+                    other_keys=None, gzip_file=False, units="1"):
+
         """
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filenames are
-        *filename_prefix* plus an underscore plus the name of the field. If 
-        clobber is set to True, this will overwrite any existing FITS file.
+        or all the fields already in the object.  The output filename is
+        *filename_prefix*. If clobber is set to True, this will overwrite any
+        existing FITS file.
 
         This requires the *pyfits* module, which is a standalone module
         provided by STSci to interface with FITS-format files.
         """
-        r"""Export a set of pixelized fields to a set of fits files.
+        r"""Export a set of pixelized fields to a FITS file.
 
         This will export a set of FITS images of either the fields specified
-        or all the fields already in the object.  The output filenames are
-        the specified prefix plus an underscore plus the name of the field.
+        or all the fields already in the object.  The output filename is
+        the specified prefix.
 
         Parameters
         ----------
@@ -238,21 +240,90 @@
             These fields will be pixelized and output.
         clobber : boolean
             If the file exists, this governs whether we will overwrite.
+        other_keys : dictionary, optional
+            A set of header keys and values to write into the FITS header.
+        gzip_file : boolean, optional
+            gzip the file after writing, default False
+        units : string, optional
+            the length units that the coordinates are written in, default '1'
         """
+        
         import pyfits
+        from os import system
+        
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
         if fields is None: 
             fields = [field for field in self.data_source.fields 
                       if field not in extra_fields]
+
+        nx, ny = self.buff_size
+        dx = (self.bounds[1]-self.bounds[0])/nx*self.pf[units]
+        dy = (self.bounds[3]-self.bounds[2])/ny*self.pf[units]
+        xmin = self.bounds[0]*self.pf[units]
+        ymin = self.bounds[2]*self.pf[units]
+        simtime = self.pf.current_time
+
+        hdus = []
+
+        first = True
+        
         for field in fields:
-            hdu = pyfits.PrimaryHDU(self[field])
+
+            if (first) :
+                hdu = pyfits.PrimaryHDU(self[field])
+                first = False
+            else :
+                hdu = pyfits.ImageHDU(self[field])
+                
             if self.data_source.has_key('weight_field'):
                 weightname = self.data_source._weight
                 if weightname is None: weightname = 'None'
                 field = field +'_'+weightname
-            hdu.writeto("%s_%s.fits" % (filename_prefix, field),clobber=clobber)
 
+            hdu.header.update("Field", field)
+            hdu.header.update("Time", simtime)
+
+            hdu.header.update('WCSNAMEP', "PHYSICAL")            
+            hdu.header.update('CTYPE1P', "LINEAR")
+            hdu.header.update('CTYPE2P', "LINEAR")
+            hdu.header.update('CRPIX1P', 0.5)
+            hdu.header.update('CRPIX2P', 0.5)
+            hdu.header.update('CRVAL1P', xmin)
+            hdu.header.update('CRVAL2P', ymin)
+            hdu.header.update('CDELT1P', dx)
+            hdu.header.update('CDELT2P', dy)
+                    
+            hdu.header.update('CTYPE1', "LINEAR")
+            hdu.header.update('CTYPE2', "LINEAR")                                
+            hdu.header.update('CUNIT1', units)
+            hdu.header.update('CUNIT2', units)
+            hdu.header.update('CRPIX1', 0.5)
+            hdu.header.update('CRPIX2', 0.5)
+            hdu.header.update('CRVAL1', xmin)
+            hdu.header.update('CRVAL2', ymin)
+            hdu.header.update('CDELT1', dx)
+            hdu.header.update('CDELT2', dy)
+
+            if (other_keys is not None) :
+
+                for k,v in other_keys.items() :
+
+                    hdu.header.update(k,v)
+
+            hdus.append(hdu)
+
+            del hdu
+            
+        hdulist = pyfits.HDUList(hdus)
+
+        hdulist.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+        
+        if (gzip_file) :
+            clob = ""
+            if (clobber) : clob = "-f"
+            system("gzip "+clob+" %s.fits" % (filename_prefix))
+        
     def open_in_ds9(self, field, take_log=True):
         """
         This will open a given field in the DS9 viewer.


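A hedged example of the reworked export_fits call (frb stands for an existing
FixedResolutionBuffer; the field name, header key, and unit string are illustrative):

    # frb is an existing FixedResolutionBuffer (placeholder).
    frb.export_fits("density_slice", fields=["Density"], clobber=True,
                    other_keys={"OBSERVER": "yt"}, units="kpc",
                    gzip_file=False)
    # writes a single multi-HDU file, density_slice.fits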
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/image_panner/setup.py
--- a/yt/visualization/image_panner/setup.py
+++ b/yt/visualization/image_panner/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('image_panner',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('image_panner', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -411,3 +411,70 @@
 
     pl.clf()
     pl.close()
+
+def write_fits(image, filename_prefix, clobber=True, coords=None, gzip_file=False) :
+
+    """
+    This will export a FITS image of a floating point array. The output filename is
+    *filename_prefix*. If clobber is set to True, this will overwrite any existing
+    FITS file.
+    
+    This requires the *pyfits* module, which is a standalone module
+    provided by STSci to interface with FITS-format files.
+    """
+    r"""Write out a floating point array directly to a FITS file, optionally
+    adding coordinates. 
+        
+    Parameters
+    ----------
+    image : array_like
+        This is an (unscaled) array of floating point values, shape (N,N), to save
+        in a FITS file.
+    filename_prefix : string
+        This prefix will be used for the FITS file name.
+    clobber : boolean
+        If the file exists, this governs whether we will overwrite.
+    coords : dictionary, optional
+        A set of header keys and values to write to the FITS header to set up
+        a coordinate system. 
+    gzip_file : boolean, optional
+        gzip the file after writing, default False
+    """
+    
+    import pyfits
+    from os import system
+    
+    if filename_prefix.endswith('.fits'): filename_prefix=filename_prefix[:-5]
+    
+    hdu = pyfits.PrimaryHDU(image)
+
+    if (coords is not None) :
+
+        hdu.header.update('WCSNAMEP', "PHYSICAL")
+        hdu.header.update('CTYPE1P', "LINEAR")
+        hdu.header.update('CTYPE2P', "LINEAR")
+        hdu.header.update('CRPIX1P', 0.5)
+        hdu.header.update('CRPIX2P', 0.5)
+        hdu.header.update('CRVAL1P', coords["xmin"])
+        hdu.header.update('CRVAL2P', coords["ymin"])
+        hdu.header.update('CDELT1P', coords["dx"])
+        hdu.header.update('CDELT2P', coords["dy"])
+        
+        hdu.header.update('CTYPE1', "LINEAR")
+        hdu.header.update('CTYPE2', "LINEAR")
+        hdu.header.update('CUNIT1', coords["units"])
+        hdu.header.update('CUNIT2', coords["units"])
+        hdu.header.update('CRPIX1', 0.5)
+        hdu.header.update('CRPIX2', 0.5)
+        hdu.header.update('CRVAL1', coords["xmin"])
+        hdu.header.update('CRVAL2', coords["ymin"])
+        hdu.header.update('CDELT1', coords["dx"])
+        hdu.header.update('CDELT2', coords["dy"])
+
+    hdu.writeto("%s.fits" % (filename_prefix), clobber=clobber)
+
+    if (gzip_file) :
+        clob = ""
+        if (clobber) : clob="-f"
+        system("gzip "+clob+" %s.fits" % (filename_prefix))
+    


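Correspondingly, the new write_fits helper takes an array plus an optional coordinate
dictionary; a minimal sketch (array contents and unit string are illustrative):

    import numpy as np
    from yt.visualization.image_writer import write_fits

    image = np.random.random((512, 512))  # any (N, N) float array
    coords = {"xmin": 0.0, "ymin": 0.0, "dx": 1.0, "dy": 1.0, "units": "kpc"}
    write_fits(image, "my_image", clobber=True, coords=coords)
    # produces my_image.fits; pass gzip_file=True to compress it afterwards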
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -940,7 +940,7 @@
                                   x_bins, fields[0], x_min, x_max, x_log,
                                   lazy_reader)
         if len(fields) > 1:
-            profile.add_fields(fields[1], weight=weight, accumulation=accumulation)
+            profile.add_fields(fields[1:], weight=weight, accumulation=accumulation)
         if id is None: id = self._get_new_id()
         p = self._add_plot(Profile1DPlot(profile, fields, id,
                                                    axes=axes, figure=figure))
@@ -1148,13 +1148,15 @@
                                   x_bins, fields[0], x_min, x_max, x_log,
                                   y_bins, fields[1], y_min, y_max, y_log,
                                   lazy_reader)
+        # This will add all the fields to the profile object
+        if len(fields)>2:
+            profile.add_fields(fields[2:], weight=weight,
+                    accumulation=accumulation, fractional=fractional)
+
         if id is None: id = self._get_new_id()
         p = self._add_plot(PhasePlot(profile, fields, 
                                                id, cmap=cmap,
                                                figure=figure, axes=axes))
-        if len(fields) > 2:
-            # This will add all the fields to the profile object
-            p.switch_z(fields[2], weight=weight, accumulation=accumulation, fractional=fractional)
         return p
 
     def add_phase_sphere(self, radius, unit, fields, center = None, cmap=None,


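With the change above, any fields beyond the first two are binned into the profile object
when the phase plot is created, rather than added afterwards through switch_z. For
example (pc is an existing PlotCollection; the field names are illustrative, and it is
assumed the sphere wrapper forwards the full field list as usual):

    # pc is an existing PlotCollection (placeholder).
    p = pc.add_phase_sphere(1.0, "mpc",
                            ["Density", "Temperature", "CellMassMsun"])
    # the third (and any further) field is added to the profile up front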
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -36,6 +36,7 @@
     y_dict, \
     axis_names
 from .color_maps import yt_colormaps, is_colormap
+from yt.utilities.exceptions import YTNoDataInObjectError
 
 class CallbackRegistryHandler(object):
     def __init__(self, plot):
@@ -379,6 +380,8 @@
 
     def _redraw_image(self, *args):
         buff = self._get_buff()
+        if self[self.axis_names["Z"]].size == 0:
+            raise YTNoDataInObjectError(self.data)
         mylog.debug("Received buffer of min %s and max %s (data: %s %s)",
                     na.nanmin(buff), na.nanmax(buff),
                     self[self.axis_names["Z"]].min(),


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -24,6 +24,7 @@
 """
 import base64
 import matplotlib.pyplot
+import cStringIO
 from functools import wraps
 
 import numpy as na
@@ -36,27 +37,60 @@
 
 from yt.funcs import *
 from yt.utilities.amr_utils import write_png_to_string
+from yt.utilities.definitions import \
+    x_dict, x_names, \
+    y_dict, y_names, \
+    axis_names, \
+    axis_labels
 
 def invalidate_data(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
-        f(*args, **kwargs)
+        rv = f(*args, **kwargs)
         args[0]._data_valid = False
         args[0]._plot_valid = False
         args[0]._recreate_frb()
         if args[0]._initfinished:
             args[0]._setup_plots()
+        return rv
     return newfunc
 
 def invalidate_plot(f):
     @wraps(f)
     def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
         args[0]._plot_valid = False
         args[0]._setup_plots()
-        return f(*args, **kwargs)
+        return rv
     return newfunc
 
+field_transforms = {}
+
+class FieldTransform(object):
+    def __init__(self, name, func, locator):
+        self.name = name
+        self.func = func
+        self.locator = locator
+        field_transforms[name] = self
+
+    def __call__(self, *args, **kwargs):
+        return self.func(*args, **kwargs)
+
+    def ticks(self, mi, ma):
+        try:
+            ticks = self.locator(mi, ma)
+        except:
+            ticks = []
+        return ticks
+
+log_transform = FieldTransform('log10', na.log10, LogLocator())
+linear_transform = FieldTransform('linear', lambda x: x, LinearLocator())
+
 class PlotWindow(object):
+    _plot_valid = False
+    _colorbar_valid = False
+    _contour_info = None
+    _vector_info = None
     def __init__(self, data_source, bounds, buff_size=(800,800), antialias = True, periodic = True):
         r"""
         PlotWindow(data_source, bounds, buff_size=(800,800), antialias = True)
@@ -232,6 +266,18 @@
     def set_antialias(self,aa):
         self.antialias = aa
 
+    @invalidate_plot
+    def set_contour_info(self, field_name, n_cont = 8, colors = None,
+                         logit = True):
+        if field_name == "None" or n_cont == 0:
+            self._contour_info = None
+            return
+        self._contour_info = (field_name, n_cont, colors, logit)
+
+    @invalidate_plot
+    def set_vector_info(self, skip, scale = 1):
+        self._vector_info = (skip, scale)
+
 class PWViewer(PlotWindow):
     """A viewer for PlotWindows.
 
@@ -240,16 +286,17 @@
         setup = kwargs.pop("setup", True)
         PlotWindow.__init__(self, *args,**kwargs)
         self._field_transform = {}
+        self._colormaps = defaultdict(lambda: 'algae')
         for field in self._frb.data.keys():
             if self._frb.pf.field_info[field].take_log:
-                self._field_transform[field] = na.log
+                self._field_transform[field] = log_transform
             else:
-                self._field_transform[field] = lambda x: x
+                self._field_transform[field] = linear_transform
 
         if setup: self._setup_plots()
 
     @invalidate_plot
-    def set_log(self,field,log):
+    def set_log(self, field, log):
         """set a field to log or linear.
         
         Parameters
@@ -261,16 +308,20 @@
 
         """
         if log:
-            self._field_transform[field] = na.log
+            self._field_transform[field] = log_transform
         else:
-            self._field_transform[field] = lambda x: x
-
-    def set_transform(self, field, func):
-        self._field_transform[field] = func
+            self._field_transform[field] = linear_transform
 
     @invalidate_plot
-    def set_cmap(self):
-        pass
+    def set_transform(self, field, name):
+        if name not in field_transforms: 
+            raise KeyError(name)
+        self._field_transform[field] = field_transforms[name]
+
+    @invalidate_plot
+    def set_cmap(self, field, cmap_name):
+        self._colorbar_valid = False
+        self._colormaps[field] = cmap_name
 
     @invalidate_plot
     def set_zlim(self):
@@ -309,7 +360,11 @@
 <br>
 Field of View:  %(x_width)0.3f %(unit)s<br>
 Minimum Value:  %(mi)0.3e %(units)s<br>
-Maximum Value:  %(ma)0.3e %(units)s
+Maximum Value:  %(ma)0.3e %(units)s<br>
+Central Point:  (data coords)<br>
+   %(xc)0.14f<br>
+   %(yc)0.14f<br>
+   %(zc)0.14f
 """
 
 class PWViewerExtJS(PWViewer):
@@ -319,7 +374,6 @@
     _ext_widget_id = None
     _current_field = None
     _widget_name = "plot_window"
-    cmap = 'algae'
 
     def _setup_plots(self):
         from yt.gui.reason.bottle_mods import PayloadHandler
@@ -332,18 +386,21 @@
         else:
             fields = self._frb.data.keys()
             addl_keys = {}
+        if self._colorbar_valid == False:
+            addl_keys['colorbar_image'] = self._get_cbar_image()
+            self._colorbar_valid = True
         min_zoom = 200*self._frb.pf.h.get_smallest_dx() * self._frb.pf['unitary']
         for field in fields:
-            to_plot = apply_colormap(self._frb[field], func = self._field_transform[field])
-            pngs = write_png_to_string(to_plot)
+            to_plot = apply_colormap(self._frb[field],
+                func = self._field_transform[field],
+                cmap_name = self._colormaps[field])
+            pngs = self._apply_modifications(to_plot)
             img_data = base64.b64encode(pngs)
             # We scale the width between 200*min_dx and 1.0
             x_width = self.xlim[1] - self.xlim[0]
             zoom_fac = na.log10(x_width*self._frb.pf['unitary'])/na.log10(min_zoom)
             zoom_fac = 100.0*max(0.0, zoom_fac)
-            ticks = self.get_ticks(self._frb[field].min(),
-                                   self._frb[field].max(), 
-                                   take_log = self._frb.pf.field_info[field].take_log)
+            ticks = self.get_ticks(field)
             payload = {'type':'png_string',
                        'image_data':img_data,
                        'metadata_string': self.get_metadata(field),
@@ -352,34 +409,94 @@
             payload.update(addl_keys)
             ph.add_payload(payload)
 
-    def get_ticks(self, mi, ma, height = 400, take_log = False):
+    def _apply_modifications(self, img):
+        if self._contour_info is None and self._vector_info is None:
+            return write_png_to_string(img)
+        from matplotlib.figure import Figure
+        from yt.visualization._mpl_imports import \
+            FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+        from yt.utilities.delaunay.triangulate import Triangulation as triang
+
+        vi, vj, vn = img.shape
+
+        # Now we need to get our field values
+        fig = Figure((vi/100.0, vj/100.0), dpi = 100)
+        fig.figimage(img)
+        # Add our contour
+        ax = fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
+        ax.patch.set_alpha(0.0)
+
+        # Now apply our modifications
+        self._apply_contours(ax, vi, vj)
+        self._apply_vectors(ax, vi, vj)
+
+        canvas = FigureCanvasAgg(fig)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = f.read()
+        return img
+
+    def _apply_contours(self, ax, vi, vj):
+        if self._contour_info is None: return 
+        plot_args = {}
+        field, number, colors, logit = self._contour_info
+        if colors is not None: plot_args['colors'] = colors
+
+        raw_data = self._frb.data_source
+        b = self._frb.bounds
+        xi, yi = na.mgrid[b[0]:b[1]:(vi / 8) * 1j,
+                          b[2]:b[3]:(vj / 8) * 1j]
+        x = raw_data['px']
+        y = raw_data['py']
+        z = raw_data[field]
+        if logit: z = na.log10(z)
+        fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
+
+        ax.contour(fvals, number, colors='w')
+        
+    def _apply_vectors(self, ax, vi, vj):
+        if self._vector_info is None: return 
+        skip, scale = self._vector_info
+
+        nx = self._frb.buff_size[0]/skip
+        ny = self._frb.buff_size[1]/skip
+        new_frb = FixedResolutionBuffer(self._frb.data_source,
+                        self._frb.bounds, (nx,ny))
+
+        axis = self._frb.data_source.axis
+        fx = "%s-velocity" % (axis_names[x_dict[axis]])
+        fy = "%s-velocity" % (axis_names[y_dict[axis]])
+        px = new_frb[fx][::-1,:]
+        py = new_frb[fy][::-1,:]
+        x = na.mgrid[0:vi-1:ny*1j]
+        y = na.mgrid[0:vj-1:nx*1j]
+        # Always normalize, then we scale
+        nn = ((px**2.0 + py**2.0)**0.5).max()
+        px /= nn
+        py /= nn
+        print scale, px.min(), px.max(), py.min(), py.max()
+        ax.quiver(x, y, px, py, scale=float(vi)/skip)
+        
+    def get_ticks(self, field, height = 400):
         # This will eventually change to work with non-logged fields
         ticks = []
-        if take_log and mi > 0.0 and ma > 0.0:
-            ll = LogLocator() 
-            tick_locs = ll(mi, ma)
-            mi = na.log10(mi)
-            ma = na.log10(ma)
-            for v1,v2 in zip(tick_locs, na.log10(tick_locs)):
-                if v2 < mi or v2 > ma: continue
-                p = height - height * (v2 - mi)/(ma - mi)
-                ticks.append((p,v1,v2))
-                #print v1, v2, mi, ma, height, p
-        else:
-            ll = LinearLocator()
-            tick_locs = ll(mi, ma)
-            for v in tick_locs:
-                p = height - height * (v - mi)/(ma-mi)
-                ticks.append((p,v,"%0.3e" % (v)))
-
+        transform = self._field_transform[field]
+        mi, ma = self._frb[field].min(), self._frb[field].max()
+        tick_locs = transform.ticks(mi, ma)
+        mi, ma = transform((mi, ma))
+        for v1,v2 in zip(tick_locs, transform(tick_locs)):
+            if v2 < mi or v2 > ma: continue
+            p = height - height * (v2 - mi)/(ma - mi)
+            ticks.append((p,v1,v2))
         return ticks
 
-    def _get_cbar_image(self, height = 400, width = 40):
-        # Right now there's just the single 'cmap', but that will eventually
-        # change.  I think?
+    def _get_cbar_image(self, height = 400, width = 40, field = None):
+        if field is None: field = self._current_field
+        cmap_name = self._colormaps[field]
         vals = na.mgrid[1:0:height * 1j] * na.ones(width)[:,None]
         vals = vals.transpose()
-        to_plot = apply_colormap(vals)
+        to_plot = apply_colormap(vals, cmap_name = cmap_name)
         pngs = write_png_to_string(to_plot)
         img_data = base64.b64encode(pngs)
         return img_data
@@ -402,11 +519,21 @@
         y_width = self.ylim[1] - self.ylim[0]
         unit = get_smallest_appropriate_unit(x_width, self._frb.pf)
         units = self.get_field_units(field)
+        center = getattr(self._frb.data_source, "center", None)
+        if center is None or self._frb.axis == 4:
+            xc, yc, zc = -999, -999, -999
+        else:
+            center[x_dict[self._frb.axis]] = 0.5 * (
+                self.xlim[0] + self.xlim[1])
+            center[y_dict[self._frb.axis]] = 0.5 * (
+                self.ylim[0] + self.ylim[1])
+            xc, yc, zc = center
         md = _metadata_template % dict(
                 pf = self._frb.pf,
                 x_width = x_width*self._frb.pf[unit],
                 y_width = y_width*self._frb.pf[unit],
-                unit = unit, units = units, mi = mi, ma = ma)
+                unit = unit, units = units, mi = mi, ma = ma,
+                xc = xc, yc = yc, zc = zc)
         return md
 
     def image_recenter(self, img_x, img_y, img_size_x, img_size_y):
@@ -422,9 +549,9 @@
         self._current_field = field
         self._frb[field]
         if self._frb.pf.field_info[field].take_log:
-            self._field_transform[field] = na.log
+            self._field_transform[field] = log_transform
         else:
-            self._field_transform[field] = lambda x: x
+            self._field_transform[field] = linear_transform
 
     def get_field_units(self, field, strip_mathml = True):
         ds = self._frb.data_source
@@ -439,7 +566,6 @@
             units = units.replace(r"\rm{", "").replace("}","")
         return units
 
-
 class YtPlot(object):
     """A base class for all yt plots. It should abstract the actual
     plotting engine completely, allowing plotting even without matplotlib. 
@@ -474,7 +600,6 @@
 class Yt2DPlot(YtPlot):
     zmin = None
     zmax = None
-    cmap = 'algae'
     zlabel = None
 
     # def __init__(self, data):
@@ -485,17 +610,14 @@
         self.zmin = zmin
         self.zmax = zmax
 
-    @invalidate_plot
-    def set_cmap(self,cmap):
-        self.cmap = cmap
-
 class YtWindowPlot(Yt2DPlot):
     def __init__(self, data, size=(10,8)):
         YtPlot.__init__(self, data, size)
         self.__init_image(data)
 
     def __init_image(self, data):
-        self.image = self.axes.imshow(data,cmap=self.cmap)
+        #self.image = self.axes.imshow(data, cmap=self.cmap)
+        pass
 
 class YtProfilePlot(Yt2DPlot):
     def __init__(self):


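The per-field transform and colormap plumbing above is driven through the PWViewer
setters; a short usage sketch (pw stands for an existing PWViewer instance, and the field
name is illustrative):

    # pw is an existing PWViewer (placeholder).
    pw.set_log("Density", True)            # use the registered log10 transform
    pw.set_transform("Density", "linear")  # or select a registered transform by name
    pw.set_cmap("Density", "algae")        # per-field colormap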
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -340,6 +340,8 @@
             func = na.log10
         else:
             func = lambda a: a
+        raw_data = na.repeat(raw_data, 3, axis=0)
+        raw_data = na.repeat(raw_data, 3, axis=1)
         to_plot = apply_colormap(raw_data, self.plot.cbar.bounds,
                                  self.plot.cbar.cmap, func)
         if self.plot.cbar.scale == 'log':


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -1,12 +1,13 @@
 #!/usr/bin/env python
 import setuptools
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('visualization',parent_package,top_path)
+    config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
-    config.make_config_py() # installs __config__.py
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])
     return config


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/volume_rendering/__init__.py
--- a/yt/visualization/volume_rendering/__init__.py
+++ b/yt/visualization/volume_rendering/__init__.py
@@ -27,4 +27,3 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-


diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -680,7 +680,7 @@
         self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def snapshot(self, fn = None):
+    def snapshot(self, fn = None, clim = None):
         nv = 12*self.nside**2
         image = na.zeros((nv,1,3), dtype='float64', order='C')
         vs = arr_pix2vec_nest(self.nside, na.arange(nv))
@@ -718,6 +718,7 @@
             implot = ax.imshow(img, extent=(-pi,pi,-pi/2,pi/2), clip_on=False, aspect=0.5)
             cb = fig.colorbar(implot, orientation='horizontal')
             cb.set_label(r"$\mathrm{log}\/\mathrm{Column}\/\mathrm{Density}\/[\mathrm{g}/\mathrm{cm}^2]$")
+            if clim is not None: cb.set_clim(*clim)
             ax.xaxis.set_ticks(())
             ax.yaxis.set_ticks(())
             canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
@@ -1269,7 +1270,8 @@
         return R
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None, no_ghost = True):
+                        field, weight = None, volume = None, no_ghost = True,
+                        north_vector = None):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -1336,7 +1338,8 @@
     cam = pf.h.camera(center, normal_vector, width, resolution, tf,
                       fields = fields,
                       log_fields = [False] * len(fields),
-                      volume = volume, no_ghost = no_ghost)
+                      volume = volume, no_ghost = no_ghost,
+                      north_vector = north_vector)
     vals = cam.snapshot()
     image = vals[:,:,0]
     if weight is None:


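A usage sketch of the new north_vector keyword on off_axis_projection (pf is a loaded
parameter file; the numbers and field name are illustrative):

    from yt.visualization.volume_rendering.camera import off_axis_projection

    # pf is a loaded parameter file (placeholder).
    image = off_axis_projection(pf, [0.5, 0.5, 0.5], [1.0, 1.0, 0.0],
                                0.25, 512, "Density", weight=None,
                                north_vector=[0.0, 0.0, 1.0])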
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/volume_rendering/image_handling.py
--- a/yt/visualization/volume_rendering/image_handling.py
+++ b/yt/visualization/volume_rendering/image_handling.py
@@ -26,7 +26,6 @@
 try: import pyfits
 except: pass
 import numpy as na
-import matplotlib; from matplotlib import pylab
 
 from yt.funcs import *
 
@@ -45,18 +44,12 @@
         f.close()
     if fits:
         try:
-            hdu = pyfits.PrimaryHDU(image[:,:,0])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_r.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,1])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_g.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,2])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_b.fits'%fn,clobber=True)
-            hdu = pyfits.PrimaryHDU(image[:,:,3])
-            hdulist = pyfits.HDUList([hdu])
-            hdulist.writeto('%s_a.fits'%fn,clobber=True)
+            hdur = pyfits.PrimaryHDU(image[:,:,0])
+            hdug = pyfits.ImageHDU(image[:,:,1])
+            hdub = pyfits.ImageHDU(image[:,:,2])
+            hdua = pyfits.ImageHDU(image[:,:,3])
+            hdulist = pyfits.HDUList([hdur,hdug,hdub,hdua])
+            hdulist.writeto('%s.fits'%fn,clobber=True)
         except: print 'You do not have pyfits, install before attempting to use fits exporter'
 
 def import_rgba(name, h5=True):
@@ -88,6 +81,8 @@
     elements.  Optionally, *label*, *label_color* and *label_size* may be
     specified.
     """
+    import matplotlib
+    import pylab
     Nvec = image.shape[0]
     image[na.isnan(image)] = 0.0
     ma = image[image>0.0].max()
@@ -116,6 +111,7 @@
     with "_rgb.png."  *label*, *label_color* and *label_size* may also be
     specified.
     """
+    import pylab
     Nvec = image.shape[0]
     image[na.isnan(image)] = 0.0
     if image.shape[2] >= 4:


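Since the RGBA channels now land in a single multi-extension FITS file rather than four
separate files, they can be read back along these lines (the filename is whatever was
passed to the exporter):

    import pyfits

    hdulist = pyfits.open("render.fits")
    r, g, b, a = [hdulist[i].data for i in range(4)]
    hdulist.close()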
diff -r a3bc652df01effa16b1476210b0d9cf00626cf0e -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 yt/visualization/volume_rendering/setup.py
--- a/yt/visualization/volume_rendering/setup.py
+++ b/yt/visualization/volume_rendering/setup.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path
-
+import os
+import sys
 import os.path
 
 #os.system("cython -a yt/extensions/volume_rendering/VolumeIntegrator.pyx")
 
-def configuration(parent_package='',top_path=None):
+
+def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('volume_rendering',parent_package,top_path)
-    config.make_config_py() # installs __config__.py
+    config = Configuration('volume_rendering', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config



https://bitbucket.org/yt_analysis/yt/changeset/c309f591b93f/
changeset:   c309f591b93f
branch:      yt
user:        Christopher Moody
date:        2012-04-14 18:02:08
summary:     added Rockstar halo class
affected #:  1 file

diff -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 -r c309f591b93ff4e1628d1de8e8facfa5b8e4af2b yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -151,7 +151,7 @@
         if not os.path.exists(fn): return
         with open(fn, 'r') as f:
             lines = f.readlines()
-            self.num_stars = int(lines[0].strip())
+            self.num_stars = int(lines[0].strip()[0])
             for line in lines[1:]:
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])



https://bitbucket.org/yt_analysis/yt/changeset/eb9e749e7253/
changeset:   eb9e749e7253
branch:      yt
user:        Christopher Moody
date:        2012-04-14 18:16:03
summary:     added the ability to pass DM particle mass directly
affected #:  2 files

diff -r c309f591b93ff4e1628d1de8e8facfa5b8e4af2b -r eb9e749e7253e4a3f3ef8ba38b2597d424bc9378 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -80,7 +80,7 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1):
+    def run(self, block_ratio = 1,**kwargs):
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
@@ -89,7 +89,8 @@
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
-                    block_ratio = block_ratio)
+                    block_ratio = block_ratio,
+                    **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:


diff -r c309f591b93ff4e1628d1de8e8facfa5b8e4af2b -r eb9e749e7253e4a3f3ef8ba38b2597d424bc9378 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -314,7 +314,7 @@
             PARALLEL_IO_SERVER_PORT = server_port
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "ASCII"
+        OUTPUT_FORMAT = "BOTH"
         NUM_SNAPS = 1
         NUM_READERS = num_readers
         NUM_BLOCKS = num_readers * block_ratio
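
Because run() now forwards extra keyword arguments to setup_rockstar(), the DM particle mass can be handed to Rockstar directly instead of being read off the first grid. A rough sketch (the dataset and mass value are placeholders; particle_mass is one of setup_rockstar's existing keywords):

    from yt.mods import load
    from yt.analysis_modules.halo_finding.rockstar.rockstar import RockstarHaloFinder

    pf = load("my_dataset")                                      # placeholder
    rhf = RockstarHaloFinder(pf, num_readers=1, num_writers=2)   # under mpirun -np 4: 1 server + 1 reader + 2 writers
    rhf.run(particle_mass=1.0e8)                                 # Msun/h, forwarded to setup_rockstar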



https://bitbucket.org/yt_analysis/yt/changeset/05d7dd36866a/
changeset:   05d7dd36866a
branch:      yt
user:        Christopher Moody
date:        2012-04-14 22:35:48
summary:     fixes to ART particles
affected #:  3 files

diff -r eb9e749e7253e4a3f3ef8ba38b2597d424bc9378 -r 05d7dd36866ae8399762163c78c2325e570231d5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -155,7 +155,7 @@
              'Pressure','Gamma','GasEnergy',
              'MetalDensitySNII', 'MetalDensitySNIa',
              'PotentialNew','PotentialOld']
-        # self.field_list += art_particle_field_names
+        self.field_list += art_particle_field_names
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -390,6 +390,7 @@
                 self.pf.conversion_factors['particle_position_%s'%ax] = dist
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
             self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            self.pf.conversion_factors['particle_index']=1.0
             
             
             a,b=0,0
@@ -418,40 +419,38 @@
                     self.pf.particle_star_metallicity2 = metallicity2
                     self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
                     self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
+            left = self.pf.particle_position.shape[0]
+            pbar = get_pbar("Gridding  Particles ",left)
+            pos = self.pf.particle_position.copy()
+            #particle indices travel with the particle positions
+            pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            max_level = min(self.pf.max_level,self.pf.limit_level)
+            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
-            if False:
-                left = self.pf.particle_position.shape[0]
-                pbar = get_pbar("Gridding  Particles ",left)
-                pos = self.pf.particle_position.copy()
-                #particle indices travel with the particle positions
-                pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-                for level in range(self.pf.max_level,self.pf.min_level-1,-1):
-                    lidx = self.grid_levels[:,0] == level
-                    for gi,gidx in enumerate(na.where(lidx)[0]): 
-                        g = grids[gidx]
-                        assert g is not None
-                        le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
-                        idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
-                                             na.all(re > pos[:,1:],axis=1))
-                        np = na.sum(idx)                     
-                        g.NumberOfParticles = np
-                        if np==0: 
-                            g.particle_indices = []
-                            #we have no particles in this grid
-                        else:
-                            fidx = pos[:,0][idx]
-                            g.particle_indices = fidx
-                            pos = pos[~idx] #throw out gridded particles from future gridding
-                        self.grids[gidx] = g
-                        left -= np
-                        pbar.update(left)
-                pbar.finish()
-            else:
-                pbar = get_pbar("Finalizing grids ",len(grids))
-                for gi, g in enumerate(grids): 
-                    self.grids[gi] = g
-                pbar.finish()
-                
+            #grid particles at the finest level, removing them once gridded
+            for level in range(max_level,self.pf.min_level-1,-1):
+                lidx = self.grid_levels[:,0] == level
+                for gi,gidx in enumerate(na.where(lidx)[0]): 
+                    g = grids[gidx]
+                    assert g is not None
+                    le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
+                    idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
+                                         na.all(re > pos[:,1:],axis=1))
+                    np = na.sum(idx)                     
+                    g.NumberOfParticles = np
+                    grid_particle_count[gidx,0]=np
+                    g.hierarchy.grid_particle_count = grid_particle_count
+                    if np==0: 
+                        g.particle_indices = []
+                        #we have no particles in this grid
+                    else:
+                        fidx = pos[:,0][idx]
+                        g.particle_indices = fidx.astype('int64')
+                        pos = pos[~idx] #throw out gridded particles from future gridding
+                    self.grids[gidx] = g
+                    left -= np
+                    pbar.update(left)
+            pbar.finish()
             
         else:
             


diff -r eb9e749e7253e4a3f3ef8ba38b2597d424bc9378 -r 05d7dd36866ae8399762163c78c2325e570231d5 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,6 +26,7 @@
 """
 
 art_particle_field_names = [
+'particle_index',
 'particle_mass',
 'particle_creation_time',
 'particle_metallicity_fraction',


diff -r eb9e749e7253e4a3f3ef8ba38b2597d424bc9378 -r 05d7dd36866ae8399762163c78c2325e570231d5 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -123,6 +123,8 @@
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
         idx = grid.particle_indices
+        if field == 'particle_index':
+            return idx
         if field == 'particle_position_x':
             return grid.pf.particle_position[idx][:,0]
         if field == 'particle_position_y':
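
The gridding loop added to the ART hierarchy in this changeset walks levels from finest to coarsest and hands each particle to the first (finest) grid that contains it, removing gridded particles from the pool so coarser grids only see what is left. The containment test in isolation, with stand-in data:

    import numpy as na                        # the frontend aliases numpy as na

    pos0 = na.random.random((1000, 3))        # stand-in particle positions in [0, 1)
    le = na.array([0.2, 0.2, 0.2])            # one grid's left edge
    re = na.array([0.4, 0.4, 0.4])            # ... and right edge

    # particle indices travel with the positions in column 0
    pos = na.vstack((na.arange(pos0.shape[0]), pos0.T)).T
    idx = na.logical_and(na.all(le < pos[:, 1:], axis=1),
                         na.all(re > pos[:, 1:], axis=1))
    particle_indices = pos[:, 0][idx].astype('int64')   # particles owned by this grid
    pos = pos[~idx]                                      # coarser levels never see them again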



https://bitbucket.org/yt_analysis/yt/changeset/26ad4e98e6f7/
changeset:   26ad4e98e6f7
branch:      yt
user:        Christopher Moody
date:        2012-04-14 23:20:02
summary:     fixed particle positions to unitary units
affected #:  1 file

diff -r 05d7dd36866ae8399762163c78c2325e570231d5 -r 26ad4e98e6f738f81f7953adbc0f320cbf24b8cb yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -371,6 +371,7 @@
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
             np = lspecies[-1]
+            import pdb; pdb.set_trace()
             self.pf.particle_position   = self.pf.particle_position[:np]
             self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
@@ -387,7 +388,8 @@
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                self.pf.conversion_factors['particle_position_%s'%ax] = dist
+                #already in unitary units
+                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
             self.pf.conversion_factors['particle_metallicity_fraction']=1.0
             self.pf.conversion_factors['particle_index']=1.0



https://bitbucket.org/yt_analysis/yt/changeset/fecd8f0251c2/
changeset:   fecd8f0251c2
branch:      yt
user:        Christopher Moody
date:        2012-04-14 23:21:44
summary:     removing pdb trace
affected #:  1 file

diff -r 26ad4e98e6f738f81f7953adbc0f320cbf24b8cb -r fecd8f0251c25209ab9fd037277bb4e96527c3e8 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -371,7 +371,6 @@
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
             np = lspecies[-1]
-            import pdb; pdb.set_trace()
             self.pf.particle_position   = self.pf.particle_position[:np]
             self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)



https://bitbucket.org/yt_analysis/yt/changeset/6cc606a417a7/
changeset:   6cc606a417a7
branch:      yt
user:        Christopher Moody
date:        2012-04-15 01:25:44
summary:     more rockstar options. fixed ART particle masses
affected #:  2 files

diff -r fecd8f0251c25209ab9fd037277bb4e96527c3e8 -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -296,12 +296,13 @@
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
-                       int writing_port = -1, int block_ratio = 1):
+                       int writing_port = -1, int block_ratio = 1,
+                       int periodic = 1):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh
+        global rh, SCALE_NOW
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -324,12 +325,13 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
+        SCALE_NOW = 1.0/self.pf.current_redshift-1.0
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
             particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
-        PERIODIC = 1
+        PERIODIC = periodic
         BOX_SIZE = (self.pf.domain_right_edge[0] -
                     self.pf.domain_left_edge[0]) * self.pf['mpchcm']
         setup_config()


diff -r fecd8f0251c25209ab9fd037277bb4e96527c3e8 -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -383,7 +383,7 @@
             self.pf.particle_mass       = na.zeros(np,dtype='float64')
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = um #solar mass in g
+            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
@@ -393,11 +393,12 @@
             self.pf.conversion_factors['particle_metallicity_fraction']=1.0
             self.pf.conversion_factors['particle_index']=1.0
             
-            
+            #import pdb; pdb.set_trace()
+
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 self.pf.particle_species[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m #mass in solar masses
+                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
                 a=b
             pbar.finish()
             
@@ -418,8 +419,8 @@
                     pbar.finish()
                     self.pf.particle_star_metallicity1 = metallicity1
                     self.pf.particle_star_metallicity2 = metallicity2
-                    self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
-                    self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
+                    self.pf.particle_star_mass_initial = imass*um
+                    self.pf.particle_mass[-nstars:] = mass*um
             left = self.pf.particle_position.shape[0]
             pbar = get_pbar("Gridding  Particles ",left)
             pos = self.pf.particle_position.copy()



https://bitbucket.org/yt_analysis/yt/changeset/8512e594a837/
changeset:   8512e594a837
branch:      yt
user:        Christopher Moody
date:        2012-04-14 02:46:24
summary:     updated to tip
affected #:  1 file

diff -r 91a9d9d9e7d4f8e4471477d6ea43aae238faf191 -r 8512e594a8372817f7569354ea3fc6aae3a38016 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os
-import sys
-import os.path
+import os, sys, os.path
 
-
-def configuration(parent_package='', top_path=None):
+def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config



https://bitbucket.org/yt_analysis/yt/changeset/629ca6b1128f/
changeset:   629ca6b1128f
branch:      yt
user:        Christopher Moody
date:        2012-04-15 04:25:15
summary:     implemented rockstar halo and halo list classes
affected #:  2 files

diff -r 8512e594a8372817f7569354ea3fc6aae3a38016 -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -250,7 +250,7 @@
         else:
             return self.data[key][self.indices]
 
-    def get_sphere(self, center_of_mass=True):
+    def get_sphere(self, center_of_mass=True, radius = None):
         r"""Returns a sphere source.
 
         This will generate a new, empty sphere source centered on this halo,
@@ -279,7 +279,8 @@
             center = self.center_of_mass()
         else:
             center = self.maximum_density_location()
-        radius = self.maximum_radius()
+        if radius is None:
+            radius = self.maximum_radius()
         # A bit of a long-reach here...
         sphere = self.data.hierarchy.sphere(
                         center, radius=radius)
@@ -438,6 +439,72 @@
         (4. / 3. * math.pi * rho_crit * \
         (self.radial_bins * cm) ** 3.0)
 
+class RockstarHalo(Halo):
+    """Implement the properties reported by Rockstar: ID, Descendant ID,
+       Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
+       Most defaults are removed since we don't read in which halos
+       particles belong to.
+    """
+    def __init__(ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
+                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
+        self.ID = ID
+        self.DescID = DescID
+        self.Mvir = Mvir
+        self.Vmax = Vmax
+        self.Vrms = Vrms
+        self.Rvir = Rvir
+        self.Rs   = Rs
+        self.Np   = Np
+        self.X    = X
+        self.Y    = Y
+        self.Z    = Z
+        self.VX   = VX
+        self.VY   = VY
+        self.VZ   = VZ
+        self.JX   = JX
+        self.JY   = JY
+        self.JZ   = JZ
+        self.Spin = Spin
+
+        self.size = Np
+        self.CoM = na.array([X,Y,Z])
+        self.max_dens_point = -1 #not implemented
+        self.group_total_mass = -1 #not implemented
+        self.max_radius = Rvir #not accurate, but good for plotting
+        self.bulk_vel  = na.array([VX,VY,VZ])
+        self.bulk_vel  *= 1e5 #km/s-> cm/s
+        self.rms_vel = -1 #not implemented
+    
+    def maximum_density(self):
+        r"""Not implemented."""
+        return -1
+
+    def maximum_density_location(self):
+        r"""Not implemented."""
+        return self.center_of_mass()
+
+    def total_mass(self):
+        r"""Not implemented."""
+        return -1
+
+    def write_particle_list(self,handle):
+        r"""Not implemented."""
+        pass
+
+    def get_size(self):
+        r"""Return the number of particles belonging to the halo."""
+        return self.Np
+
+    def virial_mass(self):
+        r"""Virial mass in Msun/h"""
+        return self.Mvir
+
+    def virial_density(self):
+        r"""Virial radius of the halo in comoving Mpc/h """
+        return self.Rvir
+
+
+
 
 class HOPHalo(Halo):
     _name = "HOPHalo"


diff -r 8512e594a8372817f7569354ea3fc6aae3a38016 -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -284,8 +284,10 @@
                 if idims.prod() > vol_max or psg.efficiency < min_eff:
                     psg_split = _ramses_reader.recursive_patch_splitting(
                         psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
+                        dleft_index, dfl)
+                        #,min_eff=min_eff
+                        #,use_center=True
+                        #split_on_vol=vol_max)
                     
                     psgs.extend(psg_split)
                     psg_eff += [x.efficiency for x in psg_split] 



https://bitbucket.org/yt_analysis/yt/changeset/41bf558abeaf/
changeset:   41bf558abeaf
branch:      yt
user:        Christopher Moody
date:        2012-04-15 04:26:35
summary:     merged
affected #:  6 files

diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -80,7 +80,7 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1):
+    def run(self, block_ratio = 1,**kwargs):
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
@@ -89,7 +89,8 @@
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
-                    block_ratio = block_ratio)
+                    block_ratio = block_ratio,
+                    **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:


diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -296,12 +296,13 @@
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
-                       int writing_port = -1, int block_ratio = 1):
+                       int writing_port = -1, int block_ratio = 1,
+                       int periodic = 1):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh
+        global rh, SCALE_NOW
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -314,7 +315,7 @@
             PARALLEL_IO_SERVER_PORT = server_port
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "ASCII"
+        OUTPUT_FORMAT = "BOTH"
         NUM_SNAPS = 1
         NUM_READERS = num_readers
         NUM_BLOCKS = num_readers * block_ratio
@@ -324,12 +325,13 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
+        SCALE_NOW = 1.0/self.pf.current_redshift-1.0
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
             particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
-        PERIODIC = 1
+        PERIODIC = periodic
         BOX_SIZE = (self.pf.domain_right_edge[0] -
                     self.pf.domain_left_edge[0]) * self.pf['mpchcm']
         setup_config()


diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -155,7 +155,7 @@
              'Pressure','Gamma','GasEnergy',
              'MetalDensitySNII', 'MetalDensitySNIa',
              'PotentialNew','PotentialOld']
-        # self.field_list += art_particle_field_names
+        self.field_list += art_particle_field_names
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -385,19 +385,22 @@
             self.pf.particle_mass       = na.zeros(np,dtype='float64')
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = um #solar mass in g
+            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                self.pf.conversion_factors['particle_position_%s'%ax] = dist
+                #already in unitary units
+                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
             self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            self.pf.conversion_factors['particle_index']=1.0
             
-            
+            #import pdb; pdb.set_trace()
+
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
                 self.pf.particle_species[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m #mass in solar masses
+                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
                 a=b
             pbar.finish()
             
@@ -418,42 +421,40 @@
                     pbar.finish()
                     self.pf.particle_star_metallicity1 = metallicity1
                     self.pf.particle_star_metallicity2 = metallicity2
-                    self.pf.particle_star_mass_initial = imass*self.pf.parameters['aM0']
-                    self.pf.particle_mass[-nstars:] = mass*self.pf.parameters['aM0']
+                    self.pf.particle_star_mass_initial = imass*um
+                    self.pf.particle_mass[-nstars:] = mass*um
+            left = self.pf.particle_position.shape[0]
+            pbar = get_pbar("Gridding  Particles ",left)
+            pos = self.pf.particle_position.copy()
+            #particle indices travel with the particle positions
+            pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            max_level = min(self.pf.max_level,self.pf.limit_level)
+            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
-            if False:
-                left = self.pf.particle_position.shape[0]
-                pbar = get_pbar("Gridding  Particles ",left)
-                pos = self.pf.particle_position.copy()
-                #particle indices travel with the particle positions
-                pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-                for level in range(self.pf.max_level,self.pf.min_level-1,-1):
-                    lidx = self.grid_levels[:,0] == level
-                    for gi,gidx in enumerate(na.where(lidx)[0]): 
-                        g = grids[gidx]
-                        assert g is not None
-                        le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
-                        idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
-                                             na.all(re > pos[:,1:],axis=1))
-                        np = na.sum(idx)                     
-                        g.NumberOfParticles = np
-                        if np==0: 
-                            g.particle_indices = []
-                            #we have no particles in this grid
-                        else:
-                            fidx = pos[:,0][idx]
-                            g.particle_indices = fidx
-                            pos = pos[~idx] #throw out gridded particles from future gridding
-                        self.grids[gidx] = g
-                        left -= np
-                        pbar.update(left)
-                pbar.finish()
-            else:
-                pbar = get_pbar("Finalizing grids ",len(grids))
-                for gi, g in enumerate(grids): 
-                    self.grids[gi] = g
-                pbar.finish()
-                
+            #grid particles at the finest level, removing them once gridded
+            for level in range(max_level,self.pf.min_level-1,-1):
+                lidx = self.grid_levels[:,0] == level
+                for gi,gidx in enumerate(na.where(lidx)[0]): 
+                    g = grids[gidx]
+                    assert g is not None
+                    le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
+                    idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
+                                         na.all(re > pos[:,1:],axis=1))
+                    np = na.sum(idx)                     
+                    g.NumberOfParticles = np
+                    grid_particle_count[gidx,0]=np
+                    g.hierarchy.grid_particle_count = grid_particle_count
+                    if np==0: 
+                        g.particle_indices = []
+                        #we have no particles in this grid
+                    else:
+                        fidx = pos[:,0][idx]
+                        g.particle_indices = fidx.astype('int64')
+                        pos = pos[~idx] #throw out gridded particles from future gridding
+                    self.grids[gidx] = g
+                    left -= np
+                    pbar.update(left)
+            pbar.finish()
             
         else:
             


diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,6 +26,7 @@
 """
 
 art_particle_field_names = [
+'particle_index',
 'particle_mass',
 'particle_creation_time',
 'particle_metallicity_fraction',


diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -123,6 +123,8 @@
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
         idx = grid.particle_indices
+        if field == 'particle_index':
+            return idx
         if field == 'particle_position_x':
             return grid.pf.particle_position[idx][:,0]
         if field == 'particle_position_y':


diff -r 629ca6b1128f7884dec24631b2154d2f664fb9c3 -r 41bf558abeaf9357ebca9f540bbb16a7fb1aa72c yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -151,7 +151,7 @@
         if not os.path.exists(fn): return
         with open(fn, 'r') as f:
             lines = f.readlines()
-            self.num_stars = int(lines[0].strip())
+            self.num_stars = int(lines[0].strip()[0])
             for line in lines[1:]:
                 particle_position_x = float(line.split(' ')[1])
                 particle_position_y = float(line.split(' ')[2])



https://bitbucket.org/yt_analysis/yt/changeset/42485223eca7/
changeset:   42485223eca7
branch:      yt
user:        Christopher Moody
date:        2012-04-15 05:53:17
summary:     fixes in rockstar halo classes. allowing ART frontend to be dm only
affected #:  4 files

diff -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 -r 42485223eca73cb5cf5ad25642e7004285a88114 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -438,6 +438,93 @@
         (4. / 3. * math.pi * rho_crit * \
         (self.radial_bins * cm) ** 3.0)
 
+class RockstarHalo(Halo):
+    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
+                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
+        """Implement the properties reported by Rockstar: ID, Descendant ID,
+           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
+           Most defaults are removed since we don't read in which halos
+           particles belong to. 
+        """
+        #we can still use get_sphere!
+        self.ID = ID #from rockstar
+        self.id = index #index in the halo list
+        self.pf = halo_list.pf
+
+        self.DescID = DescID
+        self.Mvir = Mvir
+        self.Vmax = Vmax
+        self.Vrms = Vrms
+        self.Rvir = Rvir
+        self.Rs   = Rs
+        self.Np   = Np
+        self.X    = X
+        self.Y    = Y
+        self.Z    = Z
+        self.VX   = VX
+        self.VY   = VY
+        self.VZ   = VZ
+        self.JX   = JX
+        self.JY   = JY
+        self.JZ   = JZ
+        self.Spin = Spin
+
+        #Halo.__init__(self,halo_list,index,
+        self.size=Np 
+        self.CoM=na.array([X,Y,Z])
+        self.max_dens_point=-1
+        self.group_total_mass=-1
+        self.max_radius=Rvir
+        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.rms_vel=-1
+        self.group_total_mass = -1 #not implemented
+        
+        
+    
+    def maximum_density(self):
+        r"""Not implemented."""
+        return -1
+
+    def maximum_density_location(self):
+        r"""Not implemented."""
+        return self.center_of_mass()
+
+    def total_mass(self):
+        r"""Not implemented."""
+        return -1
+
+    def get_size(self):
+        r"""Return the number of particles belonging to the halo."""
+        return self.Np
+
+    def write_particle_list(self,handle):
+        r"""Not implemented."""
+        return -1
+
+    def virial_mass(self):
+        r"""Virial mass in Msun/h"""
+        return self.Mvir
+
+    def virial_radius(self):
+        r"""Virial radius in Mpc/h comoving"""
+        return self.Rvir
+
+    def virial_bin(self):
+        r"""Not implemented"""
+        return -1
+
+    def virial_density(self):
+        r"""Not implemented """
+        return -1
+
+    def virial_info(self):
+        r"""Not implemented"""
+        return -1 
+
+    def __getitem__(self,key):
+        r"""Not implemented"""
+        return None
+
 
 class HOPHalo(Halo):
     _name = "HOPHalo"
@@ -903,6 +990,97 @@
             f.flush()
         f.close()
 
+class RockstarHaloList(HaloList):
+    #because we don't yet no halo-particle affiliations
+    #most of the halo list methods are not implemented
+    #furthermore, Rockstar only accepts DM particles of
+    #a fixed mass, so we don't allow stars at all
+    #Still, we inherit from HaloList because in the future
+    #we might implement halo-particle affiliations
+    def __init__(self,pf,out_list):
+        mylog.info("Initializing Rockstar List")
+        self._data_source = None
+        self._groups = []
+        self._max_dens = -1
+        self.pf = pf
+        self.out_list = out_list
+        mylog.info("Parsing Rockstar halo list")
+        self._parse_output(out_list)
+        mylog.info("Finished %s"%out_list)
+
+    def _run_finder(self):
+        pass
+
+    def __obtain_particles(self):
+        pass
+
+    def _get_dm_indices(self):
+        pass
+
+    def _parse_output(self,out_list=None):
+        """
+        Read the out_*.list text file produced
+        by Rockstar into memory."""
+        
+        pf = self.pf
+
+        if out_list is None:
+            out_list = self.out_list
+
+        lines = open(out_list).readlines()
+        names = []
+        formats = []
+        
+        #find the variables names from the first defining line
+        names = lines[0].replace('#','').split(' ')
+        for j,line in enumerate(lines):
+            if not line.startswith('#'): break
+
+        #find out the table datatypes but evaluating the first data line
+        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
+        for num in splits:
+            if 'nan' not in num:
+                formats += na.array(eval(num)).dtype,
+            else:
+                formats += na.dtype('float'),
+        assert len(formats) == len(names)
+
+        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        Jc = 1.0
+        conv = dict(X=1.0/pf['mpchcm'],
+                    Y=1.0/pf['mpchcm'],
+                    Z=1.0/pf['mpchcm'],
+                    VX=1e0,VY=1e0,VZ=1e0,
+                    Mvir=1.0,
+                    Vmax=1e0,Vrms=1e0,
+                    Rvir=1.0/pf['mpchcm'],
+                    Rs=1.0/pf['mpchcm'],
+                    JX=Jc,JY=Jc,JZ=Jc)
+        dtype = {'names':names,'formats':formats}
+        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        #convert position units  
+        for name in names:
+            halo_table[name]=halo_table[name]*conv.get(name,1)
+        
+        for k,row in enumerate(halo_table):
+            args = tuple([val for val in row])
+            halo = RockstarHalo(self,k,*args)
+            self._groups.append(halo)
+    
+
+    #len is ok
+    #iter is OK
+    #getitem is ok
+    #nn is ok I think
+    #nn2d is ok I think
+
+    def write_out(self):
+        pass
+    def write_particle_list(self):
+        pass
+    
+
+    
 
 class HOPHaloList(HaloList):
 


diff -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 -r 42485223eca73cb5cf5ad25642e7004285a88114 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -25,9 +25,11 @@
 
 from yt.mods import *
 from os import environ
+from os import mkdir
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
 import rockstar_interface
 import socket
 import time
@@ -45,7 +47,7 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0):
+    def __init__(self, pf, num_readers = 0, num_writers = 0, outbase=None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
@@ -64,6 +66,9 @@
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
         data_source = self.pf.h.all_data()
+        if outbase is None:
+            outbase = str(self.pf)+'_rockstar'
+        self.outbase = outbase        
         self.handler = rockstar_interface.RockstarInterface(
                 self.pf, data_source)
 
@@ -84,12 +89,20 @@
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        #because rockstar *always* write to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten
+        
+        if self.workgroup.name == "server":
+            os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
                     block_ratio = block_ratio,
+                    outbase = self.outbase,
                     **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
@@ -104,3 +117,11 @@
                 #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
+        #quickly rename the out_0.list 
+    
+    def halo_list(self):
+        """
+        Reads in the out_0.list file and generates RockstarHaloList
+        and RockstarHalo objects.
+        """
+        return RockstarHaloList(self.pf,self.outbase+'/out_0.list')
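
After a run, halo_list() parses <outbase>/out_0.list into a RockstarHaloList whose entries expose the catalog columns. Roughly (rhf is a RockstarHaloFinder whose run() has already completed):

    halos = rhf.halo_list()                  # reads outbase + '/out_0.list'
    for halo in halos:
        print halo.virial_mass(), halo.virial_radius()   # Msun/h, comoving Mpc/h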


diff -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 -r 42485223eca73cb5cf5ad25642e7004285a88114 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -278,7 +278,8 @@
             fi += 1
         pi += npart
     num_p[0] = tnpart
-    print "TOTAL", block, pi, tnpart, len(grids)
+    print "Block #%i | Particles %i | Particles %i| Grids %i  "%\
+            ( block, pi, tnpart, len(grids))
 
 cdef class RockstarInterface:
 
@@ -297,12 +298,13 @@
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1):
+                       int periodic = 1, 
+                       char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh, SCALE_NOW
+        global rh, SCALE_NOW, OUTBASE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -315,7 +317,7 @@
             PARALLEL_IO_SERVER_PORT = server_port
         FILENAME = "inline.<block>"
         FILE_FORMAT = "GENERIC"
-        OUTPUT_FORMAT = "BOTH"
+        OUTPUT_FORMAT = "ASCII"
         NUM_SNAPS = 1
         NUM_READERS = num_readers
         NUM_BLOCKS = num_readers * block_ratio
@@ -326,6 +328,11 @@
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
         SCALE_NOW = 1.0/self.pf.current_redshift-1.0
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. since we can't change the output filenames
+            #workaround is to make a new directory
+            print 'using %s as outbase'%outbase
+            OUTBASE = outbase 
 
         if particle_mass < 0:
             print "Assuming single-mass particle."


diff -r 6cc606a417a75f22e9bb77bea4f4da78ef4d49d2 -r 42485223eca73cb5cf5ad25642e7004285a88114 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -371,6 +371,8 @@
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
             np = lspecies[-1]
+            if dm_only:
+                np = lspecies[0]
             self.pf.particle_position   = self.pf.particle_position[:np]
             self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
@@ -404,7 +406,7 @@
             
             self.pf.particle_star_index = i
             
-            if self.pf.file_star_data:
+            if self.pf.file_star_data and (not self.pf.dm_only):
                 nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
@@ -552,7 +554,8 @@
                  file_star_data=None,
                  discover_particles=False,
                  use_particles=True,
-                 limit_level=None):
+                 limit_level=None,
+                 dm_only=False):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -563,6 +566,7 @@
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
+        self.dm_only = dm_only
         
         if limit_level is None:
             self.limit_level = na.inf
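
Elsewhere in this changeset, RockstarHaloList._parse_output infers the dtype of each column of out_*.list by eval'ing the tokens of the first data row and taking their numpy dtypes, falling back to float for 'nan' entries. The inference step in isolation, with a stand-in row:

    import numpy as na

    line = "0 -1 1.2e12 150.0 nan"                       # stand-in data row from out_0.list
    splits = filter(lambda x: len(x.strip()) > 0, line.split(' '))
    formats = []
    for num in splits:
        if 'nan' not in num:
            formats.append(na.array(eval(num)).dtype)    # int64 for 0/-1, float64 otherwise
        else:
            formats.append(na.dtype('float'))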



https://bitbucket.org/yt_analysis/yt/changeset/db3a9b59d37c/
changeset:   db3a9b59d37c
branch:      yt
user:        Christopher Moody
date:        2012-04-16 00:05:43
summary:     merge
affected #:  7 files



diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -278,8 +278,8 @@
             fi += 1
         pi += npart
     num_p[0] = tnpart
-    print "Block #%i | Particles %i | Particles %i| Grids %i  "%\
-            ( block, pi, tnpart, len(grids))
+    print "Block #%i | Particles %i | Grids %i"%\
+            ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
@@ -327,7 +327,7 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
-        SCALE_NOW = 1.0/self.pf.current_redshift-1.0
+        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
         if not outbase =='None'.decode('UTF-8'):
             #output directory. since we can't change the output filenames
             #workaround is to make a new directory
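
SCALE_NOW is now the standard scale factor for the snapshot, a = 1/(1+z), so a redshift-one output gives a = 0.5:

    z = 1.0
    a = 1.0 / (1.0 + z)   # 0.5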


diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -371,7 +371,7 @@
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
             np = lspecies[-1]
-            if dm_only:
+            if self.pf.dm_only:
                 np = lspecies[0]
             self.pf.particle_position   = self.pf.particle_position[:np]
             self.pf.particle_position  -= 1.0 #fortran indices start with 0
@@ -381,7 +381,7 @@
             self.pf.particle_velocity   = self.pf.particle_velocity[:np]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_species    = na.zeros(np,dtype='uint8')
+            self.pf.particle_type       = na.zeros(np,dtype='uint8')
             self.pf.particle_mass       = na.zeros(np,dtype='float64')
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
@@ -399,7 +399,7 @@
 
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                self.pf.particle_species[a:b] = i #particle type
+                self.pf.particle_type[a:b] = i #particle type
                 self.pf.particle_mass[a:b]    = m*um #mass in solar masses
                 a=b
             pbar.finish()
@@ -423,38 +423,51 @@
                     self.pf.particle_star_metallicity2 = metallicity2
                     self.pf.particle_star_mass_initial = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
+
             left = self.pf.particle_position.shape[0]
-            pbar = get_pbar("Gridding  Particles ",left)
+            init = self.pf.particle_position.shape[0]
+            pbar = get_pbar("Gridding Particles ",init)
             pos = self.pf.particle_position.copy()
+            pid = na.arange(pos.shape[0]).astype('int64')
             #particle indices travel with the particle positions
-            pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
             max_level = min(self.pf.max_level,self.pf.limit_level)
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
             #grid particles at the finest level, removing them once gridded
-            for level in range(max_level,self.pf.min_level-1,-1):
-                lidx = self.grid_levels[:,0] == level
-                for gi,gidx in enumerate(na.where(lidx)[0]): 
-                    g = grids[gidx]
-                    assert g is not None
-                    le,re = self.grid_left_edge[g._id_offset],self.grid_right_edge[g._id_offset]
-                    idx = na.logical_and(na.all(le < pos[:,1:],axis=1),
-                                         na.all(re > pos[:,1:],axis=1))
-                    np = na.sum(idx)                     
-                    g.NumberOfParticles = np
-                    grid_particle_count[gidx,0]=np
-                    g.hierarchy.grid_particle_count = grid_particle_count
-                    if np==0: 
-                        g.particle_indices = []
-                        #we have no particles in this grid
-                    else:
-                        fidx = pos[:,0][idx]
-                        g.particle_indices = fidx.astype('int64')
-                        pos = pos[~idx] #throw out gridded particles from future gridding
-                    self.grids[gidx] = g
-                    left -= np
-                    pbar.update(left)
-            pbar.finish()
+            if self.pf.grid_particles:
+                for level in range(max_level,self.pf.min_level-1,-1):
+                    lidx = self.grid_levels[:,0] == level
+                    for gi,gidx in enumerate(na.where(lidx)[0]): 
+                        g = grids[gidx]
+                        assert g is not None
+                        le,re = self.grid_left_edge[gidx],self.grid_right_edge[gidx]
+                        idx = na.logical_and(na.all(le < pos,axis=1),
+                                             na.all(re > pos,axis=1))
+                        fidx = pid[idx]
+                        np = na.sum(idx)                     
+                        g.NumberOfParticles = np
+                        grid_particle_count[gidx,0]=np
+                        g.hierarchy.grid_particle_count = grid_particle_count
+                        if np==0: 
+                            g.particle_indices = []
+                            #we have no particles in this grid
+                        else:
+                            g.particle_indices = fidx.astype('int64')
+                            pos = pos[~idx] #throw out gridded particles from future gridding
+                            pid = pid[~idx]
+                        self.grids[gidx] = g
+                        left -= np
+                        pbar.update(init-left)
+                pbar.finish()
+            else:
+                g = grids[0]
+                g.NumberOfParticles = pos.shape[0]
+                grid_particle_count[0,0]=pos.shape[0]
+                g.hierarchy.grid_particle_count = grid_particle_count
+                g.particle_indices = pid
+                grids[0] = g
+                for gi,g in enumerate(grids): self.grids[gi]=g
             
         else:
             
@@ -555,7 +568,8 @@
                  discover_particles=False,
                  use_particles=True,
                  limit_level=None,
-                 dm_only=False):
+                 dm_only=False,
+                 grid_particles=False):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -567,6 +581,7 @@
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
         self.dm_only = dm_only
+        self.grid_particles = grid_particles
         
         if limit_level is None:
             self.limit_level = na.inf
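
With dm_only and grid_particles exposed on the parameter file, an ART dataset can be loaded with only its dark-matter species and with every particle attached to the root grid. A hypothetical invocation (the file names are placeholders, and it assumes yt's load() forwards these keywords to the ART frontend; the ART output class can also be instantiated directly with the same arguments):

    from yt.mods import load

    pf = load("10MpcBox_csf512_04560.d",           # placeholder ART amr file
              file_particle_header="PMcrd.DAT",    # placeholder particle files
              file_particle_data="PMcrs0.DAT",
              dm_only=True,                        # keep only the first (DM) species
              grid_particles=False)                # attach every particle to the root grid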


diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -35,4 +35,5 @@
 'particle_position_z',
 'particle_velocity_x',
 'particle_velocity_y',
-'particle_velocity_z']
+'particle_velocity_z',
+'particle_type']


diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -125,6 +125,8 @@
         idx = grid.particle_indices
         if field == 'particle_index':
             return idx
+        if field == 'particle_type':
+            return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
             return grid.pf.particle_position[idx][:,0]
         if field == 'particle_position_y':
@@ -144,7 +146,7 @@
         sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
         n = grid.particle_indices
         if field == 'particle_creation_time':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-0.0
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
             if sidx.shape[0]>0:
                 tr[tridx] = grid.pf.particle_star_ages[sidx]
             return tr


diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os
-import sys
-import os.path
+import os, sys, os.path
 
-
-def configuration(parent_package='', top_path=None):
+def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 42485223eca73cb5cf5ad25642e7004285a88114 -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -934,14 +934,15 @@
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
                  ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None):
+                 minimum_mass=None, alpha=1.0):
         """
         Adds particle positions, based on a thick slab along *axis* with a
         *width* along the line of sight.  *p_size* controls the number of
         pixels per particle, and *col* governs the color.  *ptype* will
         restrict plotted particles to only those that are of a given type.
         *minimum_mass* will require that the particles be of a given mass,
-        calculated via ParticleMassMsun, to be plotted.
+        calculated via ParticleMassMsun, to be plotted. *alpha* determines
+        each particle's opacity.
         """
         PlotCallback.__init__(self)
         self.width = width
@@ -953,6 +954,7 @@
         self.stars_only = stars_only
         self.dm_only = dm_only
         self.minimum_mass = minimum_mass
+        self.alpha = alpha
 
     def __call__(self, plot):
         data = plot.data
@@ -983,7 +985,7 @@
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
-                           s=self.p_size, c=self.color)
+                           s=self.p_size, c=self.color,alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)
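
The particle overlay now takes an alpha keyword that is passed straight to matplotlib's scatter, so dense particle fields can be drawn semi-transparently. A usage sketch, assuming the class shown here is the standard particle callback registered as "particles" on 2.x plot objects (pc below stands in for an existing PlotCollection):

    p = pc.add_projection("Density", 0)
    p.modify["particles"](0.05, p_size=2.0, dm_only=True, alpha=0.3)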



https://bitbucket.org/yt_analysis/yt/changeset/1503e0ae9825/
changeset:   1503e0ae9825
branch:      yt
user:        Christopher Moody
date:        2012-05-01 20:25:34
summary:     hilbert framework works; wrong ordering though
affected #:  10 files

diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -477,9 +477,7 @@
         self.max_radius=Rvir
         self.bulk_vel=na.array([VX,VY,VZ])*1e5
         self.rms_vel=-1
-        self.group_total_mass = -1 #not implemented
-        
-        
+        self.group_total_mass = -1 #not implemented 
     
     def maximum_density(self):
         r"""Not implemented."""
@@ -1049,12 +1047,12 @@
         Jc = 1.0
         conv = dict(X=1.0/pf['mpchcm'],
                     Y=1.0/pf['mpchcm'],
-                    Z=1.0/pf['mpchcm'],
-                    VX=1e0,VY=1e0,VZ=1e0,
-                    Mvir=1.0,
+                    Z=1.0/pf['mpchcm'], #to unitary
+                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    Mvir=1.0, #Msun/h
                     Vmax=1e0,Vrms=1e0,
-                    Rvir=1.0/pf['mpchcm'],
-                    Rs=1.0/pf['mpchcm'],
+                    Rvir=1.0/pf['kpchcm'],
+                    Rs=1.0/pf['kpchcm'],
                     JX=Jc,JY=Jc,JZ=Jc)
         dtype = {'names':names,'formats':formats}
         halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            


diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -47,14 +47,28 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0, outbase=None):
+    def __init__(self, pf, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,overwrite=False,
+            left_edge = None, right_edge = None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
         self.hierarchy = pf.h
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
+        self.particle_mass = particle_mass 
+        self.overwrite = overwrite
+        if left_edge is None:
+            left_edge = pf.domain_left_edge
+        if right_edge is None:
+            right_edge = pf.domain_right_edge
+        self.le = left_edge
+        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
+            print '%i reader + %i writers != %i mpi'%\
+                    (self.num_readers,self.num_writers,self.comm.size)
             raise RuntimeError
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = None
@@ -86,16 +100,19 @@
         self.port = str(self.port)
 
     def run(self, block_ratio = 1,**kwargs):
+        """
+        
+        """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
         #because rockstar *always* write to exactly the same
         #out_0.list filename we make a directory for it
         #to sit inside so it doesn't get accidentally
-        #overwritten
-        
+        #overwritten 
         if self.workgroup.name == "server":
-            os.mkdir(self.outbase)
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
@@ -103,6 +120,7 @@
                     writing_port = -1,
                     block_ratio = block_ratio,
                     outbase = self.outbase,
+                    particle_mass = float(self.particle_mass),
                     **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
@@ -111,17 +129,17 @@
             if self.workgroup.name == "server":
                 self.handler.start_server()
             elif self.workgroup.name == "readers":
-                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
             elif self.workgroup.name == "writers":
-                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
         #quickly rename the out_0.list 
     
-    def halo_list(self):
+    def halo_list(self,file_name='out_0.list'):
         """
         Reads in the out_0.list file and generates RockstarHaloList
         and RockstarHalo objects.
         """
-        return RockstarHaloList(self.pf,self.outbase+'/out_0.list')
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)

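A hedged usage sketch of the reworked constructor and the new halo_list(file_name=...) accessor. It assumes the Rockstar interface is compiled, that the script is launched under MPI (e.g. mpirun -np 8 python script.py --parallel), and a placeholder dataset name; num_readers + num_writers + 1 server must equal the MPI size, and num_writers defaults to comm.size - num_readers - 1 if left unset.

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.rockstar import \
        RockstarHaloFinder

    pf = load("my_dataset")                       # placeholder dataset
    # with 8 MPI tasks: 1 server + 2 readers + 5 writers
    rh = RockstarHaloFinder(pf, num_readers=2, num_writers=5,
                            outbase="rockstar_halos", particle_mass=1.0e8)
    rh.run()
    halos = rh.halo_list(file_name="out_0.list")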

diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -241,7 +241,7 @@
 cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
     cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6]
+    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
     dd = rh.data_source
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
@@ -257,9 +257,12 @@
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = rh.le[0]
+    left_edge[1] = rh.le[1]
+    left_edge[2] = rh.le[2]
+    right_edge[0] = rh.re[0]
+    right_edge[1] = rh.re[1]
+    right_edge[2] = rh.re[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
@@ -274,6 +277,9 @@
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
+                if fi<3: 
+                    if  left_edge[i] > arr[i]: continue
+                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
@@ -298,13 +304,13 @@
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
                        int writing_port = -1, int block_ratio = 1,
-                       int periodic = 1, 
+                       int periodic = 1, int min_halo_size = 20,
                        char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh, SCALE_NOW, OUTBASE
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address

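The left_edge/right_edge additions above amount to a per-particle bounding-box test inside the Cython read loop. As a plain-numpy illustration of the intent (not the actual reader code), the same filter over an (N, 3) position array might look like:

    import numpy as na

    def filter_to_subvolume(pos, left_edge, right_edge):
        """Keep only particles inside [left_edge, right_edge) in all 3 dims."""
        inside = na.all((pos >= left_edge) & (pos < right_edge), axis=1)
        return pos[inside]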

diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -1,6 +1,8 @@
 """
 Code to export from yt to Sunrise
 
+Author: Chris Moody <juxtaposicion at gmail.com>
+Affiliation: UCSC
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Homepage: http://yt-project.org/
@@ -26,8 +28,7 @@
 
 try:
     import pyfits
-except ImportError:
-    # We silently fail here
+except ImportError: 
     pass
 
 import time
@@ -36,14 +37,11 @@
 from yt.funcs import *
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
+from yt.mods import *
 
-from os import environ
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, ProcessorPool, Communicator
+debug = True
 
-def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None,
-    parallel=False):
+def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -59,18 +57,13 @@
     pf : `StaticOutput`
         The parameter file to convert.
     fn : string
-        The filename of the FITS file.
-    write_particles : bool or pyfits.ColDefs instance, default is True
-        Whether to write out the star particles or not.  If this variable is an
-        instance of pyfits.ColDefs, then this will be used to create a pyfits
-        table named PARTICLEDATA which will be appended.  If this is true, the
-        routine will attempt to create this table from hand.
-    subregion_bounds : list of tuples
-        This is a list of tuples describing the subregion of the top grid to
-        export.  This will only work when only *one* root grid exists.
-        It is of the format:
-        [ (start_index_x, nx), (start_index_y, ny), (start_index_z, nz) ]
-        where nx, ny, nz are the number of cells to extract.
+        The filename of the output FITS file.
+    dle : array of floats
+        The domain left edge of the region to extract.
+    dre : array of floats
+        The domain right edge of the region to extract.
+        Both are 3-element arrays in unitary position units, where 0 is
+        the leftmost edge of the box and 1 the rightmost.
+        
 
     Notes
     -----
@@ -79,145 +72,248 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-    # Now particles
-    #  output_file->addTable("PARTICLEDATA" , 0);
-    # addKey("timeunit", time_unit, "Time unit is "+time_unit);
-    # addKey("tempunit", temp_unit, "Temperature unit is "+temp_unit);
-    # 
-    # addColumn(Tint, "ID", 1, "" );
-    # addColumn(Tdouble, "position", 3, length_unit );
-    # addColumn(Tdouble, "stellar_radius", 1, length_unit );
-    # addColumn(Tdouble, "L_bol", 1, L_bol_unit );
-    # addColumn(Tdouble, "mass_stars", 1, mass_unit );
-    # addColumn(Tdouble, "mass_stellar_metals", 1, mass_unit );
-    # addColumn(Tdouble, "age_m", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tdouble, "age_l", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tfloat, "L_lambda", L_lambda.columns(), 
-    #			L_lambda_unit );
-    #	output->addKey("logflux", true, "Column L_lambda values are log (L_lambda)");
+    
+    #we must round the dle,dre to the nearest root grid cells
+    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
+    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
+    mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
-    col_list = []
-    if subregion_bounds == None:    
-        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        DX = pf.domain_dimensions
-    else:
-        DLE, DX = zip(*subregion_bounds)
-        DLE, DX = na.array(DLE), na.array(DX)
-        DRE = DLE + DX
-    reg = pf.h.region((DRE+DLE)/2.0, DLE, DRE)
 
-    if write_particles is True:
-        pi = reg["particle_type"] == 2
-        pos = na.array([reg["particle_position_%s" % ax][pi]*pf['kpc']
-                            for ax in 'xyz']).transpose()
-        vel = na.array([reg["particle_velocity_%s" % ax][pi]
-                            for ax in 'xyz']).transpose()
-        # Velocity is cm/s, we want it to be kpc/yr
-        vel *= (pf["kpc"]/pf["cm"]) / (365*24*3400.)
-        age = pf["years"] * (pf.current_time - reg["creation_time"][pi])
-        creation_time = reg["creation_time"][pi] * pf["years"]
+    #Create the refinement hilbert octree in GRIDSTRUCTURE
+    #For every leaf (not-refined) cell we have a column in GRIDDATA
+    #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
+    #since the octree always starts with one cell, and our 0-level mesh
+    #may have many cells, we must create the octree region sitting
+    #on top of the first mesh by providing a negative level
+    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
 
-        initial_mass = reg["InitialMassCenOstriker"][pi]
-        current_mass = reg["ParticleMassMsun"][pi]
-        col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-        col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-        col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
-        col_list.append(pyfits.Column("formation_time", format="D", array=creation_time, unit="yr"))
-        col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-        col_list.append(pyfits.Column("age_m", format="D", array=age))
-        col_list.append(pyfits.Column("age_l", format="D", array=age))
-        #For particles, Sunrise takes 
-        #the dimensionless metallicity, not the mass of the metals
-        col_list.append(pyfits.Column("metallicity", format="D",
-            array=reg["metallicity_fraction"][pi],unit="Msun")) # wrong?
-        col_list.append(pyfits.Column("L_bol", format="D",
-            array=na.zeros(particle_mass.size)))
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
 
-        cols = pyfits.ColDefs(col_list)
-        pd_table = pyfits.new_table(cols)
-        pd_table.name = "PARTICLEDATA"
-    elif isinstance(write_particles, pyfits.ColDefs):
-        pd_table = pyfits.new_table(write_particles)
-        pd_table.name = "PARTICLEDATA"
-        write_particles = True
+    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
 
-    def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
-        
-    def _convMetalMass(data):
-        return 1.0/1.989e33
-        
-    add_field("MetalMass", function=_MetalMass,
-              convert_function=_convMetalMass)
+def prepare_octree(pf,ile,start_level=0):
+    add_fields() #add the metal mass field that sunrise wants
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    
+    #gather the field data from octs
+    pbar = get_pbar("Retrieving field data",len(fields))
+    field_data = [] 
+    dd = pf.h.all_data()
+    for fi,f in enumerate(fields):
+        field_data += dd[f],
+        pbar.update(fi)
+    pbar.finish()
+    del field_data
 
-    output, refined = generate_flat_octree(pf,
-            ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds,
-            parallel=parallel)
-    cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
+    #first we cast every cell as an oct
+    #ngrids = na.max([g.id for g in pf._grids])
+    grids = {}
+    levels_all = {} 
+    levels_finest = {}
+    for l in range(100): 
+        levels_finest[l]=0
+        levels_all[l]=0
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    for gi,g in enumerate(pf.h.grids):
+        ff = na.array([g[f] for f in fields])
+        og = amr_utils.OctreeGrid(
+                g.child_index_mask.astype('int32'),
+                ff.astype("float64"),
+                g.LeftEdge.astype("float64"),
+                g.ActiveDimensions.astype("int32"),
+                na.ones(1,dtype="float64")*g.dds[0],
+                g.Level,
+                g.id)
+        grids[g.id] = og
+        #how many refinement cells will we have?
+        #measure the 'volume' of each mesh, but many
+        #cells do not exist. an overestimate
+        levels_all[g.Level] += g.ActiveDimensions.prod()
+        #how many leaves do we have?
+        #this overestimates. a child of -1 means no child,
+        #but that cell may still be expanded on a submesh because
+        #(at least in ART) the meshes are inefficient.
+        g.clear_data()
+        pbar.update(gi)
+    pbar.finish()
+    
+    #create the octree grid list
+    oct_list =  amr_utils.OctreeGridList(grids)
+    
+    #initialize arrays to be passed to the recursion algo
+    o_length = na.sum(levels_all.values())
+    r_length = na.sum(levels_all.values())
+    output   = na.zeros((o_length,len(fields)), dtype='float64')
+    refined  = na.zeros(r_length, dtype='int32')
+    levels   = na.zeros(r_length, dtype='int32')
+    pos = position()
+    hs       = hilbert_state()
+    refined[0] = 1 #introduce the first cell as divided
+    levels[0]  = start_level-1 #the root cell sits one level above start_level
+    pos.refined_pos += 1
+    RecurseOctreeDepthFirstHilbert(
+            ile[0],ile[1],ile[2],
+            pos,0, hs, 
+            output,refined,levels,
+            grids,
+            start_level,
+            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
+            physical_center = ile,
+            #physical_width  = pf['kpc'])
+            physical_width  = pf.domain_dimensions)
+    #by the time we get here the 'current' position is actually
+    #for the next spot, so we're off by 1
+    print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    output  = output[:pos.output_pos]
+    refined = refined[:pos.refined_pos] 
+    levels = levels[:pos.refined_pos] 
+    return output,refined
 
-    # First the structure
+def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
+                            curpos, gi, 
+                            hs,
+                            output,
+                            refined,
+                            levels,
+                            grids,
+                            level,
+                            physical_center=None,
+                            physical_width=None):
+    grid = grids[gi]
+    m = 2**(-level-1) if level < 0 else 1
+    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
+    pre = ple+grid.dx*m
+    print level,ple*physical_width-physical_center,
+    print pre*physical_width-physical_center, hs.parent_octant
+
+
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the integer and floating-point center (cxyz) of each child cell
+    # and from that find the child cell indices
+    for iv, vertex in enumerate(hs):
+        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
+        #negative level indicates that we need to build a super-octree
+        if level < 0: 
+            #print ' '
+            #we are not on the root grid yet, but this is 
+            #how many equivalent root grid cells we would have
+            #level -1 means our oct grid's children are the same size
+            #as the root grid (hence the -level-1)
+            dx = 2**(-level-1) #this is the child width 
+            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
+            #print level,iv,vertex,
+            #print na.array([cx,cy,cz])/128.0*physical_width-physical_center,
+            #print na.array([cx+dx,cy+dx,cz+dx])/128.0*physical_width-physical_center
+            #we always refine the negative levels
+            hs_child = hilbert_state(vertex.copy())
+            refined[curpos.refined_pos] = 1
+            levels[curpos.refined_pos] = level
+            curpos.refined_pos += 1
+            RecurseOctreeDepthFirstHilbert(i, j, k,
+                                curpos, 0, hs_child, output, refined, levels, grids,
+                                level+1,
+                                physical_center=physical_center,
+                                physical_width=physical_width,)
+        else:
+            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
+            ci = grid.child_indices[i,j,k] #is this oct subdivided?
+            if ci == -1:
+                for fi in range(grid.fields.shape[0]):
+                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
+                refined[curpos.refined_pos] = 0
+                levels[curpos.refined_pos] = level
+                curpos.output_pos += 1 #position updated after write
+                curpos.refined_pos += 1
+                print level+1, #these child cells are a level deeper
+                print (grid.left_edges+na.array([i,j,k])*grid.dx)*physical_width-physical_center, #cell LE
+                print (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*physical_width-physical_center, #cell RE
+                print iv,vertex
+            else:
+                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
+                cy = (grid.left_edges[1] + j*grid.dx[0])
+                cz = (grid.left_edges[2] + k*grid.dx[0])
+                #print level,iv,vertex,
+                #print na.array([cx,cy,cz])*physical_width -physical_center,
+                #print na.array([cx+grid.dx[0],cy+grid.dx[0],cz+grid.dx[0]])*physical_width - physical_center
+                hs_child = hilbert_state(vertex.copy())
+                refined[curpos.refined_pos] = 1
+                levels[curpos.refined_pos] = level
+                curpos.refined_pos += 1 #position updated after write
+                child_grid = grids[ci]
+                child_dx = child_grid.dx[0]
+                child_leftedges = child_grid.left_edges
+                child_i = int((cx - child_leftedges[0])/child_dx)
+                child_j = int((cy - child_leftedges[1])/child_dx)
+                child_k = int((cz - child_leftedges[2])/child_dx)
+                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
+                                    curpos, ci, hs_child, output, refined, levels, grids,
+                                    level+1,
+                                    physical_center=physical_center,
+                                    physical_width=physical_width)
+
+def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+
+    #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     st_table = pyfits.new_table(cols)
     st_table.name = "GRIDSTRUCTURE"
+    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
+    fdx = fre-fle
+    print 'WARNING: debug limits set on minxyz maxxyz'
+    for i,a in enumerate('xyz'):
+        #st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        #st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("n%s" % a, fdx[i])
+        st_table.header.update("subdiv%s" % a, 2)
+    st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
 
-    # Now we update our table with units
-    # ("lengthunit", length_unit, "Length unit for grid");
-    # ("minx", getmin () [0], length_unit_comment);
-    # ("miny", getmin () [1], length_unit_comment);
-    # ("minz", getmin () [2], length_unit_comment);
-    # ("maxx", getmax () [0], length_unit_comment);
-    # ("maxy", getmax () [1], length_unit_comment);
-    # ("maxz", getmax () [2], length_unit_comment);
-    # ("nx", g_.getn () [0], "");
-    # ("ny", g_.getn () [1], "");
-    # ("nz", g_.getn () [2], "");
-    # ("subdivtp", subdivtp, "Type of grid subdivision");
-    # ("subdivx", sub_div[0], "");
-    # ("subdivy", sub_div[1], "");
-    # ("subdivz", sub_div[2], "");
-
-    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
-    for i,a in enumerate('xyz'):
-        st_table.header.update("min%s" % a, DLE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("max%s" % a, DRE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("n%s" % a, DX[i])
-        st_table.header.update("subdiv%s" % a, 2)
-    st_table.header.update("subdivtp", "UNIFORM", "Type of grid subdivision")
-
-    # Now grid data itself
-    # ("M_g_tot", total_quantities.m_g(), "[" + mass_unit +
-    #         "] Total gas mass in all cells");
-    # ("SFR_tot", total_quantities.SFR, "[" + SFR_unit +
-    #         "] Total star formation rate of all cells");
-    # ("timeunit", time_unit, "Time unit is "+time_unit);
-    # ("tempunit", temp_unit, "Temperature unit is "+time_unit);
-
-    # (Tdouble, "mass_gas", 1, mass_unit );
-    # (Tdouble, "SFR", 1, SFR_unit );
-    # (Tdouble, "mass_metals", 1, mass_unit );
-    # (Tdouble, "gas_temp_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "gas_teff_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "cell_volume", 1, length_unit + "^3" );
-
+    #now the hydro grid data
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    fd = {}
+    for i,f in enumerate(fields): 
+        fd[f]=output[:,i]
+    del output
     col_list = []
-    size = output["CellMassMsun"].size
-    tm = output["CellMassMsun"].sum()
+    size = fd["CellMassMsun"].size
+    tm = fd["CellMassMsun"].sum()
     col_list.append(pyfits.Column("mass_gas", format='D',
-                    array=output.pop('CellMassMsun'), unit="Msun"))
+                    array=fd['CellMassMsun'], unit="Msun"))
     col_list.append(pyfits.Column("mass_metals", format='D',
-                    array=output.pop('MetalMass'), unit="Msun"))
+                    array=fd['MetalMass'], unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stars", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("age_m", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("age_l", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("L_bol", format='D',
+    #                 array=na.zeros(size,dtype='D')))
+    # col_list.append(pyfits.Column("L_lambda", format='D',
+    #                 array=na.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
-                    array=output['TemperatureTimesCellMassMsun'], unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("gas_teff_m", format='D',
-                    array=output.pop('TemperatureTimesCellMassMsun'), unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("cell_volume", format='D',
-                    array=output.pop('CellVolumeCode').astype('float64')*pf['kpc']**3.0,
+                    array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
                     array=na.zeros(size, dtype='D')))
@@ -235,150 +331,242 @@
     md_table.header.update("snaptime", pf.current_time*pf['years'])
     md_table.name = "YT"
 
-    hls = [pyfits.PrimaryHDU(), st_table, mg_table,md_table]
-    if write_particles: hls.append(pd_table)
+    phdu = pyfits.PrimaryHDU()
+    phdu.header.update('nbodycod','yt')
+    hls = [phdu, st_table, mg_table,md_table]
+    hls.append(particle_data)
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list_task(g,fields, grids = [], 
-        levels_finest = defaultdict(lambda: 0), 
-        levels_all = defaultdict(lambda: 0)):
-    ff = na.array([g[f] for f in fields])
-    grids.append(amr_utils.OctreeGrid(
-                    g.child_index_mask.astype('int32'),
-                    ff.astype("float64"),
-                    g.LeftEdge.astype('float64'),
-                    g.ActiveDimensions.astype('int32'),
-                    na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                    g._id_offset))
-    levels_all[g.Level] += g.ActiveDimensions.prod()
-    levels_finest[g.Level] += g.child_mask.ravel().sum()
-    g.clear_data()
-    return grids,levels_finest,levels_all
+def nearest_power(x):
+    #round up to the next power of 2
+    x-=1
+    x |= x >> 1
+    x |= x >> 2 
+    x |= x >> 4
+    x |= x >> 8
+    x |= x >> 16
+    x+=1 
+    return x
 
-def initialize_octree_list(pf, fields,parallel=False):
-    #import pdb; pdb.set_trace()
-    i=0
-    o_length = r_length = 0
-    grids = []
-    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+def round_nearest_edge(pf,dle,dre):
+    dds = pf.domain_dimensions
+    ile = na.floor(dle*dds).astype('int')
+    ire = na.ceil(dre*dds).astype('int') 
     
-    grids = []
-    levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
- 
-    import pdb; pdb.set_trace()
-    if not parallel:
-        for g in pf.h.grids:
-            i+=1
-            tgrids,tlevels_finest,tlevels_all = \
-                initialize_octree_list_task(g,fields,grids=grids,
-                        levels_finest=levels_finest,
-                        levels_all=levels_all)
-            pbar.update(i)
-    else:
-        import multiprocessing
-        nbr_chunks = multiprocessing.cpu_count()
-        chunk_size = len(pf.h.grids) / nbr_chunks
-        if chunk_size % nbr_chunks != 0:
-            # make sure we get the last few items of data when we have
-            # an odd size to chunks (e.g. len(q) == 100 and nbr_chunks == 3
-            nbr_chunks += 1
-        chunks = [(pf.h.grids[x*chunk_size:(x+1)*chunk_size],fields) \
-            for x in xrange(nbr_chunks)]
+    #this is the number of cells the super octree needs to expand to
+    #must round up to the next power of 2
+    width = na.max(ire-ile)
+    width = nearest_power(width)
+    
+    maxlevel = na.rint(na.log2(width)).astype('int')
+    return ile,ire,maxlevel
 
-        p = multiprocessing.Pool()
-        # send out the work chunks to the Pool
-        # po is a multiprocessing.pool.MapResult
-        po = p.map_async(initialize_octree_list_task,chunks)
-        # we get a list of lists back, one per chunk, so we have to
-        # flatten them back together
-        # po.get() will block until results are ready and then
-        # return a list of lists of results
-        results = po.get()
+def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
+                          creation_time=None,initial_mass=None,
+                          current_mass=None,metallicity=None,
+                          radius = None,
+                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
+    dd = pf.h.all_data()
+    idx = dd["particle_type"] == star_type
+    if pos is None:
+        pos = na.array([dd["particle_position_%s" % ax]
+                        for ax in 'xyz']).transpose()
+    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    pos = pos[idx]*pf['kpc'] #unitary units -> kpc
+    if age is None:
+        age = dd["particle_age"][idx]*pf['years'] # seconds->years
+    if vel is None:
+        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+                        for ax in 'xyz']).transpose()
+        # Velocity is cm/s, we want it to be kpc/yr
+        #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
+        vel *= 1.02268944e-14 
+    if initial_mass is None:
+        #in solar masses
+        initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']
+    if current_mass is None:
+        #in solar masses
+        current_mass = dd["particle_mass"][idx]*pf['Msun']
+    if metallicity is None:
+        #this should be in dimensionless units, metals mass / particle mass
+        metallicity = dd["particle_metallicity"][idx]
+    if radius is None:
+        radius = initial_mass*0.0+10.0/1000.0 #10pc radius
 
-        for tgrids,tlevels_finest,tlevels_all in results:
-            grids += tgrids
-            for k,v in tlevels_finest.iteritems():
-                levels_finest[k] += v
-            for k,v in  tlevels_all.iteritems():
-                levels_all[k] += v
+    formation_time = pf.current_time-age
+    #create every column
+    col_list = []
+    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
+    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
+    col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
+    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
+    col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
+    col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
+    col_list.append(pyfits.Column("age_m", format="D", array=age))
+    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    #For particles, Sunrise takes 
+    #the dimensionless metallicity, not the mass of the metals
+    col_list.append(pyfits.Column("metallicity", format="D",
+        array=metallicity,unit="Msun")) 
+    col_list.append(pyfits.Column("L_bol", format="D",
+        array=na.zeros(current_mass.size)))
+    
+    #make the table
+    cols = pyfits.ColDefs(col_list)
+    pd_table = pyfits.new_table(cols)
+    pd_table.name = "PARTICLEDATA"
+    return pd_table
 
 
-    pbar.finish()
-    ogl = amr_utils.OctreeGridList(grids)
-    return ogl, levels_finest, levels_all
+def add_fields():
+    """Add three Eulerian fields Sunrise uses"""
+    def _MetalMass(field, data):
+        return data["Metal_Density"] * data["CellVolume"]
+        
+    def _convMetalMass(data):
+        return 1.0/1.989e33
+    
+    add_field("MetalMass", function=_MetalMass,
+              convert_function=_convMetalMass)
 
-def generate_flat_octree(pf, fields, subregion_bounds = None,parallel=False):
-    """
-    Generates two arrays, one of the actual values in a depth-first flat
-    octree array, and the other of the values describing the refinement.
-    This allows for export to a code that understands this.  *field* is the
-    field used in the data array.
-    """
-    fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields,parallel=parallel)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((o_length,len(fields)), dtype='float64')
-    refined = na.zeros(r_length, dtype='int32')
-    position = amr_utils.position()
-    if subregion_bounds is None:
-        sx, sy, sz = 0, 0, 0
-        nx, ny, nz = ogl[0].dimensions
-    else:
-        ss, ns = zip(*subregion_bounds)
-        sx, sy, sz = ss
-        nx, ny, nz = ns
-    print "Running from %s for %s cells" % (
-            (sx,sy,sz), (nx,ny,nz))
-    t1 = time.time()
-    amr_utils.RecurseOctreeDepthFirst(
-               sx, sy, sz, nx, ny, nz,
-               position, 0,
-               output, refined, ogl)
-    t2 = time.time()
-    print "Finished.  Took %0.3e seconds." % (t2-t1)
-    dd = {}
-    for i, field in enumerate(fields):
-        dd[field] = output[:position.output_pos,i]
-    return dd, refined[:position.refined_pos]
+    def _initial_mass_cen_ostriker(field, data):
+        # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
+        # Check Grid_AddToDiskProfile.C and star_maker7.src
+        star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
+        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
+        dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
+        xv1 = ((data.pf["InitialTime"] - data["creation_time"])
+                / data["dynamical_time"])
+        xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
+                / data["dynamical_time"])
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        minitial = data["ParticleMassMsun"] / denom
+        return minitial
 
-def generate_levels_octree(pf, fields):
-    fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
-    corners = na.zeros((r_length, 3), dtype='float64')
-    position = na.add.accumulate(
-                na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
-    pp = position.copy()
-    amr_utils.RecurseOctreeByLevels(0, 0, 0,
-               ogl[0].dimensions[0],
-               ogl[0].dimensions[1],
-               ogl[0].dimensions[2],
-               position.astype('int64'), 1,
-               output, genealogy, corners, ogl)
-    return output, genealogy, levels_all, levels_finest, pp, corners
+    add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-def _initial_mass_cen_ostriker(field, data):
-    # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
-    # Check Grid_AddToDiskProfile.C and star_maker7.src
-    star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
-    star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-    dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
-    xv1 = ((data.pf["InitialTime"] - data["creation_time"])
-            / data["dynamical_time"])
-    xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
-            / data["dynamical_time"])
-    denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
-    minitial = data["ParticleMassMsun"] / denom
-    return minitial
-add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
-def _temp_times_mass(field, data):
-    return data["Temperature"]*data["CellMassMsun"]
-add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
+class position:
+    def __init__(self):
+        self.output_pos = 0
+        self.refined_pos = 0
 
+
+def vertex_to_octant(v):
+    if v[0]==0 and v[1]==0 and v[2]==0: return 1
+    if v[0]==0 and v[1]==1 and v[2]==0: return 2
+    if v[0]==1 and v[1]==1 and v[2]==0: return 3
+    if v[0]==1 and v[1]==0 and v[2]==0: return 4
+    if v[0]==1 and v[1]==0 and v[2]==1: return 5
+    if v[0]==1 and v[1]==1 and v[2]==1: return 6
+    if v[0]==0 and v[1]==1 and v[2]==1: return 7
+    if v[0]==0 and v[1]==0 and v[2]==1: return 8
+    raise IOError
+
+
+class hilbert_state:
+    vertex = na.zeros(3,dtype='int64')
+    nvertex = na.zeros(3,dtype='int64')
+    signa,signb,signc = 0,0,0
+    dima,dimb,dimc= -1,-1,-2
+    parent_octant = -1
+
+    def __init__(self, vertex=None):
+        if vertex is None:
+            vertex = na.array([1,0,0],dtype='int64')
+        self.vertex= vertex
+        self.parent_octant = vertex_to_octant(self.vertex)
+        self.swap_by_octant(self.parent_octant)
+        self.signc = self.signa*self.signb
+        self.dimc = 3-self.dima-self.dimb
+
+    def swap_by_octant(self, octant): 
+        if octant==1: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==2: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = 1
+        elif octant==3:
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==4: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = -1
+            self.signb = -1
+        elif octant==5: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = 1
+            self.signb = -1
+        elif octant==6: 
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        elif octant==7: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = -1
+        elif octant==8: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = -1
+            self.signb = 1
+        assert octant < 9
+
+
+    def __iter__(self):
+        return self.next()
+
+    def next(self):
+        #yield the 8 cells of this oct in Hilbert order:
+        #walk along the first dimension, then along the second,
+        #back along the first, climb the third, forward along the
+        #first, back along the second, and back along the first
+        self.vertex[self.dima] = 0 if self.signa>0 else 1
+        self.vertex[self.dimb] = 0 if self.signb>0 else 1
+        self.vertex[self.dimc] = 0 if self.signc>0 else 1
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimb] = self.vertex[self.dimb] + self.signb; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimc] = self.vertex[self.dimc] + self.signc; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
+        yield self.vertex.copy()
+        self.vertex[self.dimb] = self.vertex[self.dimb] - self.signb; 
+        yield self.vertex.copy()
+        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
+        yield self.vertex.copy()
+
+    def next_hilbert(self):
+        nvertex = self.next()
+        return nvertex, hilbert_state(nvertex)
+
+
+
+
+if not debug:
+    from amr_utils import hilbert_state
+    from amr_utils import RecurseOctreeDepthFirstHilbert
+    from amr_utils import position

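A hedged sketch of calling the reworked exporter with its new signature (pf, fn, star_particle_type, dle, dre). The dataset name, star particle type number, and region bounds are placeholders; the edges are in unitary units and get rounded to root-grid cells by round_nearest_edge as described above.

    import numpy as na
    from yt.mods import *
    from yt.analysis_modules.sunrise_export.sunrise_exporter import \
        export_to_sunrise

    pf = load("my_art_dataset")                  # placeholder ART dataset
    # extract (roughly) the central eighth of the box, unitary units [0, 1]
    dle = na.array([0.4375, 0.4375, 0.4375])
    dre = na.array([0.5625, 0.5625, 0.5625])
    export_to_sunrise(pf, "sunrise_input.fits", star_particle_type=5,
                      dle=dle, dre=dre)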

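A worked example of the power-of-two padding performed by nearest_power and round_nearest_edge above; the root-grid dimensions and region bounds are invented, and the helper is restated only so the arithmetic can be followed in isolation.

    import numpy as na

    def nearest_power(x):
        # round x up to the next power of two (same bit trick as above)
        x -= 1
        x |= x >> 1; x |= x >> 2; x |= x >> 4
        x |= x >> 8; x |= x >> 16
        return x + 1

    dds = na.array([128, 128, 128])             # assumed root-grid dims
    dle = na.array([0.40, 0.40, 0.40])          # requested region (unitary)
    dre = na.array([0.69, 0.69, 0.69])
    ile = na.floor(dle * dds).astype('int')     # -> [51 51 51]
    ire = na.ceil(dre * dds).astype('int')      # -> [89 89 89]
    width = nearest_power(na.max(ire - ile))    # 38 cells -> padded to 64
    maxlevel = int(na.rint(na.log2(width)))     # -> 6 super-octree levels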
diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -138,7 +138,7 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = na.float64
@@ -356,6 +356,7 @@
         
 
         if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
@@ -370,111 +371,146 @@
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
-            np = lspecies[-1]
-            if self.pf.dm_only:
-                np = lspecies[0]
-            self.pf.particle_position   = self.pf.particle_position[:np]
-            self.pf.particle_position  -= 1.0 #fortran indices start with 0
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
             pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[:np]
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type       = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass       = na.zeros(np,dtype='float64')
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
                 #already in unitary units
                 self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
             self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
             
-            #import pdb; pdb.set_trace()
 
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                self.pf.particle_type[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
                 a=b
             pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i particles of type %i'%(np,j))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
             
             self.pf.particle_star_index = i
             
-            if self.pf.file_star_data and (not self.pf.dm_only):
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
                 nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
                 if nstars > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    self.pf.particle_star_ages  = \
+                    sages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    self.pf.particle_star_ages *= 1.0e9
-                    self.pf.particle_star_ages *= 365*24*3600 #to seconds
-                    self.pf.particle_star_ages = self.pf.current_time-self.pf.particle_star_ages
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
                     pbar.finish()
-                    self.pf.particle_star_metallicity1 = metallicity1
-                    self.pf.particle_star_metallicity2 = metallicity2
-                    self.pf.particle_star_mass_initial = imass*um
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
-            left = self.pf.particle_position.shape[0]
+            done = 0
             init = self.pf.particle_position.shape[0]
-            pbar = get_pbar("Gridding Particles ",init)
-            pos = self.pf.particle_position.copy()
-            pid = na.arange(pos.shape[0]).astype('int64')
+            pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            max_level = min(self.pf.max_level,self.pf.limit_level)
+            #if type(self.pf.grid_particles) == type(5):
+            #    max_level = min(max_level,self.pf.grid_particles)
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
             #grid particles at the finest level, removing them once gridded
-            if self.pf.grid_particles:
-                for level in range(max_level,self.pf.min_level-1,-1):
-                    lidx = self.grid_levels[:,0] == level
-                    for gi,gidx in enumerate(na.where(lidx)[0]): 
-                        g = grids[gidx]
-                        assert g is not None
-                        le,re = self.grid_left_edge[gidx],self.grid_right_edge[gidx]
-                        idx = na.logical_and(na.all(le < pos,axis=1),
-                                             na.all(re > pos,axis=1))
-                        fidx = pid[idx]
-                        np = na.sum(idx)                     
-                        g.NumberOfParticles = np
-                        grid_particle_count[gidx,0]=np
-                        g.hierarchy.grid_particle_count = grid_particle_count
-                        if np==0: 
-                            g.particle_indices = []
-                            #we have no particles in this grid
-                        else:
-                            g.particle_indices = fidx.astype('int64')
-                            pos = pos[~idx] #throw out gridded particles from future gridding
-                            pid = pid[~idx]
-                        self.grids[gidx] = g
-                        left -= np
-                        pbar.update(init-left)
-                pbar.finish()
-            else:
-                g = grids[0]
-                g.NumberOfParticles = pos.shape[0]
-                grid_particle_count[0,0]=pos.shape[0]
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
+            pbar.finish()
+            
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
                 g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = pid
-                grids[0] = g
-                for gi,g in enumerate(grids): self.grids[gi]=g
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
             
-        else:
-            
-            pbar = get_pbar("Finalizing grids ",len(grids))
-            for gi, g in enumerate(grids): 
-                self.grids[gi] = g
-            pbar.finish()
+        pbar = get_pbar("Finalizing grids ",len(grids))
+        for gi, g in enumerate(grids): 
+            self.grids[gi] = g
+        pbar.finish()
             
 
     def _get_grid_parents(self, grid, LE, RE):
@@ -568,8 +604,10 @@
                  discover_particles=False,
                  use_particles=True,
                  limit_level=None,
-                 dm_only=False,
-                 grid_particles=False):
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -580,8 +618,9 @@
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
-        self.dm_only = dm_only
+        self.only_particle_type = only_particle_type
         self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
             self.limit_level = na.inf

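The only_particle_type logic above relies on lspecies holding cumulative per-species particle counts, so prepending a zero turns it into slice boundaries. A small illustration with invented numbers:

    import numpy as na

    lspecies = na.array([100000, 150000, 162000])   # cumulative counts
    clspecies = na.concatenate(([0,], lspecies))

    only_particle_type = 1                           # ask for the 2nd species
    npa = clspecies[only_particle_type]              # -> 100000
    npb = clspecies[only_particle_type + 1]          # -> 150000
    # particle_position[npa:npb] then holds just that species' 50000 particles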

diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,10 +26,14 @@
 """
 
 art_particle_field_names = [
+'particle_age',
 'particle_index',
 'particle_mass',
+'particle_mass_initial',
 'particle_creation_time',
-'particle_metallicity_fraction',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
 'particle_position_x',
 'particle_position_y',
 'particle_position_z',

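With the field list expanded as above, the star-particle quantities become reachable from ordinary data objects once an ART dataset is loaded. A minimal, hedged sketch (dataset name assumed; units follow whatever conversion factors data_structures.py applies):

    from yt.mods import *

    pf = load("my_art_dataset")                  # placeholder ART dataset
    dd = pf.h.all_data()
    stars = dd["particle_type"] == pf.particle_star_index
    mean_age = dd["particle_age"][stars].mean()
    mean_z   = dd["particle_metallicity"][stars].mean()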

diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -58,10 +58,11 @@
         add_field(f, function=lambda a,b: None, take_log=True,
                   validators = [ValidateDataField(f)])
 
-#Fields that are verified to be OK unit-wise:
+#Hydro Fields that are verified to be OK unit-wise:
 #Density
+#Temperature
 
-#Fields that need to be tested:
+#Hydro Fields that need to be tested:
 #TotalEnergy
 #XYZMomentum
 #Pressure
@@ -70,14 +71,23 @@
 #MetalDensity SNII + SNia
 #Potentials
 
-#Derived fields that are OK
-#Temperature
-
-#Derived fields that are untested:
+#Hydro Derived fields that are untested:
 #metallicities
 #xyzvelocity
 
-#Individual definitions for native fields
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
+
 
 def _convertDensity(data):
     return data.convert("Density")


diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = grid.particle_indices
+        idx = na.array(grid.particle_indices)
         if field == 'particle_index':
-            return idx
+            return na.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -142,40 +142,23 @@
         if field == 'particle_velocity_z':
             return grid.pf.particle_velocity[idx][:,2]
         
-        tridx = grid.particle_indices >= grid.pf.particle_star_index
-        sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
-        n = grid.particle_indices
-        if field == 'particle_creation_time':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_ages[sidx]
-            return tr
-        if field == 'particle_metallicity_fraction':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx]  = grid.pf.particle_star_metallicity1[sidx]
-                tr[tridx] += grid.pf.particle_star_metallicity2[sidx]
-            return tr
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
         if field == 'particle_metallicity1':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity1[sidx]
-            return tr
+            return grid.pf.particle_metallicity1[idx]
         if field == 'particle_metallicity2':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity2[sidx]
-            return tr
+            return grid.pf.particle_metallicity2[idx]
         if field == 'particle_mass_initial':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_mass_initial[sidx]
-            return tr
+            return grid.pf.particle_mass_initial[idx]
+        
         raise 'Should have matched one of the particle fields...'
 
         
     def _read_data_set(self, grid, field):
-        #import pdb; pdb.set_trace()
         if field in art_particle_field_names:
             return self._read_particle_field(grid, field)
         pf = grid.pf
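
The change above flattens the stellar reads: with this commit every particle attribute (particle_age, particle_metallicity1/2, particle_mass_initial, ...) is a full-length array with one entry per particle, so a grid read is a single fancy index. The scheme it replaces (and which a later commit restores) keeps star-only arrays and scatters them into a -1-filled buffer using particle_star_index as the offset. A small standalone sketch of the two layouts (hypothetical helper names, NumPy only):

    import numpy as np

    # Full-length layout: one value per particle, a grid read is one fancy index.
    def read_full_length(field_array, idx):
        return field_array[np.asarray(idx)]

    # Star-offset layout: values exist only for particles past star_index;
    # non-star entries are filled with -1, matching the older code path.
    def read_star_offset(star_array, idx, star_index):
        idx = np.asarray(idx)
        out = np.zeros(idx.shape[0], dtype='float64') - 1.0
        is_star = idx >= star_index
        out[is_star] = star_array[idx[is_star] - star_index]
        return out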


diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -1,8 +1,10 @@
 """
-Simle integrators for the radiative transfer equation
+Simple integrators for the radiative transfer equation
 
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: CASA/University of Colorado
+Author: Christopher Moody <juxtaposicion at gmail.com>
+Affiliation: cemoody at ucsc.edu
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008 Matthew Turk.  All Rights Reserved.
@@ -107,3 +109,73 @@
         ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
         sample[i] = arr[ind[0], ind[1], ind[2]]
     return sample
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    cdef long level_max = np.max(levels)
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+    return assign
+
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.int64_t level_max, 
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    #Todo: instead of iterating every particles, could use kdtree 
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    index_lists = []
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            index_list = []
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+                            index_list += j,
+            index_lists += index_list,
+    return assign,index_lists
+
+    
+    
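
In outline, the two routines added above do a brute-force point-in-box assignment: walk the cell list from the finest level down, and give each still-unassigned particle the index of the first cell whose edges bound it; assign_particles_to_cell_lists additionally keeps a per-cell list of the particle indices it claimed. A rough pure-NumPy equivalent of the core loop (illustrative only, not part of the commit):

    import numpy as np

    def assign_particles_numpy(levels, left_edges, right_edges,
                               pos_x, pos_y, pos_z):
        # levels: (ncells,); left_edges/right_edges: (ncells, 3); pos_*: (npart,)
        assign = np.zeros(pos_x.shape[0], dtype='int32') - 1
        for level in range(int(levels.max()), 0, -1):    # finest level first
            for i in np.where(levels == level)[0]:
                free = assign == -1                      # skip gridded particles
                inside = ((left_edges[i, 0] <= pos_x) & (pos_x <= right_edges[i, 0]) &
                          (left_edges[i, 1] <= pos_y) & (pos_y <= right_edges[i, 1]) &
                          (left_edges[i, 2] <= pos_z) & (pos_z <= right_edges[i, 2]))
                assign[free & inside] = i
        return assign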


diff -r db3a9b59d37cd5c1988a904c8fa56ac4028599f2 -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -33,6 +33,175 @@
         self.output_pos = 0
         self.refined_pos = 0
 
+cdef class hilbert_state:
+#    """
+#    From libpjutil/hilbert.h:
+#    This class represents the extra information associated with an
+#    octree cell to be able to perform a Hilbert ordering. This
+#    information consists of a permutation of the dimensions and the
+#    direction of the 3 axes, which can be encoded in one byte. For an
+#    octree traversal stack this information needs to be saved at each
+#    level, so it's good to have a compact representation. 
+#
+#    The data is stored as follows: Bits 0-1 stores the first
+#    dimension, 2-3 the second. Because we know it is a permutation of
+#    012, there is no need to store the third dimension. Then bits 4-6
+#    are the signs of the three axes.
+#
+#    Apparently there is no way to encode a uint8_t literal except as a
+#    character constant, hence the use of those.
+#    """
+# These assignments are from 2.7 of BG200
+# vertex > 1st dim 2nd dim 3rd dim
+# 1 > +z+x+y 
+# 2 > +y+z+x
+# 3 > +y+x+z
+# 4 > -z-y+x 
+# 5 > +z-y-x
+# 6 > +y+x+z
+# 7 > +y-z-x
+# 8 > -z+x-y
+    cdef public int dima,dimb,dimc,signa,signb,signc
+    #cdef np.ndarray[np.int32,ndim =1] a = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] b = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] c = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] d = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] e = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] f = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] g = na.zeros(3)
+    #cdef np.ndarray[np.int32,ndim =1] h = na.zeros(3)
+
+    cdef np.ndarray[np.int64,ndim =1] oct = na.zeros(3)
+    cdef np.ndarray[np.int64,ndim =1] noct = na.zeros(3)
+
+
+    #a[0],a[1],a[2] = 0,0,0
+    #b[0],b[1],b[2] = 0,1,0
+    #c[0],c[1],c[2] = 1,1,0
+    #d[0],d[1],d[2] = 1,0,0
+    #e[0],e[1],e[2] = 1,0,1
+    #f[0],f[1],f[2] = 1,1,1
+    #g[0],g[1],g[2] = 0,1,1
+    #h[0],h[1],h[2] = 0,0,1
+
+    def __cinit__(int parent_octant):
+        self.swap_by_octant(parent_octant)
+        self.signc = signa*signb
+        self.dimc = 3-dima-dimb
+    
+    def swap_by_octant(int octant): 
+        if octant==1: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        if octant==2: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = 1
+        if octant==3:
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        if octant==4: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = -1
+            self.signb = -1
+        if octant==5: 
+            self.dima = 2
+            self.dimb = 1
+            self.signa = 1
+            self.signb = -1
+        if octant==6: 
+            self.dima = 1
+            self.dimb = 0
+            self.signa = 1
+            self.signb = 1
+        if octant==7: 
+            self.dima = 1
+            self.dimb = 2
+            self.signa = 1
+            self.signb = -1
+        if octant==8: 
+            self.dima = 2
+            self.dimb = 0
+            self.signa = -1
+            self.signb = 1
+
+    def __iter__(self):
+        return self.next_hilbert()
+
+    def next(self):
+        #yield the next cell in this oct
+        
+        #as/descend the first dimension
+        # the second dim
+        #reverse the first
+        #climb the third
+        oct[self.dima] = 0 if self.signx>0 else 1
+        oct[self.dimb] = 0 if self.signy>0 else 1
+        oct[self.dimc] = 0 if self.signz>0 else 1
+        yield oct
+        oct[self.dima] += self.signa; yield oct
+        oct[self.dimb] += self.signb; yield oct
+        oct[self.dima] -= self.signa; yield oct
+        oct[self.dimc] += self.signc; yield oct
+        oct[self.dima] += self.signa; yield oct
+        oct[self.dimb] -= self.signb; yield oct
+        oct[self.dimb] -= self.signa; return oct
+
+    def next_hilbert(self):
+        noct = self.next()
+        return noct, hilbert_state(noct)
+
+@cython.boundscheck(False)
+def RecurseOctreeDepthFirstHilbert(int i_i, int j_i, int k_i,
+                            int i_f, int j_f, int k_f,
+                            position curpos, int gi, 
+                            hilbert_state hs,
+                            np.ndarray[np.float64_t, ndim=3] output,
+                            np.ndarray[np.int64_t, ndim=1] refined,
+                            OctreeGridList grids):
+    #cdef int s = curpos
+    cdef int i, i_off, j, j_off, k, k_off, ci, fi
+    cdef int child_i, child_j, child_k
+    cdef OctreeGrid child_grid
+    cdef OctreeGrid grid = grids[gi]
+    cdef np.ndarray[np.int32_t, ndim=3] child_indices = grid.child_indices
+    cdef np.ndarray[np.int32_t, ndim=1] dimensions = grid.dimensions
+    cdef np.ndarray[np.float64_t, ndim=4] fields = grid.fields
+    cdef np.ndarray[np.float64_t, ndim=1] leftedges = grid.left_edges
+    cdef np.float64_t dx = grid.dx[0]
+    cdef np.float64_t child_dx
+    cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
+    cdef np.float64_t cx, cy, cz
+    cdef np.ndarray[np.int32_t, ndim=1] oct
+    cdef hilbert_state hs_child
+    for oct,hs_child in hs:
+        i,j,k = oct[0],oct[1],oct[2]
+        ci = grid.child_indices[i,j,k]
+        if ci == -1:
+            for fi in range(fields.shape[0]):
+                output[curpos.output_pos,fi] = fields[fi,i,j,k]
+            refined[curpos.refined_pos] = 0
+            curpos.output_pos += 1
+            curpos.refined_pos += 1
+        else:
+            refined[curpos.refined_pos] = 1
+            curpos.refined_pos += 1
+            child_grid = grids[ci-grid.offset]
+            child_dx = child_grid.dx[0]
+            child_leftedges = child_grid.left_edges
+            child_i = int((cx - child_leftedges[0])/child_dx)
+            child_j = int((cy - child_leftedges[1])/child_dx)
+            child_k = int((cz - child_leftedges[2])/child_dx)
+            RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
+                                curpos, ci - grid.offset, hs_child, output, refined, grids)
+
+
 cdef class OctreeGrid:
     cdef public object child_indices, fields, left_edges, dimensions, dx
     cdef public int level, offset
@@ -79,8 +248,13 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the int float center (cxyz) of each child cell
+    # and from that find the child cell indices
     for i_off in range(i_f):
-        i = i_off + i_i
+        i = i_off + i_i #index
         cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
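
The hilbert_state added in this file packs, for each oct, a permutation of the three axes plus a sign per axis; iterating it visits the oct's eight children in Hilbert order, starting at the corner selected by the signs and then stepping +a, +b, -a, +c, +a, -b, -a along the permuted axes. A standalone toy version of that walk (assumed names, plain Python, checkable at the prompt):

    def hilbert_vertices(dim=(0, 1, 2), sgn=(1, 1, 1)):
        # Yield the 8 corners of an oct in the visit order implied by the
        # axis permutation `dim` and per-axis signs `sgn`.
        v = [0, 0, 0]
        for i in range(3):                  # starting corner from the signs
            v[dim[i]] = 0 if sgn[i] > 0 else 1
        yield tuple(v)
        for axis, step in [(0, 1), (1, 1), (0, -1), (2, 1), (0, 1), (1, -1), (0, -1)]:
            v[dim[axis]] += step * sgn[axis]
            yield tuple(v)

    # every corner of the unit oct is visited exactly once
    assert sorted(hilbert_vertices()) == [(i, j, k) for i in (0, 1)
                                          for j in (0, 1) for k in (0, 1)]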



https://bitbucket.org/yt_analysis/yt/changeset/33e1017fc58a/
changeset:   33e1017fc58a
branch:      yt
user:        Christopher Moody
date:        2012-05-03 01:25:23
summary:     something is wrong with the ART frontend units
affected #:  6 files

diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -177,6 +177,19 @@
     levels = levels[:pos.refined_pos] 
     return output,refined
 
+def print_row(level,ple,pre,pw,pc,hs):
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
+    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
+    print hs.dim, hs.sgn
+
+def print_child(level,grid,i,j,k,pw,pc):
+    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
+    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple),
+    print '%1.5f %1.5f %1.5f '%tuple(pre)
+
 def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
                             curpos, gi, 
                             hs,
@@ -186,21 +199,21 @@
                             grids,
                             level,
                             physical_center=None,
-                            physical_width=None):
+                            physical_width=None,
+                            printr=False):
     grid = grids[gi]
     m = 2**(-level-1) if level < 0 else 1
     ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
     pre = ple+grid.dx*m
-    print level,ple*physical_width-physical_center,
-    print pre*physical_width-physical_center, hs.parent_octant
-
+    if printr:
+        print_row(level,ple,pre,physical_width,physical_center,hs)
 
     #here we go over the 8 octants
     #in general however, a mesh cell on this level
     #may have more than 8 children on the next level
     #so we find the int float center (cxyz) of each child cell
     # and from that find the child cell indices
-    for iv, vertex in enumerate(hs):
+    for iv, (vertex,hs_child) in enumerate(hs):
         #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
         #negative level indicates that we need to build a super-octree
         if level < 0: 
@@ -211,11 +224,7 @@
             #as the root grid (hence the -level-1)
             dx = 2**(-level-1) #this is the child width 
             i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
-            #print level,iv,vertex,
-            #print na.array([cx,cy,cz])/128.0*physical_width-physical_center,
-            #print na.array([cx+dx,cy+dx,cz+dx])/128.0*physical_width-physical_center
             #we always refine the negative levels
-            hs_child = hilbert_state(vertex.copy())
             refined[curpos.refined_pos] = 1
             levels[curpos.refined_pos] = level
             curpos.refined_pos += 1
@@ -234,18 +243,12 @@
                 levels[curpos.refined_pos] = level
                 curpos.output_pos += 1 #position updated after write
                 curpos.refined_pos += 1
-                print level+1, #these child cells are a level deeper
-                print (grid.left_edges+na.array([i,j,k])*grid.dx)*physical_width-physical_center, #parent LE 
-                print (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*physical_width-physical_center, #parent LE 
-                print iv,vertex
+                if printr:
+                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
             else:
                 cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
                 cy = (grid.left_edges[1] + j*grid.dx[0])
                 cz = (grid.left_edges[2] + k*grid.dx[0])
-                #print level,iv,vertex,
-                #print na.array([cx,cy,cz])*physical_width -physical_center,
-                #print na.array([cx+grid.dx[0],cy+grid.dx[0],cz+grid.dx[0]])*physical_width - physical_center
-                hs_child = hilbert_state(vertex.copy())
                 refined[curpos.refined_pos] = 1
                 levels[curpos.refined_pos] = level
                 curpos.refined_pos += 1 #position updated after write
@@ -457,116 +460,90 @@
         self.output_pos = 0
         self.refined_pos = 0
 
-
-def vertex_to_octant(v):
-    if v[0]==0 and v[1]==0 and v[2]==0: return 1
-    if v[0]==0 and v[1]==1 and v[2]==0: return 2
-    if v[0]==1 and v[1]==1 and v[2]==0: return 3
-    if v[0]==1 and v[1]==0 and v[2]==0: return 4
-    if v[0]==1 and v[1]==0 and v[2]==1: return 5
-    if v[0]==1 and v[1]==1 and v[2]==1: return 6
-    if v[0]==0 and v[1]==1 and v[2]==1: return 7
-    if v[0]==0 and v[1]==0 and v[2]==1: return 8
-    raise IOError
-
-
-class hilbert_state:
-    vertex = na.zeros(3,dtype='int64')
-    nvertex = na.zeros(3,dtype='int64')
-    signa,signb,signc = 0,0,0
-    dima,dimb,dimc= -1,-1,-2
-    parent_octant = -1
-
-    def __init__(self, vertex=None):
-        if vertex is None:
-            vertex = na.array([1,0,0],dtype='int64')
-        self.vertex= vertex
-        self.parent_octant = vertex_to_octant(self.vertex)
-        self.swap_by_octant(self.parent_octant)
-        self.signc = self.signa*self.signb
-        self.dimc = 3-self.dima-self.dimb
-
-    def swap_by_octant(self, octant): 
-        if octant==1: 
-            self.dima = 2
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        elif octant==2: 
-            self.dima = 1
-            self.dimb = 2
-            self.signa = 1
-            self.signb = 1
-        elif octant==3:
-            self.dima = 1
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        elif octant==4: 
-            self.dima = 2
-            self.dimb = 1
-            self.signa = -1
-            self.signb = -1
-        elif octant==5: 
-            self.dima = 2
-            self.dimb = 1
-            self.signa = 1
-            self.signb = -1
-        elif octant==6: 
-            self.dima = 1
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        elif octant==7: 
-            self.dima = 1
-            self.dimb = 2
-            self.signa = 1
-            self.signb = -1
-        elif octant==8: 
-            self.dima = 2
-            self.dimb = 0
-            self.signa = -1
-            self.signb = 1
-        assert octant < 9
-
+class hilbert_state():
+    def __init__(self,dim=None,sgn=None,octant=None):
+        if dim is None: dim = [0,1,2]
+        if sgn is None: sgn = [1,1,1]
+        if octant is None: octant = 5
+        self.dim = dim
+        self.sgn = sgn
+        self.octant = octant
+    def flip(self,i):
+        self.sgn[i]*=-1
+    def swap(self,i,j):
+        temp = self.dim[i]
+        self.dim[i]=self.dim[j]
+        self.dim[j]=temp
+        axis = self.sgn[i]
+        self.sgn[i] = self.sgn[j]
+        self.sgn[j] = axis
+    def reorder(self,i,j,k):
+        ndim = [self.dim[i],self.dim[j],self.dim[k]] 
+        nsgn = [self.sgn[i],self.sgn[j],self.sgn[k]]
+        self.dim = ndim
+        self.sgn = nsgn
+    def copy(self):
+        return hilbert_state([self.dim[0],self.dim[1],self.dim[2]],
+                             [self.sgn[0],self.sgn[1],self.sgn[2]],
+                             self.octant)
+    def descend(self,o):
+        child = self.copy()
+        child.octant = o
+        if o==0:
+            child.swap(0,2)
+        elif o==1:
+            child.swap(1,2)
+        elif o==2:
+            pass
+        elif o==3:
+            child.flip(0)
+            child.flip(2)
+            child.reorder(2,0,1)
+        elif o==4:
+            child.flip(0)
+            child.flip(1)
+            child.reorder(2,0,1)
+        elif o==5:
+            pass
+        elif o==6:
+            child.flip(1)
+            child.flip(2)
+            child.swap(1,2)
+        elif o==7:
+            child.flip(0)
+            child.flip(2)
+            child.swap(0,2)
+        return child
 
     def __iter__(self):
-        return self.next()
+        vertex = [0,0,0]
+        j=0
+        for i in range(3):
+            vertex[self.dim[i]] = 0 if self.sgn[i]>0 else 1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0]
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] += self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[2]] += self.sgn[2] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] -= self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
 
-    def next(self):
-        #yield the next cell in this oct
-        
-        #as/descend the first dimension
-        # the second dim
-        #reverse the first
-        #climb the third
-        self.vertex[self.dima] = 0 if self.signa>0 else 1
-        self.vertex[self.dimb] = 0 if self.signb>0 else 1
-        self.vertex[self.dimc] = 0 if self.signc>0 else 1
-        yield self.vertex.copy()
-        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
-        yield self.vertex.copy()
-        self.vertex[self.dimb] = self.vertex[self.dimb] + self.signb; 
-        yield self.vertex.copy()
-        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
-        yield self.vertex.copy()
-        self.vertex[self.dimc] = self.vertex[self.dimc] + self.signc; 
-        yield self.vertex.copy()
-        self.vertex[self.dima] = self.vertex[self.dima] + self.signa; 
-        yield self.vertex.copy()
-        self.vertex[self.dimb] = self.vertex[self.dimb] - self.signb; 
-        yield self.vertex.copy()
-        self.vertex[self.dima] = self.vertex[self.dima] - self.signa; 
-        yield self.vertex.copy()
 
-    def next_hilbert(self):
-        nvertex = self.next()
-        return nvertex, hilbert_state(nvertex)
 
 
 
-
-if not debug:
-    from amr_utils import hilbert_state
-    from amr_utils import RecurseOctreeDepthFirstHilbert
-    from amr_utils import position
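
With the Cython version removed (the amr_utils imports dropped just above), the exporter now relies on this pure-Python hilbert_state, whose iterator yields (vertex, descended state) pairs for the eight children. A quick interactive check; the import path assumes the class stays in sunrise_exporter as in this commit:

    from yt.analysis_modules.sunrise_export.sunrise_exporter import hilbert_state

    hs = hilbert_state()            # defaults: dim=[0,1,2], sgn=[1,1,1], octant=5
    for vertex, child in hs:        # eight (corner, child hilbert_state) pairs
        print vertex, child.dim, child.sgn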


diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -138,7 +138,7 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = na.float64
@@ -356,7 +356,6 @@
         
 
         if self.pf.file_particle_data:
-            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
@@ -371,142 +370,98 @@
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = na.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            np = lspecies[-1]
+            if self.pf.dm_only:
+                np = lspecies[0]
+            self.pf.particle_position   = self.pf.particle_position[:np]
             #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
             pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+            self.pf.particle_velocity   = self.pf.particle_velocity[:np]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type         = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = na.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            self.pf.particle_type       = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass       = na.zeros(np,dtype='float64')
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
                 #already in unitary units
                 self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
+            self.pf.conversion_factors['particle_metallicity_fraction']=1.0
             self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
             
+            #import pdb; pdb.set_trace()
 
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
-                else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
                 a=b
             pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            if self.pf.single_particle_mass:
-                #cast all particle masses to the same mass
-                cast_type = self.pf.single_particle_type
-                
-
             
             self.pf.particle_star_index = i
             
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
+            if self.pf.file_star_data and (not self.pf.dm_only):
                 nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
                 if nstars > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
+                    self.pf.particle_star_ages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= 1.0e9
-                    sages *= 365*24*3600 #to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
+                    self.pf.particle_star_ages *= 1.0e9
+                    self.pf.particle_star_ages *= 365*24*3600 #to seconds
+                    self.pf.particle_star_ages = self.pf.current_time-self.pf.particle_star_ages
                     pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
+                    self.pf.particle_star_metallicity1 = metallicity1
+                    self.pf.particle_star_metallicity2 = metallicity2
+                    self.pf.particle_star_mass_initial = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
-            done = 0
+            left = self.pf.particle_position.shape[0]
             init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
+            pbar = get_pbar("Gridding Particles ",init)
+            pos = self.pf.particle_position.copy()
+            pid = na.arange(pos.shape[0]).astype('int64')
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            max_level = min(self.pf.max_level,self.pf.limit_level)
             #if type(self.pf.grid_particles) == type(5):
             #    max_level = min(max_level,self.pf.grid_particles)
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
             #grid particles at the finest level, removing them once gridded
-            #pbar = get_pbar("Gridding Particles ",init)
-            #assignment = amr_utils.assign_particles_to_cells(
-            #        self.grid_levels.ravel().astype('int32'),
-            #        self.grid_left_edge.astype('float32'),
-            #        self.grid_right_edge.astype('float32'),
-            #        pos[:,0].astype('float32'),
-            #        pos[:,1].astype('float32'),
-            #        pos[:,2].astype('float32'))
-            #pbar.finish()
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    2, #only bother gridding particles to level 2
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
+            for level in range(max_level,self.pf.min_level-1,-1):
+                lidx = self.grid_levels[:,0] == level
+                for gi,gidx in enumerate(na.where(lidx)[0]): 
+                    g = grids[gidx]
+                    assert g is not None
+                    le,re = self.grid_left_edge[gidx],self.grid_right_edge[gidx]
+                    idx = na.logical_and(na.all(le < pos,axis=1),
+                                         na.all(re > pos,axis=1))
+                    fidx = pid[idx]
+                    np = na.sum(idx)                     
+                    g.NumberOfParticles = np
+                    grid_particle_count[gidx,0]=np
+                    g.hierarchy.grid_particle_count = grid_particle_count
+                    if np==0: 
+                        g.particle_indices = []
+                        #we have no particles in this grid
+                    else:
+                        g.particle_indices = fidx.astype('int64')
+                        pos = pos[~idx] #throw out gridded particles from future gridding
+                        pid = pid[~idx]
+                    grids[gidx] = g
+                    left -= np
+                    pbar.update(init-left)
             pbar.finish()
             
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
         pbar = get_pbar("Finalizing grids ",len(grids))
         for gi, g in enumerate(grids): 
             self.grids[gi] = g
@@ -604,10 +559,8 @@
                  discover_particles=False,
                  use_particles=True,
                  limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
+                 dm_only=False,
+                 grid_particles=False):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -618,9 +571,8 @@
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
+        self.dm_only = dm_only
         self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
             self.limit_level = na.inf
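
The replacement gridding loop in this commit works level by level with plain NumPy: for each grid on the current (finest remaining) level, take the particles whose positions fall strictly between its left and right edges, store those indices on the grid, and drop them from the position/index arrays so coarser grids cannot claim them again. A trimmed-down sketch of that bookkeeping (hypothetical free function, outside the hierarchy class):

    import numpy as np

    def grid_particles_by_level(grid_left, grid_right, grid_levels, pos):
        # grid_left/grid_right: (ngrids, 3); grid_levels: (ngrids,); pos: (npart, 3)
        pid = np.arange(pos.shape[0], dtype='int64')  # indices travel with positions
        particle_indices = [None] * grid_left.shape[0]
        for level in range(int(grid_levels.max()), int(grid_levels.min()) - 1, -1):
            for gidx in np.where(grid_levels == level)[0]:
                inside = (np.all(grid_left[gidx] < pos, axis=1) &
                          np.all(grid_right[gidx] > pos, axis=1))
                particle_indices[gidx] = pid[inside]
                pos, pid = pos[~inside], pid[~inside]  # remove gridded particles
        return particle_indices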


diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,14 +26,10 @@
 """
 
 art_particle_field_names = [
-'particle_age',
 'particle_index',
 'particle_mass',
-'particle_mass_initial',
 'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
+'particle_metallicity_fraction',
 'particle_position_x',
 'particle_position_y',
 'particle_position_z',


diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -58,11 +58,10 @@
         add_field(f, function=lambda a,b: None, take_log=True,
                   validators = [ValidateDataField(f)])
 
-#Hydro Fields that are verified to be OK unit-wise:
+#Fields that are verified to be OK unit-wise:
 #Density
-#Temperature
 
-#Hydro Fields that need to be tested:
+#Fields that need to be tested:
 #TotalEnergy
 #XYZMomentum
 #Pressure
@@ -71,23 +70,14 @@
 #MetalDensity SNII + SNia
 #Potentials
 
-#Hydro Derived fields that are untested:
+#Derived fields that are OK
+#Temperature
+
+#Derived fields that are untested:
 #metallicities
 #xyzvelocity
 
-#Particle fields that are tested:
-#particle_position_xyz
-#particle_type
-#particle_index
-#particle_mass
-#particle_mass_initial
-#particle_age
-#particle_velocity
-#particle_metallicity12
-
-#Particle fields that are untested:
-#NONE
-
+#Individual definitions for native fields
 
 def _convertDensity(data):
     return data.convert("Density")


diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = na.array(grid.particle_indices)
+        idx = grid.particle_indices
         if field == 'particle_index':
-            return na.array(idx)
+            return idx
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -142,19 +142,35 @@
         if field == 'particle_velocity_z':
             return grid.pf.particle_velocity[idx][:,2]
         
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
+        tridx = grid.particle_indices >= grid.pf.particle_star_index
+        sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
+        n = grid.particle_indices
+        if field == 'particle_creation_time':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_ages[sidx]
+            return tr
+        if field == 'particle_metallicity_fraction':
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            if sidx.shape[0]>0:
+                tr[tridx]  = grid.pf.particle_star_metallicity1[sidx]
+                tr[tridx] += grid.pf.particle_star_metallicity2[sidx]
+            return tr
         if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_metallicity1[sidx]
+            return tr
         if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_metallicity2[sidx]
+            return tr
         if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
+            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
+            if sidx.shape[0]>0:
+                tr[tridx] = grid.pf.particle_star_mass_initial[sidx]
+            return tr
         raise 'Should have matched one of the particle fields...'
 
         


diff -r 1503e0ae9825df91c12fb9f8b3a5e6c01dcbcea4 -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -33,175 +33,6 @@
         self.output_pos = 0
         self.refined_pos = 0
 
-cdef class hilbert_state:
-#    """
-#    From libpjutil/hilbert.h:
-#    This class represents the extra information associated with an
-#    octree cell to be able to perform a Hilbert ordering. This
-#    information consists of a permutation of the dimensions and the
-#    direction of the 3 axes, which can be encoded in one byte. For an
-#    octree traversal stack this information needs to be saved at each
-#    level, so it's good to have a compact representation. 
-#
-#    The data is stored as follows: Bits 0-1 stores the first
-#    dimension, 2-3 the second. Because we know it is a permutation of
-#    012, there is no need to store the third dimension. Then bits 4-6
-#    are the signs of the three axes.
-#
-#    Apparently there is no way to encode a uint8_t literal except as a
-#    character constant, hence the use of those.
-#    """
-# These assignments are from 2.7 of BG200
-# vertex > 1st dim 2nd dim 3rd dim
-# 1 > +z+x+y 
-# 2 > +y+z+x
-# 3 > +y+x+z
-# 4 > -z-y+x 
-# 5 > +z-y-x
-# 6 > +y+x+z
-# 7 > +y-z-x
-# 8 > -z+x-y
-    cdef public int dima,dimb,dimc,signa,signb,signc
-    #cdef np.ndarray[np.int32,ndim =1] a = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] b = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] c = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] d = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] e = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] f = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] g = na.zeros(3)
-    #cdef np.ndarray[np.int32,ndim =1] h = na.zeros(3)
-
-    cdef np.ndarray[np.int64,ndim =1] oct = na.zeros(3)
-    cdef np.ndarray[np.int64,ndim =1] noct = na.zeros(3)
-
-
-    #a[0],a[1],a[2] = 0,0,0
-    #b[0],b[1],b[2] = 0,1,0
-    #c[0],c[1],c[2] = 1,1,0
-    #d[0],d[1],d[2] = 1,0,0
-    #e[0],e[1],e[2] = 1,0,1
-    #f[0],f[1],f[2] = 1,1,1
-    #g[0],g[1],g[2] = 0,1,1
-    #h[0],h[1],h[2] = 0,0,1
-
-    def __cinit__(int parent_octant):
-        self.swap_by_octant(parent_octant)
-        self.signc = signa*signb
-        self.dimc = 3-dima-dimb
-    
-    def swap_by_octant(int octant): 
-        if octant==1: 
-            self.dima = 2
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        if octant==2: 
-            self.dima = 1
-            self.dimb = 2
-            self.signa = 1
-            self.signb = 1
-        if octant==3:
-            self.dima = 1
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        if octant==4: 
-            self.dima = 2
-            self.dimb = 1
-            self.signa = -1
-            self.signb = -1
-        if octant==5: 
-            self.dima = 2
-            self.dimb = 1
-            self.signa = 1
-            self.signb = -1
-        if octant==6: 
-            self.dima = 1
-            self.dimb = 0
-            self.signa = 1
-            self.signb = 1
-        if octant==7: 
-            self.dima = 1
-            self.dimb = 2
-            self.signa = 1
-            self.signb = -1
-        if octant==8: 
-            self.dima = 2
-            self.dimb = 0
-            self.signa = -1
-            self.signb = 1
-
-    def __iter__(self):
-        return self.next_hilbert()
-
-    def next(self):
-        #yield the next cell in this oct
-        
-        #as/descend the first dimension
-        # the second dim
-        #reverse the first
-        #climb the third
-        oct[self.dima] = 0 if self.signx>0 else 1
-        oct[self.dimb] = 0 if self.signy>0 else 1
-        oct[self.dimc] = 0 if self.signz>0 else 1
-        yield oct
-        oct[self.dima] += self.signa; yield oct
-        oct[self.dimb] += self.signb; yield oct
-        oct[self.dima] -= self.signa; yield oct
-        oct[self.dimc] += self.signc; yield oct
-        oct[self.dima] += self.signa; yield oct
-        oct[self.dimb] -= self.signb; yield oct
-        oct[self.dimb] -= self.signa; return oct
-
-    def next_hilbert(self):
-        noct = self.next()
-        return noct, hilbert_state(noct)
-
-@cython.boundscheck(False)
-def RecurseOctreeDepthFirstHilbert(int i_i, int j_i, int k_i,
-                            int i_f, int j_f, int k_f,
-                            position curpos, int gi, 
-                            hilbert_state hs,
-                            np.ndarray[np.float64_t, ndim=3] output,
-                            np.ndarray[np.int64_t, ndim=1] refined,
-                            OctreeGridList grids):
-    #cdef int s = curpos
-    cdef int i, i_off, j, j_off, k, k_off, ci, fi
-    cdef int child_i, child_j, child_k
-    cdef OctreeGrid child_grid
-    cdef OctreeGrid grid = grids[gi]
-    cdef np.ndarray[np.int32_t, ndim=3] child_indices = grid.child_indices
-    cdef np.ndarray[np.int32_t, ndim=1] dimensions = grid.dimensions
-    cdef np.ndarray[np.float64_t, ndim=4] fields = grid.fields
-    cdef np.ndarray[np.float64_t, ndim=1] leftedges = grid.left_edges
-    cdef np.float64_t dx = grid.dx[0]
-    cdef np.float64_t child_dx
-    cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
-    cdef np.float64_t cx, cy, cz
-    cdef np.ndarray[np.int32_t, ndim=1] oct
-    cdef hilbert_state hs_child
-    for oct,hs_child in hs:
-        i,j,k = oct[0],oct[1],oct[2]
-        ci = grid.child_indices[i,j,k]
-        if ci == -1:
-            for fi in range(fields.shape[0]):
-                output[curpos.output_pos,fi] = fields[fi,i,j,k]
-            refined[curpos.refined_pos] = 0
-            curpos.output_pos += 1
-            curpos.refined_pos += 1
-        else:
-            refined[curpos.refined_pos] = 1
-            curpos.refined_pos += 1
-            child_grid = grids[ci-grid.offset]
-            child_dx = child_grid.dx[0]
-            child_leftedges = child_grid.left_edges
-            child_i = int((cx - child_leftedges[0])/child_dx)
-            child_j = int((cy - child_leftedges[1])/child_dx)
-            child_k = int((cz - child_leftedges[2])/child_dx)
-            RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
-                                curpos, ci - grid.offset, hs_child, output, refined, grids)
-
-
 cdef class OctreeGrid:
     cdef public object child_indices, fields, left_edges, dimensions, dx
     cdef public int level, offset



https://bitbucket.org/yt_analysis/yt/changeset/60d0eae312ac/
changeset:   60d0eae312ac
branch:      yt
user:        Christopher Moody
date:        2012-05-03 05:22:02
summary:     reverted a lot of the last commit
affected #:  5 files

diff -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b -r 60d0eae312acd0342e594b8665b3ff61c1d12589 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -273,12 +273,11 @@
     st_table.name = "GRIDSTRUCTURE"
     st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
     fdx = fre-fle
-    print 'WARNING: debug limits set on minxyz maxxyz'
     for i,a in enumerate('xyz'):
-        #st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
-        #st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")


diff -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b -r 60d0eae312acd0342e594b8665b3ff61c1d12589 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -138,7 +138,7 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = na.float64
@@ -356,6 +356,7 @@
         
 
         if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
@@ -370,98 +371,142 @@
             self.pf.particle_position,self.pf.particle_velocity = \
                 read_particles(self.pf.file_particle_data,nstars,Nrow)
             pbar.update(1)
-            np = lspecies[-1]
-            if self.pf.dm_only:
-                np = lspecies[0]
-            self.pf.particle_position   = self.pf.particle_position[:np]
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
             #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
             pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[:np]
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
             self.pf.particle_velocity  *= uv #to proper cm/s
             pbar.update(4)
-            self.pf.particle_type       = na.zeros(np,dtype='uint8')
-            self.pf.particle_mass       = na.zeros(np,dtype='float64')
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
             
             dist = self.pf['cm']/self.pf.domain_dimensions[0]
             self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
             self.pf.conversion_factors['particle_species'] = 1.0
             for ax in 'xyz':
                 self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
                 #already in unitary units
                 self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
             self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity_fraction']=1.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
             self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            #self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
             
-            #import pdb; pdb.set_trace()
 
             a,b=0,0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                self.pf.particle_type[a:b] = i #particle type
-                self.pf.particle_mass[a:b]    = m*um #mass in solar masses
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
                 a=b
             pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i of particle type %i'%(j,np))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
             
             self.pf.particle_star_index = i
             
-            if self.pf.file_star_data and (not self.pf.dm_only):
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
                 nstars, mass, imass, tbirth, metallicity1, metallicity2 \
                      = read_stars(self.pf.file_star_data,nstars,Nrow)
                 nstars = nstars[0] 
                 if nstars > 0 :
                     n=min(1e2,len(tbirth))
                     pbar = get_pbar("Stellar Ages        ",n)
-                    self.pf.particle_star_ages  = \
+                    sages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    self.pf.particle_star_ages *= 1.0e9
-                    self.pf.particle_star_ages *= 365*24*3600 #to seconds
-                    self.pf.particle_star_ages = self.pf.current_time-self.pf.particle_star_ages
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
                     pbar.finish()
-                    self.pf.particle_star_metallicity1 = metallicity1
-                    self.pf.particle_star_metallicity2 = metallicity2
-                    self.pf.particle_star_mass_initial = imass*um
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
-            left = self.pf.particle_position.shape[0]
+            done = 0
             init = self.pf.particle_position.shape[0]
-            pbar = get_pbar("Gridding Particles ",init)
-            pos = self.pf.particle_position.copy()
-            pid = na.arange(pos.shape[0]).astype('int64')
+            pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            max_level = min(self.pf.max_level,self.pf.limit_level)
             #if type(self.pf.grid_particles) == type(5):
             #    max_level = min(max_level,self.pf.grid_particles)
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
             
             #grid particles at the finest level, removing them once gridded
-            for level in range(max_level,self.pf.min_level-1,-1):
-                lidx = self.grid_levels[:,0] == level
-                for gi,gidx in enumerate(na.where(lidx)[0]): 
-                    g = grids[gidx]
-                    assert g is not None
-                    le,re = self.grid_left_edge[gidx],self.grid_right_edge[gidx]
-                    idx = na.logical_and(na.all(le < pos,axis=1),
-                                         na.all(re > pos,axis=1))
-                    fidx = pid[idx]
-                    np = na.sum(idx)                     
-                    g.NumberOfParticles = np
-                    grid_particle_count[gidx,0]=np
-                    g.hierarchy.grid_particle_count = grid_particle_count
-                    if np==0: 
-                        g.particle_indices = []
-                        #we have no particles in this grid
-                    else:
-                        g.particle_indices = fidx.astype('int64')
-                        pos = pos[~idx] #throw out gridded particles from future gridding
-                        pid = pid[~idx]
-                    grids[gidx] = g
-                    left -= np
-                    pbar.update(init-left)
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
             pbar.finish()
             
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
+                g.hierarchy.grid_particle_count = grid_particle_count
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
+            
         pbar = get_pbar("Finalizing grids ",len(grids))
         for gi, g in enumerate(grids): 
             self.grids[gi] = g
@@ -559,8 +604,10 @@
                  discover_particles=False,
                  use_particles=True,
                  limit_level=None,
-                 dm_only=False,
-                 grid_particles=False):
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
         
         
@@ -571,8 +618,9 @@
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
         self.file_star_data = file_star_data
-        self.dm_only = dm_only
+        self.only_particle_type = only_particle_type
         self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
         
         if limit_level is None:
             self.limit_level = na.inf
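
Two things change in the hunks above: star birth times come back from b2t in Gyr and are converted to seconds before being subtracted from the current time, and the per-level Python gridding loop is replaced by one amr_utils.assign_particles_to_cell_lists call whose returned index lists are attached to the grids. A minimal sketch of both ideas in plain numpy follows; the names star_ages_in_seconds, assign_particles_to_grids, birth_gyr and current_time_s are illustrative only (not yt API), and the brute-force loop is just the idea behind the Cython helper, not its implementation.

    import numpy as np

    GYR_IN_S = 1.0e9 * 365 * 24 * 3600      # one Gyr in seconds (365-day years)

    def star_ages_in_seconds(birth_gyr, current_time_s):
        """Age of each star particle: current time minus birth time, in seconds."""
        birth_s = np.asarray(birth_gyr, dtype='float64') * GYR_IN_S
        return current_time_s - birth_s

    def assign_particles_to_grids(levels, left_edges, right_edges, pos):
        """For every grid, return the indices of the particles it owns.

        Grids claim particles from the finest level downward, as in the
        removed loop above: once a particle is claimed, no coarser grid
        sees it again.
        """
        unclaimed = np.ones(pos.shape[0], dtype=bool)
        index_lists = [None] * len(levels)
        for gi in np.argsort(levels)[::-1]:              # finest grids first
            le, re = left_edges[gi], right_edges[gi]
            inside = np.all((pos > le) & (pos < re), axis=1) & unclaimed
            index_lists[gi] = np.where(inside)[0].astype('int64')
            unclaimed &= ~inside
        return index_lists

The lengths of these per-grid index lists are what feeds the grid_particle_count bookkeeping in the "Filling grids" loop above.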


diff -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b -r 60d0eae312acd0342e594b8665b3ff61c1d12589 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -26,10 +26,14 @@
 """
 
 art_particle_field_names = [
+'particle_age',
 'particle_index',
 'particle_mass',
+'particle_mass_initial',
 'particle_creation_time',
-'particle_metallicity_fraction',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
 'particle_position_x',
 'particle_position_y',
 'particle_position_z',


diff -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b -r 60d0eae312acd0342e594b8665b3ff61c1d12589 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -58,10 +58,11 @@
         add_field(f, function=lambda a,b: None, take_log=True,
                   validators = [ValidateDataField(f)])
 
-#Fields that are verified to be OK unit-wise:
+#Hydro Fields that are verified to be OK unit-wise:
 #Density
+#Temperature
 
-#Fields that need to be tested:
+#Hydro Fields that need to be tested:
 #TotalEnergy
 #XYZMomentum
 #Pressure
@@ -70,14 +71,23 @@
 #MetalDensity SNII + SNia
 #Potentials
 
-#Derived fields that are OK
-#Temperature
-
-#Derived fields that are untested:
+#Hydro Derived fields that are untested:
 #metallicities
 #xyzvelocity
 
-#Individual definitions for native fields
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
+
 
 def _convertDensity(data):
     return data.convert("Density")
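
The loop near the top of this file registers each raw field name as a stub whose data is supplied by the IO handler. A hedged sketch of registering the renamed particle fields from definitions.py the same way, assuming it runs inside yt/frontends/art/fields.py where add_field and ValidateDataField are already in scope; whether particle fields want take_log or further keywords is an assumption, and the call shape simply mirrors the loop shown above.

    from yt.frontends.art.definitions import art_particle_field_names

    for f in art_particle_field_names:
        # Placeholder definition: the actual values come from the IO handler,
        # so the field function itself supplies nothing.
        add_field(f, function=lambda a, b: None, take_log=False,
                  validators=[ValidateDataField(f)])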


diff -r 33e1017fc58ab5b4aca94d2e627453afc4dd4a3b -r 60d0eae312acd0342e594b8665b3ff61c1d12589 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -122,9 +122,9 @@
 
     def _read_particle_field(self, grid, field):
         #This will be cleaned up later
-        idx = grid.particle_indices
+        idx = na.array(grid.particle_indices)
         if field == 'particle_index':
-            return idx
+            return na.array(idx)
         if field == 'particle_type':
             return grid.pf.particle_type[idx]
         if field == 'particle_position_x':
@@ -142,35 +142,19 @@
         if field == 'particle_velocity_z':
             return grid.pf.particle_velocity[idx][:,2]
         
-        tridx = grid.particle_indices >= grid.pf.particle_star_index
-        sidx  = grid.particle_indices[tridx] - grid.pf.particle_star_index
-        n = grid.particle_indices
-        if field == 'particle_creation_time':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_ages[sidx]
-            return tr
-        if field == 'particle_metallicity_fraction':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx]  = grid.pf.particle_star_metallicity1[sidx]
-                tr[tridx] += grid.pf.particle_star_metallicity2[sidx]
-            return tr
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
         if field == 'particle_metallicity1':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity1[sidx]
-            return tr
+            return grid.pf.particle_metallicity1[idx]
         if field == 'particle_metallicity2':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_metallicity2[sidx]
-            return tr
+            return grid.pf.particle_metallicity2[idx]
         if field == 'particle_mass_initial':
-            tr = na.zeros(grid.NumberOfParticles, dtype='float64')-1.0
-            if sidx.shape[0]>0:
-                tr[tridx] = grid.pf.particle_star_mass_initial[sidx]
-            return tr
+            return grid.pf.particle_mass_initial[idx]
+        
         raise 'Should have matched one of the particle fields...'
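
The rewritten reader drops the tridx/sidx offset bookkeeping: every particle attribute now lives in a single full-length array on the parameter file (padded for non-star particles), so a grid's view of any field is plain fancy indexing with grid.particle_indices. A self-contained illustration of that pattern; the values below are made up for the example.

    import numpy as np

    # Global arrays, one entry per particle; star entries were filled in at
    # the tail of each array, other particles keep a default value.
    particle_age          = np.array([-1.0, -1.0, 3.2e16, 1.1e16])
    particle_metallicity1 = np.array([ 0.0,  0.0, 0.010,  0.004])
    particle_metallicity2 = np.array([ 0.0,  0.0, 0.002,  0.001])

    # Indices of the particles that live in one particular grid.
    grid_particle_indices = np.array([1, 3])

    ages  = particle_age[grid_particle_indices]
    metal = (particle_metallicity1[grid_particle_indices] +
             particle_metallicity2[grid_particle_indices])
    # 'particle_metallicity' in the reader above is exactly this sum.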
 
         



https://bitbucket.org/yt_analysis/yt/changeset/096be56abb5c/
changeset:   096be56abb5c
branch:      yt
user:        Christopher Moody
date:        2012-05-03 06:58:28
summary:     added back Msun to data_structures
affected #:  1 file

diff -r 60d0eae312acd0342e594b8665b3ff61c1d12589 -r 096be56abb5c16a383adcc3d6193f36b1233b9e5 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -411,7 +411,7 @@
             self.pf.conversion_factors['particle_index']=1.0
             self.pf.conversion_factors['particle_type']=1
             self.pf.conversion_factors['particle_age']=1
-            #self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
             
 
             a,b=0,0
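
The restored 'Msun' factor is the grams-to-solar-masses conversion: with M_sun ~= 1.989e33 g, 1/M_sun ~= 5.027e-34, which is the constant in the hunk above. A one-line check (assuming, as that value implies, that the particle masses are carried in grams):

    msun_in_grams = 1.989e33
    grams_to_msun = 1.0 / msun_in_grams
    print(grams_to_msun)   # ~5.0277e-34, i.e. the 5.027e-34 used above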

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving it
because the commit notification service is enabled for the recipient of
this email.


