[yt-svn] commit/yt: 3 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu May 24 14:54:26 PDT 2012


3 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/4f9a48935bff/
changeset:   4f9a48935bff
branch:      yt
user:        samskillman
date:        2012-05-07 22:28:06
summary:     Adding a fallback for datasets with overlapping grids. This may be fragile, so it needs to be tested on more datasets.
affected #:  2 files

diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
         uniquedims[i] = <np.float64_t *> \
                 alloca(2*n_grids * sizeof(np.float64_t))
     my_max = 0
+    best_dim = -1
     for dim in range(3):
         n_unique = 0
         uniques = uniquedims[dim]


diff -r 39cfea952be1f77e33e05b99c781aa3ecc7edc7c -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1012,7 +1012,7 @@
                     # This node belongs to someone else, move along
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
-                
+
             # If we are down to one grid, we are either in it or the parent grid
             if len(current_node.grids) == 1:
                 thisgrid = current_node.grids[0]
@@ -1031,25 +1031,27 @@
                         if len(children) > 0:
                             current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
-                            # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+                            #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
                             continue
 
                     # Else make a leaf node (brick container)
+                    #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
 
             # If we don't have any grids, this volume belongs to the parent        
             if len(current_node.grids) == 0:
+                #print 'This volume does not have a child grid, so it belongs to my parent!'
                 set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
-                # print 'This volume does not have a child grid, so it belongs to my parent!'
                 current_node, previous_node = self.step_depth(current_node, previous_node)
                 continue
 
             # If we've made it this far, time to build a dividing node
-            self._build_dividing_node(current_node)
+            # print 'Building dividing node'
+            # Continue if building failed
+            if self._build_dividing_node(current_node): continue
 
             # Step to the next node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1058,10 +1060,10 @@
         '''
         Given a node, finds all the choices for the next dividing plane.  
         '''
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1071,8 +1073,19 @@
         Makes the current node a dividing node, and initializes the
         left and right children.
         '''
-        
-        data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        best_dim, split, less_ids, greater_ids = \
+            kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+        del data
+
+        # Here we break out if no unique grids were found. In this case, there
+        # are likely overlapping grids, and we assume that the first grid takes
+        # precedence.  This is fragile.
+        if best_dim == -1:
+            current_node.grids = [current_node.grids[0]]
+            return 1
 
         current_node.split_ax = best_dim
         current_node.split_pos = split
@@ -1080,7 +1093,7 @@
         #greater_ids0 = (split < data[:,1])
         #assert(na.all(less_ids0 == less_ids))
         #assert(na.all(greater_ids0 == greater_ids))
-        
+
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
                                              parent_grid=current_node.parent_grid,
@@ -1099,7 +1112,9 @@
         # build to work.  The other deletions are just to save memory.
         del current_node.grids, current_node.parent_grid, current_node.brick,\
             current_node.li, current_node.ri, current_node.dims
-        
+
+        return 0
+
     def traverse(self, back_center, front_center, image):
         r"""Traverses the kd-Tree, casting the partitioned grids from back to
             front.
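
Taken together, the two hunks make _build_dividing_node report failure instead of crashing: it returns 1 when kdtree_get_choices comes back with best_dim == -1, collapses the node to its first grid, and the traversal loop simply revisits the node, which then takes the single-grid leaf path. A toy sketch of that control flow (the class and helper here are hypothetical stand-ins, not yt's API):

    class Node(object):
        def __init__(self, grids):
            self.grids = grids
            self.split_ax = self.split_pos = None

    def build_dividing_node(node, best_dim, split):
        """Return 1 on failure so the caller can re-handle the node."""
        if best_dim == -1:
            # Overlapping grids: assume the first grid takes precedence,
            # mirroring the fallback above.  Fragile by design.
            node.grids = node.grids[:1]
            return 1
        node.split_ax, node.split_pos = best_dim, split
        return 0

    node = Node(grids=["grid_a", "grid_b"])
    if build_dividing_node(node, best_dim=-1, split=None):
        # back to the top of the traversal loop; the node is now
        # single-grid and will be made a leaf on the next pass
        assert node.grids == ["grid_a"]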



https://bitbucket.org/yt_analysis/yt/changeset/a9d337474d85/
changeset:   a9d337474d85
branch:      yt
user:        samskillman
date:        2012-05-07 22:30:27
summary:     Merging
affected #:  36 files

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -438,6 +438,91 @@
         (4. / 3. * math.pi * rho_crit * \
         (self.radial_bins * cm) ** 3.0)
 
+class RockstarHalo(Halo):
+    def __init__(self,halo_list,index,ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, 
+                  X, Y, Z, VX, VY, VZ, JX, JY, JZ, Spin):
+        """Implement the properties reported by Rockstar: ID, Descendant ID,
+           Mvir, Vmax, Vrms, Rvir, Rs, Np, XYZ, VXYZ, JXYZ, and spin.
+           Most defaults are removed since we don't read in which halos
+           the particles belong to.
+        """
+        #we can still use get_sphere!
+        self.ID = ID #from rockstar
+        self.id = index #index in the halo list
+        self.pf = halo_list.pf
+
+        self.DescID = DescID
+        self.Mvir = Mvir
+        self.Vmax = Vmax
+        self.Vrms = Vrms
+        self.Rvir = Rvir
+        self.Rs   = Rs
+        self.Np   = Np
+        self.X    = X
+        self.Y    = Y
+        self.Z    = Z
+        self.VX   = VX
+        self.VY   = VY
+        self.VZ   = VZ
+        self.JX   = JX
+        self.JY   = JY
+        self.JZ   = JZ
+        self.Spin = Spin
+
+        #Halo.__init__(self,halo_list,index,
+        self.size=Np 
+        self.CoM=na.array([X,Y,Z])
+        self.max_dens_point=-1
+        self.group_total_mass=-1
+        self.max_radius=Rvir
+        self.bulk_vel=na.array([VX,VY,VZ])*1e5
+        self.rms_vel=-1
+        self.group_total_mass = -1 #not implemented 
+    
+    def maximum_density(self):
+        r"""Not implemented."""
+        return -1
+
+    def maximum_density_location(self):
+        r"""Not implemented."""
+        return self.center_of_mass()
+
+    def total_mass(self):
+        r"""Not implemented."""
+        return -1
+
+    def get_size(self):
+        r"""Return the number of particles belonging to the halo."""
+        return self.Np
+
+    def write_particle_list(self,handle):
+        r"""Not implemented."""
+        return -1
+
+    def virial_mass(self):
+        r"""Virial mass in Msun/h"""
+        return self.Mvir
+
+    def virial_radius(self):
+        r"""Virial radius in Mpc/h comoving"""
+        return self.Rvir
+
+    def virial_bin(self):
+        r"""Not implemented"""
+        return -1
+
+    def virial_density(self):
+        r"""Not implemented """
+        return -1
+
+    def virial_info(self):
+        r"""Not implemented"""
+        return -1 
+
+    def __getitem__(self,key):
+        r"""Not implemented"""
+        return None
+
 
 class HOPHalo(Halo):
     _name = "HOPHalo"
@@ -903,6 +988,97 @@
             f.flush()
         f.close()
 
+class RockstarHaloList(HaloList):
+    #because we don't yet know halo-particle affiliations
+    #most of the halo list methods are not implemented
+    #furthermore, Rockstar only accepts DM particles of
+    #a fixed mass, so we don't allow stars at all
+    #Still, we inherit from HaloList because in the future
+    #we might implement halo-particle affiliations
+    def __init__(self,pf,out_list):
+        mylog.info("Initializing Rockstar List")
+        self._data_source = None
+        self._groups = []
+        self._max_dens = -1
+        self.pf = pf
+        self.out_list = out_list
+        mylog.info("Parsing Rockstar halo list")
+        self._parse_output(out_list)
+        mylog.info("Finished %s"%out_list)
+
+    def _run_finder(self):
+        pass
+
+    def __obtain_particles(self):
+        pass
+
+    def _get_dm_indices(self):
+        pass
+
+    def _parse_output(self,out_list=None):
+        """
+        Read the out_*.list text file produced
+        by Rockstar into memory."""
+        
+        pf = self.pf
+
+        if out_list is None:
+            out_list = self.out_list
+
+        lines = open(out_list).readlines()
+        names = []
+        formats = []
+        
+        #find the variable names from the first defining line
+        names = lines[0].replace('#','').split(' ')
+        for j,line in enumerate(lines):
+            if not line.startswith('#'): break
+
+        #find out the table datatypes by evaluating the first data line
+        splits = filter(lambda x: len(x.strip()) > 0 ,line.split(' '))
+        for num in splits:
+            if 'nan' not in num:
+                formats += na.array(eval(num)).dtype,
+            else:
+                formats += na.dtype('float'),
+        assert len(formats) == len(names)
+
+        #Jc = 1.98892e33/pf['mpchcm']*1e5
+        Jc = 1.0
+        conv = dict(X=1.0/pf['mpchcm'],
+                    Y=1.0/pf['mpchcm'],
+                    Z=1.0/pf['mpchcm'], #to unitary
+                    VX=1e0,VY=1e0,VZ=1e0, #to km/s
+                    Mvir=1.0, #Msun/h
+                    Vmax=1e0,Vrms=1e0,
+                    Rvir=1.0/pf['kpchcm'],
+                    Rs=1.0/pf['kpchcm'],
+                    JX=Jc,JY=Jc,JZ=Jc)
+        dtype = {'names':names,'formats':formats}
+        halo_table = na.loadtxt(out_list,skiprows=j-1,dtype=dtype,comments='#')            
+        #convert position units  
+        for name in names:
+            halo_table[name]=halo_table[name]*conv.get(name,1)
+        
+        for k,row in enumerate(halo_table):
+            args = tuple([val for val in row])
+            halo = RockstarHalo(self,k,*args)
+            self._groups.append(halo)
+    
+
+    #len is ok
+    #iter is OK
+    #getitem is ok
+    #nn is ok I think
+    #nn2d is ok I think
+
+    def write_out(self):
+        pass
+    def write_particle_list(self):
+        pass
+    
+
+    
 
 class HOPHaloList(HaloList):
 
@@ -1245,7 +1421,7 @@
         while index < self.group_count:
             self._groups[index] = self._halo_class(self, index, \
                 size=self.group_sizes[index], CoM=self.CoM[index], \
-                max_dens_point=self.max_dens_point[i], \
+                max_dens_point=self.max_dens_point[index], \
                 group_total_mass=self.Tot_M[index],
                 max_radius=self.max_radius[index],
                 bulk_vel=self.bulk_vel[index], tasks=self.halo_taskmap[index],
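
The _parse_output method above infers a structured dtype from the file itself: column names come from the leading '#' header line, per-column types are probed from the first data row, and one na.loadtxt call reads the table. A condensed sketch of that approach (using an int() probe where the original uses eval(); the out_*.list layout is as assumed above):

    import numpy as na

    def load_rockstar_table(fname):
        lines = open(fname).readlines()
        names = lines[0].lstrip('#').split()
        for j, line in enumerate(lines):
            if not line.startswith('#'):
                break                      # first data row
        formats = []
        for tok in line.split():
            try:
                formats.append(na.array(int(tok)).dtype)   # integer column
            except ValueError:
                formats.append(na.dtype('float'))          # float (or 'nan')
        assert len(formats) == len(names)
        dtype = {'names': names, 'formats': formats}
        return na.loadtxt(fname, dtype=dtype, comments='#')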


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -25,9 +25,11 @@
 
 from yt.mods import *
 from os import environ
+from os import mkdir
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool, Communicator
 
+from yt.analysis_modules.halo_finding.halo_objects import * #Halos & HaloLists
 import rockstar_interface
 import socket
 import time
@@ -45,14 +47,28 @@
         return data_source
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
-    def __init__(self, pf, num_readers = 0, num_writers = 0):
+    def __init__(self, pf, num_readers = 1, num_writers = None, 
+            outbase=None,particle_mass=-1.0,overwrite=False,
+            left_edge = None, right_edge = None):
         ParallelAnalysisInterface.__init__(self)
         # No subvolume support
         self.pf = pf
         self.hierarchy = pf.h
+        if num_writers is None:
+            num_writers = self.comm.size - num_readers -1
         self.num_readers = num_readers
         self.num_writers = num_writers
+        self.particle_mass = particle_mass 
+        self.overwrite = overwrite
+        if left_edge is None:
+            left_edge = pf.domain_left_edge
+        if right_edge is None:
+            right_edge = pf.domain_right_edge
+        self.le = left_edge
+        self.re = right_edge
         if self.num_readers + self.num_writers + 1 != self.comm.size:
+            print '%i readers + %i writers != %i mpi'%\
+                    (self.num_readers,self.num_writers,self.comm.size)
             raise RuntimeError
         self.center = (pf.domain_right_edge + pf.domain_left_edge)/2.0
         data_source = None
@@ -64,6 +80,9 @@
             for wg in self.pool.workgroups:
                 if self.comm.rank in wg.ranks: self.workgroup = wg
         data_source = self.pf.h.all_data()
+        if outbase is None:
+            outbase = str(self.pf)+'_rockstar'
+        self.outbase = outbase        
         self.handler = rockstar_interface.RockstarInterface(
                 self.pf, data_source)
 
@@ -80,16 +99,29 @@
             (server_address, port))
         self.port = str(self.port)
 
-    def run(self, block_ratio = 1):
+    def run(self, block_ratio = 1,**kwargs):
+        """
+        
+        """
         if block_ratio != 1:
             raise NotImplementedError
         self._get_hosts()
+        #because rockstar *always* writes to exactly the same
+        #out_0.list filename we make a directory for it
+        #to sit inside so it doesn't get accidentally
+        #overwritten 
+        if self.workgroup.name == "server":
+            if not os.path.exists(self.outbase):
+                os.mkdir(self.outbase)
         self.handler.setup_rockstar(self.server_address, self.port,
                     parallel = self.comm.size > 1,
                     num_readers = self.num_readers,
                     num_writers = self.num_writers,
                     writing_port = -1,
-                    block_ratio = block_ratio)
+                    block_ratio = block_ratio,
+                    outbase = self.outbase,
+                    particle_mass = float(self.particle_mass),
+                    **kwargs)
         if self.comm.size == 1:
             self.handler.call_rockstar()
         else:
@@ -97,9 +129,17 @@
             if self.workgroup.name == "server":
                 self.handler.start_server()
             elif self.workgroup.name == "readers":
-                #time.sleep(0.5 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.1 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
             elif self.workgroup.name == "writers":
-                #time.sleep(1.0 + self.workgroup.comm.rank/10.0)
+                time.sleep(0.2 + self.workgroup.comm.rank/10.0)
                 self.handler.start_client()
         self.comm.barrier()
+        #quickly rename the out_0.list 
+    
+    def halo_list(self,file_name='out_0.list'):
+        """
+        Reads in the out_0.list file and generates RockstarHaloList
+        and RockstarHalo objects.
+        """
+        return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -241,7 +241,7 @@
 cdef RockstarInterface rh
 cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
     cdef int i, fi, npart, tnpart
-    cdef np.float64_t conv[6], left_edge[6]
+    cdef np.float64_t conv[6], left_edge[6], right_edge[3]
     dd = rh.data_source
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
@@ -257,9 +257,12 @@
     #print "Loading indices: size = ", tnpart
     conv[0] = conv[1] = conv[2] = rh.pf["mpchcm"]
     conv[3] = conv[4] = conv[5] = 1e-5
-    left_edge[0] = rh.pf.domain_left_edge[0]
-    left_edge[1] = rh.pf.domain_left_edge[1]
-    left_edge[2] = rh.pf.domain_left_edge[2]
+    left_edge[0] = rh.le[0]
+    left_edge[1] = rh.le[1]
+    left_edge[2] = rh.le[2]
+    right_edge[0] = rh.re[0]
+    right_edge[1] = rh.re[1]
+    right_edge[2] = rh.re[2]
     left_edge[3] = left_edge[4] = left_edge[5] = 0.0
     pi = 0
     for g in grids:
@@ -274,11 +277,15 @@
                       "particle_velocity_z"]:
             arr = dd._get_data_from_grid(g, field).astype("float64")
             for i in range(npart):
+                if fi<3: 
+                    if  left_edge[i] > arr[i]: continue
+                    if right_edge[i] < arr[i]: continue
                 p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
             fi += 1
         pi += npart
     num_p[0] = tnpart
-    print "TOTAL", block, pi, tnpart, len(grids)
+    print "Block #%i | Particles %i | Grids %i"%\
+            ( block, pi, len(grids))
 
 cdef class RockstarInterface:
 
@@ -296,12 +303,14 @@
                        np.float64_t particle_mass = -1.0,
                        int parallel = False, int num_readers = 1,
                        int num_writers = 1,
-                       int writing_port = -1, int block_ratio = 1):
+                       int writing_port = -1, int block_ratio = 1,
+                       int periodic = 1, int min_halo_size = 20,
+                       char *outbase = 'None'):
         global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
         global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
         global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
         global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-        global rh
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
         if parallel:
             PARALLEL_IO = 1
             PARALLEL_IO_SERVER_ADDRESS = server_address
@@ -324,12 +333,18 @@
         h0 = self.pf.hubble_constant
         Ol = self.pf.omega_lambda
         Om = self.pf.omega_matter
+        SCALE_NOW = 1.0/(self.pf.current_redshift+1.0)
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. since we can't change the output filenames
+            #workaround is to make a new directory
+            print 'using %s as outbase'%outbase
+            OUTBASE = outbase 
 
         if particle_mass < 0:
             print "Assuming single-mass particle."
             particle_mass = self.pf.h.grids[0]["ParticleMassMsun"][0] / h0
         PARTICLE_MASS = particle_mass
-        PERIODIC = 1
+        PERIODIC = periodic
         BOX_SIZE = (self.pf.domain_right_edge[0] -
                     self.pf.domain_left_edge[0]) * self.pf['mpchcm']
         setup_config()
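
The reader hunk above clips particles to the requested sub-volume and shifts positions to the box origin before scaling into the comoving Mpc/h units Rockstar expects. A vectorized NumPy sketch of that intent (the Cython version works per particle and per axis):

    import numpy as na

    def clip_and_convert(pos, left_edge, right_edge, mpchcm):
        # pos: (n, 3) positions in code units; edges: (3,) arrays.
        keep = na.all((pos >= left_edge) & (pos <= right_edge), axis=1)
        return (pos[keep] - left_edge) * mpchcm   # -> comoving Mpc/h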


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/analysis_modules/star_analysis/sfr_spectrum.py
--- a/yt/analysis_modules/star_analysis/sfr_spectrum.py
+++ b/yt/analysis_modules/star_analysis/sfr_spectrum.py
@@ -246,7 +246,7 @@
 """
 
 class SpectrumBuilder(object):
-    def __init__(self, pf, bcdir="", model="chabrier"):
+    def __init__(self, pf, bcdir="", model="chabrier", time_now=None):
         r"""Initialize the data to build a summed flux spectrum for a
         collection of stars using the models of Bruzual & Charlot (2003).
         This function loads the necessary data tables into memory and
@@ -280,8 +280,12 @@
              OmegaLambdaNow = self._pf.omega_lambda,
              InitialRedshift = self._pf['CosmologyInitialRedshift'])
         # Find the time right now.
-        self.time_now = self.cosm.ComputeTimeFromRedshift(
-            self._pf.current_redshift) # seconds
+        
+        if time_now is None:
+            self.time_now = self.cosm.ComputeTimeFromRedshift(
+                self._pf.current_redshift) # seconds
+        else:
+            self.time_now = time_now
         
         # Read the tables.
         self.read_bclib()
@@ -404,7 +408,8 @@
         self.star_metal = self.star_metal[sort]
         
         # Interpolate the flux for each star, adding to the total by weight.
-        for star in itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass):
+        pbar = get_pbar("Calculating fluxes",len(self.star_mass))
+        for i,star in enumerate(itertools.izip(Mname, Aindex, ratio1, ratio2, self.star_mass)):
             # Pick the right age bin for the right flux array.
             flux = self.flux[star[0]][star[1],:]
             # Get the one just before the one above.
@@ -413,6 +418,9 @@
             int_flux = star[3] * na.log10(flux_1) + star[2] * na.log10(flux)
             # Add this flux to the total, weighted by mass.
             self.final_spec += na.power(10., int_flux) * star[4]
+            pbar.update(i)
+        pbar.finish()    
+        
         # Normalize.
         self.total_mass = na.sum(self.star_mass)
         self.avg_mass = na.mean(self.star_mass)
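
With the new keyword, callers can pin the "now" used for stellar ages instead of deriving it from current_redshift, e.g. when post-processing a rewound or non-cosmological snapshot. A hedged usage sketch (the dataset name, table directory, and time value are placeholders):

    from yt.mods import *
    from yt.analysis_modules.star_analysis.sfr_spectrum import \
        SpectrumBuilder

    pf = load("my_dataset")              # hypothetical dataset
    t_now = 4.3e17                       # seconds; an assumed epoch
    sb = SpectrumBuilder(pf, bcdir="bc303", model="chabrier",
                         time_now=t_now) # overrides the redshift-derived time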


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -1,6 +1,8 @@
 """
 Code to export from yt to Sunrise
 
+Author: Chris Moody <juxtaposicion at gmail.com>
+Affiliation: UCSC
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
 Homepage: http://yt-project.org/
@@ -26,8 +28,7 @@
 
 try:
     import pyfits
-except ImportError:
-    # We silently fail here
+except ImportError: 
     pass
 
 import time
@@ -36,9 +37,11 @@
 from yt.funcs import *
 import yt.utilities.amr_utils as amr_utils
 from yt.data_objects.universal_fields import add_field
+from yt.mods import *
 
-def export_to_sunrise(pf, fn, write_particles = True, subregion_bounds = None,
-    particle_mass=None, particle_pos=None, particle_age=None, particle_metal=None):
+debug = True
+
+def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -54,18 +57,13 @@
     pf : `StaticOutput`
         The parameter file to convert.
     fn : string
-        The filename of the FITS file.
-    write_particles : bool or pyfits.ColDefs instance, default is True
-        Whether to write out the star particles or not.  If this variable is an
-        instance of pyfits.ColDefs, then this will be used to create a pyfits
-        table named PARTICLEDATA which will be appended.  If this is true, the
-        routine will attempt to create this table from hand.
-    subregion_bounds : list of tuples
-        This is a list of tuples describing the subregion of the top grid to
-        export.  This will only work when only *one* root grid exists.
-        It is of the format:
-        [ (start_index_x, nx), (start_index_y, ny), (start_index_z, nz) ]
-        where nx, ny, nz are the number of cells to extract.
+        The filename of the output FITS file.
+    dle : The domain left edge to extract
+    dre : The domain right edge to extract
+        Array format is (nx,ny,nz) where each element is floating point
+        in unitary position units where 0 is leftmost edge and 1
+        the rightmost. 
+        
 
     Notes
     -----
@@ -74,144 +72,250 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-    # Now particles
-    #  output_file->addTable("PARTICLEDATA" , 0);
-    # addKey("timeunit", time_unit, "Time unit is "+time_unit);
-    # addKey("tempunit", temp_unit, "Temperature unit is "+temp_unit);
-    # 
-    # addColumn(Tint, "ID", 1, "" );
-    # addColumn(Tdouble, "position", 3, length_unit );
-    # addColumn(Tdouble, "stellar_radius", 1, length_unit );
-    # addColumn(Tdouble, "L_bol", 1, L_bol_unit );
-    # addColumn(Tdouble, "mass_stars", 1, mass_unit );
-    # addColumn(Tdouble, "mass_stellar_metals", 1, mass_unit );
-    # addColumn(Tdouble, "age_m", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tdouble, "age_l", 1, time_unit+"*"+mass_unit );
-    # addColumn(Tfloat, "L_lambda", L_lambda.columns(), 
-    #			L_lambda_unit );
-    #	output->addKey("logflux", true, "Column L_lambda values are log (L_lambda)");
+    
+    #we must round the dle,dre to the nearest root grid cells
+    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
+    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
+    mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
-    col_list = []
-    if subregion_bounds == None:    
-        DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
-        DX = pf.domain_dimensions
-    else:
-        DLE, DX = zip(*subregion_bounds)
-        DLE, DX = na.array(DLE), na.array(DX)
-        DRE = DLE + DX
-    reg = pf.h.region((DRE+DLE)/2.0, DLE, DRE)
 
-    if write_particles is True:
-        pi = reg["particle_type"] == 2
-        pos = na.array([reg["particle_position_%s" % ax][pi]*pf['kpc']
-                            for ax in 'xyz']).transpose()
-        vel = na.array([reg["particle_velocity_%s" % ax][pi]
-                            for ax in 'xyz']).transpose()
-        # Velocity is cm/s, we want it to be kpc/yr
-        vel *= (pf["kpc"]/pf["cm"]) / (365*24*3400.)
-        age = pf["years"] * (pf.current_time - reg["creation_time"][pi])
-        creation_time = reg["creation_time"][pi] * pf["years"]
+    #Create the refinement hilbert octree in GRIDSTRUCTURE
+    #For every leaf (not-refined) cell we have a column in GRIDDATA
+    #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
+    #since the octree always starts with one cell, and our 0-level mesh
+    #may have many cells, we must create the octree region sitting 
+    #on top of the first mesh by providing a negative level
+    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
 
-        initial_mass = reg["InitialMassCenOstriker"][pi]
-        current_mass = reg["ParticleMassMsun"][pi]
-        col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
-        col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
-        col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
-        col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
-        col_list.append(pyfits.Column("formation_time", format="D", array=creation_time, unit="yr"))
-        col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-        col_list.append(pyfits.Column("age_m", format="D", array=age))
-        col_list.append(pyfits.Column("age_l", format="D", array=age))
-        #For particles, Sunrise takes 
-        #the dimensionless metallicity, not the mass of the metals
-        col_list.append(pyfits.Column("metallicity", format="D",
-            array=reg["metallicity_fraction"][pi],unit="Msun")) # wrong?
-        col_list.append(pyfits.Column("L_bol", format="D",
-            array=na.zeros(particle_mass.size)))
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
 
-        cols = pyfits.ColDefs(col_list)
-        pd_table = pyfits.new_table(cols)
-        pd_table.name = "PARTICLEDATA"
-    elif isinstance(write_particles, pyfits.ColDefs):
-        pd_table = pyfits.new_table(write_particles)
-        pd_table.name = "PARTICLEDATA"
-        write_particles = True
+    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
 
-    def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
-        
-    def _convMetalMass(data):
-        return 1.0/1.989e33
-        
-    add_field("MetalMass", function=_MetalMass,
-              convert_function=_convMetalMass)
+def prepare_octree(pf,ile,start_level=0):
+    add_fields() #add the metal mass field that sunrise wants
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    
+    #gather the field data from octs
+    pbar = get_pbar("Retrieving field data",len(fields))
+    field_data = [] 
+    dd = pf.h.all_data()
+    for fi,f in enumerate(fields):
+        field_data += dd[f],
+        pbar.update(fi)
+    pbar.finish()
+    del field_data
 
-    output, refined = generate_flat_octree(pf,
-            ["CellMassMsun","TemperatureTimesCellMassMsun", "MetalMass",
-             "CellVolumeCode"], subregion_bounds = subregion_bounds)
-    cvcgs = output["CellVolumeCode"].astype('float64') * pf['cm']**3.0
+    #first we cast every cell as an oct
+    #ngrids = na.max([g.id for g in pf._grids])
+    grids = {}
+    levels_all = {} 
+    levels_finest = {}
+    for l in range(100): 
+        levels_finest[l]=0
+        levels_all[l]=0
+    pbar = get_pbar("Initializing octs ",len(pf.h.grids))
+    for gi,g in enumerate(pf.h.grids):
+        ff = na.array([g[f] for f in fields])
+        og = amr_utils.OctreeGrid(
+                g.child_index_mask.astype('int32'),
+                ff.astype("float64"),
+                g.LeftEdge.astype("float64"),
+                g.ActiveDimensions.astype("int32"),
+                na.ones(1,dtype="float64")*g.dds[0],
+                g.Level,
+                g.id)
+        grids[g.id] = og
+        #how many refinement cells will we have?
+        #measure the 'volume' of each mesh, but many
+        #cells do not exist. an overestimate
+        levels_all[g.Level] += g.ActiveDimensions.prod()
+        #how many leaves do we have?
+        #this overestimates. a child of -1 means no child,
+        #but that cell may still be expanded on a submesh because
+        #(at least in ART) the meshes are inefficient.
+        g.clear_data()
+        pbar.update(gi)
+    pbar.finish()
+    
+    #create the octree grid list
+    oct_list =  amr_utils.OctreeGridList(grids)
+    
+    #initialize arrays to be passed to the recursion algo
+    o_length = na.sum(levels_all.values())
+    r_length = na.sum(levels_all.values())
+    output   = na.zeros((o_length,len(fields)), dtype='float64')
+    refined  = na.zeros(r_length, dtype='int32')
+    levels   = na.zeros(r_length, dtype='int32')
+    pos = position()
+    hs       = hilbert_state()
+    refined[0] = 1 #introduce the first cell as divided
+    levels[0]  = start_level-1 #introduce the first cell as divided
+    pos.refined_pos += 1
+    RecurseOctreeDepthFirstHilbert(
+            ile[0],ile[1],ile[2],
+            pos,0, hs, 
+            output,refined,levels,
+            grids,
+            start_level,
+            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
+            physical_center = ile,
+            #physical_width  = pf['kpc'])
+            physical_width  = pf.domain_dimensions)
+    #by the time we get here the 'current' position is actually 
+    #for the next spot, so we're off by 1
+    print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    output  = output[:pos.output_pos]
+    refined = refined[:pos.refined_pos] 
+    levels = levels[:pos.refined_pos] 
+    return output,refined
 
-    # First the structure
+def print_row(level,ple,pre,pw,pc,hs):
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
+    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
+    print hs.dim, hs.sgn
+
+def print_child(level,grid,i,j,k,pw,pc):
+    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
+    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
+    print level, 
+    print '%1.5f %1.5f %1.5f '%tuple(ple),
+    print '%1.5f %1.5f %1.5f '%tuple(pre)
+
+def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
+                            curpos, gi, 
+                            hs,
+                            output,
+                            refined,
+                            levels,
+                            grids,
+                            level,
+                            physical_center=None,
+                            physical_width=None,
+                            printr=False):
+    grid = grids[gi]
+    m = 2**(-level-1) if level < 0 else 1
+    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
+    pre = ple+grid.dx*m
+    if printr:
+        print_row(level,ple,pre,physical_width,physical_center,hs)
+
+    #here we go over the 8 octants
+    #in general however, a mesh cell on this level
+    #may have more than 8 children on the next level
+    #so we find the floating-point center (cx,cy,cz) of each child cell
+    # and from that find the child cell indices
+    for iv, (vertex,hs_child) in enumerate(hs):
+        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
+        #negative level indicates that we need to build a super-octree
+        if level < 0: 
+            #print ' '
+            #we are not on the root grid yet, but this is 
+            #how many equivalent root grid cells we would have
+            #level -1 means our oct grid's children are the same size
+            #as the root grid (hence the -level-1)
+            dx = 2**(-level-1) #this is the child width 
+            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
+            #we always refine the negative levels
+            refined[curpos.refined_pos] = 1
+            levels[curpos.refined_pos] = level
+            curpos.refined_pos += 1
+            RecurseOctreeDepthFirstHilbert(i, j, k,
+                                curpos, 0, hs_child, output, refined, levels, grids,
+                                level+1,
+                                physical_center=physical_center,
+                                physical_width=physical_width,)
+        else:
+            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
+            ci = grid.child_indices[i,j,k] #is this oct subdivided?
+            if ci == -1:
+                for fi in range(grid.fields.shape[0]):
+                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
+                refined[curpos.refined_pos] = 0
+                levels[curpos.refined_pos] = level
+                curpos.output_pos += 1 #position updated after write
+                curpos.refined_pos += 1
+                if printr:
+                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
+            else:
+                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
+                cy = (grid.left_edges[1] + j*grid.dx[0])
+                cz = (grid.left_edges[2] + k*grid.dx[0])
+                refined[curpos.refined_pos] = 1
+                levels[curpos.refined_pos] = level
+                curpos.refined_pos += 1 #position updated after write
+                child_grid = grids[ci]
+                child_dx = child_grid.dx[0]
+                child_leftedges = child_grid.left_edges
+                child_i = int((cx - child_leftedges[0])/child_dx)
+                child_j = int((cy - child_leftedges[1])/child_dx)
+                child_k = int((cz - child_leftedges[2])/child_dx)
+                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
+                                    curpos, ci, hs_child, output, refined, levels, grids,
+                                    level+1,
+                                    physical_center=physical_center,
+                                    physical_width=physical_width)
+
+def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+
+    #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
     st_table = pyfits.new_table(cols)
     st_table.name = "GRIDSTRUCTURE"
+    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
+    fdx = fre-fle
+    for i,a in enumerate('xyz'):
+        st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
+        st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
+        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
+        #st_table.header.update("max%s" % a, 2) #
+        st_table.header.update("n%s" % a, fdx[i])
+        st_table.header.update("subdiv%s" % a, 2)
+    st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
 
-    # Now we update our table with units
-    # ("lengthunit", length_unit, "Length unit for grid");
-    # ("minx", getmin () [0], length_unit_comment);
-    # ("miny", getmin () [1], length_unit_comment);
-    # ("minz", getmin () [2], length_unit_comment);
-    # ("maxx", getmax () [0], length_unit_comment);
-    # ("maxy", getmax () [1], length_unit_comment);
-    # ("maxz", getmax () [2], length_unit_comment);
-    # ("nx", g_.getn () [0], "");
-    # ("ny", g_.getn () [1], "");
-    # ("nz", g_.getn () [2], "");
-    # ("subdivtp", subdivtp, "Type of grid subdivision");
-    # ("subdivx", sub_div[0], "");
-    # ("subdivy", sub_div[1], "");
-    # ("subdivz", sub_div[2], "");
-
-    st_table.header.update("hierarch lengthunit", "kpc", comment="Length unit for grid")
-    for i,a in enumerate('xyz'):
-        st_table.header.update("min%s" % a, DLE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("max%s" % a, DRE[i] * pf['kpc']/pf.domain_dimensions[i])
-        st_table.header.update("n%s" % a, DX[i])
-        st_table.header.update("subdiv%s" % a, 2)
-    st_table.header.update("subdivtp", "UNIFORM", "Type of grid subdivision")
-
-    # Now grid data itself
-    # ("M_g_tot", total_quantities.m_g(), "[" + mass_unit +
-    #         "] Total gas mass in all cells");
-    # ("SFR_tot", total_quantities.SFR, "[" + SFR_unit +
-    #         "] Total star formation rate of all cells");
-    # ("timeunit", time_unit, "Time unit is "+time_unit);
-    # ("tempunit", temp_unit, "Temperature unit is "+time_unit);
-
-    # (Tdouble, "mass_gas", 1, mass_unit );
-    # (Tdouble, "SFR", 1, SFR_unit );
-    # (Tdouble, "mass_metals", 1, mass_unit );
-    # (Tdouble, "gas_temp_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "gas_teff_m", 1, temp_unit+"*"+mass_unit );
-    # (Tdouble, "cell_volume", 1, length_unit + "^3" );
-
+    #now the hydro grid data
+    fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
+              "MetalMass","CellVolumeCode"]
+    fd = {}
+    for i,f in enumerate(fields): 
+        fd[f]=output[:,i]
+    del output
     col_list = []
-    size = output["CellMassMsun"].size
-    tm = output["CellMassMsun"].sum()
+    size = fd["CellMassMsun"].size
+    tm = fd["CellMassMsun"].sum()
     col_list.append(pyfits.Column("mass_gas", format='D',
-                    array=output.pop('CellMassMsun'), unit="Msun"))
+                    array=fd['CellMassMsun'], unit="Msun"))
     col_list.append(pyfits.Column("mass_metals", format='D',
-                    array=output.pop('MetalMass'), unit="Msun"))
+                    array=fd['MetalMass'], unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stars", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("mass_stellar_metals", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="Msun"))
+    # col_list.append(pyfits.Column("age_m", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("age_l", format='D',
+    #                 array=na.zeros(size,dtype='D'),unit="yr*Msun"))
+    # col_list.append(pyfits.Column("L_bol", format='D',
+    #                 array=na.zeros(size,dtype='D')))
+    # col_list.append(pyfits.Column("L_lambda", format='D',
+    #                 array=na.zeros(size,dtype='D')))
     # The units for gas_temp are really K*Msun. For older Sunrise versions
     # you must set the unit to just K  
     col_list.append(pyfits.Column("gas_temp_m", format='D',
-                    array=output['TemperatureTimesCellMassMsun'], unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("gas_teff_m", format='D',
-                    array=output.pop('TemperatureTimesCellMassMsun'), unit="K*Msun"))
+                    array=fd['TemperatureTimesCellMassMsun'], unit="K*Msun"))
     col_list.append(pyfits.Column("cell_volume", format='D',
-                    array=output.pop('CellVolumeCode').astype('float64')*pf['kpc']**3.0,
+                    array=fd['CellVolumeCode'].astype('float64')*pf['kpc']**3.0,
                     unit="kpc^3"))
     col_list.append(pyfits.Column("SFR", format='D',
                     array=na.zeros(size, dtype='D')))
@@ -229,101 +333,216 @@
     md_table.header.update("snaptime", pf.current_time*pf['years'])
     md_table.name = "YT"
 
-    hls = [pyfits.PrimaryHDU(), st_table, mg_table,md_table]
-    if write_particles: hls.append(pd_table)
+    phdu = pyfits.PrimaryHDU()
+    phdu.header.update('nbodycod','yt')
+    hls = [phdu, st_table, mg_table,md_table]
+    hls.append(particle_data)
     hdus = pyfits.HDUList(hls)
     hdus.writeto(fn, clobber=True)
 
-def initialize_octree_list(pf, fields):
-    o_length = r_length = 0
-    grids = []
-    levels_finest, levels_all = defaultdict(lambda: 0), defaultdict(lambda: 0)
-    for g in pf.h.grids:
-        ff = na.array([g[f] for f in fields])
-        grids.append(amr_utils.OctreeGrid(
-                        g.child_index_mask.astype('int32'),
-                        ff.astype("float64"),
-                        g.LeftEdge.astype('float64'),
-                        g.ActiveDimensions.astype('int32'),
-                        na.ones(1,dtype='float64') * g.dds[0], g.Level,
-                        g._id_offset))
-        levels_all[g.Level] += g.ActiveDimensions.prod()
-        levels_finest[g.Level] += g.child_mask.ravel().sum()
-        g.clear_data()
-    ogl = amr_utils.OctreeGridList(grids)
-    return ogl, levels_finest, levels_all
+def nearest_power(x):
+    #round up to the next power of 2
+    x-=1
+    x |= x >> 1
+    x |= x >> 2 
+    x |= x >> 4
+    x |= x >> 8
+    x |= x >> 16
+    x+=1 
+    return x
 
-def generate_flat_octree(pf, fields, subregion_bounds = None):
-    """
-    Generates two arrays, one of the actual values in a depth-first flat
-    octree array, and the other of the values describing the refinement.
-    This allows for export to a code that understands this.  *field* is the
-    field used in the data array.
-    """
-    fields = ensure_list(fields)
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((o_length,len(fields)), dtype='float64')
-    refined = na.zeros(r_length, dtype='int32')
-    position = amr_utils.position()
-    if subregion_bounds is None:
-        sx, sy, sz = 0, 0, 0
-        nx, ny, nz = ogl[0].dimensions
-    else:
-        ss, ns = zip(*subregion_bounds)
-        sx, sy, sz = ss
-        nx, ny, nz = ns
-    print "Running from %s for %s cells" % (
-            (sx,sy,sz), (nx,ny,nz))
-    t1 = time.time()
-    amr_utils.RecurseOctreeDepthFirst(
-               sx, sy, sz, nx, ny, nz,
-               position, 0,
-               output, refined, ogl)
-    t2 = time.time()
-    print "Finished.  Took %0.3e seconds." % (t2-t1)
-    dd = {}
-    for i, field in enumerate(fields):
-        dd[field] = output[:position.output_pos,i]
-    return dd, refined[:position.refined_pos]
+def round_nearest_edge(pf,dle,dre):
+    dds = pf.domain_dimensions
+    ile = na.floor(dle*dds).astype('int')
+    ire = na.ceil(dre*dds).astype('int') 
+    
+    #this is the number of cells the super octree needs to expand to
+    #must round up to the next power of 2
+    width = na.max(ire-ile)
+    width = nearest_power(width)
+    
+    maxlevel = na.rint(na.log2(width)).astype('int')
+    return ile,ire,maxlevel
 
-def generate_levels_octree(pf, fields):
-    fields = ensure_list(fields) + ["Ones", "Ones"]
-    ogl, levels_finest, levels_all = initialize_octree_list(pf, fields)
-    o_length = na.sum(levels_finest.values())
-    r_length = na.sum(levels_all.values())
-    output = na.zeros((r_length,len(fields)), dtype='float64')
-    genealogy = na.zeros((r_length, 3), dtype='int64') - 1 # init to -1
-    corners = na.zeros((r_length, 3), dtype='float64')
-    position = na.add.accumulate(
-                na.array([0] + [levels_all[v] for v in
-                    sorted(levels_all)[:-1]], dtype='int64'), dtype="int64")
-    pp = position.copy()
-    amr_utils.RecurseOctreeByLevels(0, 0, 0,
-               ogl[0].dimensions[0],
-               ogl[0].dimensions[1],
-               ogl[0].dimensions[2],
-               position.astype('int64'), 1,
-               output, genealogy, corners, ogl)
-    return output, genealogy, levels_all, levels_finest, pp, corners
+def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
+                          creation_time=None,initial_mass=None,
+                          current_mass=None,metallicity=None,
+                          radius = None,
+                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
+    dd = pf.h.all_data()
+    idx = dd["particle_type"] == star_type
+    if pos is None:
+        pos = na.array([dd["particle_position_%s" % ax]
+                        for ax in 'xyz']).transpose()
+    idx = idx & na.all(pos>fle,axis=1) & na.all(pos<fre,axis=1)
+    pos = pos[idx]*pf['kpc'] #unitary units -> kpc
+    if age is None:
+        age = dd["particle_age"][idx]*pf['years'] # seconds->years
+    if vel is None:
+        vel = na.array([dd["particle_velocity_%s" % ax][idx]
+                        for ax in 'xyz']).transpose()
+        # Velocity is cm/s, we want it to be kpc/yr
+        #vel *= (pf["kpc"]/pf["cm"]) / (365*24*3600.)
+        vel *= 1.02268944e-14 
+    if initial_mass is None:
+        #in solar masses
+        initial_mass = dd["particle_mass_initial"][idx]*pf['Msun']
+    if current_mass is None:
+        #in solar masses
+        current_mass = dd["particle_mass"][idx]*pf['Msun']
+    if metallicity is None:
+        #this should be in dimensionless units, metals mass / particle mass
+        metallicity = dd["particle_metallicity"][idx]
+    if radius is None:
+        radius = initial_mass*0.0+10.0/1000.0 #10pc radius
 
-def _initial_mass_cen_ostriker(field, data):
-    # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
-    # Check Grid_AddToDiskProfile.C and star_maker7.src
-    star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
-    star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
-    dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
-    xv1 = ((data.pf["InitialTime"] - data["creation_time"])
-            / data["dynamical_time"])
-    xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
-            / data["dynamical_time"])
-    denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
-    minitial = data["ParticleMassMsun"] / denom
-    return minitial
-add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+    formation_time = pf.current_time-age
+    #create every column
+    col_list = []
+    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
+    col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
+    col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
+    col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
+    col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
+    col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
+    col_list.append(pyfits.Column("age_m", format="D", array=age))
+    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    #For particles, Sunrise takes 
+    #the dimensionless metallicity, not the mass of the metals
+    col_list.append(pyfits.Column("metallicity", format="D",
+        array=metallicity,unit="Msun")) 
+    col_list.append(pyfits.Column("L_bol", format="D",
+        array=na.zeros(current_mass.size)))
+    
+    #make the table
+    cols = pyfits.ColDefs(col_list)
+    pd_table = pyfits.new_table(cols)
+    pd_table.name = "PARTICLEDATA"
+    return pd_table
 
-def _temp_times_mass(field, data):
-    return data["Temperature"]*data["CellMassMsun"]
-add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
+def add_fields():
+    """Add three Eulerian fields Sunrise uses"""
+    def _MetalMass(field, data):
+        return data["Metal_Density"] * data["CellVolume"]
+        
+    def _convMetalMass(data):
+        return 1.0/1.989e33
+    
+    add_field("MetalMass", function=_MetalMass,
+              convert_function=_convMetalMass)
+
+    def _initial_mass_cen_ostriker(field, data):
+        # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
+        # Check Grid_AddToDiskProfile.C and star_maker7.src
+        star_mass_ejection_fraction = data.pf.get_parameter("StarMassEjectionFraction",float)
+        star_maker_minimum_dynamical_time = 3e6 # years, which will get divided out
+        dtForSFR = star_maker_minimum_dynamical_time / data.pf["years"]
+        xv1 = ((data.pf["InitialTime"] - data["creation_time"])
+                / data["dynamical_time"])
+        xv2 = ((data.pf["InitialTime"] + dtForSFR - data["creation_time"])
+                / data["dynamical_time"])
+        denom = (1.0 - star_mass_ejection_fraction * (1.0 - (1.0 + xv1)*na.exp(-xv1)))
+        minitial = data["ParticleMassMsun"] / denom
+        return minitial
+
+    add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
+
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
+
+class position:
+    def __init__(self):
+        self.output_pos = 0
+        self.refined_pos = 0
+
+class hilbert_state():
+    def __init__(self,dim=None,sgn=None,octant=None):
+        if dim is None: dim = [0,1,2]
+        if sgn is None: sgn = [1,1,1]
+        if octant is None: octant = 5
+        self.dim = dim
+        self.sgn = sgn
+        self.octant = octant
+    def flip(self,i):
+        self.sgn[i]*=-1
+    def swap(self,i,j):
+        temp = self.dim[i]
+        self.dim[i]=self.dim[j]
+        self.dim[j]=temp
+        axis = self.sgn[i]
+        self.sgn[i] = self.sgn[j]
+        self.sgn[j] = axis
+    def reorder(self,i,j,k):
+        ndim = [self.dim[i],self.dim[j],self.dim[k]] 
+        nsgn = [self.sgn[i],self.sgn[j],self.sgn[k]]
+        self.dim = ndim
+        self.sgn = nsgn
+    def copy(self):
+        return hilbert_state([self.dim[0],self.dim[1],self.dim[2]],
+                             [self.sgn[0],self.sgn[1],self.sgn[2]],
+                             self.octant)
+    def descend(self,o):
+        child = self.copy()
+        child.octant = o
+        if o==0:
+            child.swap(0,2)
+        elif o==1:
+            child.swap(1,2)
+        elif o==2:
+            pass
+        elif o==3:
+            child.flip(0)
+            child.flip(2)
+            child.reorder(2,0,1)
+        elif o==4:
+            child.flip(0)
+            child.flip(1)
+            child.reorder(2,0,1)
+        elif o==5:
+            pass
+        elif o==6:
+            child.flip(1)
+            child.flip(2)
+            child.swap(1,2)
+        elif o==7:
+            child.flip(0)
+            child.flip(2)
+            child.swap(0,2)
+        return child
+
+    def __iter__(self):
+        vertex = [0,0,0]
+        j=0
+        for i in range(3):
+            vertex[self.dim[i]] = 0 if self.sgn[i]>0 else 1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0]
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] += self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[2]] += self.sgn[2] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] += self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[1]] -= self.sgn[1] 
+        j+=1
+        yield vertex, self.descend(j)
+        vertex[self.dim[0]] -= self.sgn[0] 
+        j+=1
+        yield vertex, self.descend(j)
+
+
+
+
+
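
The nearest_power helper above rounds the super-octree width up to the next power of two by smearing the highest set bit rightward. A standalone copy with a worked trace (valid for positive inputs up to 32 bits):

    def nearest_power(x):
        x -= 1            # so exact powers of two map to themselves
        x |= x >> 1       # smear the top bit rightward...
        x |= x >> 2
        x |= x >> 4
        x |= x >> 8
        x |= x >> 16      # ...until every lower bit is set
        return x + 1      # all-ones plus one is a power of two

    assert nearest_power(37) == 64   # 36 = 0b100100 -> 0b111111 -> 64
    assert nearest_power(64) == 64   # exact powers are preserved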


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -55,6 +55,7 @@
     ParameterFileStore
 from yt.utilities.minimal_representation import \
     MinimalProjectionData, MinimalSliceData
+from yt.utilities.orientation import Orientation
 
 from .derived_quantities import DerivedQuantityCollection
 from .field_info_container import \
@@ -1158,7 +1159,7 @@
     _type_name = "cutting"
     _con_args = ('normal', 'center')
     def __init__(self, normal, center, fields = None, node_name = None,
-                 **kwargs):
+                 north_vector = None, **kwargs):
         """
         This is a data object corresponding to an oblique slice through the
         simulation domain.
@@ -1207,16 +1208,11 @@
         self.set_field_parameter('center',center)
         # Let's set up our plane equation
         # ax + by + cz + d = 0
-        self._norm_vec = normal/na.sqrt(na.dot(normal,normal))
+        self.orienter = Orientation(normal, north_vector = north_vector)
+        self._norm_vec = self.orienter.normal_vector
         self._d = -1.0 * na.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = na.identity(3)
-        _t = na.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = na.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= na.sqrt(na.dot(self._x_vec, self._x_vec))
-        self._y_vec = na.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= na.sqrt(na.dot(self._y_vec, self._y_vec))
+        self._x_vec = self.orienter.unit_vectors[0]
+        self._y_vec = self.orienter.unit_vectors[1]
         self._rot_mat = na.array([self._x_vec,self._y_vec,self._norm_vec])
         self._inv_mat = na.linalg.pinv(self._rot_mat)
         self.set_field_parameter('cp_x_vec',self._x_vec)
@@ -1336,7 +1332,7 @@
             This can either be a floating point value, in the native domain
             units of the simulation, or a tuple of the (value, unit) style.
             This will be the width of the FRB.
-        height : height specifier
+        height : height specifier, optional
             This will be the height of the FRB, by default it is equal to width.
         resolution : int or tuple of ints
             The number of pixels on a side of the final FRB.
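
The cutting plane now delegates its basis construction to the new Orientation helper, and the removed inline code above shows exactly what that construction has to produce. A standalone sketch of the same computation; the north_vector branch is an assumption about how the helper behaves, not a copy of yt/utilities/orientation.py:

    import numpy as na

    def plane_basis(normal, north_vector=None):
        # Orthonormal (x_vec, y_vec, normal) triple for an oblique slice.
        n = na.asarray(normal, dtype='float64')
        n = n / na.sqrt(na.dot(n, n))
        if north_vector is None:
            # As the removed code did: pick a coordinate axis via the
            # cross-product heuristic and build x_vec from it.
            ax = na.cross(n, na.identity(3)).sum(axis=1).argmax()
            x = na.cross(na.identity(3)[ax, :], n)
        else:
            # Assumed behavior: make y_vec follow the requested north.
            x = na.cross(na.asarray(north_vector, dtype='float64'), n)
        x = x / na.sqrt(na.dot(x, x))
        y = na.cross(n, x)
        return x, y, n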


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -354,7 +354,7 @@
         #   1 = number of cells
         #   2 = blank
         desc = {'names': ['numgrids','numcells','level'],
-                'formats':['Int32']*3}
+                'formats':['Int64']*3}
         self.level_stats = blankRecordArray(desc, MAXLEVEL)
         self.level_stats['level'] = [i for i in range(MAXLEVEL)]
         self.level_stats['numgrids'] = [0 for i in range(MAXLEVEL)]
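
The Int32 -> Int64 widening above matters as soon as per-level cell counts cross 2**31 - 1 (about 2.1e9), which large runs reach easily. A quick illustration, with a plain structured array standing in for yt's blankRecordArray helper:

    import numpy as na

    desc = na.dtype({'names': ['numgrids', 'numcells', 'level'],
                     'formats': ['int64'] * 3})
    level_stats = na.zeros(10, dtype=desc)
    level_stats['numcells'][0] = 2048 ** 3  # ~8.6e9 cells: overflows int32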


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,8 +30,6 @@
 import os
 import struct
 
-import pdb
-
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
       AMRGridPatch
@@ -56,6 +54,21 @@
 
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs
+    
+from yt.frontends.art.definitions import art_particle_field_names
+
+from yt.frontends.art.io import _read_child_mask_level
+from yt.frontends.art.io import read_particles
+from yt.frontends.art.io import read_stars
+from yt.frontends.art.io import _count_art_octs
+from yt.frontends.art.io import _read_art_level_info
+from yt.frontends.art.io import _read_art_child
+from yt.frontends.art.io import _skip_record
+from yt.frontends.art.io import _read_record
+from yt.frontends.art.io import _read_frecord
+from yt.frontends.art.io import _read_record_size
+from yt.frontends.art.io import _read_struct
+from yt.frontends.art.io import b2t
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -68,14 +81,21 @@
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, start_index):
+    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
+        start_index = props[0]
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
+        
+        self.LeftEdge = props[0]
+        self.RightEdge = props[1]
+        self.ActiveDimensions = props[2] 
+        #if child_mask is not None:
+        #    self._set_child_mask(child_mask)
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
@@ -118,92 +138,58 @@
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
+        #for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
-
+        self._setup_field_list()
+        
     def _initialize_data_storage(self):
         pass
 
     def _detect_fields(self):
         # This will need to be generalized to be used elsewhere.
         self.field_list = [ 'Density','TotalEnergy',
-                            'x-momentum','y-momentum','z-momentum',
-                            'Pressure','Gamma','GasEnergy',
-                            'Metal_DensitySNII', 'Metal_DensitySNIa',
-                            'Potential_New','Potential_Old']
-    
+             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+             'Pressure','Gamma','GasEnergy',
+             'MetalDensitySNII', 'MetalDensitySNIa',
+             'PotentialNew','PotentialOld']
+        self.field_list += art_particle_field_names
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
 
     def _count_grids(self):
-        # We have to do all the patch-coalescing here.
-        #level_info is used by the IO so promoting it to the static
-        # output class
-        #self.pf.level_info = [self.pf.ncell] # skip root grid for now
-        #leve_info = []
-        # amr_utils.count_art_octs(
-        #         self.pf.parameter_filename, self.pf.child_grid_offset,
-        #         self.pf.min_level, self.pf.max_level, self.pf.nhydro_vars,
-        #         self.pf.level_info)
+        LEVEL_OF_EDGE = 7
+        MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
+        
+        min_eff = 0.30
+        
+        vol_max = 128**3
         
         f = open(self.pf.parameter_filename,'rb')
-        self.pf.nhydro_vars, self.pf.level_info = _count_art_octs(f, 
+        
+        
+        (self.pf.nhydro_vars, self.pf.level_info,
+        self.pf.level_oct_offsets, 
+        self.pf.level_child_offsets) = \
+                         _count_art_octs(f, 
                           self.pf.child_grid_offset,
                           self.pf.min_level, self.pf.max_level)
         self.pf.level_info[0]=self.pf.ncell
-        f.close()
-        self.pf.level_info = na.array(self.pf.level_info)
-        num_ogrids = sum(self.pf.level_info) + self.pf.iOctFree
-        print 'found %i oct grids'%num_ogrids
-        num_ogrids *=7
-        print 'instantiating... %i grids'%num_ogrids
-        ogrid_left_indices = na.zeros((num_ogrids,3), dtype='int64') - 999
-        ogrid_levels = na.zeros(num_ogrids, dtype='int64')
-        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
-        
-        #don't need parents?
-        #ogrid_parents = na.zeros(num_ogrids, dtype="int64")
-        
-        #don't need masks?
-        #ochild_masks = na.zeros((num_ogrids, 8), dtype='int64').ravel()
-        
-        self.pf.level_offsets = amr_utils.read_art_tree(
-                                self.pf.parameter_filename, 
-                                self.pf.child_grid_offset,
-                                self.pf.min_level, self.pf.max_level,
-                                ogrid_left_indices, ogrid_levels,
-                                ogrid_file_locations)
-                                #ochild_masks,
-                                #ogrid_parents, 
-                                
+        self.pf.level_info = na.array(self.pf.level_info)        
+        self.pf.level_offsets = self.pf.level_child_offsets
         self.pf.level_offsets = na.array(self.pf.level_offsets, dtype='int64')
         self.pf.level_offsets[0] = self.pf.root_grid_offset
-        #ochild_masks.reshape((num_ogrids, 8), order="F")
-        ogrid_levels[ogrid_left_indices[:,0] == -999] = -1
-        # This bit of code comes from Chris, and I'm still not sure I have a
-        # handle on what it does.
-        final_indices =  ogrid_left_indices[na.where(ogrid_levels==self.pf.max_level)[0]]
-        divisible=[na.all((final_indices%2**(level))==0) 
-            for level in xrange(self.pf.max_level*2)]
-        root_level = self.pf.max_level+na.where(na.logical_not(divisible))[0][0] 
-        ogrid_dimension = na.zeros(final_indices.shape,dtype='int')+2
-        ogrid_left_indices = ogrid_left_indices/2**(root_level - ogrid_levels[:,None] - 1) - 1
-
-        # Now we can rescale
-        # root_psg = _ramses_reader.ProtoSubgrid(
-        #                 na.zeros(3, dtype='int64'), # left index of PSG
-        #                 self.pf.domain_dimensions, # dim of PSG
-        #                 na.zeros((1,3), dtype='int64'), # left edges of grids
-        #                 self.pf.domain_dimensions[None,:], # right edges of grids
-        #                 self.pf.domain_dimensions[None,:], # dims of grids
-        #                 na.zeros((1,6), dtype='int64') # empty
-        #                 )
+        
+        self.pf.level_art_child_masks = {}
+        cm = self.pf.root_iOctCh>0
+        cm_shape = (1,)+cm.shape 
+        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
+        del cm
         
         root_psg = _ramses_reader.ProtoSubgrid(
                         na.zeros(3, dtype='int64'), # left index of PSG
@@ -217,198 +203,315 @@
             if self.pf.level_info[level] == 0:
                 self.proto_grids.append([])
                 continue
-            ggi = (ogrid_levels == level).ravel()
-            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
-            nd = self.pf.domain_dimensions * 2**level
-            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
-            fl = ogrid_file_locations[ggi,:]
-            # Now our initial protosubgrid
-            #if level == 6: raise RuntimeError
-            # We want grids that cover no more than MAX_EDGE cells in every direction
-            MAX_EDGE = 128
             psgs = []
+            effs,sizes = [], []
+
+            if level > self.pf.limit_level : continue
+            
             #refers to the left index for the art octgrid
-            left_index = ogrid_left_indices[ggi,:]
-            right_index = left_index + 2
-            #Since we are re-gridding these octs on larger meshes
-            #each sub grid has length MAX_EDGE, and so get the LE of
-            #grids fit inside the domain
-            # nd is the dimensions of the domain at this level
-            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
-            #lefts = zip(*[l.ravel() for l in lefts])
-            pbar = get_pbar("Re-gridding ", lefts[0].size)
-            min_ind = na.min(left_index, axis=0)
-            max_ind = na.max(right_index, axis=0)
+            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
+            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
             
-            #iterate over the ith dimension of the yt grids
-            for i,dli in enumerate(lefts[0]):
-                pbar.update(i)
+            #read in the child masks for this level and save them
+            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
+                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+            self.pf.level_art_child_masks[level]=art_child_mask
+            #child_mask is zero where child grids exist and
+            #thus where higher resolution data is available
+            
+            
+            #compute the hilbert indices up to a certain level;
+            #each oct grid is associated with the hilbert index
+            #of its position at that level
+            base_level = int( na.log10(self.pf.domain_dimensions.max()) /
+                              na.log10(2))
+            hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                    level + base_level, left_index)
+            #print base_level, hilbert_indices.max(),
+            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+            #print hilbert_indices.max()
+            
+            # Strictly speaking, we don't care about the index of any
+            # individual oct at this point.  So we can then split them up.
+            unique_indices = na.unique(hilbert_indices)
+            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                        level, unique_indices.size, hilbert_indices.size)
+            
+            #use the hilbert indices to order oct grids so that consecutive
+            #items on a list are spatially near each other
+            #this is useful because we will define grid patches over these
+            #octs, which are more efficient if the octs are spatially close
+            
+            #split into a list of lists: for each unique hilbert index,
+            #gather the left indices of its octs together with the
+            #file locations where they live
+            pbar = get_pbar("Calc Hilbert Indices ",1)
+            locs, lefts = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices, left_index, fl)
+            pbar.finish()
+            
+            #iterate over the domains    
+            step=0
+            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+            psg_eff = []
+            for ddleft_index, ddfl in zip(lefts, locs):
+                #iterate over just the unique octs; non-unique octs could
+                #only arise if the hilbert ordering visited the same oct
+                #more than once, which should not happen - review if it does
+                #for idomain in na.unique(ddfl[:,1]):
+                #dom_ind = ddfl[:,1] == idomain
+                #dleft_index = ddleft_index[dom_ind,:]
+                #dfl = ddfl[dom_ind,:]
                 
-                #skip this grid if there are no art grids inside
-                #of the zeroeth dimension
-                if min_ind[0] > dli + nd[0]: continue
-                if max_ind[0] < dli: continue
+                dleft_index = ddleft_index
+                dfl = ddfl
+                initial_left = na.min(dleft_index, axis=0)
+                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                #this creates a grid patch that doesn't necessarily cover
+                #the whole level, but together with the other patches it
+                #covers every region that contains octs. The object
+                #automatically shrinks to barely encompass its octs.
+                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                dleft_index, dfl)
+                if psg.efficiency <= 0: continue
                 
-                # span of the current domain limited to max_edge
-                idim = min(nd[0] - dli, MAX_EDGE)
-
-                #gdi finds all of the art octs grids inside the 
-                #ith dimension of our current grid
-                gdi = ((dli  <= right_index[:,0])
-                     & (dli + idim >= left_index[:,0]))
-                     
-
-                #if none of our art octs fit inside, skip                    
-                if not na.any(gdi): continue
+                #because grid patches may still be mostly empty, with octs
+                #that only partially fill them, it may be more efficient
+                #to split large patches into smaller patches. We split when
+                #a patch is bigger than vol_max or less than min_eff (30%)
+                #of its volume is covered with octs
+                if idims.prod() > vol_max or psg.efficiency < min_eff:
+                    psg_split = _ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl,min_eff=min_eff,use_center=True,
+                        split_on_vol=vol_max)
+                    
+                    psgs.extend(psg_split)
+                    psg_eff += [x.efficiency for x in psg_split] 
+                else:
+                    psgs.append(psg)
+                    psg_eff += [psg.efficiency,]
                 
-                #iterate over the jth dimension of the yt grids
-                for dlj in lefts[1]:
-                    
-                    #this is the same process as in the previous dimension
-                    #find art octs inside this grid's jth dimension, 
-                    #skip if there are none
-                    if min_ind[1] > dlj + nd[1]: continue
-                    if max_ind[1] < dlj: continue
-                    idim = min(nd[1] - dlj, MAX_EDGE)
-                    gdj = ((dlj  <= right_index[:,1])
-                         & (dlj + idim >= left_index[:,1])
-                         & (gdi))
-                    if not na.any(gdj): continue
-                    
-                    #Same story: iterate over kth dimension grids
-                    for dlk in lefts[2]:
-                        if min_ind[2] > dlk + nd[2]: continue
-                        if max_ind[2] < dlk: continue
-                        idim = min(nd[2] - dlk, MAX_EDGE)
-                        gdk = ((dlk  <= right_index[:,2])
-                             & (dlk + idim >= left_index[:,2])
-                             & (gdj))
-                        if not na.any(gdk): continue
-                        
-                        #these are coordinates for yt grid
-                        left = na.array([dli, dlj, dlk])
-                        
-                        #does this ravel really do anything?
-                        domain_left = left.ravel()
-                        
-                        #why are we adding this to zero?
-                        initial_left = na.zeros(3, dtype='int64') + domain_left
-                        
-                        #still not sure why multiplying against one 
-                        #just type casting?
-                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
-                        
-                        # We want to find how many grids are inside.
-                        
-                        #this gives us the LE and RE, domain dims,
-                        # and file locations
-                        # for art octs within this grid
-                        dleft_index = left_index[gdk,:]
-                        dright_index = right_index[gdk,:]
-                        ddims = dims[gdk,:]
-                        dfl = fl[gdk,:]
-                        
-                        #create a sub grid composed
-                        #of the new yt grid LE, span,
-                        #and a series of the contained art grid properties:
-                        # left edge, right edge, (not sure what dims is) and file locations
-                        psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                        dleft_index, dfl)
-                        
-                        #print "Gridding from %s to %s + %s" % (
-                        #    initial_left, initial_left, idims)
-                        if psg.efficiency <= 0: continue
-                        self.num_deep = 0
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        
-                        #I'm not sure how this patch splitting process
-                        #does, or how it works
-                        psgs.extend(_ramses_reader.recursive_patch_splitting(
-                            psg, idims, initial_left, dleft_index, dfl))
-                        
-                        # psgs.extend(self._recursive_patch_splitting(
-                        #     psg, idims, initial_left, 
-                        #     dleft_index, dright_index, ddims, dfl))
-                        psgs.extend([psg])
+                tol = 1.00001
+                
+                
+                step+=1
+                pbar.update(step)
+            eff_mean = na.mean(psg_eff)
+            eff_nmin = na.sum([e<=min_eff*tol for e in psg_eff])
+            eff_nall = len(psg_eff)
+            mylog.info("Average subgrid efficiency %02.1f %%",
+                        eff_mean*100.0)
+            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
+            
+        
+            mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
-            sums = na.zeros(3, dtype='int64')
-            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
+            #print sum(len(psg.grid_file_locations) for psg in psgs)
+            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
-            # for g in self.proto_grids[level]:
-            #     sums += [s.sum() for s in g.sigs]
-            # assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
+                    
+            
+            
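
To make the min_eff/vol_max machinery above concrete, the splitting rule can be sketched independently of _ramses_reader. This illustrates the criterion only; the real recursive_patch_splitting picks split points from coverage signatures rather than always bisecting the longest axis:

    import numpy as na

    def split_patch(lefts, min_eff=0.30, vol_max=128**3):
        # lefts: (N, 3) integer left indices of octs, each spanning 2 cells.
        lo = lefts.min(axis=0)
        dims = lefts.max(axis=0) - lo + 2
        eff = 8.0 * len(lefts) / dims.prod()  # filled cells / patch volume
        if eff >= min_eff and dims.prod() <= vol_max:
            return [(lo, dims, eff)]
        ax = int(dims.argmax())               # bisect the longest axis
        mid = lo[ax] + dims[ax] // 2
        lhs = lefts[lefts[:, ax] < mid]
        rhs = lefts[lefts[:, ax] >= mid]
        if len(lhs) == 0 or len(rhs) == 0:    # splitting cannot help
            return [(lo, dims, eff)]
        return (split_patch(lhs, min_eff, vol_max) +
                split_patch(rhs, min_eff, vol_max))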
 
     num_deep = 0
 
-    # @num_deep_inc
-    # def _recursive_patch_splitting(self, psg, dims, ind,
-    #         left_index, right_index, gdims, fl):
-    #     min_eff = 0.1 # This isn't always respected.
-    #     if self.num_deep > 40:
-    #         # If we've recursed more than 100 times, we give up.
-    #         psg.efficiency = min_eff
-    #         return [psg]
-    #     if psg.efficiency > min_eff or psg.efficiency < 0.0:
-    #         return [psg]
-    #     tt, ax, fp = psg.find_split()
-    #     if (fp % 2) != 0:
-    #         if dims[ax] != fp + 1:
-    #             fp += 1
-    #         else:
-    #             fp -= 1
-    #     #print " " * self.num_deep + "Got ax", ax, "fp", fp
-    #     dims_l = dims.copy()
-    #     dims_l[ax] = fp
-    #     li_l = ind.copy()
-    #     if na.any(dims_l <= 0): return [psg]
-    #     L = _ramses_reader.ProtoSubgrid(
-    #             li_l, dims_l, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "L", tt, L.efficiency
-    #     #if L.efficiency > 1.0: raise RuntimeError
-    #     if L.efficiency <= 0.0: L = []
-    #     elif L.efficiency < min_eff:
-    #         L = self._recursive_patch_splitting(L, dims_l, li_l,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         L = [L]
-    #     dims_r = dims.copy()
-    #     dims_r[ax] -= fp
-    #     li_r = ind.copy()
-    #     li_r[ax] += fp
-    #     if na.any(dims_r <= 0): return [psg]
-    #     R = _ramses_reader.ProtoSubgrid(
-    #             li_r, dims_r, left_index, right_index, gdims, fl)
-    #     #print " " * self.num_deep + "R", tt, R.efficiency
-    #     #if R.efficiency > 1.0: raise RuntimeError
-    #     if R.efficiency <= 0.0: R = []
-    #     elif R.efficiency < min_eff:
-    #         R = self._recursive_patch_splitting(R, dims_r, li_r,
-    #                 left_index, right_index, gdims, fl)
-    #     else:
-    #         R = [R]
-    #     return L + R
         
     def _parse_hierarchy(self):
-        # We have important work to do
+        """ The root grid has no octs except one which is refined.
+        Still, it is the size of 128 cells along a length.
+        Ignore the proto subgrid created for the root grid - it is wrong.
+        """
         grids = []
         gi = 0
+        
         for level, grid_list in enumerate(self.proto_grids):
+            #The root level spans [0,2]
+            #The next level spans [0,256]
+            #The 3rd Level spans up to 128*2^3, etc.
+            #Correct root level to span up to 128
+            correction=1L
+            if level == 0:
+                correction=64L
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()
+                props = g.get_properties()*correction
                 dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
                 self.grid_left_edge[gi,:] = props[0,:] / dds
                 self.grid_right_edge[gi,:] = props[1,:] / dds
                 self.grid_dimensions[gi,:] = props[2,:]
                 self.grid_levels[gi,:] = level
-                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                child_mask = na.zeros(props[2,:],'uint8')
+                amr_utils.fill_child_mask(fl,props[0],
+                    self.pf.level_art_child_masks[level],
+                    child_mask)
+                grids.append(self.grid(gi, self, level, fl, 
+                    props*na.array(correction).astype('int64')))
                 gi += 1
         self.grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
+        
+
+        if self.pf.file_particle_data:
+            #import pdb; pdb.set_trace()
+            lspecies = self.pf.parameters['lspecies']
+            wspecies = self.pf.parameters['wspecies']
+            Nrow     = self.pf.parameters['Nrow']
+            nstars = lspecies[-1]
+            a = self.pf.parameters['aexpn']
+            hubble = self.pf.parameters['hubble']
+            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
+            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
+            um  = self.pf.parameters['aM0'] #mass units in solar masses
+            um *= 1.989e33 #convert solar masses to grams 
+            pbar = get_pbar("Loading Particles   ",5)
+            self.pf.particle_position,self.pf.particle_velocity = \
+                read_particles(self.pf.file_particle_data,nstars,Nrow)
+            pbar.update(1)
+            npa,npb=0,0
+            npb = lspecies[-1]
+            clspecies = na.concatenate(([0,],lspecies))
+            if self.pf.only_particle_type is not None:
+                npb = lspecies[0]
+                if type(self.pf.only_particle_type)==type(5):
+                    npa = clspecies[self.pf.only_particle_type]
+                    npb = clspecies[self.pf.only_particle_type+1]
+            np = npb-npa
+            self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #self.pf.particle_position  -= 1.0 #fortran indices start with 1
+            pbar.update(2)
+            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
+            pbar.update(3)
+            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
+            self.pf.particle_velocity  *= uv #to proper cm/s
+            pbar.update(4)
+            self.pf.particle_type         = na.zeros(np,dtype='uint8')
+            self.pf.particle_mass         = na.zeros(np,dtype='float64')
+            self.pf.particle_mass_initial = na.zeros(np,dtype='float64')-1
+            self.pf.particle_creation_time= na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity1 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_metallicity2 = na.zeros(np,dtype='float64')-1
+            self.pf.particle_age          = na.zeros(np,dtype='float64')-1
+            
+            dist = self.pf['cm']/self.pf.domain_dimensions[0]
+            self.pf.conversion_factors['particle_mass'] = 1.0 #already in g
+            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #already in g
+            self.pf.conversion_factors['particle_species'] = 1.0
+            for ax in 'xyz':
+                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
+                #already in unitary units
+                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
+            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
+            self.pf.conversion_factors['particle_metallicity']=1.0
+            self.pf.conversion_factors['particle_metallicity1']=1.0
+            self.pf.conversion_factors['particle_metallicity2']=1.0
+            self.pf.conversion_factors['particle_index']=1.0
+            self.pf.conversion_factors['particle_type']=1
+            self.pf.conversion_factors['particle_age']=1
+            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
+            
+
+            a,b=0,0
+            for i,(b,m) in enumerate(zip(lspecies,wspecies)):
+                if type(self.pf.only_particle_type)==type(5):
+                    if not i==self.pf.only_particle_type:
+                        continue
+                    self.pf.particle_type += i
+                    self.pf.particle_mass += m*um
+
+                else:
+                    self.pf.particle_type[a:b] = i #particle type
+                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                a=b
+            pbar.finish()
+
+            nparticles = [0,]+list(lspecies)
+            for j,np in enumerate(nparticles):
+                mylog.debug('found %i of particle type %i'%(np,j))
+            
+            if self.pf.single_particle_mass:
+                #cast all particle masses to the same mass
+                cast_type = self.pf.single_particle_type
+                
+
+            
+            self.pf.particle_star_index = i
+            
+            do_stars = (self.pf.only_particle_type is None) or \
+                       (self.pf.only_particle_type == -1) or \
+                       (self.pf.only_particle_type == len(lspecies))
+            if self.pf.file_star_data and do_stars: 
+                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
+                     = read_stars(self.pf.file_star_data,nstars,Nrow)
+                nstars = nstars[0] 
+                if nstars > 0 :
+                    n=min(1e2,len(tbirth))
+                    pbar = get_pbar("Stellar Ages        ",n)
+                    sages  = \
+                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
+                    sages *= 1.0e9
+                    sages *= 365*24*3600 #to seconds
+                    sages = self.pf.current_time-sages
+                    self.pf.particle_age[-nstars:] = sages
+                    pbar.finish()
+                    self.pf.particle_metallicity1[-nstars:] = metallicity1
+                    self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    self.pf.particle_mass_initial[-nstars:] = imass*um
+                    self.pf.particle_mass[-nstars:] = mass*um
+
+            done = 0
+            init = self.pf.particle_position.shape[0]
+            pos = self.pf.particle_position
+            #particle indices travel with the particle positions
+            #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
+            #if type(self.pf.grid_particles) == type(5):
+            #    max_level = min(max_level,self.pf.grid_particles)
+            grid_particle_count = na.zeros((len(grids),1),dtype='int64')
+            
+            #grid particles at the finest level, removing them once gridded
+            #pbar = get_pbar("Gridding Particles ",init)
+            #assignment = amr_utils.assign_particles_to_cells(
+            #        self.grid_levels.ravel().astype('int32'),
+            #        self.grid_left_edge.astype('float32'),
+            #        self.grid_right_edge.astype('float32'),
+            #        pos[:,0].astype('float32'),
+            #        pos[:,1].astype('float32'),
+            #        pos[:,2].astype('float32'))
+            #pbar.finish()
+
+            pbar = get_pbar("Gridding Particles ",init)
+            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
+                    self.grid_levels.ravel().astype('int32'),
+                    2, #only bother gridding particles to level 2
+                    self.grid_left_edge.astype('float32'),
+                    self.grid_right_edge.astype('float32'),
+                    pos[:,0].astype('float32'),
+                    pos[:,1].astype('float32'),
+                    pos[:,2].astype('float32'))
+            pbar.finish()
+            
+            
+            pbar = get_pbar("Filling grids ",init)
+            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
+                np = len(ilist)
+                grid_particle_count[gidx,0]=np
+                g.hierarchy.grid_particle_count = grid_particle_count
+                g.particle_indices = ilist
+                grids[gidx] = g
+                done += np
+                pbar.update(done)
+            pbar.finish()
+
+            #assert init-done== 0 #we have gridded every particle
+            
+        pbar = get_pbar("Finalizing grids ",len(grids))
+        for gi, g in enumerate(grids): 
+            self.grids[gi] = g
+        pbar.finish()
+            
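
The assign_particles_to_cell_lists call above produces, for each grid, the indices of the particles that grid owns. The ownership rule it is being used for here looks roughly like this in pure numpy (a sketch of the intent, not the Cython implementation):

    import numpy as na

    def owner_grids(pos, left_edges, right_edges, levels, max_level=2):
        # pos: (N, 3) particle positions; levels: 1-d array of grid levels.
        # Returns the finest grid (up to max_level) containing each particle.
        owner = na.zeros(len(pos), dtype='int64') - 1
        for gi in na.argsort(levels):         # coarse first, so fine wins
            if levels[gi] > max_level:
                continue
            inside = na.all((pos >= left_edges[gi]) &
                            (pos < right_edges[gi]), axis=1)
            owner[inside] = gi
        return owner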
 
     def _get_grid_parents(self, grid, LE, RE):
         mask = na.zeros(self.num_grids, dtype='bool')
@@ -429,6 +532,54 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
+    # def _populate_grid_objects(self):
+    #     mask = na.empty(self.grids.size, dtype='int32')
+    #     pb = get_pbar("Populating grids", len(self.grids))
+    #     for gi,g in enumerate(self.grids):
+    #         pb.update(gi)
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level - 1,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask)
+    #         parents = self.grids[mask.astype("bool")]
+    #         if len(parents) > 0:
+    #             g.Parent.extend((p for p in parents.tolist()
+    #                     if p.locations[0,0] == g.locations[0,0]))
+    #             for p in parents: p.Children.append(g)
+    #         # Now we do overlapping siblings; note that one has to "win" with
+    #         # siblings, so we assume the lower ID one will "win"
+    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+    #                             self.grid_right_edge[gi,:],
+    #                             g.Level,
+    #                             self.grid_left_edge, self.grid_right_edge,
+    #                             self.grid_levels, mask, gi)
+    #         mask[gi] = False
+    #         siblings = self.grids[mask.astype("bool")]
+    #         if len(siblings) > 0:
+    #             g.OverlappingSiblings = siblings.tolist()
+    #         g._prepare_grid()
+    #         g._setup_dx()
+    #     pb.finish()
+    #     self.max_level = self.grid_levels.max()
+
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=True, particle_type=True)
+
     def _setup_derived_fields(self):
         self.derived_field_list = []
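
The external_wrapper indirection in _setup_field_list above is the standard fix for Python's late-binding closures: without it, every _convert_function would capture the loop variable and end up converting the last field in the list. A self-contained illustration of the pitfall and the fix:

    fields = ['particle_mass', 'particle_age']

    # Buggy: both callbacks capture the variable f, not its value.
    callbacks = [lambda: f for f in fields]
    assert callbacks[0]() == 'particle_age'

    # Fixed: a factory binds the value per field, as external_wrapper does.
    def make_callback(name):
        return lambda: name
    callbacks = [make_callback(f) for f in fields]
    assert callbacks[0]() == 'particle_mass'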
 
@@ -446,16 +597,65 @@
     _handle = None
     
     def __init__(self, filename, data_style='art',
-                 storage_filename = None):
+                 storage_filename = None, 
+                 file_particle_header=None, 
+                 file_particle_data=None,
+                 file_star_data=None,
+                 discover_particles=False,
+                 use_particles=True,
+                 limit_level=None,
+                 only_particle_type = None,
+                 grid_particles=False,
+                 single_particle_mass=False,
+                 single_particle_type=0):
         import yt.frontends.ramses._ramses_reader as _ramses_reader
+        
+        
+        dirn = os.path.dirname(filename)
+        base = os.path.basename(filename)
+        aexp = base.split('_')[2].replace('.d','')
+        
+        self.file_particle_header = file_particle_header
+        self.file_particle_data = file_particle_data
+        self.file_star_data = file_star_data
+        self.only_particle_type = only_particle_type
+        self.grid_particles = grid_particles
+        self.single_particle_mass = single_particle_mass
+        
+        if limit_level is None:
+            self.limit_level = na.inf
+        else:
+            mylog.info("Using maximum level: %i",limit_level)
+            self.limit_level = limit_level
+        
+        if discover_particles:
+            if file_particle_header is None:
+                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_header = loc
+                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
+            if file_particle_data is None:
+                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                if os.path.exists(loc):
+                    self.file_particle_data = loc
+                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
+            if file_star_data is None:
+                loc = filename.replace(base,'stars_%s.dat'%aexp)
+                if os.path.exists(loc):
+                    self.file_star_data = loc
+                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
+        
+        self.use_particles = any([self.file_particle_header,
+            self.file_star_data, self.file_particle_data])
         StaticOutput.__init__(self, filename, data_style)
-        self.storage_filename = storage_filename
         
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["InitialTime"]=self.current_time
+        self.storage_filename = storage_filename
+        
         
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
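
The discover_particles block in __init__ above infers the companion file names from the expansion stamp embedded in the output filename. Factored out, the lookup is just the following (the naming patterns are taken from that code; the helper itself is illustrative):

    import os

    def discover_art_files(filename):
        base = os.path.basename(filename)
        aexp = base.split('_')[2].replace('.d', '')  # expansion stamp
        patterns = {'particle_header': 'PMcrd%s.DAT' % aexp,
                    'particle_data': 'PMcrs0%s.DAT' % aexp,
                    'star_data': 'stars_%s.dat' % aexp}
        found = {}
        for kind, name in patterns.items():
            loc = filename.replace(base, name)
            if os.path.exists(loc):
                found[kind] = loc
        return found
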
@@ -471,8 +671,10 @@
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-
+        
+        
         z = self.current_redshift
+        
         h = self.hubble_constant
         boxcm_cal = self["boxh"]
         boxcm_uncal = boxcm_cal / h
@@ -505,29 +707,35 @@
         # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
         # ie, critical density 
         self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))      
-        self.conversion_factors["Density"] = \
-            self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = \
-            self.rho0*self.v0**2*(aexpn**-5.0)
+        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
         tr  = self.tr
+        
+        #factors to convert the native code units to CGS
+        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
+        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
+        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
+        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
+        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
         self.conversion_factors["Temperature"] = tr
-        self.conversion_factors["Metal_Density"] = 1
+        self.conversion_factors["Potential"] = 1.0
+        self.cosmological_simulation = True
         
         # Now our conversion factors
         for ax in 'xyz':
             # Add on the 1e5 to get to cm/s
             self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
         seconds = self.t0
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
-        self.time_units['Myr'] = self.time_units['years'] / 1.0e6
-        self.time_units['Gyr']  = self.time_units['years'] / 1.0e9
+        self.time_units['Gyr']   = 1.0/(1.0e9*365*3600*24.0)
+        self.time_units['Myr']   = 1.0/(1.0e6*365*3600*24.0)
+        self.time_units['years'] = 1.0/(365*3600*24.0)
+        self.time_units['days']  = 1.0 / (3600*24.0)
+
 
         #we were already in seconds, go back in to code units
-        self.current_time /= self.t0 
+        #self.current_time /= self.t0 
+        #self.current_time = b2t(self.current_time,n=1)
         
-        
+    
     def _parse_parameter_file(self):
         # We set our domain to run from 0 .. 1 since we are otherwise
         # unconstrained.
@@ -594,8 +802,14 @@
         self.parameters["Y_p"] = 0.245
         self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
         self.parameters["gamma"] = 5./3.
+        self.parameters["T_CMB0"] = 2.726  
+        self.parameters["T_min"] = 300.0 #T floor in K
+        self.parameters["boxh"] = header_vals['boxh']
+        self.parameters['ng'] = 128 #number of level-0 cells along one edge
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
+        self.parameters['CosmologyInitialRedshift']=self.current_redshift
         self.data_comment = header_vals['jname']
+        self.current_time_raw = header_vals['t']
         self.current_time = header_vals['t']
         self.omega_lambda = header_vals['Oml0']
         self.omega_matter = header_vals['Om0']
@@ -606,26 +820,62 @@
         #nchem is nhydrovars-8, so we typically have 2 extra chem species 
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
         #self.hubble_time /= 3.168876e7 #Gyr in s 
-        def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-            return 1./(x*na.sqrt(oml+omb*x**-3.0))
-        spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
-        integrand_arr = integrand(spacings)
-        self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
-        self.current_time *= self.hubble_time
-                
+        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
+        #     return 1./(x*na.sqrt(oml+omb*x**-3.0))
+        # spacings = na.logspace(-5,na.log10(self.parameters['aexpn']),1e5)
+        # integrand_arr = integrand(spacings)
+        # self.current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+        # self.current_time *= self.hubble_time
+        self.current_time = b2t(self.current_time_raw)*1.0e9*365*3600*24         
         for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
             _skip_record(f)
 
+        
+        Om0 = self.parameters['Om0']
+        hubble = self.parameters['hubble']
+        dummy = 100.0 * hubble * na.sqrt(Om0)
+        ng = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh = header_vals['boxh'] 
+        
+        #distance unit; boxh is in units of h^-1 Mpc
+        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
+        r0 = self.parameters["r0"]
+        #time, yrs
+        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
+        #velocity unit in km/s
+        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
+                na.sqrt(self.parameters["Om0"])
+        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
+        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
+        rho0 = self.parameters["rho0"]
+        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
+        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        #T_0 = unit of temperature in K (and in keV)
+        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
+        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        #S_0 = unit of entropy in keV * cm^2
+        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        
+        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3)
+        #     for non-cosmological run aM0 must be defined during initialization
+        #     [aM0] = [Msun]
+        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        
+        #CGS for everything in the next block
+    
         (self.ncell,) = struct.unpack('>l', _read_record(f))
         # Try to figure out the root grid dimensions
-        est = na.log2(self.ncell) / 3
-        if int(est) != est: raise RuntimeError
+        est = int(na.rint(self.ncell**(1.0/3.0)))
         # Note here: this is the number of *cells* on the root grid.
         # This is not the same as the number of Octs.
-        self.domain_dimensions = na.ones(3, dtype='int64') * int(2**est)
+        self.domain_dimensions = na.ones(3, dtype='int64')*est 
 
         self.root_grid_mask_offset = f.tell()
-        _skip_record(f) # iOctCh
+        #_skip_record(f) # iOctCh
+        root_cells = self.domain_dimensions.prod()
+        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
         self.root_grid_offset = f.tell()
         _skip_record(f) # hvar
         _skip_record(f) # var
@@ -634,61 +884,71 @@
         self.child_grid_offset = f.tell()
 
         f.close()
+        
+        if self.file_particle_header is not None:
+            self._read_particle_header(self.file_particle_header)
+        
+    def _read_particle_header(self,fn):    
+        """ Reads control information, various parameters from the 
+            particle data set. Adapted from Daniel Ceverino's 
+            Read_Particles_Binary in analysis_ART.F   
+        """ 
+        header_struct = [
+            ('>i','pad'),
+            ('45s','header'), 
+            ('>f','aexpn'),
+            ('>f','aexp0'),
+            ('>f','amplt'),
+            ('>f','astep'),
+
+            ('>i','istep'),
+            ('>f','partw'),
+            ('>f','tintg'),
+
+            ('>f','Ekin'),
+            ('>f','Ekin1'),
+            ('>f','Ekin2'),
+            ('>f','au0'),
+            ('>f','aeu0'),
+
+
+            ('>i','Nrow'),
+            ('>i','Ngridc'),
+            ('>i','Nspecies'),
+            ('>i','Nseed'),
+
+            ('>f','Om0'),
+            ('>f','Oml0'),
+            ('>f','hubble'),
+            ('>f','Wp5'),
+            ('>f','Ocurv'),
+            ('>f','Omb0'),
+            ('>%ds'%(396),'extras'),
+            ('>f','unknown'),
+
+            ('>i','pad')]
+        fh = open(fn,'rb')
+        vals = _read_struct(fh,header_struct)
+        
+        for k,v in vals.iteritems():
+            self.parameters[k]=v
+        
+        seek_extras = 137
+        fh.seek(seek_extras)
+        n = self.parameters['Nspecies']
+        self.parameters['wspecies'] = na.fromfile(fh,dtype='>f',count=10)
+        self.parameters['lspecies'] = na.fromfile(fh,dtype='>i',count=10)
+        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
+        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
+        fh.close()
+        
+        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
+        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
+        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
+        
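
header_struct above is a table of (big-endian struct format, name) pairs that is walked sequentially through the file. A plausible shape for the _read_struct helper it is handed to (an assumption; the real helper lives in yt/frontends/art/io.py):

    import struct

    def read_struct(fh, layout):
        # Unpack consecutive fields; each entry is (struct format, name).
        vals = {}
        for fmt, name in layout:
            (vals[name],) = struct.unpack(fmt, fh.read(struct.calcsize(fmt)))
        return vals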
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-def _skip_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(s[0], 1)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
 
-def _read_record(f):
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
-    ss = f.read(s)
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    return ss
-
-def _read_record_size(f):
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def _count_art_octs(f, offset,
-                   MinLev, MaxLevelNow):
-    import gc
-    f.seek(offset)
-    nchild,ntot=8,0
-    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
-        #Get the info for this level, skip the rest
-        #print "Reading oct tree data for level", Lev
-        #print 'offset:',f.tell()
-        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
-           '>iii', _read_record(f))
-        print 'Level %i : '%Lev, iNOLL
-        #print 'offset after level record:',f.tell()
-        iOct = iHOLL[Lev] - 1
-        nLevel = iNOLL[Lev]
-        nLevCells = nLevel * nchild
-        ntot = ntot + nLevel
-
-        #Skip all the oct hierarchy data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel)
-        
-        #Skip the child vars data
-        ns = _read_record_size(f)
-        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
-        f.seek(f.tell()+size * nLevel*nchild)
-        
-        #find nhydrovars
-        nhydrovars = 8+2
-    f.seek(offset)
-    return nhydrovars, iNOLL
-
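
Taken together, the code-unit block added to _parse_parameter_file defines a complete scaling system from just boxh, hubble, and Om0. Collected in one place for reference (formulas copied from the diff above; the example input values are invented):

    import numpy as na

    def art_code_units(boxh, hubble, Om0, Y_p=0.245, ng=128):
        wmu = 4.0 / (8.0 - 5.0 * Y_p)
        r0 = boxh / ng                                   # h^-1 Mpc per root cell
        t0 = 2.0 / (100.0 * hubble * na.sqrt(Om0)) * 3.0856e19 / 3.15e7  # yr
        v0 = 50.0 * r0 * na.sqrt(Om0)                    # km/s
        rho0 = 2.776e11 * hubble ** 2.0 * Om0            # Msun / Mpc^3
        P0 = 4.697e-16 * Om0 ** 2.0 * r0 ** 2.0 * hubble ** 2.0  # g/cm/s^2
        T_0 = 3.03e5 * r0 ** 2.0 * wmu * Om0             # K
        aM0 = rho0 * (boxh / hubble) ** 3.0 / ng ** 3.0  # Msun per code mass
        return dict(r0=r0, t0=t0, v0=v0, rho0=rho0, P0=P0, T_0=T_0, aM0=aM0)

    # e.g. art_code_units(boxh=20.0, hubble=0.7, Om0=0.3)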


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -0,0 +1,43 @@
+"""
+Definitions specific to ART
+
+Author: Christopher E. Moody <cemoody at ucsc.edu>
+Affiliation: UC Santa Cruz
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Christopher E. Moody.  All Rights
+  Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+art_particle_field_names = [
+'particle_age',
+'particle_index',
+'particle_mass',
+'particle_mass_initial',
+'particle_creation_time',
+'particle_metallicity1',
+'particle_metallicity2',
+'particle_metallicity',
+'particle_position_x',
+'particle_position_y',
+'particle_position_z',
+'particle_velocity_x',
+'particle_velocity_y',
+'particle_velocity_z',
+'particle_type']


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -37,89 +37,201 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
+
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-KnownARTFields = FieldInfoContainer()
-add_art_field = KnownARTFields.add_field
+import numpy as na
 
-translation_dict = {"Density":"density",
-                    "TotalEnergy":"TotalEnergy",
-                    "x-velocity":"velocity_x",
-                    "y-velocity":"velocity_y",
-                    "z-velocity":"velocity_z",
-                    "Pressure":"pressure",
-                    "Metallicity":"metallicity",
-                    "GasEnergy":"GasEnergy"
-                   }
+#these are just the hydro fields
+known_art_fields = [ 'Density','TotalEnergy',
+                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
+                     'Pressure','Gamma','GasEnergy',
+                     'MetalDensitySNII', 'MetalDensitySNIa',
+                     'PotentialNew','PotentialOld']
 
-for f,v in translation_dict.items():
-    add_art_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)])
-    add_art_field(f, function=TranslationFunc(v), take_log=True)
+#Add the fields, then later we'll individually define units and names
+for f in known_art_fields:
+    if f not in ARTFieldInfo:
+        add_field(f, function=lambda a,b: None, take_log=True,
+                  validators = [ValidateDataField(f)])
 
-#def _convertMetallicity(data):
-#    return data.convert("Metal_Density1")
-#KnownARTFields["Metal_Density1"]._units = r"1"
-#KnownARTFields["Metal_Density1"]._projected_units = r"1"
-#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
+#Hydro Fields that are verified to be OK unit-wise:
+#Density
+#Temperature
+
+#Hydro Fields that need to be tested:
+#TotalEnergy
+#XYZMomentum
+#Pressure
+#Gamma
+#GasEnergy
+#MetalDensity SNII + SNia
+#Potentials
+
+#Hydro Derived fields that are untested:
+#metallicities
+#xyzvelocity
+
+#Particle fields that are tested:
+#particle_position_xyz
+#particle_type
+#particle_index
+#particle_mass
+#particle_mass_initial
+#particle_age
+#particle_velocity
+#particle_metallicity12
+
+#Particle fields that are untested:
+#NONE
 
 
 def _convertDensity(data):
     return data.convert("Density")
-KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
+ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["Density"]._convert_function=_convertDensity
 
-def _convertEnergy(data):
+def _convertTotalEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
+ARTFieldInfo["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["TotalEnergy"]._projected_units = r"\rm{K}"
+ARTFieldInfo["TotalEnergy"]._convert_function=_convertTotalEnergy
 
-def _Temperature(field, data):
-    tr  = data["GasEnergy"] / data["Density"]
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
+def _convertXMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-def _convertTemperature(data):
-    return data.convert("Temperature")
-add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
-KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_convertTemperature
+ARTFieldInfo["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["XMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["XMomentumDensity"]._convert_function=_convertXMomentumDensity
 
-def _MetallicitySNII(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _convertYMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-    
-add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
+ARTFieldInfo["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["YMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["YMomentumDensity"]._convert_function=_convertYMomentumDensity
 
-def _MetallicitySNIa(field, data):
-    #get the dimensionless mass fraction
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _convertZMomentumDensity(data):
+    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-    
-add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
+ARTFieldInfo["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
+ARTFieldInfo["ZMomentumDensity"]._projected_units = r"\rm{K}"
+ARTFieldInfo["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
 
-def _Metallicity(field, data):
-    #get the dimensionless mass fraction of the total metals
-    tr  = data["Metal_DensitySNIa"] / data["Density"]
-    tr += data["Metal_DensitySNII"] / data["Density"]
-    tr *= data.pf.conversion_factors["Density"]    
+def _convertPressure(data):
+    return data.convert("Pressure")
+ARTFieldInfo["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+ARTFieldInfo["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
+ARTFieldInfo["Pressure"]._convert_function=_convertPressure
+
+def _convertGamma(data):
+    return 1.0
+ARTFieldInfo["Gamma"]._units = r""
+ARTFieldInfo["Gamma"]._projected_units = r""
+ARTFieldInfo["Gamma"]._convert_function=_convertGamma
+
+def _convertGasEnergy(data):
+    return data.convert("GasEnergy")
+ARTFieldInfo["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
+ARTFieldInfo["GasEnergy"]._projected_units = r""
+ARTFieldInfo["GasEnergy"]._convert_function=_convertGasEnergy
+
+def _convertMetalDensitySNII(data):
+    return data.convert("Density")
+ARTFieldInfo["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+
+def _convertMetalDensitySNIa(data):
+    return data.convert("Density")
+ARTFieldInfo["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+
+def _convertPotentialNew(data):
+    return data.convert("Potential")
+ARTFieldInfo["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["PotentialNew"]._convert_function=_convertPotentialNew
+
+def _convertPotentialOld(data):
+    return data.convert("Potential")
+ARTFieldInfo["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
+ARTFieldInfo["PotentialOld"]._convert_function=_convertPotentialOld
+
+####### Derived fields
+
+def _temperature(field, data):
+    tr  = data["GasEnergy"].astype('float64') #~1
+    d = data["Density"].astype('float64')
+    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
+    tr /= d #
+    assert na.all(na.isfinite(tr)) #diagnosing some problem...
     return tr
-    
-add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
+def _converttemperature(data):
+    x  = data.pf.conversion_factors["Density"]
+    x /= data.pf.conversion_factors["GasEnergy"]
+    x *= data.pf.conversion_factors["Temperature"]
+    return x
+add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
-def _Metal_Density(field,data):
-    return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
-def _convert_Metal_Density(data):
-    return data.convert("Metal_Density")
+def _metallicity_snII(field, data):
+    tr  = data["MetalDensitySNII"] / data["Density"]
+    return tr
+add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNII"]._units = r""
+ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
-add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
-KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density
+def _metallicity_snIa(field, data):
+    tr  = data["MetalDensitySNIa"] / data["Density"]
+    return tr
+add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNIa"]._units = r""
+ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
+
+def _x_velocity(field, data):
+    tr  = data["XMomentumDensity"]/data["Density"]
+    return tr
+add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+
+def _y_velocity(field, data):
+    tr  = data["YMomentumDensity"]/data["Density"]
+    return tr
+add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+
+def _z_velocity(field, data):
+    tr  = data["ZMomentumDensity"]/data["Density"]
+    return tr
+add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+
+
+def _metal_density(field, data):
+    tr  = data["MetalDensitySNIa"]
+    tr += data["MetalDensitySNII"]
+    return tr
+add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r""
+ARTFieldInfo["Metal_Density"]._projected_units = r""
+
+
+#Particle fields
+
+#Derived particle fields
+
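
For reference, the derived-field pattern used throughout this file is: define
a function taking (field, data), register it with add_field, then attach units
and (optionally) a _convert_function on ARTFieldInfo.  A minimal sketch of one
more derived field ("MetalMassFraction" is a hypothetical name, using the same
machinery as above):

    def _metal_mass_fraction(field, data):
        #dimensionless ratio of total metal density to gas density
        return (data["MetalDensitySNII"] + data["MetalDensitySNIa"]) \
               / data["Density"]
    add_field("MetalMassFraction", function=_metal_mass_fraction,
              units=r"", take_log=True)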


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -25,16 +25,19 @@
 
 import numpy as na
 import struct
-import pdb
+
+import os
+import os.path
 
 from yt.utilities.io_handler import \
     BaseIOHandler
-import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
 import yt.utilities.amr_utils as au
 
+from yt.frontends.art.definitions import art_particle_field_names
+
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
 
@@ -47,7 +50,41 @@
         self.level_offsets = level_offsets
         self.level_data = {}
 
-    def preload_level(self, level):
+    def preload_level(self, level,field=None):
+        """ Reads in the full ART tree. From the ART source:
+            iOctLv :    >0   - level of an oct
+            iOctPr :         - parent of an oct
+            iOctCh :    >0   - pointer to an oct of children
+                        0   - there are no children; the cell is a leaf
+            iOctNb :    >0   - pointers to neighbouring cells 
+            iOctPs :         - coordinates of Oct centers
+            
+            iOctLL1:         - doubly linked list of octs
+            iOctLL2:         - doubly linked list of octs
+            
+            tl - current  time moment for level L
+            tlold - previous time moment for level L
+            dtl - dtime0/2**iTimeBin
+            dtlold -  previous time step for level L
+            iSO - sweep order
+            
+            hvar(1,*) - gas density 
+            hvar(2,*) - gas energy 
+            hvar(3,*) - x-momentum 
+            hvar(4,*) - y-momentum
+            hvar(5,*) - z-momentum
+            hvar(6,*) - pressure
+            hvar(7,*) - Gamma
+            hvar(8,*) - internal energy 
+
+            var (1,*) - total density 
+            var (2,*) - potential (new)
+            var (3,*) - potential (old)
+        """
+
         if level in self.level_data: return
         if level == 0:
             self.preload_root_level()
@@ -58,44 +95,88 @@
         nvals = ncells * (self.nhydro_vars + 6) # 2 vars, 2 pads
         arr = na.fromfile(f, dtype='>f', count=nvals)
         arr = arr.reshape((self.nhydro_vars+6, ncells), order="F")
-        arr = arr[3:-1,:].astype("float64")
-        self.level_data[level] = arr
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        arr = arr[3:-1,:] #skip beginning pad, idc, iOctCh, + ending pad
+        #field is accepted for future per-field caching; for now the whole
+        #level is cached regardless
+        self.level_data[level] = arr.astype('float32')
+        del arr
 
     def preload_root_level(self):
         f = open(self.filename, 'rb')
         f.seek(self.level_offsets[0] + 4) # Ditch the header
         ncells = self.level_info[0]
-        #pdb.set_trace()
         nhvals = ncells * (self.nhydro_vars) # 0 vars, 0 pads
-        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float64")
+        hvar = na.fromfile(f, dtype='>f', count=nhvals).astype("float32")
         hvar = hvar.reshape((self.nhydro_vars, ncells), order="F")
         na.fromfile(f,dtype='>i',count=2) #throw away the pads
         nvars = ncells * (2) # 0 vars, 0 pads
-        var = na.fromfile(f, dtype='>f', count=nvars).astype("float64")
+        var = na.fromfile(f, dtype='>f', count=nvars).astype("float32")
         var = var.reshape((2, ncells), order="F")
         arr = na.concatenate((hvar,var))
         self.level_data[0] = arr
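+        # level_data[level] is a (nfields, ncells) float32 array; a single
+        # field is then just a row, e.g. self.level_data[0][field_id, :]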
 
     def clear_level(self, level):
         self.level_data.pop(level, None)
+
+    def _read_particle_field(self, grid, field):
+        #This will be cleaned up later
+        idx = na.array(grid.particle_indices)
+        if field == 'particle_index':
+            return na.array(idx)
+        if field == 'particle_type':
+            return grid.pf.particle_type[idx]
+        if field == 'particle_position_x':
+            return grid.pf.particle_position[idx][:,0]
+        if field == 'particle_position_y':
+            return grid.pf.particle_position[idx][:,1]
+        if field == 'particle_position_z':
+            return grid.pf.particle_position[idx][:,2]
+        if field == 'particle_mass':
+            return grid.pf.particle_mass[idx]
+        if field == 'particle_velocity_x':
+            return grid.pf.particle_velocity[idx][:,0]
+        if field == 'particle_velocity_y':
+            return grid.pf.particle_velocity[idx][:,1]
+        if field == 'particle_velocity_z':
+            return grid.pf.particle_velocity[idx][:,2]
+        
+        #stellar fields
+        if field == 'particle_age':
+            return grid.pf.particle_age[idx]
+        if field == 'particle_metallicity':
+            return grid.pf.particle_metallicity1[idx] +\
+                   grid.pf.particle_metallicity2[idx]
+        if field == 'particle_metallicity1':
+            return grid.pf.particle_metallicity1[idx]
+        if field == 'particle_metallicity2':
+            return grid.pf.particle_metallicity2[idx]
+        if field == 'particle_mass_initial':
+            return grid.pf.particle_mass_initial[idx]
+        
+        raise KeyError("Unknown particle field: %s" % field)
+
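+    # grid["particle_mass"] and friends route through _read_data_set below
+    # into this dispatch, returning the per-grid slice pf.particle_mass[idx]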
         
     def _read_data_set(self, grid, field):
+        if field in art_particle_field_names:
+            return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
         if grid.Level == 0: # We only have one root grid
             self.preload_level(0)
             tr = self.level_data[0][field_id,:].reshape(
                     pf.domain_dimensions, order="F").copy()
-            return tr.swapaxes(0, 2)
-        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
-        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
-        to_fill = grid.ActiveDimensions.prod()
+            return tr.swapaxes(0, 2).astype("float64")
+        tr = na.zeros(grid.ActiveDimensions, dtype='float32')
         grids = [grid]
         l_delta = 0
+        filled = na.zeros(grid.ActiveDimensions, dtype='uint8')
+        to_fill = grid.ActiveDimensions.prod()
         while to_fill > 0 and len(grids) > 0:
             next_grids = []
             for g in grids:
-                self.preload_level(g.Level)
+                self.preload_level(g.Level,field=field_id)
                 #print "Filling %s from %s (%s)" % (grid, g, g.Level)
                 to_fill -= au.read_art_grid(field_id, 
                         grid.get_global_startindex(), grid.ActiveDimensions,
@@ -104,11 +185,294 @@
                 next_grids += g.Parent
             grids = next_grids
             l_delta += 1
-        return tr
+        return tr.astype("float64")
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+def _count_art_octs(f, offset, 
+                   MinLev, MaxLevelNow):
+    level_oct_offsets= [0,]
+    level_child_offsets= [0,]
+    f.seek(offset)
+    nchild,ntot=8,0
+    Level = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iNOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    iHOLL = na.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    for Lev in xrange(MinLev + 1, MaxLevelNow+1):
+        level_oct_offsets.append(f.tell())
 
+        #Get the info for this level, skip the rest
+        #print "Reading oct tree data for level", Lev
+        #print 'offset:',f.tell()
+        Level[Lev], iNOLL[Lev], iHOLL[Lev] = struct.unpack(
+           '>iii', _read_record(f))
+        #print 'Level %i : '%Lev, iNOLL
+        #print 'offset after level record:',f.tell()
+        iOct = iHOLL[Lev] - 1
+        nLevel = iNOLL[Lev]
+        nLevCells = nLevel * nchild
+        ntot = ntot + nLevel
+
+        #Skip all the oct hierarchy data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel)
+
+        level_child_offsets.append(f.tell())
+        #Skip the child vars data
+        ns = _read_record_size(f)
+        size = struct.calcsize('>i') + ns + struct.calcsize('>i')
+        f.seek(f.tell()+size * nLevel*nchild)
+
+        #find nhydrovars
+        nhydrovars = 8+2
+    f.seek(offset)
+    return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
+
+def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+    pos = f.tell()
+    f.seek(level_oct_offsets[level])
+    #Get the info for this level, skip the rest
+    junk, nLevel, iOct = struct.unpack(
+       '>iii', _read_record(f))
+    
+    #fortran indices start at 1
+    
+    #Skip all the oct hierarchy data
+    le     = na.zeros((nLevel,3),dtype='int64')
+    fl     = na.ones((nLevel,6),dtype='int64')
+    iocts  = na.zeros(nLevel+1,dtype='int64')
+    idxa,idxb = 0,0
+    chunk = long(1e6) #~120MB per chunk: 1e6 rows of 15 int64 values
+    left = nLevel
+    while left > 0 :
+        this_chunk = min(chunk,left)
+        idxb=idxa+this_chunk
+        data = na.fromfile(f,dtype='>i',count=this_chunk*15)
+        data=data.reshape(this_chunk,15)
+        left-=this_chunk
+        le[idxa:idxb,:] = data[:,1:4]
+        fl[idxa:idxb,1] = na.arange(idxa,idxb)
+        #pad byte is last, LL2, then ioct right before it
+        iocts[idxa:idxb] = data[:,-3] 
+        idxa=idxa+this_chunk
+    del data
+    
+    #each entry of iocts stores the index of the *next* oct, not the
+    #current one, so shift the values down by one; the last index is unused
+    ioctso = iocts.copy()
+    iocts[1:]=iocts[:-1] #shift
+    iocts = iocts[:nLevel] #chop off the last index
+    iocts[0]=iOct #starting value
+
+    #now correct iocts for fortran indices start @ 1
+    iocts = iocts-1
+
+    assert na.unique(iocts).shape[0] == nLevel
+    
+    #ioct tries to access arrays much larger than le & fl
+    #just make sure they appear in the right order, skipping
+    #the empty space in between
+    idx = na.argsort(iocts)
+    
+    #now rearrange le & fl in order of the ioct
+    le = le[idx]
+    fl = fl[idx]
+
+    #left edges are expressed as if they were on the deepest (root_level)
+    #grid, so max(le)=2**root_level no matter what level; rescale to the
+    #yt convention
+    #le = le/2**(root_level-1-level)-1
+
+    #empirically the exponent needs to be one smaller:
+    le = le/2**(root_level-2-level)-1
+
+    #eventually the hvars and vars arrays should be read here as well, to
+    #record where iOctCh>0 (i.e. where a cell is subdivided); not done yet
+    iOctCh  = na.zeros((nLevel+1,8),dtype='bool')
+
+    f.seek(pos)
+    return le,fl,nLevel
+
+
+def read_particles(file,nstars,Nrow):
+    words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
+    real_size = 4 # for file_particle_data; not always true?
+    np = nstars # number of particles including stars, should come from lspecies[-1]
+    np_per_page = Nrow**2 # defined in ART a_setup.h
+    num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
+
+    f = na.fromfile(file, dtype='>f4').astype('float32') # direct access
+    pages = na.vsplit(na.reshape(f, (num_pages, words, np_per_page)), num_pages)
+    data = na.squeeze(na.dstack(pages)).T # x,y,z,vx,vy,vz
+    return data[:,0:3],data[:,3:]
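+#e.g.: pos, vel = read_particles(file_particle_data, lspecies[-1], Nrow)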
+
+def read_stars(file,nstars,Nrow):
+    fh = open(file,'rb')
+    tdum,adum   = _read_frecord(fh,'>d')
+    nstars      = _read_frecord(fh,'>i')
+    ws_old, ws_oldi = _read_frecord(fh,'>d')
+    mass    = _read_frecord(fh,'>f') 
+    imass   = _read_frecord(fh,'>f') 
+    tbirth  = _read_frecord(fh,'>f') 
+    metallicity1 = metallicity2 = None #older outputs may omit these records
+    if fh.tell() < os.path.getsize(file):
+        metallicity1 = _read_frecord(fh,'>f')
+    if fh.tell() < os.path.getsize(file):
+        metallicity2 = _read_frecord(fh,'>f')
+    assert fh.tell() == os.path.getsize(file)
+    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+
+def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
+    f.seek(level_child_offsets[level])
+    nvals = nLevel * (nhydro_vars + 6) # 2 vars, 2 pads
+    ioctch = na.zeros(nLevel,dtype='uint8')
+    idc = na.zeros(nLevel,dtype='int32')
+    
+    chunk = long(1e6)
+    left = nLevel
+    width = nhydro_vars+6
+    a,b=0,0
+    while left > 0:
+        chunk = min(chunk,left)
+        b += chunk
+        arr = na.fromfile(f, dtype='>i', count=chunk*width)
+        arr = arr.reshape((width, chunk), order="F")
+        assert na.all(arr[0,:]==arr[-1,:]) #pads must be equal
+        idc[a:b]    = arr[1,:]-1 #convert fortran to python indexing
+        ioctch[a:b] = arr[2,:]==0 #iOctCh>0 means refined data is available,
+        #so a zero in this mask marks a cell with refinement below it
+        a=b
+        left -= chunk
+    assert left==0
+    return idc,ioctch
+    
+nchem=8+2
+dtyp = na.dtype(">i4,>i8,>i8"+",>%sf4"%(nchem)+ \
+                ",>%sf4"%(2)+",>i4")
+def _read_art_child(f, level_child_offsets,level,nLevel,field):
+    pos=f.tell()
+    f.seek(level_child_offsets[level])
+    arr = na.fromfile(f, dtype='>f', count=nLevel * 16) #16 words per record
+    arr = arr.reshape((16, nLevel), order="F")
+    arr = arr[3:-1,:].astype("float64") #drop pads, idc and iOctCh
+    f.seek(pos)
+    return arr[field,:]
+
+def _skip_record(f):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(s[0], 1)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+
+def _read_frecord(f,fmt):
+    s1 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    count = s1/na.dtype(fmt).itemsize
+    ss = na.fromfile(f,fmt,count=count)
+    s2 = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    assert s1==s2
+    return ss
+
+
+def _read_record(f,fmt=None):
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))[0]
+    ss = f.read(s)
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    if fmt is not None:
+        return struct.unpack(fmt, ss)
+    return ss
+
+def _read_record_size(f):
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def _read_struct(f,structure,verbose=False):
+    vals = {}
+    for format,name in structure:
+        size = struct.calcsize(format)
+        (val,) = struct.unpack(format,f.read(size))
+        vals[name] = val
+        if verbose: print "%s:\t%s\t (%d B)" %(name,val,f.tell())
+    return vals
+
+
+
+#All of these functions are to convert from hydro time var to 
+#proper time
+sqrt = na.sqrt
+sign = na.sign
+
+def find_root(f,a,b,tol=1e-6):
+    c = (a+b)/2.0
+    last = -na.inf
+    assert(sign(f(a)) != sign(f(b)))  
+    while na.abs(f(c)-last) > tol:
+        last=f(c)
+        if sign(last)==sign(f(b)):
+            b=c
+        else:
+            a=c
+        c = (a+b)/2.0
+    return c
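+#simple bisection; e.g. find_root(lambda x: x*x - 2.0, 0.0, 2.0) ~= sqrt(2)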
+
+def quad(fintegrand,xmin,xmax,n=1e4):
+    spacings = na.logspace(na.log10(xmin),na.log10(xmax),n)
+    integrand_arr = fintegrand(spacings)
+    val = na.trapz(integrand_arr,dx=na.diff(spacings))
+    return val
+
+def a2b(at,Om0=0.27,Oml0=0.73,h=0.700):
+    def f_a2b(x):
+        val = 0.5*sqrt(Om0) / x**3.0
+        val /= sqrt(Om0/x**3.0 +Oml0 +(1.0 - Om0-Oml0)/x**2.0)
+        return val
+    #val, err = si.quad(f_a2b,1,at)
+    val = quad(f_a2b,1,at)
+    return val
+
+def b2a(bt,**kwargs):
+    #converts code time into expansion factor 
+    #if Om0 == 1 and OmL == 0 then b2a is (1 / (1-td))**2
+    #if bt < -190.0 or bt > -.10:  raise 'bt outside of range'
+    f_b2a = lambda at: a2b(at,**kwargs)-bt
+    return find_root(f_b2a,1e-4,1.1)
+    #return so.brenth(f_b2a,1e-4,1.1)
+    #return brent.brent(f_b2a)
+
+def a2t(at,Om0=0.27,Oml0=0.73,h=0.700):
+    integrand = lambda x : 1./(x*sqrt(Oml0+Om0*x**-3.0))
+    #current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6)
+    current_time = quad(integrand,1e-4,at)
+    #spacings = na.logspace(-5,na.log10(at),1e5)
+    #integrand_arr = integrand(spacings)
+    #current_time = na.trapz(integrand_arr,dx=na.diff(spacings))
+    current_time *= 9.779/h
+    return current_time
+
+def b2t(tb,n = 1e2,logger=None,**kwargs):
+    tb = na.array(tb)
+    if tb.shape == (): #scalar input
+        return a2t(b2a(float(tb)))
+    if len(tb) < n: n= len(tb)
+    age_min = a2t(b2a(tb.max(),**kwargs),**kwargs)
+    age_max = a2t(b2a(tb.min(),**kwargs),**kwargs)
+    tbs  = -1.*na.logspace(na.log10(-tb.min()),
+                          na.log10(-tb.max()),n)
+    ages = []
+    for i,tbi in enumerate(tbs):
+        ages += a2t(b2a(tbi)),
+        if logger: logger(i)
+    ages = na.array(ages)
+    fb2t = na.interp(tb,tbs,ages)
+    #fb2t = interp1d(tbs,ages)
+    return fb2t
+
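
Taken together, find_root/quad/a2b/b2a/a2t/b2t convert the code-time variable
stored with each star into a proper time; a2t folds in the 9.779/h factor, so
the result is in Gyr.  A sketch of the intended use (file_stars is a
placeholder for the stellar data filename):

    nstars, mass, imass, tbirth, met1, met2 = read_stars(file_stars, nstars, Nrow)
    ages = b2t(tbirth)            # array input: interpolated over n samples
    age0 = b2t(float(tbirth[0]))  # scalar input: direct a2t(b2a(tb))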


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/art/setup.py
--- a/yt/frontends/art/setup.py
+++ b/yt/frontends/art/setup.py
@@ -1,13 +1,10 @@
 #!/usr/bin/env python
 import setuptools
-import os
-import sys
-import os.path
+import os, sys, os.path
 
-
-def configuration(parent_package='', top_path=None):
+def configuration(parent_package='',top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('art', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
+    config = Configuration('art',parent_package,top_path)
+    config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
     return config


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/ramses/_ramses_reader.pyx
--- a/yt/frontends/ramses/_ramses_reader.pyx
+++ b/yt/frontends/ramses/_ramses_reader.pyx
@@ -829,7 +829,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    cdef void find_split(self, int *tr):
+    cdef void find_split(self, int *tr,):
         # First look for zeros
         cdef int i, center, ax
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
@@ -837,9 +837,9 @@
         axes = np.argsort(self.dd)[::-1]
         cdef np.int64_t *sig
         for axi in range(3):
-            ax = axes[axi]
-            center = self.dimensions[ax] / 2
-            sig = self.sigs[ax]
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
@@ -871,6 +871,61 @@
         tr[0] = 1; tr[1] = ax; tr[2] = zcp
         return
 
+    @cython.wraparound(False)
+    cdef void find_split_center(self, int *tr):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.int64_t *sig
+        for axi in range(3):
+            ax = axes[axi] #iterate over domain dimensions
+            center = self.dimensions[ax] / 2 
+            sig = self.sigs[ax] #an array for the dimension, number of cells along that dim
+            #frequently get stuck with many zeroes near the edge of the grid
+            #let's start from the middle, working out
+            for j in range(self.dimensions[ax]/2):
+                i = self.dimensions[ax]/2+j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+                i = self.dimensions[ax]/2-j
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
+
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def get_properties(self):
@@ -970,21 +1025,29 @@
         hilbert_indices[o] = h
     return hilbert_indices
 
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
-def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
+def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind, 
                             np.ndarray[np.int64_t, ndim=1] uind,
                             np.ndarray[np.int64_t, ndim=2] lefts,
                             np.ndarray[np.int64_t, ndim=2] files):
+    #ind are the hilbert indices
+    #uind are the unique hilbert indices
+    #count[n] keeps track of how many times the nth index of uind occurs in ind
+
     cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
     cdef int n, i
     cdef np.int64_t mi, mui
+    
+    #fill in the count array
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
             if uind[n] == mi:
                 count[n] += 1
                 break
+    
     cdef np.int64_t **alefts
     cdef np.int64_t **afiles
     afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
@@ -994,6 +1057,9 @@
     cdef np.ndarray[np.int64_t, ndim=2] left
     all_locations = []
     all_lefts = []
+    
+    #having measured the repetition of each hilbert index,
+    #we now know how much memory to allocate
     for n in range(uind.shape[0]):
         locations = np.zeros((count[n], 6), 'int64')
         left = np.zeros((count[n], 3), 'int64')
@@ -1002,7 +1068,11 @@
         afiles[n] = <np.int64_t *> locations.data
         alefts[n] = <np.int64_t *> left.data
         li[n] = 0
+    
     cdef int fi
+    #now fill all_locations and all_lefts sequentially through the C
+    #pointers, so the per-index arrays are complete when returned to python
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
@@ -1022,19 +1092,31 @@
         np.ndarray[np.int64_t, ndim=1] ind,
         np.ndarray[np.int64_t, ndim=2] left_index,
         np.ndarray[np.int64_t, ndim=2] fl,
-        int num_deep = 0):
-    cdef float min_eff = 0.1
+        int num_deep = 0,
+        float min_eff = 0.1,
+        int use_center=0,
+        long split_on_vol = 0):
     cdef ProtoSubgrid L, R
     cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
     cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
     cdef int tt, ax, fp, i, j, k, gi
     cdef int tr[3]
-    if num_deep > 40:
+    cdef long volume = 0
+    cdef int max_depth = 40
+    volume = dims[0]*dims[1]*dims[2]
+    if split_on_vol > 0 and volume < split_on_vol:
+        return [psg] #already small enough; stop splitting
+    if num_deep > max_depth:
         psg.efficiency = min_eff
         return [psg]
-    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+    if (psg.efficiency > min_eff or psg.efficiency < 0.0):
         return [psg]
-    psg.find_split(tr)
+    if not use_center:
+        psg.find_split(tr) #default edge-in search
+    else:
+        psg.find_split_center(tr) #center-out search
+
     tt = tr[0]
     ax = tr[1]
     fp = tr[2]
@@ -1059,7 +1141,7 @@
     if L.efficiency <= 0.0: rv_l = []
     elif L.efficiency < min_eff:
         rv_l = recursive_patch_splitting(L, dims_l, li_l,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_l = [L]
     R = ProtoSubgrid(li_r, dims_r, left_index, fl)
@@ -1067,7 +1149,7 @@
     if R.efficiency <= 0.0: rv_r = []
     elif R.efficiency < min_eff:
         rv_r = recursive_patch_splitting(R, dims_r, li_r,
-                left_index, fl, num_deep + 1)
+                left_index, fl, num_deep + 1, min_eff,use_center,split_on_vol)
     else:
         rv_r = [R]
     return rv_r + rv_l
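
The new find_split_center is the same zero-crossing heuristic as find_split,
but it searches for an empty plane outward from the middle of the grid rather
than edge-in.  A NumPy-only toy restatement (sig plays the role of
self.sigs[ax], the cell counts along one axis; not the compiled code):

    import numpy as np

    def find_zero_crossing_split(sig):
        n = len(sig)
        center = n // 2
        #prefer an empty plane, searching outward from the center
        for j in range(n // 2):
            for i in (center + j, center - j):
                if 0 < i < n - 1 and sig[i] == 0:
                    return i
        #otherwise take the strongest zero crossing of the discrete second
        #derivative, breaking ties toward the center
        sig2d = np.zeros(n, dtype=np.int64)
        sig2d[1:-1] = sig[:-2] - 2 * sig[1:-1] + sig[2:]
        best, where = 0, center
        for i in range(1, n - 1):
            if sig2d[i] * sig2d[i + 1] <= 0:
                strength = abs(sig2d[i] - sig2d[i + 1])
                if strength > best or (strength == best and
                                       abs(center - i) < abs(center - where)):
                    best, where = strength, i
        return where

    print find_zero_crossing_split(np.array([4, 5, 6, 0, 0, 6, 5, 4])) # -> 4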


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -125,6 +125,23 @@
     def _detect_fields(self):
         self.field_list = self.tree_proxy.field_names[:]
     
+    def _setup_field_list(self):
+        if self.parameter_file.use_particles:
+            # We know which particle fields will exist -- pending further
+            # changes in the future.
+            for field in art_particle_field_names:
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D,
+                # 2D and 3D fields.
+                self.pf.field_info.add_field(field, NullFunc,
+                                             convert_function=cf,
+                                             take_log=False, particle_type=True)
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)




diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/_amr_utils/CICDeposit.pyx
--- a/yt/utilities/_amr_utils/CICDeposit.pyx
+++ b/yt/utilities/_amr_utils/CICDeposit.pyx
@@ -1,8 +1,10 @@
 """
-Simle integrators for the radiative transfer equation
+Simple integrators for the radiative transfer equation
 
 Author: Britton Smith <brittonsmith at gmail.com>
 Affiliation: CASA/University of Colorado
+Author: Christopher Moody <juxtaposicion at gmail.com>
+Affiliation: cemoody at ucsc.edu
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2008 Matthew Turk.  All Rights Reserved.
@@ -107,3 +109,73 @@
         ind[2] = <int> ((pos_z[i] - left_edge[2]) * idds[2])
         sample[i] = arr[ind[0], ind[1], ind[2]]
     return sample
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    cdef long level_max = np.max(levels)
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+    return assign
+
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.int64_t level_max, 
+                              np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                              np.ndarray[np.float32_t, ndim=2] right_edges,
+                              np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                              np.ndarray[np.float32_t, ndim=1] pos_y,
+                              np.ndarray[np.float32_t, ndim=1] pos_z):
+    #for every cell, assign the particles belonging to it,
+    #skipping previously assigned particles
+    #Todo: instead of iterating every particles, could use kdtree 
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    index_lists = []
+    for level in range(level_max,0,-1):
+        #start with the finest level
+        for i in range(ncells):
+            #go through every cell on the finest level first
+            if not levels[i] == level: continue
+            index_list = []
+            for j in range(npart):
+                #iterate over all particles, skip if assigned
+                if assign[j]>-1: continue
+                if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+                    if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                        if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                            assign[j]=i
+                            index_list += j,
+            index_lists += index_list,
+    return assign,index_lists
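+# NumPy sketch of the same assignment (a reference implementation, not the
+# compiled code): finest level wins, already-assigned particles are skipped,
+# and positions are packed into one (npart,3) array instead of pos_x/y/z:
+#
+#   def assign_particles_to_cells_numpy(levels, left_edges, right_edges, pos):
+#       assign = np.zeros(pos.shape[0], dtype='int32') - 1
+#       for level in range(levels.max(), 0, -1):
+#           for i in np.nonzero(levels == level)[0]:
+#               inside = np.all((left_edges[i] <= pos) &
+#                               (pos <= right_edges[i]), axis=1)
+#               assign[(assign < 0) & inside] = i
+#       return assign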
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/_amr_utils/DepthFirstOctree.pyx
--- a/yt/utilities/_amr_utils/DepthFirstOctree.pyx
+++ b/yt/utilities/_amr_utils/DepthFirstOctree.pyx
@@ -27,21 +27,6 @@
 cimport numpy as np
 cimport cython
 
-cdef extern from "math.h":
-    double exp(double x)
-    float expf(float x)
-    long double expl(long double x)
-    double floor(double x)
-    double ceil(double x)
-    double fmod(double x, double y)
-    double log2(double x)
-    long int lrint(double x)
-    double fabs(double x)
-    double cos(double x)
-    double sin(double x)
-    double asin(double x)
-    double acos(double x)
-
 cdef class position:
     cdef public int output_pos, refined_pos
     def __cinit__(self):
@@ -81,6 +66,7 @@
                             np.ndarray[np.float64_t, ndim=2] output,
                             np.ndarray[np.int32_t, ndim=1] refined,
                             OctreeGridList grids):
+    #cdef int s = curpos
     cdef int i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
@@ -93,8 +79,13 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
+    #here we go over the 8 octants;
+    #in general, a mesh cell on this level may have more than 8 children
+    #on the next level, so we compute the floating-point center (cx,cy,cz)
+    #of each child cell and from that derive the child cell indices
     for i_off in range(i_f):
-        i = i_off + i_i
+        i = i_off + i_i #index
         cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
@@ -118,19 +109,20 @@
                     child_i = int((cx - child_leftedges[0])/child_dx)
                     child_j = int((cy - child_leftedges[1])/child_dx)
                     child_k = int((cz - child_leftedges[2])/child_dx)
-                    s = RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
+                    # s = Recurs.....
+                    RecurseOctreeDepthFirst(child_i, child_j, child_k, 2, 2, 2,
                                         curpos, ci - grid.offset, output, refined, grids)
-    return s
 
+@cython.boundscheck(False)
 def RecurseOctreeByLevels(int i_i, int j_i, int k_i,
                           int i_f, int j_f, int k_f,
-                          np.ndarray[np.int64_t, ndim=1] curpos,
+                          np.ndarray[np.int32_t, ndim=1] curpos,
                           int gi, 
                           np.ndarray[np.float64_t, ndim=2] output,
-                          np.ndarray[np.int64_t, ndim=2] genealogy,
+                          np.ndarray[np.int32_t, ndim=2] genealogy,
                           np.ndarray[np.float64_t, ndim=2] corners,
                           OctreeGridList grids):
-    cdef np.int64_t i, i_off, j, j_off, k, k_off, ci, fi
+    cdef np.int32_t i, i_off, j, j_off, k, k_off, ci, fi
     cdef int child_i, child_j, child_k
     cdef OctreeGrid child_grid
     cdef OctreeGrid grid = grids[gi-1]
@@ -143,11 +135,11 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
-    cdef np.int64_t cp
-    cdef int s = 0
+    cdef int cp
+    s = 0 #ensure s is defined even when nothing recurses
     for i_off in range(i_f):
         i = i_off + i_i
         cx = (leftedges[0] + i*dx)
+        #if i_f > 2: print k, cz #debug leftover; k and cz are stale here
         for j_off in range(j_f):
             j = j_off + j_i
             cy = (leftedges[1] + j*dx)
@@ -167,15 +159,16 @@
                     child_grid = grids[ci-1]
                     child_dx = child_grid.dx[0]
                     child_leftedges = child_grid.left_edges
-                    child_i = lrint((cx-child_leftedges[0])/child_dx)
-                    child_j = lrint((cy-child_leftedges[1])/child_dx)
-                    child_k = lrint((cz-child_leftedges[2])/child_dx)
+                    child_i = int((cx-child_leftedges[0])/child_dx)
+                    child_j = int((cy-child_leftedges[1])/child_dx)
+                    child_k = int((cz-child_leftedges[2])/child_dx)
                     # set current child id to id of next cell to examine
                     genealogy[cp, 0] = curpos[level+1] 
                     # set next parent id to id of current cell
                     genealogy[curpos[level+1]:curpos[level+1]+8, 1] = cp
-                    RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
+                    s = RecurseOctreeByLevels(child_i, child_j, child_k, 2, 2, 2,
                                               curpos, ci, output, genealogy,
                                               corners, grids)
                 curpos[level] += 1
-    return
+    return s
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -142,8 +142,6 @@
     # points to the start of the record *following* the reading of iOctFree and
     # nOct.  For those following along at home, we only need to read:
     #   iOctPr, iOctLv
-    print min_level, max_level 
-    
     cdef int nchild = 8
     cdef int i, Lev, cell_ind, iOct, nLevel, nLevCells, ic1
     cdef np.int64_t next_record
@@ -170,7 +168,7 @@
         fread(&readin, sizeof(int), 1, f); FIX_LONG(readin)
         iOct = iHOLL[Level] - 1
         nLevel = iNOLL[Level]
-        print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
+        #print "Reading Hierarchy for Level", Lev, Level, nLevel, iOct
         #print ftell(f)
         for ic1 in range(nLevel):
             iOctMax = max(iOctMax, iOct)
@@ -218,7 +216,7 @@
         
         #find the length of all of the children section
         child_record = ftell(f) +  (next_record+2*sizeof(int))*nLevel*nchild
-        print 'Skipping over hydro vars', ftell(f), child_record
+        #print 'Skipping over hydro vars', ftell(f), child_record
         fseek(f, child_record, SEEK_SET)
         
         # for ic1 in range(nLevel * nchild):
@@ -288,9 +286,9 @@
 def read_art_grid(int varindex, 
               np.ndarray[np.int64_t, ndim=1] start_index,
               np.ndarray[np.int32_t, ndim=1] grid_dims,
-              np.ndarray[np.float64_t, ndim=3] data,
-              np.ndarray[np.int32_t, ndim=3] filled,
-              np.ndarray[np.float64_t, ndim=2] level_data,
+              np.ndarray[np.float32_t, ndim=3] data,
+              np.ndarray[np.uint8_t, ndim=3] filled,
+              np.ndarray[np.float32_t, ndim=2] level_data,
               int level, int ref_factor,
               component_grid_info):
     cdef int gi, i, j, k, domain, offset, grid_id
@@ -312,7 +310,7 @@
         domain = ogrid_info[0]
         #print "Loading", domain, ogrid_info
         grid_id = ogrid_info[1]
-        og_start_index = ogrid_info[3:]
+        og_start_index = ogrid_info[3:6] #the oct left edge
         for i in range(2*ref_factor):
             di = i + og_start_index[0] * ref_factor
             if di < start_index[0] or di >= end_index[0]: continue
@@ -350,6 +348,30 @@
     return to_fill
 
 @cython.cdivision(True)
+@cython.boundscheck(True)
+@cython.wraparound(False)
+def fill_child_mask(np.ndarray[np.int64_t, ndim=2] file_locations,
+                    np.ndarray[np.int64_t, ndim=1] grid_le,
+                    np.ndarray[np.uint8_t, ndim=4] art_child_masks,
+                    np.ndarray[np.uint8_t, ndim=3] child_mask):
+
+    #loop over file_locations, for each row extracting the index & left edge
+    #of the oct we will pull from art_child_masks,
+    #then use the art_child_masks info to fill in child_mask
+    cdef int i,ioct,x,y,z
+    cdef int nocts = file_locations.shape[0]
+    cdef int lex,ley,lez
+    for i in range(nocts):
+        ioct = file_locations[i,1] #from fortran to python indexing?
+        lex = file_locations[i,3] - grid_le[0] #the oct left edge x
+        ley = file_locations[i,4] - grid_le[1]
+        lez = file_locations[i,5] - grid_le[2]
+        for x in range(2):
+            for y in range(2):
+                for z in range(2):
+                    child_mask[lex+x,ley+y,lez+z] = art_child_masks[ioct,x,y,z]
+
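+# pure-python sketch of the loop above, for reference:
+#   for i in range(file_locations.shape[0]):
+#       ioct = file_locations[i,1]
+#       lex,ley,lez = file_locations[i,3:6] - grid_le
+#       child_mask[lex:lex+2,ley:ley+2,lez:lez+2] = art_child_masks[ioct]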
+@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def read_castro_particles(char *fn, int offset, int fieldindex, int nfields,


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/__init__.py
@@ -0,0 +1,2 @@
+from conversion import *
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/conversion/__init__.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/__init__.py
@@ -0,0 +1,3 @@
+from conversion_abc import Converter
+from conversion_athena import AthenaDistributedConverter, AthenaConverter
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/conversion/conversion_abc.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_abc.py
@@ -0,0 +1,7 @@
+
+class Converter(object):
+    def __init__(self, basename, outname=None):
+        self.basename = basename
+        self.outname = outname
+    def convert(self):
+        pass
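
The converters are meant to be driven like this (a sketch; "kh.0010.vtk" is a
hypothetical Athena dump name, and the id*/ directory layout is the one
read_and_write_hierarchy below expects):

    from yt.utilities.grid_data_format import AthenaDistributedConverter
    conv = AthenaDistributedConverter("kh.0010.vtk", source_dir="./")
    conv.convert()   # writes kh.0010.gdf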


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/conversion/conversion_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py
@@ -0,0 +1,503 @@
+import os
+import weakref
+import numpy as na
+import h5py as h5
+from conversion_abc import *
+from glob import glob
+from collections import \
+    defaultdict
+from string import \
+    strip, \
+    rstrip
+from stat import \
+    ST_CTIME
+
+translation_dict = {}
+translation_dict['density'] = 'density'
+translation_dict['total_energy'] = 'specific_energy'
+translation_dict['velocity_x'] = 'velocity_x'
+translation_dict['velocity_y'] = 'velocity_y'
+translation_dict['velocity_z'] = 'velocity_z'
+translation_dict['cell_centered_B_x'] = 'mag_field_x'
+translation_dict['cell_centered_B_y'] = 'mag_field_y'
+translation_dict['cell_centered_B_z'] = 'mag_field_z'
+
+class AthenaDistributedConverter(Converter):
+    def __init__(self, basename, outname=None, source_dir=None, field_conversions=None):
+        self.fields = []
+        self.current_time=0.0
+        name = basename.split('.')
+        self.ddn = int(name[1])
+        if source_dir is None:
+            source_dir = './'
+        self.source_dir = source_dir+'/'
+        self.basename = name[0]
+        if outname is None:
+            outname = self.basename+'.%04i'%self.ddn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+        self.handle = None
+
+    def parse_line(self,line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+            self.current_time = grid['time']
+        elif 'PRIMITIVE' in splitup:
+            grid['time'] = float(splitup[4].rstrip(','))
+            grid['level'] = int(splitup[6].rstrip(','))
+            grid['domain'] = int(splitup[8].rstrip(','))
+            self.current_time = grid['time']
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
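+    # e.g. the VTK header line "DIMENSIONS 65 65 65" yields
+    # grid['dimensions'] == array([65, 65, 65])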
+
+    def write_gdf_field(self, fn, grid_number, field, data):
+        f = self.handle
+        ## --------- Store Grid Data --------- ##
+        if 'grid_%010i'%grid_number not in f['data'].keys():
+            g = f['data'].create_group('grid_%010i'%grid_number)
+        else:
+            g = f['data']['grid_%010i'%grid_number]
+        name = field
+        try:
+            name = translation_dict[name]
+        except KeyError:
+            pass
+        # print 'Writing %s' % name
+        if name not in g.keys():
+            g.create_dataset(name,data=data)
+        
+
+
+    def read_and_write_hierarchy(self,basename, ddn, gdf_name):
+        """ Read Athena legacy vtk file from multiple cpus """
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        grid_dims = na.empty([N,3],dtype='int64')
+        grid_left_edges = na.empty([N,3],dtype='float64')
+        grid_dds = na.empty([N,3],dtype='float64')
+        grid_levels = na.zeros(N,dtype='int64')
+        grid_parent_ids = -1*na.ones(N,dtype='int64')
+        grid_particle_counts = na.zeros([N,1],dtype='int64')
+
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+
+            print 'Reading file %s' % fn
+            f = open(fn,'rb')
+            grid = {}
+            grid['read_field'] = None
+            grid['read_type'] = None
+            table_read=False
+            line = f.readline()
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if "SCALAR" in line.strip().split():
+                    break
+                if "VECTOR" in line.strip().split():
+                    break
+                if 'TABLE' in line.strip().split():
+                    break
+                if len(line) == 0: break
+                del line
+                line = f.readline()
+
+            if len(line) == 0: break
+            
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+                grid['dimensions'][grid['dimensions']==0]=1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            # Append all hierarchy info before reading this grid's data
+            grid_dims[i]=grid['dimensions']
+            grid_left_edges[i]=grid['left_edge']
+            grid_dds[i]=grid['dds']
+            #grid_ncells[i]=grid['ncells']
+            del grid
+
+            f.close()
+            del f
+        f = self.handle 
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+
+        gles = grid_left_edges
+        gdims = grid_dims
+        dle = na.min(gles,axis=0)
+        dre = na.max(gles+grid_dims*grid_dds,axis=0)
+        glis = ((gles - dle)/grid_dds).astype('int64')
+        gris = glis + gdims
+
+        ddims = (dre-dle)/grid_dds[0]
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        # grid_level
+        level = f.create_dataset('grid_level',data=grid_levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        part_count = f.create_dataset('grid_particle_count',data=grid_particle_counts)
+
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=grid_parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = ddims
+        pars_g.attrs['current_time'] = self.current_time
+        pars_g.attrs['domain_left_edge'] = dle
+        pars_g.attrs['domain_right_edge'] = dre
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(1)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        # pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = 1.0
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        #f.close()
+
+
+    def read_and_write_data(self, basename, ddn, gdf_name):
+        proc_names = glob(self.source_dir+'id*')
+        #print 'Reading a dataset from %i Processor Files' % len(proc_names)
+        N = len(proc_names)
+        for i in range(N):
+            if i == 0:
+                fn = self.source_dir+'id%i/'%i + basename + '.%04i'%ddn + '.vtk'
+            else:
+                fn = self.source_dir+'id%i/'%i + basename + '-id%i'%i + '.%04i'%ddn + '.vtk'
+            f = open(fn,'rb')
+            #print 'Reading data from %s' % fn
+            line = f.readline()
+            while line != '':
+                # print line
+                if len(line) == 0: break
+                splitup = line.strip().split()
+
+                if "DIMENSIONS" in splitup:
+                    grid_dims = na.array(splitup[-3:]).astype('int')
+                    line = f.readline()
+                    continue
+                elif "CELL_DATA" in splitup:
+                    grid_ncells = int(splitup[-1])
+                    line = f.readline()
+                    if na.prod(grid_dims) != grid_ncells:
+                        grid_dims -= 1
+                        grid_dims[grid_dims==0]=1
+                    if na.prod(grid_dims) != grid_ncells:
+                        print 'product of dimensions %i not equal to number of cells %i' % \
+                              (na.prod(grid_dims), grid_ncells)
+                        raise TypeError
+                    break
+                else:
+                    del line
+                    line = f.readline()
+            read_table = False
+            while line != '':
+                if len(line) == 0: break
+                splitup = line.strip().split()
+                if 'SCALARS' in splitup:
+                    field = splitup[1]
+                    if not read_table:
+                        line = f.readline() # Read the lookup table line
+                        read_table = True
+                    data = na.fromfile(f, dtype='>f4', count=grid_ncells).reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field)
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field, data)
+                    read_table=False
+
+                elif 'VECTORS' in splitup:
+                    field = splitup[1]
+                    data = na.fromfile(f, dtype='>f4', count=3*grid_ncells)
+                    data_x = data[0::3].reshape(grid_dims,order='F')
+                    data_y = data[1::3].reshape(grid_dims,order='F')
+                    data_z = data[2::3].reshape(grid_dims,order='F')
+                    if i == 0:
+                        self.fields.append(field+'_x')
+                        self.fields.append(field+'_y')
+                        self.fields.append(field+'_z')
+
+                    # print 'writing field %s' % field
+                    self.write_gdf_field(gdf_name, i, field+'_x', data_x)
+                    self.write_gdf_field(gdf_name, i, field+'_y', data_y)
+                    self.write_gdf_field(gdf_name, i, field+'_z', data_z)
+                    del data, data_x, data_y, data_z
+                del line
+                line = f.readline()
+            f.close()
+            del f
+
+        f = self.handle 
+        field_g = f['field_types']
+        # Add Field Attributes
+        for name in self.fields:
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except KeyError:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+            
+
+    def convert(self, hierarchy=True, data=True):
+        self.handle = h5.File(self.outname, 'a')
+        if hierarchy:
+            self.read_and_write_hierarchy(self.basename, self.ddn ,self.outname)
+        if data:
+            self.read_and_write_data(self.basename, self.ddn ,self.outname)
+        self.handle.close()
+
+class AthenaConverter(Converter):
+    def __init__(self, basename, outname=None, field_conversions=None):
+        self.fields = []
+        self.basename = basename
+        name = basename.split('.')
+        fn = '%s.%04i'%(name[0],int(name[1]))
+        self.ddn = int(name[1])
+        self.basename = fn
+        if outname is None:
+            outname = fn+'.gdf'
+        self.outname = outname
+        if field_conversions is None:
+            field_conversions = {}
+        self.field_conversions = field_conversions
+
+
+    def parse_line(self, line, grid):
+    #    print line
+        # grid is a dictionary
+        splitup = line.strip().split()
+        if "vtk" in splitup:
+            grid['vtk_version'] = splitup[-1]
+        elif "Really" in splitup:
+            grid['time'] = splitup[-1]
+        elif "DIMENSIONS" in splitup:
+            grid['dimensions'] = na.array(splitup[-3:]).astype('int')
+        elif "ORIGIN" in splitup:
+            grid['left_edge'] = na.array(splitup[-3:]).astype('float64')
+        elif "SPACING" in splitup:
+            grid['dds'] = na.array(splitup[-3:]).astype('float64')
+        elif "CELL_DATA" in splitup:
+            grid["ncells"] = int(splitup[-1])
+        elif "SCALARS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'scalar'
+        elif "VECTORS" in splitup:
+            field = splitup[1]
+            grid['read_field'] = field
+            grid['read_type'] = 'vector'
+        
+    def read_grid(self, filename):
+        """ Read Athena legacy vtk file from single cpu """
+        f = open(filename,'rb')
+        #print 'Reading from %s'%filename
+        grid = {}
+        grid['read_field'] = None
+        grid['read_type'] = None
+        table_read=False
+        line = f.readline()
+        while line != '':
+            while grid['read_field'] is None:
+                self.parse_line(line, grid)
+                if grid['read_type'] == 'vector':
+                    break
+                if table_read is False:             
+                    line = f.readline()
+                if 'TABLE' in line.strip().split():
+                    table_read = True
+                if len(line) == 0: break
+            #    print line
+
+            if len(line) == 0: break
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                grid['dimensions'] -= 1
+            if na.prod(grid['dimensions']) != grid['ncells']:
+                print 'product of dimensions %i not equal to number of cells %i' % \
+                      (na.prod(grid['dimensions']), grid['ncells'])
+                raise TypeError
+
+            if grid['read_type'] == 'scalar':
+                grid[grid['read_field']] = \
+                    na.fromfile(f, dtype='>f4', count=grid['ncells']).reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field'])
+            elif grid['read_type'] == 'vector':
+                data = na.fromfile(f, dtype='>f4', count=3*grid['ncells'])
+                grid[grid['read_field']+'_x'] = data[0::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_y'] = data[1::3].reshape(grid['dimensions'],order='F')
+                grid[grid['read_field']+'_z'] = data[2::3].reshape(grid['dimensions'],order='F')
+                self.fields.append(grid['read_field']+'_x')
+                self.fields.append(grid['read_field']+'_y')
+                self.fields.append(grid['read_field']+'_z')
+            else:
+                raise TypeError
+            grid['read_field'] = None
+            grid['read_type'] = None
+            line = f.readline()
+            if len(line) == 0: break
+        grid['right_edge'] = grid['left_edge']+grid['dds']*(grid['dimensions'])
+        return grid
+
+    def write_to_gdf(self, fn, grid):
+        f = h5.File(fn,'a')
+
+        ## --------- Begin level nodes --------- ##
+        g = f.create_group('gridded_data_format')
+        g.attrs['format_version']=na.float32(1.0)
+        g.attrs['data_software']='athena'
+        data_g = f.create_group('data')
+        field_g = f.create_group('field_types')
+        part_g = f.create_group('particle_types')
+        pars_g = f.create_group('simulation_parameters')
+
+        dle = grid['left_edge'] # True only in this case of one grid for the domain
+        gles = na.array([grid['left_edge']])
+        gdims = na.array([grid['dimensions']])
+        glis = ((gles - dle)/grid['dds']).astype('int64')
+        gris = glis + gdims
+
+        # grid_left_index
+        gli = f.create_dataset('grid_left_index',data=glis)
+        # grid_dimensions
+        gdim = f.create_dataset('grid_dimensions',data=gdims)
+
+        levels = na.array([0]).astype('int64') # unigrid example
+        # grid_level
+        level = f.create_dataset('grid_level',data=levels)
+
+        ## ----------QUESTIONABLE NEXT LINE--------- ##
+        # This data needs two dimensions for now. 
+        n_particles = na.array([[0]]).astype('int64')
+        #grid_particle_count
+        part_count = f.create_dataset('grid_particle_count',data=n_particles)
+
+        # Assume -1 means no parent.
+        parent_ids = na.array([-1]).astype('int64')
+        # grid_parent_id
+        pids = f.create_dataset('grid_parent_id',data=parent_ids)
+
+        ## --------- Done with top level nodes --------- ##
+
+        f.create_group('hierarchy')
+
+        ## --------- Store Grid Data --------- ##
+
+        g0 = data_g.create_group('grid_%010i'%0)
+        for field in self.fields:
+            name = field
+            if field in translation_dict.keys():
+                name = translation_dict[name]
+            if not name in g0.keys(): 
+                g0.create_dataset(name,data=grid[field])
+
+        ## --------- Store Particle Data --------- ##
+
+        # Nothing to do
+
+        ## --------- Attribute Tables --------- ##
+
+        pars_g.attrs['refine_by'] = na.int64(1)
+        pars_g.attrs['dimensionality'] = na.int64(3)
+        pars_g.attrs['domain_dimensions'] = grid['dimensions']
+        try:
+            pars_g.attrs['current_time'] = grid['time']
+        except KeyError:
+            pars_g.attrs['current_time'] = 0.0
+        pars_g.attrs['domain_left_edge'] = grid['left_edge'] # For Now
+        pars_g.attrs['domain_right_edge'] = grid['right_edge'] # For Now
+        pars_g.attrs['unique_identifier'] = 'athenatest'
+        pars_g.attrs['cosmological_simulation'] = na.int64(0)
+        pars_g.attrs['num_ghost_zones'] = na.int64(0)
+        pars_g.attrs['field_ordering'] = na.int64(0)
+        pars_g.attrs['boundary_conditions'] = na.int64([0]*6) # For Now
+
+        # Extra pars:
+        pars_g.attrs['n_cells'] = grid['ncells']
+        pars_g.attrs['vtk_version'] = grid['vtk_version']
+
+        # Add Field Attributes
+        for name in g0.keys():
+            tname = name
+            try:
+                tname = translation_dict[name]
+            except KeyError:
+                pass
+            this_field = field_g.create_group(tname)
+            if name in self.field_conversions.keys():
+                this_field.attrs['field_to_cgs'] = self.field_conversions[name]
+            else:
+                this_field.attrs['field_to_cgs'] = na.float64('1.0') # For Now
+
+        # Add particle types
+        # Nothing to do here
+
+        # Add particle field attributes
+        f.close()
+
+    def convert(self):
+        grid = self.read_grid(self.basename+'.vtk')
+        self.write_to_gdf(self.outname,grid)
+        
+# import sys
+# if __name__ == '__main__':
+#     n = sys.argv[-1]
+#     n = n.split('.')
+#     fn = '%s.%04i'%(n[0],int(n[1]))
+#     grid = read_grid(fn+'.vtk')
+#     write_to_hdf5(fn+'.gdf',grid)
+    

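A note on the VECTORS handling in the converters above: the three vector
components are interleaved in the VTK stream, so they are recovered with
strided slices and Fortran-order reshapes.  A minimal, self-contained
sketch of that trick (synthetic data; the 4x4x4 shape is illustrative):

    import numpy as na

    grid_dims = (4, 4, 4)
    ncells = int(na.prod(grid_dims))
    # Interleaved stream: x0, y0, z0, x1, y1, z1, ...
    data = na.arange(3 * ncells, dtype='>f4')
    data_x = data[0::3].reshape(grid_dims, order='F')
    data_y = data[1::3].reshape(grid_dims, order='F')
    data_z = data[2::3].reshape(grid_dims, order='F')
    assert data_x[0, 0, 0] == 0.0 and data_y[0, 0, 0] == 1.0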

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/conversion/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/conversion/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('conversion', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/docs/IRATE_notes.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/IRATE_notes.txt
@@ -0,0 +1,39 @@
+Here is info from Erik Tollerud about the IRATE data format.
+
+The bitbucket project is at https://bitbucket.org/eteq/irate-format
+and I've posted a copy of the docs at
+http://www.physics.uci.edu/~etolleru/irate-docs/ , in particular
+http://www.physics.uci.edu/~etolleru/irate-docs/formatspec.html ,
+which details the actual requirements for data to fit in the format.
+As far as I can tell, the following steps are needed to make GDF fit
+inside the IRATE format:
+
+*move everything except "/simulation_parameters" into a group named "/GridData"
+
+*rename "/simulation_parameters" to "SimulationParameters"
+
+*remove the 'field_types' group (this is not absolutely necessary, but
+the convention we had in mind for IRATE is that the dataset names
+themselves (e.g. the datasets like /data/gridxxxxxx/density) serve as
+the field definitions).
+
+* The unit information that's in 'field_types' should then be
+attributes in either "/GridData" or "/GridData/data" following the
+naming scheme e.g. "densityunitcgs" following the unit form given in
+the IRATE doc and an additional attribute e.g. "densityunitname"
+should be added with the human-readable name of the unit. This unit
+information can also live at the dataset level, but it probably makes
+more sense to put it instead at the higher level (IRATE supports both
+ways of doing it).
+
+* The Cosmology group (as defined in the IRATE specification) must be
+added - for simulations that are not technically "cosmological", you
+can just use one of the default cosmologies (WMAP7 is a reasonable
+choice - there's a function in the IRATE tools that automatically
+takes care of all the details for this).
+
+* optional: redo all the group names to follow the CamelCase
+convention - that's what we've been using elsewhere in IRATE.  This is
+an arbitrary choice, but it would be nice for it to be consistent
+throughout the format.
+

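A sketch of the first two restructuring steps with h5py (assuming an
existing GDF file named example.gdf; h5py's Group.move handles both the
relocation and the rename):

    import h5py as h5

    f = h5.File("example.gdf", "a")
    f.create_group("GridData")
    # Move everything except the parameters group under /GridData.
    for name in list(f.keys()):
        if name not in ("GridData", "simulation_parameters"):
            f.move(name, "GridData/" + name)
    f.move("simulation_parameters", "SimulationParameters")
    f.close()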

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/docs/gdf_specification.txt
--- /dev/null
+++ b/yt/utilities/grid_data_format/docs/gdf_specification.txt
@@ -0,0 +1,282 @@
+Gridded Data Format
+===================
+
+This is a pre-release of version 1.0 of this format.  Lots of formats have come
+before, but this one is simple and will work with yt; the idea is to create an
+import and export function in yt that will read this, so that other codes (such
+as ZEUS-MP) can export directly to it or convert their data to it, and so that
+yt can export to it from any format it recognizes and reads.
+
+Caveats and Notes
+-----------------
+
+#. We avoid having many attributes on many nodes, as access can be quite slow
+#. Cartesian data only for now
+#. All grids must have the same number of ghost zones.
+#. If ``/grid_parent_id`` does not exist, parentage relationships will be
+   reconstructed and assumed to allow multiple grids
+#. No parentage can skip levels
+#. All grids are at the same time
+#. This format is designed for single-fluid calculations (with color fields)
+   but it should be viewed as extensible to multiple-fluids.
+#. All fluid quantities are assumed to be in every grid, filling every zone.  Inside
+   a given grid, for a given particle type, all the affiliated fields must be the
+   same length.  (For instance, the dark matter velocity fields must all have the
+   same length.)
+#. Everything is in a single file; for extremely large datasets, the user may
+   utilize HDF5 external links to link to files other than the primary.  (This
+   enables, for instance, Enzo datasets to have only a thin wrapper that creates
+   this format.)
+#. All fluid fields in this version of the format are assumed to have the
+   dimensionality of the grid they reside in plus any ghost zones, plus any
+   additional dimensionality required by the staggering property.
+#. Particles may have dataspaces affiliated with them.  (See Enzo's
+   OutputParticleTypeGrouping for more information.)  This enables a light
+   wrapper around data formats with interspersed particle types.
+#. Boundary conditions are very simply specified -- future revisions
+   will feature more complicated and rich specifications for the boundary.
+
+Furthermore, we make a distinction between fluid quantities and particle
+quantities.  Particles remain affiliated with grid nodes.  Positions of
+particles are global, but this will change with future versions of this
+document.
+
+Format Declaration
+------------------
+
+The file type is HDF5.  We require version 1.8 or greater.  At the root level,
+this group must exist: ::
+
+   /gridded_data_format
+
+This must contain the (float) attribute ``format_version``.  This document
+describes version 1.0.  Optional attributes may exist:
+
+``data_software``
+   string, references the application creating the file, not the
+   author of the data
+``data_software_version``
+   string, should reference a unique version number
+``data_author``
+   string, references the person or persons who created the data,
+   should include an email address
+``data_comment``
+   string, anything about the data
+
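+For example, the declaration could be written with h5py (a sketch; the
+file name and the optional attribute values are illustrative): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "w")
+   g = f.create_group("gridded_data_format")
+   g.attrs["format_version"] = na.float32(1.0)  # required
+   g.attrs["data_software"] = "athena"          # optional
+   g.attrs["data_comment"] = "example file"     # optional
+   f.close()
+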
+Top Level Nodes
+---------------
+
+At least five top-level groups must exist, although some may be empty. ::
+
+   /gridded_data_format
+   /data
+   /simulation_parameters
+   /field_types
+   /particle_types
+
+Additionally, the grid structure elements must exist.  The 0-indexed position
+into these arrays defines a unique "Grid ID".
+
+``/grid_left_index``
+   (int64, Nx3): global, relative to current level, and only the active region
+``/grid_dimensions``
+   (int64, Nx3): only the active regions
+``/grid_level``
+   (int64, N): level, indexed by zero
+``/grid_particle_count``
+   (int64, N): total number of particles.  (May change in subsequent versions.)
+``/grid_parent_id``
+   (int64, N): optional, may only reference a single parent
+
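+Continuing the sketch above, the structure datasets for a two-grid
+hierarchy (one root grid with one refined child; all values are
+illustrative) could be written as: ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   for name in ["data", "simulation_parameters", "field_types",
+                "particle_types"]:
+       f.create_group(name)
+   f.create_dataset("grid_left_index",
+       data=na.array([[0, 0, 0], [16, 16, 16]], dtype="int64"))
+   f.create_dataset("grid_dimensions",
+       data=na.array([[32, 32, 32], [16, 16, 16]], dtype="int64"))
+   f.create_dataset("grid_level", data=na.array([0, 1], dtype="int64"))
+   # Grid 0 carries the particles used in the sketches below.
+   f.create_dataset("grid_particle_count",
+       data=na.array([8, 0], dtype="int64"))
+   f.create_dataset("grid_parent_id", data=na.array([-1, 0], dtype="int64"))
+   f.close()
+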
+Grid Fields
+-----------
+
+Underneath ``/data/`` there must be entries for every grid, of the format
+``/data/grid_%010i``.  These grids need no attributes, and underneath them
+datasets live.
+
+Fluid Fields
+++++++++++++
+
+For every grid we then define ``/data/grid_%010i/%(field)s``, where
+``%(field)s`` draws from all of the fields defined.  We define no
+standard for which fields must be present, only the names and units.  Units
+should always be *proper* cgs (or conversion factors should be supplied, below), and
+field names should be drawn from this list, with these names.  Not all fields
+must be represented.  Fields must extend beyond the active region if ghost zones
+are included.  All pre-defined fields are assumed to be cell-centered unless this
+is overridden in ``field_types``.
+
+  * ``density`` (g/cc)
+  * ``temperature`` (K)
+  * ``specific_thermal_energy`` (erg/g)
+  * ``specific_energy`` (erg/g, includes kinetic and magnetic)
+  * ``magnetic_energy`` (erg/g)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``species_density_%s`` (g/cc) where %s is the species name including ionization
+    state, such as H2I, HI, HII, CO, "elec" for electron
+  * ``mag_field_x``
+  * ``mag_field_y``
+  * ``mag_field_z``
+
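+As a sketch, a cell-centered density field for the root grid of the
+example hierarchy above (synthetic values): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   g0 = f["data"].create_group("grid_%010i" % 0)
+   g0.create_dataset("density",
+       data=na.ones((32, 32, 32), dtype="float64"))  # g/cc
+   f.close()
+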
+Particle Fields
++++++++++++++++
+
+Particles are more expensive to sort and identify based on "type" -- for
+instance, dark matter versus star particles.  The particles should be separated
+based on type, under the group ``/data/grid_%010i/particles/``.
+
+The particles group will have sub-groups, each of which will be named after the
+type of particle it represents.  We only specify "dark_matter" as a type;
+anything else must be specified as described below.
+
+Each node, for instance ``/data/grid_%010i/particles/dark_matter/``, must
+contain the following fields:
+
+  * ``mass`` (g)
+  * ``id``
+  * ``position_x`` (in physical units)
+  * ``position_y`` (in physical units)
+  * ``position_z`` (in physical units)
+  * ``velocity_x`` (cm/s)
+  * ``velocity_y`` (cm/s)
+  * ``velocity_z`` (cm/s)
+  * ``dataspace`` (optional) an HDF5 dataspace to be used when opening
+    all affiliated fields.   If this is to be used, it must be appropriately set in
+    the particle type definition.  This is of type ``H5T_STD_REF_DSETREG``.
+    (See Enzo's OutputParticleTypeGrouping for an example.)
+
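+A sketch of a matching dark matter group for the root grid (eight
+illustrative particles; the optional dataspace entry is omitted): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   dm = f["data/grid_%010i" % 0].create_group("particles/dark_matter")
+   dm.create_dataset("mass", data=na.ones(8) * 1.0e40)  # g
+   dm.create_dataset("id", data=na.arange(8, dtype="int64"))
+   for ax in "xyz":
+       dm.create_dataset("position_" + ax, data=na.random.random(8))
+       dm.create_dataset("velocity_" + ax, data=na.zeros(8))  # cm/s
+   f.close()
+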
+Additional Fields
++++++++++++++++++
+
+Any additional fields from the data can be added, but must have a corresponding
+entry in the root field table (described below.)  The naming scheme is to be as
+explicit as possible, with units in cgs (or a conversion factor to the standard
+cgs unit, in the field table.)
+
+Attribute Table
+---------------
+
+In the root node, we define several groups which contain attributes.
+
+Simulation Parameters
++++++++++++++++++++++
+
+These attributes will all be associated with ``/simulation_parameters``.
+
+``refine_by``
+   relative global refinement
+``dimensionality``
+   1-, 2- or 3-D data
+``domain_dimensions``
+   dimensions in the top grid
+``current_time``
+   current time in simulation, in seconds, from “start” of simulation
+``domain_left_edge``
+   the left edge of the domain, in cm
+``domain_right_edge``
+   the right edge of the domain, in cm
+``unique_identifier``
+   regarded as a string, but can be anything
+``cosmological_simulation``
+   0 or 1
+``num_ghost_zones``
+   integer
+``field_ordering``
+   integer: 0 for C, 1 for Fortran
+``boundary_conditions``
+   integer (6): 0 for periodic, 1 for mirrored, 2 for outflow.  Needs one for each face
+   of the cube.  Any past the dimensionality should be set to -1.  The order of specification
+   goes left in 0th dimension, right in 0th dimension, left in 1st dimension, right in 1st dimension,
+   left in 2nd dimension, right in 2nd dimension.  Note also that yt does not currently support non-periodic
+   boundary conditions, and that the assumption of periodicity shows up primarily in plots and
+   covering grids.
+
+Optionally, attributes for cosmological simulations can be provided, if
+cosmological_simulation above is set to 1:
+
+  * current_redshift
+  * omega_matter (at z=0)
+  * omega_lambda (at z=0)
+  * hubble_constant (h100)
+
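+A sketch of these attributes for the (non-cosmological) example file
+built above (box size and identifier are illustrative): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   sp = f["simulation_parameters"]
+   sp.attrs["refine_by"] = na.int64(2)
+   sp.attrs["dimensionality"] = na.int64(3)
+   sp.attrs["domain_dimensions"] = na.array([32, 32, 32], dtype="int64")
+   sp.attrs["current_time"] = 0.0
+   sp.attrs["domain_left_edge"] = na.zeros(3)
+   sp.attrs["domain_right_edge"] = na.ones(3) * 3.08e24  # cm
+   sp.attrs["unique_identifier"] = "example_0001"
+   sp.attrs["cosmological_simulation"] = na.int64(0)
+   sp.attrs["num_ghost_zones"] = na.int64(0)
+   sp.attrs["field_ordering"] = na.int64(0)
+   sp.attrs["boundary_conditions"] = na.int64([0] * 6)  # periodic
+   f.close()
+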
+Fluid Field Attributes
+++++++++++++++++++++++
+
+Every field that is included that is not both in CGS already and in the list
+above requires parameters.  If a field is in the above list but is not in CGS,
+only the field_to_cgs attribute is necessary.  These will be stored under
+``/field_types`` and each must possess the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.  Note that if non-CGS units are desired
+   this field should simply be viewed as the value by which field values are
+   multiplied to get to some internally consistent unit system.
+``field_units``
+   a string that names the units.
+``staggering``
+   an integer: 0 for cell-centered, 1 for face-centered, 2 for vertex-centered.
+   Non-cell-centered data will be linearly interpolated; more complicated
+   reconstruction will be defined in a future version of this standard; for 1.0
+   we only allow for simple definitions.
+
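+For example, the entry for the density field written above (attribute
+values illustrative): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   ft = f["field_types"].create_group("density")
+   ft.attrs["field_name"] = "Gas Density"
+   ft.attrs["field_to_cgs"] = na.float64(1.0)
+   ft.attrs["field_units"] = "g/cc"
+   ft.attrs["staggering"] = na.int64(0)  # cell-centered
+   f.close()
+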
+Particle Types
+++++++++++++++
+
+Every particle type that is not recognized (i.e., all non-Dark Matter types)
+needs to have an entry under ``/particle_types``.  Each entry must possess the
+following attributes:
+
+``particle_type_name``
+   a string that will be used to describe the field; can contain spaces.
+``particle_use_dataspace``
+   (optional) if 1, the dataspace (see particle field definition above) will be used
+   for all particle fields for this type of particle.  Useful if a given type of particle
+   is embedded inside a larger list of different types of particle.
+``particle_type_num``
+   an integer giving the total number of particles of this type.
+
+For instance, to define a particle of type ``accreting_black_hole``, the file
+must contain ``/particle_types/accreting_black_hole``, with the
+``particle_type_name`` attribute of "Accreting Black Hole".
+
+Particle Field Attributes
++++++++++++++++++++++++++
+
+Every particle type that contains a new field (for instance, ``accretion_rate``)
+needs to have an entry under ``/particle_types/{particle_type_name}/{field_name}``
+containing the following attributes:
+
+``field_name``
+   a string that will be used to describe the field; can contain spaces.
+``field_to_cgs``
+   a float that will be used to convert the field to cgs units, if necessary.
+   Set to 1.0 if no conversion necessary.
+``field_units``
+   a string that names the units.
+
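+Putting the last two sections together, a sketch of the
+``accreting_black_hole`` example with one extra field (all values are
+illustrative): ::
+
+   import h5py as h5
+   import numpy as na
+
+   f = h5.File("example.gdf", "a")
+   pt = f["particle_types"].create_group("accreting_black_hole")
+   pt.attrs["particle_type_name"] = "Accreting Black Hole"
+   pt.attrs["particle_type_num"] = na.int64(8)
+   ar = pt.create_group("accretion_rate")
+   ar.attrs["field_name"] = "Accretion Rate"
+   ar.attrs["field_to_cgs"] = na.float64(1.0)
+   ar.attrs["field_units"] = "g/s"
+   f.close()
+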
+Role of YT
+----------
+
+yt will provide a reader for this data, so that any data in this format can be
+used by the code.  Additionally, the names and specifications in this document
+reflect the internal yt data structures.
+
+yt will also provide a writer for this data, which will operate on any existing
+data format.  Provided that a simulation code can read this data, this will
+enable cross-platform comparison.  Furthermore, any external piece of software
+(i.e., Stranger) that implements reading this format will be able to read any
+format of data that yt understands.
+
+Example File
+------------
+
+An example file constructed from the ``RD0005-mine`` dataset is available
+at http://yt.enzotools.org/files/RD0005.gdf .  It is not yet a complete
+conversion, but it is a working proof of concept.  Readers and writers are
+forthcoming.


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_distributed_athena.py
@@ -0,0 +1,22 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# e.g. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaDistributedConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaDistributedConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/scripts/convert_single_athena.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/scripts/convert_single_athena.py
@@ -0,0 +1,23 @@
+from grid_data_format import *
+import sys
+# Assumes that last input is the basename for the athena dataset.
+# e.g. kh_3d_mhd_hlld_128_beta5000_sub_tanhd.0030
+basename = sys.argv[-1]
+converter = AthenaConverter(basename)
+converter.convert()
+
+# If you have units information, set up a conversion dictionary for
+# each field.  Each key is the name of the field that Athena uses.
+# Each value is what you have to multiply the raw output from Athena
+# by to get cgs units.
+
+# code_to_cgs = {'density':1.0e3,
+# 	       'total_energy':1.0e-3,
+# 	       'velocity_x':1.2345,
+# 	       'velocity_y':1.2345,
+# 	       'velocity_z':1.2345}
+
+# converter = AthenaConverter(basename, field_conversions=code_to_cgs)
+# converter.convert()
+
+


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/grid_data_format/setup.py
--- /dev/null
+++ b/yt/utilities/grid_data_format/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('grid_data_format', parent_package, top_path)
+    config.add_subpackage("conversion")
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/math_utils.py
--- a/yt/utilities/math_utils.py
+++ b/yt/utilities/math_utils.py
@@ -619,3 +619,16 @@
         # and check, use out array.
         result.append(na.mean(sorted[indexer], axis=axis, out=out))
     return na.array(result)
+
+def get_rotation_matrix(theta, rot_vector):
+    ux = rot_vector[0]
+    uy = rot_vector[1]
+    uz = rot_vector[2]
+    cost = na.cos(theta)
+    sint = na.sin(theta)
+    
+    R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
+                  [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
+                  [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+    
+    return R

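For reference, the routine moved into math_utils.py is the standard
axis-angle (Rodrigues) rotation matrix: for a unit vector u and angle
theta,

    R = \cos\theta \, I + \sin\theta \, [u]_\times + (1 - \cos\theta) \, u u^T

where [u]_\times is the skew-symmetric cross-product matrix of u.  Note
that rot_vector is assumed to already be normalized; the expression is a
rotation only when |u| = 1.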

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/orientation.py
--- /dev/null
+++ b/yt/utilities/orientation.py
@@ -0,0 +1,101 @@
+"""
+A class that manages the coordinate system for orientable data
+containers and cameras.
+
+Author: Nathan Goldbaum <goldbaum at ucolick.org>
+Affiliation: UCSC Astronomy
+License:
+  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+
+from yt.funcs import *
+from yt.utilities.math_utils import get_rotation_matrix
+
+class Orientation:
+    def __init__(self, normal_vector, north_vector=None, steady_north=False):
+        r"""An object that returns a set of basis vectors for orienting
+        cameras and data containers.
+
+        Parameters
+        ----------
+        normal_vector : array_like
+           A vector normal to the image plane
+        north_vector  : array_like, optional
+           The 'up' direction to orient the image plane.  
+           If not specified, gets calculated automatically
+        steady_north  : bool, optional
+           Boolean to control whether to normalize the north_vector
+           by subtracting off the dot product of it and the normal 
+           vector.  Makes it easier to do rotations along a single
+           axis.  If north_vector is specified, this is switched to
+           True.  Default: False
+           
+        """
+        self.steady_north = steady_north
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
+        if north_vector is not None: self.steady_north = True
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+    def _setup_normalized_vectors(self, normal_vector, north_vector):
+        # Now we set up our various vectors
+        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        if north_vector is None:
+            vecs = na.identity(3)
+            t = na.cross(normal_vector, vecs).sum(axis=1)
+            ax = t.argmax()
+            east_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+            north_vector = na.cross(normal_vector, east_vector).ravel()
+        else:
+            if self.steady_north:
+                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
+            east_vector = na.cross(north_vector, normal_vector).ravel()
+        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+        self.normal_vector = normal_vector
+        self.north_vector = north_vector
+        self.unit_vectors = [east_vector, north_vector, normal_vector]
+        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+        
+    def switch_orientation(self, normal_vector=None, north_vector=None):
+        r"""Change the view direction based on any of the orientation parameters.
+
+        This will recalculate all the necessary vectors and vector planes related
+        to an orientable object.
+
+        Parameters
+        ----------
+        normal_vector: array_like, optional
+            The new looking vector.
+        north_vector : array_like, optional
+            The 'up' direction for the plane of rays.  If not specified,
+            calculated automatically.
+        """
+        if north_vector is None:
+            north_vector = self.north_vector
+        if normal_vector is None:
+            normal_vector = self.normal_vector
+        self._setup_normalized_vectors(normal_vector, north_vector)
+
+        

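As a quick usage sketch for the new Orientation class (illustrative
vectors; the assertions just check that the returned basis is
orthonormal):

    import numpy as na
    from yt.utilities.orientation import Orientation

    orient = Orientation(na.array([0.0, 0.0, 1.0]),
                         north_vector=na.array([0.0, 1.0, 0.0]))
    east, north, normal = orient.unit_vectors
    assert abs(na.dot(east, north)) < 1.0e-10
    assert abs(na.dot(north, normal)) < 1.0e-10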

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -530,6 +530,7 @@
     def preload(self, grids, fields, io_handler):
         # This will preload if it detects we are parallel capable and
         # if so, we load *everything* that we need.  Use with some care.
+        if len(fields) == 0: return
         mylog.debug("Preloading %s from %s grids", fields, len(grids))
         if not self._distributed: return
         io_handler.preload(grids, fields)


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -168,6 +168,7 @@
     config.add_subpackage("kdtree")
     config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
     config.add_subpackage("spatial")
+    config.add_subpackage("grid_data_format")
     config.add_subpackage("parallel_tools")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -374,6 +374,6 @@
                                self.data_source.center, self.data_source._inv_mat, indices,
                                self.data_source[item],
                                self.buff_size[0], self.buff_size[1],
-                               self.bounds).transpose()
+                               self.bounds)
         self[item] = buff
         return buff


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -935,14 +935,15 @@
     _descriptor = None
     def __init__(self, width, p_size=1.0, col='k', marker='o', stride=1.0,
                  ptype=None, stars_only=False, dm_only=False,
-                 minimum_mass=None):
+                 minimum_mass=None, alpha=1.0):
         """
         Adds particle positions, based on a thick slab along *axis* with a
         *width* along the line of sight.  *p_size* controls the number of
         pixels per particle, and *col* governs the color.  *ptype* will
         restrict plotted particles to only those that are of a given type.
         *minimum_mass* will require that the particles be of a given mass,
-        calculated via ParticleMassMsun, to be plotted.
+        calculated via ParticleMassMsun, to be plotted. *alpha* determines
+        each particle's opacity.
         """
         PlotCallback.__init__(self)
         self.width = width
@@ -954,6 +955,7 @@
         self.stars_only = stars_only
         self.dm_only = dm_only
         self.minimum_mass = minimum_mass
+        self.alpha = alpha
 
     def __call__(self, plot):
         data = plot.data
@@ -984,7 +986,7 @@
                     [reg[field_x][gg][::self.stride],
                      reg[field_y][gg][::self.stride]])
         plot._axes.scatter(px, py, edgecolors='None', marker=self.marker,
-                           s=self.p_size, c=self.color)
+                           s=self.p_size, c=self.color, alpha=self.alpha)
         plot._axes.set_xlim(xx0,xx1)
         plot._axes.set_ylim(yy0,yy1)
         plot._axes.hold(False)

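A usage sketch for the new alpha keyword, via the callback registry in
yt 2.x (the dataset path is hypothetical; assumes the usual
PlotCollection workflow):

    from yt.mods import *

    pf = load("RD0005-mine/RedshiftOutput0005")   # hypothetical dataset
    pc = PlotCollection(pf, center=[0.5, 0.5, 0.5])
    p = pc.add_slice("Density", 0)
    # Half-transparent particles from a slab 0.1 code units deep:
    p.modify["particles"](0.1, p_size=2.0, alpha=0.5)
    pc.save("particles_alpha")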

diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/visualization/volume_rendering/api.py
--- a/yt/visualization/volume_rendering/api.py
+++ b/yt/visualization/volume_rendering/api.py
@@ -39,5 +39,6 @@
                              import_partitioned_grids
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
+
 from camera import Camera, PerspectiveCamera, StereoPairCamera, \
     off_axis_projection, FisheyeCamera, MosaicFisheyeCamera


diff -r 4f9a48935bffc2e34d3a315781b55ac5f36cd663 -r a9d337474d854e121b6c92215cde35a6fddd316a yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -33,12 +33,13 @@
 from yt.utilities.amr_utils import TransferFunctionProxy, VectorPlane, \
     arr_vec2pix_nest, arr_pix2vec_nest, AdaptiveRaySource, \
     arr_ang2pix_nest, arr_fisheye_vectors, rotate_vectors
+from yt.utilities.math_utils import get_rotation_matrix
+from yt.utilities.orientation import Orientation
 from yt.visualization.image_writer import write_bitmap
 from yt.data_objects.data_containers import data_object_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, ProcessorPool
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from numpy import pi
 
 class Camera(ParallelAnalysisInterface):
     def __init__(self, center, normal_vector, width,
@@ -48,8 +49,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         r"""A viewpoint into a volume, for volume rendering.
 
         The camera represents the eye of an observer, which will be used to
@@ -74,7 +74,7 @@
             Boolean to control whether to normalize the north_vector
             by subtracting off the dot product of it and the normal
             vector.  Makes it easier to do rotations along a single
-            axis.  If north_vector is specifies, is switched to
+            axis.  If north_vector is specified, is switched to
             True. Default: False
         volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
             The volume to ray cast through.  Can be specified for finer-grained
@@ -136,12 +136,6 @@
             prone to longer data IO times.  If all the data can fit in
             memory on each cpu, this can be the fastest option for
             multiple ray casts on the same dataset.
-        expand_factor: float, optional
-            A parameter to be used with the PerspectiveCamera.
-            Controls how much larger a volume to render, which is
-            currently difficult to gauge for the PerspectiveCamera.
-            For full box renders, values in the 2.0-3.0 range seem to
-            produce desirable results. Default: 1.0
         le: array_like, optional
             Specifies the left edge of the volume to be rendered.
             Currently only works with use_kd=True.
@@ -188,23 +182,13 @@
         self.sub_samples = sub_samples
         if not iterable(width):
             width = (width, width, width) # left/right, top/bottom, front/back 
-        self.width = width
-        self.center = center
-        self.steady_north = steady_north
-        self.expand_factor = expand_factor
-        # This seems to be necessary for now.  Not sure what goes wrong when not true.
-        if na.all(north_vector == normal_vector):
-            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
-            north_vector == None
-        if north_vector is not None: self.steady_north=True
-        self.north_vector = north_vector
-        self.rotation_vector = north_vector
+        self.orienter = Orientation(normal_vector, north_vector=north_vector, steady_north=steady_north)
+        self._setup_box_properties(width, center, self.orienter.unit_vectors)
         if fields is None: fields = ["Density"]
         self.fields = fields
         if transfer_function is None:
             transfer_function = ProjectionTransferFunction()
         self.transfer_function = transfer_function
-        self._setup_normalized_vectors(normal_vector, north_vector)
         self.log_fields = log_fields
         self.use_kd = use_kd
         self.l_max = l_max
@@ -223,40 +207,21 @@
             self.use_kd = isinstance(volume, AMRKDTree)
         self.volume = volume
 
-    def _setup_normalized_vectors(self, normal_vector, north_vector):
-        # Now we set up our various vectors
-        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
-        if north_vector is None:
-            vecs = na.identity(3)
-            t = na.cross(normal_vector, vecs).sum(axis=1)
-            ax = t.argmax()
-            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
-            if self.rotation_vector is None:
-                self.rotation_vector=north_vector
-        else:
-            if self.steady_north:
-                north_vector = north_vector - na.dot(north_vector,normal_vector)*normal_vector
-        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
-        east_vector = -na.cross(north_vector, normal_vector).ravel()
-        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
-        self.normal_vector = normal_vector
-        self.unit_vectors = [east_vector, north_vector, normal_vector]
-        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
-                                     self.unit_vectors[1]*self.width[1],
-                                     self.unit_vectors[2]*self.width[2]])
-
-        self.origin = self.center - 0.5*self.width[0]*self.unit_vectors[0] \
-                                  - 0.5*self.width[1]*self.unit_vectors[1] \
-                                  - 0.5*self.width[2]*self.unit_vectors[2]
-        self.back_center = self.center - 0.5*self.width[2]*self.unit_vectors[2]
-        self.front_center = self.center + 0.5*self.width[2]*self.unit_vectors[2]
-        self.inv_mat = na.linalg.pinv(self.unit_vectors)
+    def _setup_box_properties(self, width, center, unit_vectors):
+        self.width = width
+        self.center = center
+        self.box_vectors = na.array([unit_vectors[0]*width[0],
+                                     unit_vectors[1]*width[1],
+                                     unit_vectors[2]*width[2]])
+        self.origin = center - 0.5*na.dot(width,unit_vectors)
+        self.back_center = center - 0.5*width[2]*unit_vectors[2]
+        self.front_center = center + 0.5*width[2]*unit_vectors[2]
 
     def look_at(self, new_center, north_vector = None):
         r"""Change the view direction based on a new focal point.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera to point at a new location.
+        This will recalculate all the necessary vectors and vector planes to orient
+        the image plane so that it points at a new location.
 
         Parameters
         ----------
@@ -268,13 +233,14 @@
             calculated automatically.
         """
         normal_vector = self.front_center - new_center
-        self._setup_normalized_vectors(normal_vector, north_vector)
+        self.orienter.switch_orientation(normal_vector=normal_vector,
+                                         north_vector = north_vector)
 
     def switch_view(self, normal_vector=None, width=None, center=None, north_vector=None):
-        r"""Change the view direction based on any of the view parameters.
+        r"""Change the view based on any of the view parameters.
 
-        This will recalculate all the necessary vectors and vector planes related
-        to a camera with new normal vectors, widths, centers, or north vectors.
+        This will recalculate the orientation and width based on any of
+        normal_vector, width, center, and north_vector.
 
         Parameters
         ----------
@@ -297,11 +263,13 @@
         if center is not None:
             self.center = center
         if north_vector is None:
-            north_vector = self.north_vector
+            north_vector = self.orienter.north_vector
         if normal_vector is None:
-            normal_vector = self.front_center-self.center
-        self._setup_normalized_vectors(normal_vector, north_vector)
-        
+            normal_vector = self.front_center - self.center
+        self.orienter.switch_orientation(normal_vector = normal_vector,
+                                         north_vector = north_vector)
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
+
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -310,8 +278,7 @@
                          self.resolution[0])[:,None]
         py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -320,8 +287,8 @@
         bounds = (px.min(), px.max(), py.min(), py.max())
         vector_plane = VectorPlane(positions, self.box_vectors[2],
                                       self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
         return vector_plane
 
     def snapshot(self, fn = None, clip_ratio = None, double_check = False):
@@ -393,8 +360,7 @@
 
         """
         self.width = [w / factor for w in self.width]
-        self._setup_normalized_vectors(
-                self.unit_vectors[2], self.unit_vectors[1])
+        self._setup_box_properties(self.width, self.center, self.orienter.unit_vectors)
 
     def zoomin(self, final, n_steps, clip_ratio = None):
         r"""Loop over a zoomin and return snapshots along the way.
@@ -506,15 +472,7 @@
         if rot_vector is None:
             rot_vector = self.rotation_vector
             
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
+        R = get_rotation_matrix(theta, rot_vector)
 
         normal_vector = self.front_center-self.center
 
@@ -563,8 +521,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  use_kd=True, l_max=None, no_ghost=True,
-                 tree_type='domain',expand_factor=1.0,
-                 le=None, re=None):
+                 tree_type='domain',le=None, re=None):
         self.frames = []
         Camera.__init__(self, center, normal_vector, width,
                  resolution, transfer_function,
@@ -573,8 +530,7 @@
                  log_fields = log_fields,
                  sub_samples = sub_samples, pf = pf,
                  use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
-                 tree_type=tree_type,expand_factor=expand_factor,
-                 le=le, re=re)
+                 tree_type=tree_type,le=le, re=re)
 
     def snapshot(self, fn = None, clip_ratio = None):
         import matplotlib
@@ -612,6 +568,26 @@
 data_object_registry["interactive_camera"] = InteractiveCamera
 
 class PerspectiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain', expand_factor = 1.0,
+                 le=None, re=None):
+        self.expand_factor = expand_factor
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = north_vector, steady_north=steady_north,
+                 volume = volume, fields = fields,
+                 log_fields = log_fields,
+                 sub_samples = sub_samples, pf = pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type, le=le, re=re)
+        
+
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
         # the usage of on-the-fly generation in the VolumeIntegrator module
@@ -623,8 +599,7 @@
                          self.resolution[0])[:,None]
         py = self.expand_factor*na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
                          self.resolution[1])[None,:]
-        inv_mat = self.inv_mat
-        bc = self.back_center
+        inv_mat = self.orienter.inv_mat
         positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
         positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
@@ -639,8 +614,8 @@
 
         vector_plane = VectorPlane(positions, vectors,
                                       self.back_center, bounds, image,
-                                      self.unit_vectors[0],
-                                      self.unit_vectors[1])
+                                      self.orienter.unit_vectors[0],
+                                      self.orienter.unit_vectors[1])
         return vector_plane
 
 def corners(left_edge, right_edge):
@@ -1064,7 +1039,7 @@
             rot_vector = na.cross(rvec, self.normal_vector)
             rot_vector /= (rot_vector**2).sum()**0.5
             
-            self.rotation_matrix = self.get_rotation_matrix(angle,rot_vector)
+            self.rotation_matrix = get_rotation_matrix(angle,rot_vector)
             self.normal_vector = na.dot(self.rotation_matrix,self.normal_vector)
             self.north_vector = na.dot(self.rotation_matrix,self.north_vector)
             self.east_vector = na.dot(self.rotation_matrix,self.east_vector)
@@ -1181,7 +1156,7 @@
         
         dist = ((self.focal_center - self.center)**2).sum()**0.5
         
-        R = self.get_rotation_matrix(theta, rot_vector)
+        R = get_rotation_matrix(theta, rot_vector)
 
         self.vp = rotate_vectors(self.vp, R)
         self.normal_vector = na.dot(R,self.normal_vector)
@@ -1255,19 +1230,6 @@
                 self.center += dx
             yield self.snapshot()
 
-    def get_rotation_matrix(self, theta, rot_vector):
-        ux = rot_vector[0]
-        uy = rot_vector[1]
-        uz = rot_vector[2]
-        cost = na.cos(theta)
-        sint = na.sin(theta)
-        
-        R = na.array([[cost+ux**2*(1-cost), ux*uy*(1-cost)-uz*sint, ux*uz*(1-cost)+uy*sint],
-                      [uy*ux*(1-cost)+uz*sint, cost+uy**2*(1-cost), uy*uz*(1-cost)-ux*sint],
-                      [uz*ux*(1-cost)-uy*sint, uz*uy*(1-cost)+ux*sint, cost+uz**2*(1-cost)]])
-
-        return R
-
 def off_axis_projection(pf, center, normal_vector, width, resolution,
                         field, weight = None, volume = None, no_ghost = True,
                         north_vector = None):


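To illustrate the geometry now computed in _setup_box_properties, a
standalone sketch with an axis-aligned basis (unit cube centered at the
origin; numbers illustrative):

    import numpy as na

    unit_vectors = na.identity(3)      # east, north, normal
    width = na.array([1.0, 1.0, 1.0])  # left/right, top/bottom, front/back
    center = na.zeros(3)

    box_vectors = na.array([unit_vectors[i] * width[i] for i in range(3)])
    origin = center - 0.5 * na.dot(width, unit_vectors)
    back_center = center - 0.5 * width[2] * unit_vectors[2]
    front_center = center + 0.5 * width[2] * unit_vectors[2]
    # origin == (-0.5, -0.5, -0.5); back/front centers lie on the normal axis.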

https://bitbucket.org/yt_analysis/yt/changeset/3ef84de0a88d/
changeset:   3ef84de0a88d
branch:      yt
user:        MatthewTurk
date:        2012-05-24 23:54:24
summary:     Merged in samskillman/yt (pull request #160)
affected #:  2 files

diff -r d9fd498edfe2b42e4c6fc062106014352e527736 -r 3ef84de0a88dded0fec22f8883fb8066eca0b4ea yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
         uniquedims[i] = <np.float64_t *> \
                 alloca(2*n_grids * sizeof(np.float64_t))
     my_max = 0
+    best_dim = -1
     for dim in range(3):
         n_unique = 0
         uniques = uniquedims[dim]


diff -r d9fd498edfe2b42e4c6fc062106014352e527736 -r 3ef84de0a88dded0fec22f8883fb8066eca0b4ea yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1012,7 +1012,7 @@
                     # This node belongs to someone else, move along
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
-                
+
             # If we are down to one grid, we are either in it or the parent grid
             if len(current_node.grids) == 1:
                 thisgrid = current_node.grids[0]
@@ -1031,25 +1031,27 @@
                         if len(children) > 0:
                             current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
-                            # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+                            #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
                             continue
 
                     # Else make a leaf node (brick container)
+                    #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
 
             # If we don't have any grids, this volume belongs to the parent        
             if len(current_node.grids) == 0:
+                #print 'This volume does not have a child grid, so it belongs to my parent!'
                 set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
-                # print 'This volume does not have a child grid, so it belongs to my parent!'
                 current_node, previous_node = self.step_depth(current_node, previous_node)
                 continue
 
             # If we've made it this far, time to build a dividing node
-            self._build_dividing_node(current_node)
+            # print 'Building dividing node'
+            # Continue if building failed
+            if self._build_dividing_node(current_node): continue
 
              # Step to the next node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1058,10 +1060,10 @@
         '''
         Given a node, finds all the choices for the next dividing plane.  
         '''
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1071,8 +1073,19 @@
         Makes the current node a dividing node, and initializes the
         left and right children.
         '''
-        
-        data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        best_dim, split, less_ids, greater_ids = \
+            kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+        del data
+
+        # Here we break out if no unique grids were found. In this case, there
+        # are likely overlapping grids, and we assume that the first grid takes
+        # precedence.  This is fragile.
+        if best_dim == -1:
+            current_node.grids = [current_node.grids[0]]
+            return 1
 
         current_node.split_ax = best_dim
         current_node.split_pos = split
@@ -1080,7 +1093,7 @@
         #greater_ids0 = (split < data[:,1])
         #assert(na.all(less_ids0 == less_ids))
         #assert(na.all(greater_ids0 == greater_ids))
-        
+
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
                                              parent_grid=current_node.parent_grid,
@@ -1099,7 +1112,9 @@
         # build to work.  The other deletions are just to save memory.
         del current_node.grids, current_node.parent_grid, current_node.brick,\
             current_node.li, current_node.ri, current_node.dims
-        
+
+        return 0
+
     def traverse(self, back_center, front_center, image):
         r"""Traverses the kd-Tree, casting the partitioned grids from back to
             front.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


