[yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Nov 27 20:22:11 PST 2012


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/bcc9a9de41a1/
changeset:   bcc9a9de41a1
branch:      yt
user:        samskillman
date:        2012-11-28 05:21:02
summary:     Disabling GridValuesTest (bitwise) in the enzo frontend answer_testing_support for now, until it can be controlled with a --bitwise flag.
affected #:  1 file

diff -r 1597098bb078178b6569c3e37c345cc46bc58351 -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 yt/frontends/enzo/answer_testing_support.py
--- a/yt/frontends/enzo/answer_testing_support.py
+++ b/yt/frontends/enzo/answer_testing_support.py
@@ -61,7 +61,7 @@
     if not can_run_pf(pf_fn): return
     dso = [None]
     for field in fields:
-        yield GridValuesTest(pf_fn, field)
+        # yield GridValuesTest(pf_fn, field)
         if 'particle' in field: continue
         for ds in dso:
             for axis in [0, 1, 2]:
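
For context: the commit message anticipates a --bitwise flag to opt back in to this test. Below is a minimal sketch of how such a flag could gate it, assuming an argparse-style option and a stand-in GridValuesTest (both hypothetical, not part of this commit):

    # Sketch only: gate the bitwise grid-value test behind an opt-in flag.
    # The --bitwise option and the GridValuesTest stub are assumptions.
    import argparse

    class GridValuesTest(object):
        # Stand-in so the sketch runs on its own; yt provides the real one.
        def __init__(self, pf_fn, field):
            self.pf_fn, self.field = pf_fn, field

    parser = argparse.ArgumentParser()
    parser.add_argument("--bitwise", action="store_true",
                        help="opt in to bitwise grid-value comparisons")
    options, _ = parser.parse_known_args()

    def grid_values_tests(pf_fn, fields):
        #yield the bitwise test only when explicitly requested
        for field in fields:
            if options.bitwise:
                yield GridValuesTest(pf_fn, field)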



https://bitbucket.org/yt_analysis/yt/changeset/33bfca118708/
changeset:   33bfca118708
branch:      yt
user:        samskillman
date:        2012-11-28 05:21:40
summary:     Merging.
affected #:  14 files

diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -43,7 +43,7 @@
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
-INST_ROCKSTAR=1 # Install the Rockstar halo finder?
+INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""






diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -244,6 +244,7 @@
     cdef np.float64_t conv[6], left_edge[6]
     cdef np.ndarray[np.int64_t, ndim=1] arri
     cdef np.ndarray[np.float64_t, ndim=1] arr
+    cdef unsigned long long pi,fi,i
     pf = rh.tsl.next()
     print 'reading from particle filename %s: %s'%(filename,pf.basename)
     block = int(str(filename).rsplit(".")[-1])


diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,16 +33,11 @@
 
 import time
 import numpy as np
-import numpy.linalg as linalg
-import collections
-
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
 from yt.data_objects.universal_fields import add_field
 from yt.mods import *
 
-debug = True
-
 def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
         debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
@@ -77,7 +72,6 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
-
     fc = np.array(fc)
     fwidth = np.array(fwidth)
     
@@ -95,7 +89,7 @@
     #Create a list of the star particle properties in PARTICLE_DATA
     #Include ID, parent-ID, position, velocity, creation_mass, 
     #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+    particle_data,nstars = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
                                            dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
@@ -109,7 +103,7 @@
 
     create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    return fle,fre,ile,ire,dd,nleaf
+    return fle,fre,ile,ire,dd,nleaf,nstars
 
 def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
                                         halo_list,domains_list=None,**kwargs):
@@ -193,17 +187,23 @@
     domains_halos  = [d[2] for d in domains_list]
     return domains_list
 
-def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
-    add_fields() #add the metal mass field that sunrise wants
+def prepare_octree(pf,ile,start_level=0,debug=True,dd=None,center=None):
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
+    try:
+        dd['MetalMass']
+    except KeyError:
+        add_fields() #add the metal mass field that sunrise wants
+    def _temp_times_mass(field, data):
+        return data["Temperature"]*data["CellMassMsun"]
+    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
     
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    if dd is None:
-        #we keep passing dd around to not regenerate the data all the time
-        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -251,6 +251,7 @@
     output   = np.zeros((o_length,len(fields)), dtype='float64')
     refined  = np.zeros(r_length, dtype='int32')
     levels   = np.zeros(r_length, dtype='int32')
+    ids      = np.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
     start_time = time.time()
@@ -259,7 +260,7 @@
             c = center*pf['kpc']
         else:
             c = ile*1.0/pf.domain_dimensions*pf['kpc']
-        printing = lambda x: print_oct(x,pf['kpc'],c)
+        printing = lambda x: print_oct(x)
     else:
         printing = None
     pbar = get_pbar("Building Hilbert DFO octree",len(refined))
@@ -271,6 +272,7 @@
             output,refined,levels,
             grids,
             start_level,
+            ids,
             debug=printing,
             tracker=pbar)
     pbar.finish()
@@ -278,6 +280,7 @@
     #for the next spot, so we're off by 1
     print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
+    print 'first few entries :',refined[:12]
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
@@ -287,6 +290,7 @@
     ci = data['cell_index']
     l  = data['level']
     g  = data['grid']
+    o  = g.offset
     fle = g.left_edges+g.dx*ci
     fre = g.left_edges+g.dx*(ci+1)
     if nd is not None:
@@ -295,12 +299,14 @@
         if nc is not None:
             fle -= nc
             fre -= nc
-    txt  = '%1i '
-    txt += '%1.3f '*3+'- '
-    txt += '%1.3f '*3
-    print txt%((l,)+tuple(fle)+tuple(fre))
+    txt  = '%+1i '
+    txt += '%+1i '
+    txt += '%+1.3f '*3+'- '
+    txt += '%+1.3f '*3
+    if l<2:
+        print txt%((l,)+(o,)+tuple(fle)+tuple(fre))
 
-def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the [grid_index]
                             pos, #the output hydro data position and refinement position
                             grid,  #grid that this oct lives on (not its children)
                             hilbert,  #the hilbert state
@@ -309,6 +315,7 @@
                             levels, #For a given Oct, what is the level
                             grids, #list of all patch grids available to us
                             level, #starting level of the oct (not the children)
+                            ids, #record the oct ID
                             debug=None,tracker=True):
     if tracker is not None:
         if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
@@ -316,16 +323,19 @@
         debug(vars())
     child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
     #record the refinement state
-    refined[pos.refined_pos] = child_grid_index!=-1
-    levels[pos.output_pos]  = level
+    levels[pos.refined_pos]  = level
+    is_leaf = (child_grid_index==-1) and (level>0)
+    refined[pos.refined_pos] = not is_leaf #True is oct, False is leaf
+    ids[pos.refined_pos] = child_grid_index #child grid index, or -1 for a leaf
     pos.refined_pos+= 1 
-    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+    if is_leaf: #never subdivide if we are on a superlevel
         #then we have hit a leaf cell; write it out
         for field_index in range(grid.fields.shape[0]):
             output[pos.output_pos,field_index] = \
                     grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
         pos.output_pos+= 1 
     else:
+        assert child_grid_index>-1
         #find the grid we descend into
         #then find the eight cells we break up into
         subgrid = grids[child_grid_index]
@@ -338,18 +348,21 @@
             #denote each of the 8 octs
             if level < 0:
                 subgrid = grid #we don't actually descend if we're a superlevel
-                child_ile = cell_index + vertex*2**(-level)
+                #child_ile = cell_index + np.array(vertex)*2**(-level)
+                child_ile = cell_index + np.array(vertex)*2**(-(level+1))
+                child_ile = child_ile.astype('int')
             else:
                 child_ile = subgrid_ile+np.array(vertex)
                 child_ile = child_ile.astype('int')
+
             RecurseOctreeDepthFirstHilbert(child_ile,pos,
-                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
-                    debug=debug,tracker=tracker)
+                subgrid,hilbert_child,output,refined,levels,grids,
+                level+1,ids = ids,
+                debug=debug,tracker=tracker)
 
 
 
 def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
-
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
     cols = pyfits.ColDefs([structure])
@@ -360,8 +373,6 @@
     for i,a in enumerate('xyz'):
         st_table.header.update("min%s" % a, fle[i] * pf['kpc'])
         st_table.header.update("max%s" % a, fre[i] * pf['kpc'])
-        #st_table.header.update("min%s" % a, 0) #WARNING: this is for debugging
-        #st_table.header.update("max%s" % a, 2) #
         st_table.header.update("n%s" % a, fdx[i])
         st_table.header.update("subdiv%s" % a, 2)
     st_table.header.update("subdivtp", "OCTREE", "Type of grid subdivision")
@@ -457,6 +468,7 @@
             #quit if idxq is true:
             idxq = idx[0]>0 and np.all(idx==idx[0])
             out  = np.all(fle>cfle) and np.all(fre<cfre) 
+            out &= abs(np.log2(idx[0])-np.rint(np.log2(idx[0])))<1e-5 #nwide should be a power of 2
             assert width[0] < 1.1 #can't go larger than the simulation volume
         nwide = idx[0]
     else:
@@ -495,11 +507,15 @@
                           dd=None):
     if dd is None:
         dd = pf.h.all_data()
-    idx = dd["particle_type"] == star_type
+    idxst = dd["particle_type"] == star_type
+
+    #make sure we select more than a single particle
+    assert np.sum(idxst)>0
     if pos is None:
         pos = np.array([dd["particle_position_%s" % ax]
                         for ax in 'xyz']).transpose()
-    idx = idx & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    idx = idxst & np.all(pos>fle,axis=1) & np.all(pos<fre,axis=1)
+    assert np.sum(idx)>0
     pos = pos[idx]*pf['kpc'] #unitary units -> kpc
     if age is None:
         age = dd["particle_age"][idx]*pf['years'] # seconds->years
@@ -518,8 +534,7 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
-        #metallicity *=0.0198
-        #print 'WARNING: multiplying metallicirt by 0.0198'
+        assert np.all(metallicity>0.0)
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
     formation_time = pf.current_time*pf['years']-age
@@ -534,19 +549,19 @@
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
     col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
-    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    #col_list.append(pyfits.Column("L_bol", format="D",
-    #    array=np.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
     pd_table = pyfits.new_table(cols)
     pd_table.name = "PARTICLEDATA"
-    return pd_table
+    
+    #make sure we have nonzero particle number
+    assert pd_table.data.shape[0]>0
+    return pd_table,np.sum(idx)
 
 
 def add_fields():
@@ -556,10 +571,8 @@
         
     def _convMetalMass(data):
         return 1.0
-    
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
-
     def _initial_mass_cen_ostriker(field, data):
         # SFR in a cell. This assumes stars were created by the Cen & Ostriker algorithm
         # Check Grid_AddToDiskProfile.C and star_maker7.src
@@ -576,9 +589,6 @@
 
     add_field("InitialMassCenOstriker", function=_initial_mass_cen_ostriker)
 
-    def _temp_times_mass(field, data):
-        return data["Temperature"]*data["CellMassMsun"]
-    add_field("TemperatureTimesCellMassMsun", function=_temp_times_mass)
 
 class position:
     def __init__(self):
@@ -668,254 +678,3 @@
         j+=1
         yield vertex, self.descend(j)
 
-def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
-    if cameraset is None:
-        cameraset =cameraset_vertex 
-    campos =[]
-    names = []
-    dd = pf.h.all_data()
-    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
-        kwargs['scene_position']=scene_pos
-        kwargs['scene_up']=scene_up
-        kwargs['scene_rot']=scene_rot
-        kwargs['dd']=dd
-        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
-        campos += line,
-        names += name,
-    return names,campos     
-
-def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
-                                     sim_sphere_radius=None,sim_halo_radius=None,
-                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
-                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
-                                     dd=None):
-    """Translate the simulation to center on sim_center, 
-    then rotate such that sim_up is along the +z direction. Then we are in the 
-    'scene' basis coordinates from which scene_up and scene_offset are defined.
-    Then a position vector, direction vector, up vector and angular field of view
-    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
-    The angular field of view is in radians. The 10 numbers should match the inputs to
-    camera_positions in Sunrise.
-    """
-
-    sim_center = np.array(sim_center)
-    if sim_sphere_radius is None:
-        sim_sphere_radius = 10.0/pf['kpc']
-    if sim_axis_short is None:
-        if dd is None:
-            dd = pf.h.all_data()
-        pos = np.array([dd["particle_position_%s"%i] for i in "xyz"]).T
-        idx = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
-        mas = dd["particle_mass"]
-        pos = pos[idx]
-        mas = mas[idx]
-        mo_inertia = position_moment(pos,mas)
-        eigva, eigvc = linalg.eig(mo_inertia)
-        #order into short, long axes
-        order = eigva.real.argsort()
-        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
-    else:
-        ax_short = sim_axis_short
-        ax_long  = sim_axis_long
-    if sim_halo_radius is None:
-        sim_halo_radius = 200.0/pf['kpc']
-    if scene_distance is  None:
-        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
-    if scene_fov is None:
-        radii = np.sqrt(np.sum((pos-sim_center)**2.0,axis=1))
-        #idx= radii < sim_halo_radius*0.10
-        #radii = radii[idx]
-        #mass  = mas[idx] #copying mass into mas
-        si = np.argsort(radii)
-        radii = radii[si]
-        mass  = mas[si]
-        idx, = np.where(np.cumsum(mass)>mass.sum()/2.0)
-        re = radii[idx[0]]
-        scene_fov = 5*re
-        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
-        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 3kpc
-    #find rotation matrix
-    angles=find_half_euler_angles(ax_short,ax_long)
-    rotation  = euler_matrix(*angles)
-    irotation = numpy.linalg.inv(rotation)
-    axs = (ax_short,ax_med,ax_long)
-    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
-    axs = ([1,0,0],[0,1,0],[0,0,1])
-    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
-    
-    #rotate the camera
-    if scene_rot :
-        irotation = np.eye(3)
-    sunrise_pos = matmul(irotation,np.array(scene_position)*scene_distance) #do NOT include sim center
-    sunrise_up  = matmul(irotation,scene_up)
-    sunrise_direction = -sunrise_pos
-    sunrise_afov = 2.0*np.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
-
-    #change to physical kpc
-    sunrise_pos *= pf['kpc']
-    sunrise_direction *= pf['kpc']
-    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
-
-def matmul(m, v):
-    """Multiply a matrix times a set of vectors, or a single vector.
-    My nPart x nDim convention leads to two transpositions, which is
-    why this is hidden away in a function.  Note that if you try to
-    use this to muliply two matricies, it will think that you're
-    trying to multiply by a set of vectors and all hell will break
-    loose."""    
-    assert type(v) is not np.matrix
-    v = np.asarray(v)
-    m, vs = [np.asmatrix(a) for a in (m, v)]
-
-    result = np.asarray(np.transpose(m * np.transpose(vs)))    
-    if len(v.shape) == 1:
-        return result[0]
-    return result
-
-
-def mag(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return np.sqrt( (vs**2).sum() )
-    return np.sqrt( (vs**2).sum(axis=1) )
-
-def mag2(vs):
-    """Compute the norms of a set of vectors or a single vector."""
-    vs = np.asarray(vs)
-    if len(vs.shape) == 1:
-        return (vs**2).sum()
-    return (vs**2).sum(axis=1)
-
-
-def position_moment(rs, ms=None, axes=None):
-    """Find second position moment tensor.
-    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
-    rs = np.asarray(rs)
-    Npart, N = rs.shape
-    if ms is None: ms = np.ones(Npart)
-    else: ms = np.asarray(ms)    
-    if axes is not None:
-        axes = np.asarray(axes,dtype=float64)
-        axes = axes/axes.max()
-        norms2 = mag2(rs/axes)
-    else:
-        norms2 = np.ones(Npart)
-    M = ms.sum()
-    result = np.zeros((N,N))
-    # matrix is symmetric, so only compute half of it then fill in the
-    # other half
-    for i in range(N):
-        for j in range(i+1):
-            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
-        
-    result = result + result.transpose() - np.identity(N)*result
-    return result
-    
-
-
-def find_half_euler_angles(v,w,check=True):
-    """Find the passive euler angles that will make v lie along the z
-    axis and w lie along the x axis.  v and w are uncertain up to
-    inversions (ie, eigenvectors) so this routine removes degeneracies
-    associated with that
-
-    (old) Calculate angles to bring a body into alignment with the
-    coordinate system.  If v1 is the SHORTEST axis and v2 is the
-    LONGEST axis, then this will return the angle (Euler angles) to
-    make the long axis line up with the x axis and the short axis line
-    up with the x (z) axis for the 2 (3) dimensional case."""
-    # Make sure the vectors are normalized and orthogonal
-    mag = lambda x: np.sqrt(np.sum(x**2.0))
-    v = v/mag(v)
-    w = w/mag(w)    
-    if check:
-        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
-
-    # Break eigenvector scaling degeneracy by forcing it to have a positive
-    # z component
-    if v[2] < 0: v = -v
-    phi,theta = find_euler_phi_theta(v)
-
-    # Rotate w according to phi,theta and then break inversion
-    # degeneracy by requiring that resulting vector has positive
-    # x component
-    w_prime = euler_passive(w,phi,theta,0.)
-    if w_prime[0] < 0: w_prime = -w_prime
-    # Now last Euler angle should just be this:
-    psi = np.arctan2(w_prime[1],w_prime[0])
-    return phi, theta, psi
-
-def find_euler_phi_theta(v):
-    """Find (passive) euler angles that will make v point in the z
-    direction"""
-    # Make sure the vector is normalized
-    v = v/mag(v)
-    theta = np.arccos(v[2])
-    phi = np.arctan2(v[0],-v[1])
-    return phi,theta
-
-def euler_matrix(phi, the, psi):
-    """Make an Euler transformation matrix"""
-    cpsi=np.cos(psi)
-    spsi=np.sin(psi)
-    cphi=np.cos(phi)
-    sphi=np.sin(phi)
-    cthe=np.cos(the)
-    sthe=np.sin(the)
-    m = np.mat(np.zeros((3,3)))
-    m[0,0] = cpsi*cphi - cthe*sphi*spsi
-    m[0,1] = cpsi*sphi + cthe*cphi*spsi
-    m[0,2] = spsi*sthe
-    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
-    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
-    m[1,2] = cpsi*sthe
-    m[2,0] = sthe*sphi
-    m[2,1] = -sthe*cphi
-    m[2,2] = cthe
-    return m
-
-def euler_passive(v, phi, the, psi):
-    """Passive Euler transform"""
-    m = euler_matrix(phi, the, psi)
-    return matmul(m,v)
-
-
-#the format for these camerasets is name,up vector,camera location, 
-#rotate to the galaxy's up direction?
-cameraset_compass = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
-    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
-    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
-    ])
-
-cameraset_vertex = collections.OrderedDict([
-    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
-    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
-    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
-    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
-    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
-    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
-    ])
-
-#up is 45deg down from z, towards north
-#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
-#up is -45deg down from z, towards north
-
-cameraset_ring = collections.OrderedDict()
-
-segments = 20
-for angle in np.linspace(0,360,segments):
-    pos = [np.cos(angle),0.,np.sin(angle)]
-    vc  = [np.cos(90-angle),0.,np.sin(90-angle)] 
-    cameraset_ring['02i'%angle]=(pos,vc)
-            
-
-
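
A note on the refined array written by the hunks above: it is a depth-first encoding of the octree, where True marks an oct whose eight children follow immediately (each encoded the same way) and False marks a leaf, per the "#True is oct, False is leaf" comment. A self-contained sketch of consuming that encoding (a generic walk, not yt's or Sunrise's code):

    # Walk a depth-first 'refined' encoding: True = oct, followed by its
    # eight recursively encoded children; False = leaf. Returns how many
    # entries the subtree starting at index i occupies.
    def count_subtree(refined, i=0):
        if not refined[i]:
            return 1                    # a leaf is a single entry
        n = 1                           # the oct entry itself
        for _ in range(8):              # then its eight children, in order
            n += count_subtree(refined, i + n)
        return n

    # One oct whose eight children are all leaves occupies nine entries.
    assert count_subtree([True] + [False] * 8) == 9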


diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -78,7 +78,7 @@
         raise AttributeError(attr)
 
 class TimeSeriesData(object):
-    def __init__(self, outputs, parallel = True):
+    def __init__(self, outputs, parallel = True ,**kwargs):
         r"""The TimeSeriesData object is a container of multiple datasets,
         allowing easy iteration and computation on them.
 
@@ -107,12 +107,13 @@
             setattr(self, type_name, functools.partial(
                 TimeSeriesDataObject, self, type_name))
         self.parallel = parallel
+        self.kwargs = kwargs
 
     def __iter__(self):
         # We can make this fancier, but this works
         for o in self._pre_outputs:
             if isinstance(o, types.StringTypes):
-                yield load(o)
+                yield load(o,**self.kwargs)
             else:
                 yield o
 
@@ -124,7 +125,7 @@
             return TimeSeriesData(self._pre_outputs[key], self.parallel)
         o = self._pre_outputs[key]
         if isinstance(o, types.StringTypes):
-            o = load(o)
+            o = load(o,**self.kwargs)
         return o
 
     def __len__(self):
@@ -223,7 +224,7 @@
         return [v for k, v in sorted(return_values.items())]
 
     @classmethod
-    def from_filenames(cls, filenames, parallel = True):
+    def from_filenames(cls, filenames, parallel = True, **kwargs):
         r"""Create a time series from either a filename pattern or a list of
         filenames.
 
@@ -258,12 +259,9 @@
 
         """
         if isinstance(filenames, types.StringTypes):
-            pattern = filenames
             filenames = glob.glob(filenames)
             filenames.sort()
-            if len(filenames) == 0:
-                raise YTNoFilenamesMatchPattern(pattern)
-        obj = cls(filenames[:], parallel = parallel)
+        obj = cls(filenames[:], parallel = parallel, **kwargs)
         return obj
 
     @classmethod
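
The net effect of the hunks above is that keyword arguments handed to TimeSeriesData (or from_filenames) are stored once and replayed into every load() call. A stand-alone sketch of the pattern, with illustrative names only (load() here is a stub, not yt's):

    # Record **kwargs at construction, forward them each time an output
    # is materialized; this mirrors self.kwargs in the diff above.
    def load(fn, **kwargs):
        return (fn, kwargs)  # stub standing in for yt.mods.load

    class SeriesSketch(object):
        def __init__(self, filenames, **kwargs):
            self._pre_outputs = list(filenames)
            self.kwargs = kwargs                    # saved once...

        def __iter__(self):
            for o in self._pre_outputs:
                yield load(o, **self.kwargs)        # ...replayed per load

    ts = SeriesSketch(["DD0000/data0000", "DD0001/data0001"], max_level=3)
    assert all(kw == {"max_level": 3} for _, kw in ts)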


diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Christopher Moody <cemoody at ucsc.edu>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -18,17 +20,16 @@
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-
+.
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
 import numpy as np
+import os.path
+import glob
 import stat
 import weakref
-import cPickle
-import os
-import struct
+import cStringIO
 
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
@@ -42,64 +43,65 @@
 from .fields import \
     ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
+    mpc_conversion
 from yt.utilities.io_handler import \
     io_registry
+from yt.utilities.lib import \
+    get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-try:
-    import yt.frontends.ramses._ramses_reader as _ramses_reader
-except ImportError:
-    _ramses_reader = None
+from .definitions import *
+from io import _read_child_mask_level
+from io import read_particles
+from io import read_stars
+from io import spread_ages
+from io import _count_art_octs
+from io import _read_art_level_info
+from io import _read_art_child
+from io import _skip_record
+from io import _read_record
+from io import _read_frecord
+from io import _read_record_size
+from io import _read_struct
+from io import b2t
 
+
+import yt.frontends.ramses._ramses_reader as _ramses_reader
+
+from .fields import ARTFieldInfo, KnownARTFields
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
-from yt.frontends.art.definitions import art_particle_field_names
-
-from yt.frontends.art.io import _read_child_mask_level
-from yt.frontends.art.io import read_particles
-from yt.frontends.art.io import read_stars
-from yt.frontends.art.io import _count_art_octs
-from yt.frontends.art.io import _read_art_level_info
-from yt.frontends.art.io import _read_art_child
-from yt.frontends.art.io import _skip_record
-from yt.frontends.art.io import _read_record
-from yt.frontends.art.io import _read_frecord
-from yt.frontends.art.io import _read_record_size
-from yt.frontends.art.io import _read_struct
-from yt.frontends.art.io import b2t
-
-def num_deep_inc(f):
-    def wrap(self, *args, **kwargs):
-        self.num_deep += 1
-        rv = f(self, *args, **kwargs)
-        self.num_deep -= 1
-        return rv
-    return wrap
-
 class ARTGrid(AMRGridPatch):
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, level, locations, props,child_mask=None):
+    def __init__(self, id, hierarchy, level, locations,start_index, le,re,gd,
+            child_mask=None,nop=0):
         AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
                               hierarchy = hierarchy)
-        start_index = props[0]
+        start_index = start_index #redundant; start_index now arrives as an argument
         self.Level = level
         self.Parent = []
         self.Children = []
         self.locations = locations
         self.start_index = start_index.copy()
         
-        self.LeftEdge = props[0]
-        self.RightEdge = props[1]
-        self.ActiveDimensions = props[2] 
-        #if child_mask is not None:
-        #    self._set_child_mask(child_mask)
+        self.LeftEdge = le
+        self.RightEdge = re
+        self.ActiveDimensions = gd
+        self.NumberOfParticles=nop
+        for particle_field in particle_fields:
+            setattr(self,particle_field,np.array([]))
 
     def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
             self.dds = self.Parent[0].dds / self.pf.refine_by
@@ -109,7 +111,8 @@
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] \
+                = self.dds
 
     def get_global_startindex(self):
         """
@@ -124,381 +127,272 @@
         pdx = self.Parent[0].dds
         start_index = (self.Parent[0].get_global_startindex()) + \
                        np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        self.start_index = (start_index*self.pf.refine_by)\
+                           .astype('int64').ravel()
         return self.start_index
 
     def __repr__(self):
         return "ARTGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
 class ARTHierarchy(AMRHierarchy):
-
     grid = ARTGrid
     _handle = None
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
-        #for now, the hierarchy file is the parameter file!
+        self.max_level = pf.max_level
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.float_type = np.float64
         AMRHierarchy.__init__(self,pf,data_style)
         self._setup_field_list()
-        
+
     def _initialize_data_storage(self):
         pass
-
+    
     def _detect_fields(self):
-        # This will need to be generalized to be used elsewhere.
-        self.field_list = [ 'Density','TotalEnergy',
-             'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-             'Pressure','Gamma','GasEnergy',
-             'MetalDensitySNII', 'MetalDensitySNIa',
-             'PotentialNew','PotentialOld']
-        self.field_list += art_particle_field_names
-
+        self.field_list = []
+        self.field_list += fluid_fields
+        self.field_list += particle_fields
+        
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
         self.object_types.sort()
-
+            
     def _count_grids(self):
         LEVEL_OF_EDGE = 7
         MAX_EDGE = (2 << (LEVEL_OF_EDGE- 1))
-        
         min_eff = 0.30
-        
         vol_max = 128**3
-        
-        f = open(self.pf.parameter_filename,'rb')
-        
-        
-        (self.pf.nhydro_vars, self.pf.level_info,
-        self.pf.level_oct_offsets, 
-        self.pf.level_child_offsets) = \
-                         _count_art_octs(f, 
-                          self.pf.child_grid_offset,
-                          self.pf.min_level, self.pf.max_level)
-        self.pf.level_info[0]=self.pf.ncell
-        self.pf.level_info = np.array(self.pf.level_info)        
-        self.pf.level_offsets = self.pf.level_child_offsets
-        self.pf.level_offsets = np.array(self.pf.level_offsets, dtype='int64')
-        self.pf.level_offsets[0] = self.pf.root_grid_offset
-        
-        self.pf.level_art_child_masks = {}
-        cm = self.pf.root_iOctCh>0
-        cm_shape = (1,)+cm.shape 
-        self.pf.level_art_child_masks[0] = cm.reshape(cm_shape).astype('uint8')        
-        del cm
-        
-        root_psg = _ramses_reader.ProtoSubgrid(
-                        np.zeros(3, dtype='int64'), # left index of PSG
-                        self.pf.domain_dimensions, # dim of PSG
-                        np.zeros((1,3), dtype='int64'), # left edges of grids
-                        np.zeros((1,6), dtype='int64') # empty
-                        )
-        
-        self.proto_grids = [[root_psg],]
-        for level in xrange(1, len(self.pf.level_info)):
-            if self.pf.level_info[level] == 0:
-                self.proto_grids.append([])
-                continue
-            psgs = []
-            effs,sizes = [], []
-
-            if level > self.pf.limit_level : continue
-            
-            #refers to the left index for the art octgrid
-            left_index, fl, nocts = _read_art_level_info(f, self.pf.level_oct_offsets,level)
-            #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
-            
-            #read in the child masks for this level and save them
-            idc, art_child_mask = _read_child_mask_level(f, self.pf.level_child_offsets,
-                level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
-            art_child_mask = art_child_mask.reshape((nocts,2,2,2))
-            self.pf.level_art_child_masks[level]=art_child_mask
-            #child_mask is zero where child grids exist and
-            #thus where higher resolution data is available
-            
-            
-            #compute the hilbert indices up to a certain level
-            #the indices will associate an oct grid to the nearest
-            #hilbert index?
-            base_level = int( np.log10(self.pf.domain_dimensions.max()) /
-                              np.log10(2))
-            hilbert_indices = _ramses_reader.get_hilbert_indices(
-                                    level + base_level, left_index)
-            #print base_level, hilbert_indices.max(),
-            hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
-            #print hilbert_indices.max()
-            
-            # Strictly speaking, we don't care about the index of any
-            # individual oct at this point.  So we can then split them up.
-            unique_indices = np.unique(hilbert_indices)
-            mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
-                        level, unique_indices.size, hilbert_indices.size)
-            
-            #use the hilbert indices to order oct grids so that consecutive
-            #items on a list are spatially near each other
-            #this is useful because we will define grid patches over these
-            #octs, which are more efficient if the octs are spatially close
-            
-            #split into list of lists, with domains containing 
-            #lists of sub octgrid left indices and an index
-            #referring to the domain on which they live
-            pbar = get_pbar("Calc Hilbert Indices ",1)
-            locs, lefts = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices, left_index, fl)
-            pbar.finish()
-            
-            #iterate over the domains    
-            step=0
-            pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
-            psg_eff = []
-            for ddleft_index, ddfl in zip(lefts, locs):
-                #iterate over just the unique octs
-                #why would we ever have non-unique octs?
-                #perhaps the hilbert ordering may visit the same
-                #oct multiple times - review only unique octs 
-                #for idomain in np.unique(ddfl[:,1]):
-                #dom_ind = ddfl[:,1] == idomain
-                #dleft_index = ddleft_index[dom_ind,:]
-                #dfl = ddfl[dom_ind,:]
+        with open(self.pf.parameter_filename,'rb') as f:
+            (self.pf.nhydro_vars, self.pf.level_info,
+            self.pf.level_oct_offsets, 
+            self.pf.level_child_offsets) = \
+                             _count_art_octs(f, 
+                              self.pf.child_grid_offset,
+                              self.pf.min_level, self.pf.max_level)
+            self.pf.level_info[0]=self.pf.ncell
+            self.pf.level_info = np.array(self.pf.level_info)
+            self.pf.level_offsets = self.pf.level_child_offsets
+            self.pf.level_offsets = np.array(self.pf.level_offsets, 
+                                             dtype='int64')
+            self.pf.level_offsets[0] = self.pf.root_grid_offset
+            self.pf.level_art_child_masks = {}
+            cm = self.pf.root_iOctCh>0
+            cm_shape = (1,)+cm.shape 
+            self.pf.level_art_child_masks[0] = \
+                    cm.reshape(cm_shape).astype('uint8')        
+            del cm
+            root_psg = _ramses_reader.ProtoSubgrid(
+                            np.zeros(3, dtype='int64'), # left index of PSG
+                            self.pf.domain_dimensions, # dim of PSG
+                            np.zeros((1,3), dtype='int64'),# left edges of grids
+                            np.zeros((1,6), dtype='int64') # empty
+                            )
+            self.proto_grids = [[root_psg],]
+            for level in xrange(1, len(self.pf.level_info)):
+                if self.pf.level_info[level] == 0:
+                    self.proto_grids.append([])
+                    continue
+                psgs = []
+                effs,sizes = [], []
+                if self.pf.limit_level:
+                    if level > self.pf.limit_level : continue
+                #refers to the left index for the art octgrid
+                left_index, fl, nocts,root_level = _read_art_level_info(f, 
+                        self.pf.level_oct_offsets,level,
+                        coarse_grid=self.pf.domain_dimensions[0])
+                if level>1:
+                    assert root_level == last_root_level
+                last_root_level = root_level
+                #left_index_gridpatch = left_index >> LEVEL_OF_EDGE
+                #read in the child masks for this level and save them
+                idc, art_child_mask = _read_child_mask_level(f, 
+                        self.pf.level_child_offsets,
+                    level,nocts*8,nhydro_vars=self.pf.nhydro_vars)
+                art_child_mask = art_child_mask.reshape((nocts,2,2,2))
+                self.pf.level_art_child_masks[level]=art_child_mask
+                #child_mask is zero where child grids exist and
+                #thus where higher resolution data is available
+                #compute the hilbert indices up to a certain level
+                #the indices will associate an oct grid to the nearest
+                #hilbert index?
+                base_level = int( np.log10(self.pf.domain_dimensions.max()) /
+                                  np.log10(2))
+                hilbert_indices = _ramses_reader.get_hilbert_indices(
+                                        level + base_level, left_index)
+                #print base_level, hilbert_indices.max(),
+                hilbert_indices = hilbert_indices >> base_level + LEVEL_OF_EDGE
+                #print hilbert_indices.max()
+                # Strictly speaking, we don't care about the index of any
+                # individual oct at this point.  So we can then split them up.
+                unique_indices = np.unique(hilbert_indices)
+                mylog.info("Level % 2i has % 10i unique indices for %0.3e octs",
+                            level, unique_indices.size, hilbert_indices.size)
+                #use the hilbert indices to order oct grids so that consecutive
+                #items on a list are spatially near each other
+                #this is useful because we will define grid patches over these
+                #octs, which are more efficient if the octs are spatially close
+                #split into list of lists, with domains containing 
+                #lists of sub octgrid left indices and an index
+                #referring to the domain on which they live
+                pbar = get_pbar("Calc Hilbert Indices ",1)
+                locs, lefts = _ramses_reader.get_array_indices_lists(
+                            hilbert_indices, unique_indices, left_index, fl)
+                pbar.finish()
+                #iterate over the domains    
+                step=0
+                pbar = get_pbar("Re-gridding  Level %i "%level, len(locs))
+                psg_eff = []
+                for ddleft_index, ddfl in zip(lefts, locs):
+                    #iterate over just the unique octs
+                    #why would we ever have non-unique octs?
+                    #perhaps the hilbert ordering may visit the same
+                    #oct multiple times - review only unique octs 
+                    #for idomain in np.unique(ddfl[:,1]):
+                    #dom_ind = ddfl[:,1] == idomain
+                    #dleft_index = ddleft_index[dom_ind,:]
+                    #dfl = ddfl[dom_ind,:]
+                    dleft_index = ddleft_index
+                    dfl = ddfl
+                    initial_left = np.min(dleft_index, axis=0)
+                    idims = (np.max(dleft_index, axis=0) - initial_left).ravel()
+                    idims +=2
+                    #this creates a grid patch that doesn't cover the whole level
+                    #necessarily, but with other patches covers all the regions
+                    #with octs. This object automatically shrinks its size
+                    #to barely encompass the octs inside of it.
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    #because grid patches maybe mostly empty, and with octs
+                    #that only partially fill the grid, it may be more efficient
+                    #to split large patches into smaller patches. We split
+                    #if less than 10% the volume of a patch is covered with octs
+                    if idims.prod() > vol_max or psg.efficiency < min_eff:
+                        psg_split = _ramses_reader.recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dfl,min_eff=min_eff,use_center=True,
+                            split_on_vol=vol_max)
+                        psgs.extend(psg_split)
+                        psg_eff += [x.efficiency for x in psg_split] 
+                    else:
+                        psgs.append(psg)
+                        psg_eff =  [psg.efficiency,]
+                    tol = 1.00001
+                    step+=1
+                    pbar.update(step)
+                eff_mean = np.mean(psg_eff)
+                eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
+                eff_nall = len(psg_eff)
+                mylog.info("Average subgrid efficiency %02.1f %%",
+                            eff_mean*100.0)
+                mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
+                            eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
                 
-                dleft_index = ddleft_index
-                dfl = ddfl
-                initial_left = np.min(dleft_index, axis=0)
-                idims = (np.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #this creates a grid patch that doesn't cover the whole level
-                #necessarily, but with other patches covers all the regions
-                #with octs. This object automatically shrinks its size
-                #to barely encompass the octs inside of it.
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                
-                #because grid patches may still be mostly empty, and with octs
-                #that only partially fill the grid,it  may be more efficient
-                #to split large patches into smaller patches. We split
-                #if less than 10% the volume of a patch is covered with octs
-                if idims.prod() > vol_max or psg.efficiency < min_eff:
-                    psg_split = _ramses_reader.recursive_patch_splitting(
-                        psg, idims, initial_left, 
-                        dleft_index, dfl,min_eff=min_eff,use_center=True,
-                        split_on_vol=vol_max)
-                    
-                    psgs.extend(psg_split)
-                    psg_eff += [x.efficiency for x in psg_split] 
-                else:
-                    psgs.append(psg)
-                    psg_eff =  [psg.efficiency,]
-                
-                tol = 1.00001
-                
-                
-                step+=1
-                pbar.update(step)
-            eff_mean = np.mean(psg_eff)
-            eff_nmin = np.sum([e<=min_eff*tol for e in psg_eff])
-            eff_nall = len(psg_eff)
-            mylog.info("Average subgrid efficiency %02.1f %%",
-                        eff_mean*100.0)
-            mylog.info("%02.1f%% (%i/%i) of grids had minimum efficiency",
-                        eff_nmin*100.0/eff_nall,eff_nmin,eff_nall)
-            
-        
-            mylog.debug("Done with level % 2i", level)
-            pbar.finish()
-            self.proto_grids.append(psgs)
-            #print sum(len(psg.grid_file_locations) for psg in psgs)
-            #mylog.info("Final grid count: %s", len(self.proto_grids[level]))
-            if len(self.proto_grids[level]) == 1: continue
+                mylog.info("Done with level % 2i; max LE %i", level,
+                           np.max(left_index))
+                pbar.finish()
+                self.proto_grids.append(psgs)
+                if len(self.proto_grids[level]) == 1: continue
         self.num_grids = sum(len(l) for l in self.proto_grids)
-                    
-            
-            
-
-    num_deep = 0
-
         
     def _parse_hierarchy(self):
-        """ The root grid has no octs except one which is refined.
-        Still, it is the size of 128 cells along a length.
-        Ignore the proto subgrid created for the root grid - it is wrong.
-        """
         grids = []
         gi = 0
-        
+        dd=self.pf.domain_dimensions
         for level, grid_list in enumerate(self.proto_grids):
-            #The root level spans [0,2]
-            #The next level spans [0,256]
-            #The 3rd Level spans up to 128*2^3, etc.
-            #Correct root level to span up to 128
-            correction=1L
-            if level == 0:
-                correction=64L
+            dds = ((2**level) * dd).astype("float64")
             for g in grid_list:
                 fl = g.grid_file_locations
-                props = g.get_properties()*correction
-                dds = ((2**level) * self.pf.domain_dimensions).astype("float64")
-                self.grid_left_edge[gi,:] = props[0,:] / dds
-                self.grid_right_edge[gi,:] = props[1,:] / dds
-                self.grid_dimensions[gi,:] = props[2,:]
+                props = g.get_properties()
+                start_index = props[0,:]
+                le = props[0,:].astype('float64')/dds
+                re = props[1,:].astype('float64')/dds
+                gd = props[2,:].astype('int64')
+                if level==0:
+                    le = np.zeros(3,dtype='float64')
+                    re = np.ones(3,dtype='float64')
+                    gd = dd
+                self.grid_left_edge[gi,:] = le
+                self.grid_right_edge[gi,:] = re
+                self.grid_dimensions[gi,:] = gd
+                assert np.all(self.grid_left_edge[gi,:]<=1.0)    
                 self.grid_levels[gi,:] = level
                 child_mask = np.zeros(props[2,:],'uint8')
-                amr_utils.fill_child_mask(fl,props[0],
+                amr_utils.fill_child_mask(fl,start_index,
                     self.pf.level_art_child_masks[level],
                     child_mask)
                 grids.append(self.grid(gi, self, level, fl, 
-                    props*np.array(correction).astype('int64')))
+                    start_index,le,re,gd))
                 gi += 1
         self.grids = np.empty(len(grids), dtype='object')
-        
-
-        if self.pf.file_particle_data:
+        if not self.pf.skip_particles and self.pf.file_particle_data:
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
-            Nrow     = self.pf.parameters['Nrow']
-            nstars = lspecies[-1]
-            a = self.pf.parameters['aexpn']
-            hubble = self.pf.parameters['hubble']
-            ud  = self.pf.parameters['r0']*a/hubble #proper Mpc units
-            uv  = self.pf.parameters['v0']/(a**1.0)*1.0e5 #proper cm/s
-            um  = self.pf.parameters['aM0'] #mass units in solar masses
-            um *= 1.989e33 #convert solar masses to grams 
-            pbar = get_pbar("Loading Particles   ",5)
-            self.pf.particle_position,self.pf.particle_velocity = \
-                read_particles(self.pf.file_particle_data,nstars,Nrow)
-            pbar.update(1)
-            npa,npb=0,0
-            npb = lspecies[-1]
-            clspecies = np.concatenate(([0,],lspecies))
-            if self.pf.only_particle_type is not None:
-                npb = lspecies[0]
-                if type(self.pf.only_particle_type)==type(5):
-                    npa = clspecies[self.pf.only_particle_type]
-                    npb = clspecies[self.pf.only_particle_type+1]
-            np = npb-npa
-            self.pf.particle_position   = self.pf.particle_position[npa:npb]
-            #do NOT correct by an offset of 1.0
-            #self.pf.particle_position  -= 1.0 #fortran indices start with 0
-            pbar.update(2)
-            self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
-            pbar.update(3)
-            self.pf.particle_velocity   = self.pf.particle_velocity[npa:npb]
-            self.pf.particle_velocity  *= uv #to proper cm/s
-            pbar.update(4)
-            self.pf.particle_type         = np.zeros(np,dtype='uint8')
-            self.pf.particle_mass         = np.zeros(np,dtype='float64')
-            self.pf.particle_mass_initial = np.zeros(np,dtype='float64')-1
-            self.pf.particle_creation_time= np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity1 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_metallicity2 = np.zeros(np,dtype='float64')-1
-            self.pf.particle_age          = np.zeros(np,dtype='float64')-1
-            
-            dist = self.pf['cm']/self.pf.domain_dimensions[0]
-            self.pf.conversion_factors['particle_mass'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_mass_initial'] = 1.0 #solar mass in g
-            self.pf.conversion_factors['particle_species'] = 1.0
-            for ax in 'xyz':
-                self.pf.conversion_factors['particle_velocity_%s'%ax] = 1.0
-                #already in unitary units
-                self.pf.conversion_factors['particle_position_%s'%ax] = 1.0 
-            self.pf.conversion_factors['particle_creation_time'] =  31556926.0
-            self.pf.conversion_factors['particle_metallicity']=1.0
-            self.pf.conversion_factors['particle_metallicity1']=1.0
-            self.pf.conversion_factors['particle_metallicity2']=1.0
-            self.pf.conversion_factors['particle_index']=1.0
-            self.pf.conversion_factors['particle_type']=1
-            self.pf.conversion_factors['particle_age']=1
-            self.pf.conversion_factors['Msun'] = 5.027e-34 #conversion to solar mass units
-            
-
-            a,b=0,0
+            self.pf.particle_position,self.pf.particle_velocity= \
+                read_particles(self.pf.file_particle_data,
+                        self.pf.parameters['Nrow'])
+            nparticles = lspecies[-1]
+            if not np.all(self.pf.particle_position[nparticles:]==0.0):
+                mylog.info('WARNING: unused particles discovered from lspecies')
+            self.pf.particle_position = self.pf.particle_position[:nparticles]
+            self.pf.particle_velocity = self.pf.particle_velocity[:nparticles]
+            self.pf.particle_position  /= self.pf.domain_dimensions 
+            self.pf.particle_type = np.zeros(nparticles,dtype='int')
+            self.pf.particle_mass = np.zeros(nparticles,dtype='float64')
+            self.pf.particle_star_index = len(wspecies)-1
+            a=0
             for i,(b,m) in enumerate(zip(lspecies,wspecies)):
-                if type(self.pf.only_particle_type)==type(5):
-                    if not i==self.pf.only_particle_type:
-                        continue
-                    self.pf.particle_type += i
-                    self.pf.particle_mass += m*um
-
+                if i == self.pf.particle_star_index:
+                    assert m==0.0
+                    sa,sb = a,b
                 else:
-                    self.pf.particle_type[a:b] = i #particle type
-                    self.pf.particle_mass[a:b] = m*um #mass in solar masses
+                    assert m>0.0
+                self.pf.particle_type[a:b] = i #particle type
+                self.pf.particle_mass[a:b] = m #mass in code units
                 a=b
-            pbar.finish()
-
-            nparticles = [0,]+list(lspecies)
-            for j,np in enumerate(nparticles):
-                mylog.debug('found %i of particle type %i'%(j,np))
-            
-            self.pf.particle_star_index = i
-            
-            do_stars = (self.pf.only_particle_type is None) or \
-                       (self.pf.only_particle_type == -1) or \
-                       (self.pf.only_particle_type == len(lspecies))
-            if self.pf.file_star_data and do_stars: 
-                nstars, mass, imass, tbirth, metallicity1, metallicity2 \
-                     = read_stars(self.pf.file_star_data,nstars,Nrow)
-                nstars = nstars[0] 
-                if nstars > 0 :
+            if not self.pf.skip_stars and self.pf.file_particle_stars: 
+                (nstars_rs,), mass, imass, tbirth, metallicity1, metallicity2, \
+                        ws_old,ws_oldi,tdum,adum \
+                     = read_stars(self.pf.file_particle_stars)
+                self.pf.nstars_rs = nstars_rs     
+                self.pf.nstars_pa = b-a
+                inconsistent=self.pf.particle_type==self.pf.particle_star_index
+                if not nstars_rs==np.sum(inconsistent):
+                    mylog.info('WARNING!: nstars is inconsistent!')
+                del inconsistent
+                if nstars_rs > 0 :
                     n=min(1e2,len(tbirth))
-                    pbar = get_pbar("Stellar Ages        ",n)
-                    sages  = \
-                        b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= sec_per_Gyr #from Gyr to seconds
-                    sages = self.pf.current_time-sages
-                    self.pf.particle_age[-nstars:] = sages
-                    pbar.finish()
-                    self.pf.particle_metallicity1[-nstars:] = metallicity1
-                    self.pf.particle_metallicity2[-nstars:] = metallicity2
-                    #self.pf.particle_metallicity1 *= 0.0199 
-                    #self.pf.particle_metallicity2 *= 0.0199 
-                    self.pf.particle_mass_initial[-nstars:] = imass*um
-                    self.pf.particle_mass[-nstars:] = mass*um
-
-            done = 0
-            init = self.pf.particle_position.shape[0]
-            pos = self.pf.particle_position
-            #particle indices travel with the particle positions
-            #pos = np.vstack((np.arange(pos.shape[0]),pos.T)).T 
-            if type(self.pf.grid_particles) == type(5):
-                particle_level = min(self.pf.max_level,self.pf.grid_particles)
-            else:
-                particle_level = 2
-            grid_particle_count = np.zeros((len(grids),1),dtype='int64')
-
-            pbar = get_pbar("Gridding Particles ",init)
-            assignment,ilists = amr_utils.assign_particles_to_cell_lists(
-                    self.grid_levels.ravel().astype('int32'),
-                    np.zeros(len(pos[:,0])).astype('int32')-1,
-                    particle_level, #dont grid particles past this
-                    self.grid_left_edge.astype('float32'),
-                    self.grid_right_edge.astype('float32'),
-                    pos[:,0].astype('float32'),
-                    pos[:,1].astype('float32'),
-                    pos[:,2].astype('float32'))
-            pbar.finish()
-            
-            pbar = get_pbar("Filling grids ",init)
-            for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
-                np = len(ilist)
-                grid_particle_count[gidx,0]=np
-                g.hierarchy.grid_particle_count = grid_particle_count
-                g.particle_indices = ilist
-                grids[gidx] = g
-                done += np
-                pbar.update(done)
-            pbar.finish()
-
-            #assert init-done== 0 #we have gridded every particle
-            
-        pbar = get_pbar("Finalizing grids ",len(grids))
-        for gi, g in enumerate(grids): 
-            self.grids[gi] = g
-        pbar.finish()
-            
-
+                    birthtimes= b2t(tbirth,n=n)
+                    birthtimes = birthtimes.astype('float64')
+                    assert birthtimes.shape == tbirth.shape    
+                    birthtimes*= 1.0e9 #from Gyr to yr
+                    birthtimes*= 365*24*3600 #to seconds
+                    ages = self.pf.current_time-birthtimes
+                    spread = self.pf.spread_age
+                    if type(spread)==type(5.5):
+                        ages = spread_ages(ages,spread=spread)
+                    elif spread:
+                        ages = spread_ages(ages)
+                    idx = self.pf.particle_type == self.pf.particle_star_index
+                    for psf in particle_star_fields:
+                        if getattr(self.pf,psf,None) is None:
+                            setattr(self.pf,psf,
+                                    np.zeros(nparticles,dtype='float64'))
+                    self.pf.particle_age[sa:sb] = ages
+                    self.pf.particle_mass[sa:sb] = mass
+                    self.pf.particle_mass_initial[sa:sb] = imass
+                    self.pf.particle_creation_time[sa:sb] = birthtimes
+                    self.pf.particle_metallicity1[sa:sb] = metallicity1
+                    self.pf.particle_metallicity2[sa:sb] = metallicity2
+                    self.pf.particle_metallicity[sa:sb]  = metallicity1\
+                                                          + metallicity2
+        for gi,g in enumerate(grids):    
+            self.grids[gi]=g
+                    
     def _get_grid_parents(self, grid, LE, RE):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(LE, RE)
@@ -507,53 +401,58 @@
         return self.grids[mask]
 
     def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        pb = get_pbar("Populating grids", len(self.grids))
         for gi,g in enumerate(self.grids):
-            parents = self._get_grid_parents(g,
-                            self.grid_left_edge[gi,:],
-                            self.grid_right_edge[gi,:])
+            pb.update(gi)
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level - 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            #Now we do overlapping siblings; note that one has to "win" with
+            #siblings, so we assume the lower ID one will "win"
+            amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
+            #instead of gridding particles assign them all to the root grid
+            if gi==0:
+                for particle_field in particle_fields:
+                    source = getattr(self.pf,particle_field,None)
+                    if source is None:
+                        for i,ax in enumerate('xyz'):
+                            pf = particle_field.replace('_%s'%ax,'')
+                            source = getattr(self.pf,pf,None)
+                            if source is not None:
+                                source = source[:,i]
+                                break
+                    if source is not None:
+                        mylog.info("Attaching %s to the root grid",
+                                    particle_field)
+                        g.NumberOfParticles = source.shape[0]
+                        setattr(g,particle_field,source)
+                g.particle_index = np.arange(g.NumberOfParticles)
+        pb.finish()
         self.max_level = self.grid_levels.max()
 
-    # def _populate_grid_objects(self):
-    #     mask = np.empty(self.grids.size, dtype='int32')
-    #     pb = get_pbar("Populating grids", len(self.grids))
-    #     for gi,g in enumerate(self.grids):
-    #         pb.update(gi)
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level - 1,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask)
-    #         parents = self.grids[mask.astype("bool")]
-    #         if len(parents) > 0:
-    #             g.Parent.extend((p for p in parents.tolist()
-    #                     if p.locations[0,0] == g.locations[0,0]))
-    #             for p in parents: p.Children.append(g)
-    #         # Now we do overlapping siblings; note that one has to "win" with
-    #         # siblings, so we assume the lower ID one will "win"
-    #         amr_utils.get_box_grids_level(self.grid_left_edge[gi,:],
-    #                             self.grid_right_edge[gi,:],
-    #                             g.Level,
-    #                             self.grid_left_edge, self.grid_right_edge,
-    #                             self.grid_levels, mask, gi)
-    #         mask[gi] = False
-    #         siblings = self.grids[mask.astype("bool")]
-    #         if len(siblings) > 0:
-    #             g.OverlappingSiblings = siblings.tolist()
-    #         g._prepare_grid()
-    #         g._setup_dx()
-    #     pb.finish()
-    #     self.max_level = self.grid_levels.max()
-
     def _setup_field_list(self):
-        if self.parameter_file.use_particles:
+        if not self.parameter_file.skip_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
-            for field in art_particle_field_names:
+            for field in particle_fields:
                 def external_wrapper(f):
                     def _convert_function(data):
                         return data.convert(f)
@@ -580,97 +479,67 @@
     _hierarchy_class = ARTHierarchy
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
-    _handle = None
     
-    def __init__(self, filename, data_style='art',
-                 storage_filename = None, 
-                 file_particle_header=None, 
-                 file_particle_data=None,
-                 file_star_data=None,
-                 discover_particles=True,
-                 use_particles=True,
-                 limit_level=None,
-                 only_particle_type = None,
-                 grid_particles=False,
-                 single_particle_mass=False,
-                 single_particle_type=0):
-        
-        #dirn = os.path.dirname(filename)
-        base = os.path.basename(filename)
-        aexp = base.split('_')[2].replace('.d','')
-        if not aexp.startswith('a'):
-            aexp = '_'+aexp
-        
-        self.file_particle_header = file_particle_header
-        self.file_particle_data = file_particle_data
-        self.file_star_data = file_star_data
-        self.only_particle_type = only_particle_type
-        self.grid_particles = grid_particles
-        self.single_particle_mass = single_particle_mass
-        
-        if limit_level is None:
-            self.limit_level = np.inf
-        else:
-            limit_level = int(limit_level)
-            mylog.info("Using maximum level: %i",limit_level)
-            self.limit_level = limit_level
-        
-        def repu(x):
-            for i in range(5):
-                x=x.replace('__','_')
-            return x    
-        if discover_particles:
-            if file_particle_header is None:
-                loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_header = loc
-                    mylog.info("Discovered particle header: %s",os.path.basename(loc))
-            if file_particle_data is None:
-                loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_particle_data = loc
-                    mylog.info("Discovered particle data:   %s",os.path.basename(loc))
-            if file_star_data is None:
-                loc = filename.replace(base,'stars_%s.dat'%aexp)
-                loc = repu(loc)
-                if os.path.exists(loc):
-                    self.file_star_data = loc
-                    mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
-        
-        self.use_particles = any([self.file_particle_header,
-            self.file_star_data, self.file_particle_data])
-        StaticOutput.__init__(self, filename, data_style)
-        
-        self.dimensionality = 3
-        self.refine_by = 2
-        self.parameters["HydroMethod"] = 'art'
-        self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["InitialTime"]=self.current_time
+    def __init__(self, file_amr, storage_filename = None,
+            skip_particles=False,skip_stars=False,limit_level=None,
+            spread_age=True,data_style='art'):
+        self.data_style = data_style
+        self._find_files(file_amr)
+        self.skip_particles = skip_particles
+        self.skip_stars = skip_stars
+        self.file_amr = file_amr
+        self.parameter_filename = file_amr
+        self.limit_level = limit_level
+        self.spread_age = spread_age
+        self.domain_left_edge  = np.zeros(3,dtype='float64')
+        self.domain_right_edge = np.ones(3,dtype='float64') 
+        StaticOutput.__init__(self, file_amr, data_style)
         self.storage_filename = storage_filename
-        
-        
+
+    def _find_files(self,file_amr):
+        """
+        Given the AMR base filename, attempt to find the
+        particle header, star files, etc.
+        """
+        prefix,suffix = filename_pattern['amr'].split('%s')
+        affix = os.path.basename(file_amr).replace(prefix,'')
+        affix = affix.replace(suffix,'')
+        affix = affix.replace('_','')
+        affix = affix[1:-1]
+        dirname = os.path.dirname(file_amr)
+        for filetype, pattern in filename_pattern.items():
+            #sometimes the affix is surrounded by an extraneous _
+            #so check for an extra character on either side
+            check_filename = dirname+'/'+pattern%('?%s?'%affix)
+            filenames = glob.glob(check_filename)
+            if len(filenames)==1:
+                setattr(self,"file_"+filetype,filenames[0])
+                mylog.info('discovered %s',filetype)
+            elif len(filenames)>1:
+                setattr(self,"file_"+filetype,None)
+                mylog.info("Ambiguous number of files found for %s",
+                        check_filename)
+            else:
+                setattr(self,"file_"+filetype,None)
+
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical units based
+        on the parameters from the header
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        
-        
-        z = self.current_redshift
-        
-        h = self.hubble_constant
-        boxcm_cal = self["boxh"]
+        self.units['unitary'] = 1.0
+
+        #spatial units
+        z   = self.current_redshift
+        h   = self.hubble_constant
+        boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
         aexpn = self["aexpn"]
@@ -679,269 +548,130 @@
             self.units[unit+'h'] = mpc_conversion[unit] * box_proper * h
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
-        # Variable names have been chosen to reflect primary reference
-        #Om0 = self["Om0"]
-        #boxh = self["boxh"]
-        wmu = self["wmu"]
-        #ng = self.domain_dimensions[0]
-        #r0 = self["cmh"]/ng # comoving cm h^-1
-        #t0 = 6.17e17/(self.hubble_constant + np.sqrt(self.omega_matter))
-        #v0 = r0 / t0
-        #rho0 = 1.8791e-29 * self.hubble_constant**2.0 * self.omega_matter
-        #e0 = v0**2.0
+
+        #all other units
+        wmu = self.parameters["wmu"]
+        Om0 = self.parameters['Om0']
+        ng  = self.parameters['ng']
+        wmu = self.parameters["wmu"]
+        boxh   = self.parameters['boxh'] 
+        aexpn  = self.parameters["aexpn"]
+        hubble = self.parameters['hubble']
+
+        cf = defaultdict(lambda: 1.0)
+        r0 = boxh/ng
+        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        S_0 = 52.077 * wmu**(5.0/3.0)
+        S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
+        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        v0 = 50.0*r0*np.sqrt(Om0)
+        t0 = r0/v0
+        #rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
+        rho0 = 2.776e11 * hubble**2.0 * Om0
+        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
+        cf['r0']=r0
+        cf['P0']=P0
+        cf['T_0']=T_0
+        cf['S_0']=S_0
+        cf['v0']=v0
+        cf['t0']=t0
+        cf['rho0']=rho0
+        cf['tr']=tr
+        cf['aM0']=aM0
+
+        #factors to multiply the native code units to CGS
+        cf['Pressure'] = P0 #already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        cf["Mass"] = aM0 * 1.98892e33
+        cf["Density"] = rho0*(aexpn**-3.0)
+        cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
+        cf["Potential"] = 1.0
+        cf["Entropy"] = S_0
+        cf["Temperature"] = tr
+        self.cosmological_simulation = True
+        self.conversion_factors = cf
         
-        wmu = self["wmu"]
-        boxh = self["boxh"]
-        aexpn = self["aexpn"]
-        hubble = self.hubble_constant
-        ng = self.domain_dimensions[0]
-        self.r0 = boxh/ng
-        self.v0 =  self.r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
-        self.t0 = self.r0/self.v0
-        # this is 3H0^2 / (8pi*G) *h*Omega0 with H0=100km/s. 
-        # ie, critical density 
-        self.rho0 = 1.8791e-29 * hubble**2.0 * self.omega_matter
-        self.tr = 2./3. *(3.03e5*self.r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
-        tr  = self.tr
-        
-        #factors to multiply the native code units to CGS
-        self.conversion_factors['Pressure'] = self.parameters["P0"] #already cgs
-        self.conversion_factors['Velocity'] = self.parameters['v0']*1e3 #km/s -> cm/s
-        self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
-        self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
-        self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        #self.conversion_factors["Temperature"] = tr 
-        self.conversion_factors["Potential"] = 1.0
-        self.cosmological_simulation = True
-        
-        # Now our conversion factors
+        for particle_field in particle_fields:
+            self.conversion_factors[particle_field] =  1.0
         for ax in 'xyz':
-            # Add on the 1e5 to get to cm/s
-            self.conversion_factors["%s-velocity" % ax] = self.v0/aexpn
-        seconds = self.t0
+            self.conversion_factors["%s-velocity" % ax] = 1.0
+            self.conversion_factors["particle_velocity_%s"%ax] = cf['Velocity']
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
+        self.conversion_factors['particle_mass'] = cf['Mass']
+        self.conversion_factors['particle_creation_time'] =  31556926.0
+        self.conversion_factors['Msun'] = 5.027e-34 
 
-        #we were already in seconds, go back in to code units
-        #self.current_time /= self.t0 
-        #self.current_time = b2t(self.current_time,n=1)
-        
-    
     def _parse_parameter_file(self):
-        # We set our domain to run from 0 .. 1 since we are otherwise
-        # unconstrained.
-        self.domain_left_edge = np.zeros(3, dtype="float64")
-        self.domain_right_edge = np.ones(3, dtype="float64")
+        """
+        Get the various simulation parameters & constants.
+        """
+        self.dimensionality = 3
+        self.refine_by = 2
+        self.cosmological_simulation = True
+        self.parameters = {}
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self.parameters = {}
-
-        header_struct = [
-            ('>i','pad byte'),
-            ('>256s','jname'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','istep'),
-            ('>d','t'),
-            ('>d','dt'),
-            ('>f','aexpn'),
-            ('>f','ainit'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>f','boxh'),
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','Omb0'),
-            ('>f','hubble'),
-            ('>i','pad byte'),
-            
-            ('>i','pad byte'),
-            ('>i','nextras'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>f','extra1'),
-            ('>f','extra2'),
-            ('>i','pad byte'),
-
-            ('>i','pad byte'),
-            ('>256s','lextra'),
-            ('>256s','lextra'),
-            ('>i','pad byte'),
-            
-            ('>i', 'pad byte'),
-            ('>i', 'min_level'),
-            ('>i', 'max_level'),
-            ('>i', 'pad byte'),
-            ]
-        
-        f = open(self.parameter_filename, "rb")
-        header_vals = {}
-        for format, name in header_struct:
-            size = struct.calcsize(format)
-            # We parse single values at a time, so this will
-            # always need to be indexed with 0
-            output = struct.unpack(format, f.read(size))[0]
-            header_vals[name] = output
-        self.dimensionality = 3 # We only support three
-        self.refine_by = 2 # Octree
-        # Update our parameters with the header and with some compile-time
-        # constants we will set permanently.
-        self.parameters.update(header_vals)
-        self.parameters["Y_p"] = 0.245
-        self.parameters["wmu"] = 4.0/(8.0-5.0*self.parameters["Y_p"])
-        self.parameters["gamma"] = 5./3.
-        self.parameters["T_CMB0"] = 2.726  
-        self.parameters["T_min"] = 300.0 #T floor in K
-        self.parameters["boxh"] = header_vals['boxh']
-        self.parameters['ng'] = 128 # of 0 level cells in 1d 
+        self.parameters.update(constants)
+        with open(self.file_amr,'rb') as f:
+            amr_header_vals = _read_struct(f,amr_header_struct)
+            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
+                _skip_record(f)
+            (self.ncell,) = struct.unpack('>l', _read_record(f))
+            # Try to figure out the root grid dimensions
+            est = int(np.rint(self.ncell**(1.0/3.0)))
+            # Note here: this is the number of *cells* on the root grid.
+            # This is not the same as the number of Octs.
+            self.domain_dimensions = np.ones(3, dtype='int64')*est 
+            self.root_grid_mask_offset = f.tell()
+            root_cells = self.domain_dimensions.prod()
+            self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
+            self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
+                 order='F')
+            self.root_grid_offset = f.tell()
+            _skip_record(f) # hvar
+            _skip_record(f) # var
+            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.child_grid_offset = f.tell()
+        self.parameters.update(amr_header_vals)
+        if not self.skip_particles and self.file_particle_header:
+            with open(self.file_particle_header,"rb") as fh:
+                particle_header_vals = _read_struct(fh,particle_header_struct)
+                fh.seek(seek_extras)
+                n = particle_header_vals['Nspecies']
+                wspecies = np.fromfile(fh,dtype='>f',count=10)
+                lspecies = np.fromfile(fh,dtype='>i',count=10)
+            self.parameters['wspecies'] = wspecies[:n]
+            self.parameters['lspecies'] = lspecies[:n]
+            ls_nonzero = np.diff(lspecies)[:n-1]
+            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
+                *ls_nonzero)
+            for k,v in particle_header_vals.items():
+                if k in self.parameters.keys():
+                    if not self.parameters[k] == v:
+                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
+                                   self.parameters[k])
+                else:
+                    self.parameters[k]=v
+            self.parameters_particles = particle_header_vals
+    
+        #setup standard simulation yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
-        self.parameters['CosmologyInitialRedshift']=self.current_redshift
-        self.data_comment = header_vals['jname']
-        self.current_time_raw = header_vals['t']
-        self.current_time = header_vals['t']
-        self.omega_lambda = header_vals['Oml0']
-        self.omega_matter = header_vals['Om0']
-        self.hubble_constant = header_vals['hubble']
-        self.min_level = header_vals['min_level']
-        self.max_level = header_vals['max_level']
-        self.nhydro_vars = 10 #this gets updated later, but we'll default to this
-        #nchem is nhydrovars-8, so we typically have 2 extra chem species 
+        self.omega_lambda = amr_header_vals['Oml0']
+        self.omega_matter = amr_header_vals['Om0']
+        self.hubble_constant = amr_header_vals['hubble']
+        self.min_level = amr_header_vals['min_level']
+        self.max_level = amr_header_vals['max_level']
         self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
-        #self.hubble_time /= 3.168876e7 #Gyr in s 
-        # def integrand(x,oml=self.omega_lambda,omb=self.omega_matter):
-        #     return 1./(x*np.sqrt(oml+omb*x**-3.0))
-        # spacings = np.logspace(-5,np.log10(self.parameters['aexpn']),1e5)
-        # integrand_arr = integrand(spacings)
-        # self.current_time = np.trapz(integrand_arr,dx=np.diff(spacings))
-        # self.current_time *= self.hubble_time
-        self.current_time = b2t(self.current_time_raw) * sec_per_Gyr
-        for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-            _skip_record(f)
-
-        
-        Om0 = self.parameters['Om0']
-        hubble = self.parameters['hubble']
-        dummy = 100.0 * hubble * np.sqrt(Om0)
-        ng = self.parameters['ng']
-        wmu = self.parameters["wmu"]
-        boxh = header_vals['boxh'] 
-        
-        #distance unit #boxh is units of h^-1 Mpc
-        self.parameters["r0"] = self.parameters["boxh"] / self.parameters['ng']
-        r0 = self.parameters["r0"]
-        #time, yrs
-        self.parameters["t0"] = 2.0 / dummy * 3.0856e19 / 3.15e7
-        #velocity velocity units in km/s
-        self.parameters["v0"] = 50.0*self.parameters["r0"]*\
-                np.sqrt(self.parameters["Om0"])
-        #density = 3H0^2 * Om0 / (8*pi*G) - unit of density in Msun/Mpc^3
-        self.parameters["rho0"] = 2.776e11 * hubble**2.0 * Om0
-        rho0 = self.parameters["rho0"]
-        #Pressure = rho0 * v0**2 - unit of pressure in g/cm/s^2
-        self.parameters["P0"] = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        #T_0 = unit of temperature in K and in keV)
-        #T_0 = 2.61155 * r0**2 * wmu * Om0 ! [keV]
-        self.parameters["T_0"] = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
-        #S_0 = unit of entropy in keV * cm^2
-        self.parameters["S_0"] = 52.077 * wmu**(5.0/3.0) * hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        
-        #mass conversion (Mbox = rho0 * Lbox^3, Mbox_code = Ng^3
-        #     for non-cosmological run aM0 must be defined during initialization
-        #     [aM0] = [Msun]
-        self.parameters["aM0"] = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        
-        #CGS for everything in the next block
-    
-        (self.ncell,) = struct.unpack('>l', _read_record(f))
-        # Try to figure out the root grid dimensions
-        est = int(np.rint(self.ncell**(1.0/3.0)))
-        # Note here: this is the number of *cells* on the root grid.
-        # This is not the same as the number of Octs.
-        self.domain_dimensions = np.ones(3, dtype='int64')*est 
-
-        self.root_grid_mask_offset = f.tell()
-        #_skip_record(f) # iOctCh
-        root_cells = self.domain_dimensions.prod()
-        self.root_iOctCh = _read_frecord(f,'>i')[:root_cells]
-        self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,order='F')
-        self.root_grid_offset = f.tell()
-        _skip_record(f) # hvar
-        _skip_record(f) # var
-
-        self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
-        self.child_grid_offset = f.tell()
-
-        f.close()
-        
-        if self.file_particle_header is not None:
-            self._read_particle_header(self.file_particle_header)
-        
-    def _read_particle_header(self,fn):    
-        """ Reads control information, various parameters from the 
-            particle data set. Adapted from Daniel Ceverino's 
-            Read_Particles_Binary in analysis_ART.F   
-        """ 
-        header_struct = [
-            ('>i','pad'),
-            ('45s','header'), 
-            ('>f','aexpn'),
-            ('>f','aexp0'),
-            ('>f','amplt'),
-            ('>f','astep'),
-
-            ('>i','istep'),
-            ('>f','partw'),
-            ('>f','tintg'),
-
-            ('>f','Ekin'),
-            ('>f','Ekin1'),
-            ('>f','Ekin2'),
-            ('>f','au0'),
-            ('>f','aeu0'),
-
-
-            ('>i','Nrow'),
-            ('>i','Ngridc'),
-            ('>i','Nspecies'),
-            ('>i','Nseed'),
-
-            ('>f','Om0'),
-            ('>f','Oml0'),
-            ('>f','hubble'),
-            ('>f','Wp5'),
-            ('>f','Ocurv'),
-            ('>f','Omb0'),
-            ('>%ds'%(396),'extras'),
-            ('>f','unknown'),
-
-            ('>i','pad')]
-        fh = open(fn,'rb')
-        vals = _read_struct(fh,header_struct)
-        
-        for k,v in vals.iteritems():
-            self.parameters[k]=v
-        
-        seek_extras = 137
-        fh.seek(seek_extras)
-        n = self.parameters['Nspecies']
-        self.parameters['wspecies'] = np.fromfile(fh,dtype='>f',count=10)
-        self.parameters['lspecies'] = np.fromfile(fh,dtype='>i',count=10)
-        self.parameters['wspecies'] = self.parameters['wspecies'][:n]
-        self.parameters['lspecies'] = self.parameters['lspecies'][:n]
-        fh.close()
-        
-        ls_nonzero = [ls for ls in self.parameters['lspecies'] if ls>0 ]
-        mylog.debug("Discovered %i species of particles",len(ls_nonzero))
-        mylog.debug("Particle populations: "+'%1.1e '*len(ls_nonzero),ls_nonzero)
-        
+        self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         """
-        Defined for Daniel Ceverino's file naming scheme.
+        Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
         fn = ("%s" % (os.path.basename(args[0])))


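The rewritten _set_units above reduces the unit setup to a handful of
closed-form factors. A minimal standalone sketch of the same arithmetic,
with made-up header values (boxh, Om0, hubble, ng, and aexpn below are
illustrative assumptions, not values read from any real snapshot):

    import numpy as np

    # Illustrative header values only
    boxh, Om0, hubble, ng, aexpn = 10.0, 0.27, 0.70, 128, 1.0
    wmu = 4.0 / (8.0 - 5.0 * 0.245)      # mean molecular weight, Y_p = 0.245

    r0 = boxh / ng                       # comoving Mpc/h per root-grid cell
    v0 = 50.0 * r0 * np.sqrt(Om0)        # velocity unit, km/s
    t0 = r0 / v0                         # time unit
    rho0 = 2.776e11 * hubble**2.0 * Om0  # mean matter density, Msun/Mpc^3
    aM0 = rho0 * (boxh / hubble)**3.0 / ng**3.0               # mass unit, Msun
    tr = 2.0/3.0 * 3.03e5 * r0**2.0 * wmu * Om0 / aexpn**2.0  # K per code unit

    # CGS factors assembled the same way as in _set_units
    velocity_cgs = v0 / aexpn * 1.0e5    # proper cm/s
    mass_cgs = aM0 * 1.98892e33          # grams
    print(mass_cgs)                      # grams per code mass unit
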
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -1,7 +1,7 @@
 """
 Definitions specific to ART
 
-Author: Christopher E. Moody <cemoody@ucsc.ed>
+Author: Christopher E. Moody <cemoody@ucsc.edu>
 Affiliation: UC Santa Cruz
 Homepage: http://yt-project.org/
 License:
@@ -25,19 +25,128 @@
 
 """
 
-art_particle_field_names = [
-'particle_age',
-'particle_index',
-'particle_mass',
-'particle_mass_initial',
-'particle_creation_time',
-'particle_metallicity1',
-'particle_metallicity2',
-'particle_metallicity',
-'particle_position_x',
-'particle_position_y',
-'particle_position_z',
-'particle_velocity_x',
-'particle_velocity_y',
-'particle_velocity_z',
-'particle_type']
+fluid_fields= [ 
+    'Density',
+    'TotalEnergy',
+    'XMomentumDensity',
+    'YMomentumDensity',
+    'ZMomentumDensity',
+    'Pressure',
+    'Gamma',
+    'GasEnergy',
+    'MetalDensitySNII',
+    'MetalDensitySNIa',
+    'PotentialNew',
+    'PotentialOld'
+]
+
+particle_fields= [
+    'particle_age',
+    'particle_index',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+    'particle_position_x',
+    'particle_position_y',
+    'particle_position_z',
+    'particle_velocity_x',
+    'particle_velocity_y',
+    'particle_velocity_z',
+    'particle_type'
+]
+
+particle_star_fields = [
+    'particle_age',
+    'particle_mass',
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
+]
+
+filename_pattern = {				
+	'amr':'10MpcBox_csf512_%s.d',
+	'particle_header':'PMcrd%s.DAT',
+	'particle_data':'PMcrs0%s.DAT',
+	'particle_stars':'stars_%s.dat'
+}
+
+amr_header_struct = [
+    ('>i','pad byte'),
+    ('>256s','jname'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','istep'),
+    ('>d','t'),
+    ('>d','dt'),
+    ('>f','aexpn'),
+    ('>f','ainit'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','boxh'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','Omb0'),
+    ('>f','hubble'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>i','nextras'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>f','extra1'),
+    ('>f','extra2'),
+    ('>i','pad byte'),
+    ('>i','pad byte'),
+    ('>256s','lextra'),
+    ('>256s','lextra'),
+    ('>i','pad byte'),
+    ('>i', 'pad byte'),
+    ('>i', 'min_level'),
+    ('>i', 'max_level'),
+    ('>i', 'pad byte'),
+]
+
+particle_header_struct =[
+    ('>i','pad'),
+    ('45s','header'), 
+    ('>f','aexpn'),
+    ('>f','aexp0'),
+    ('>f','amplt'),
+    ('>f','astep'),
+    ('>i','istep'),
+    ('>f','partw'),
+    ('>f','tintg'),
+    ('>f','Ekin'),
+    ('>f','Ekin1'),
+    ('>f','Ekin2'),
+    ('>f','au0'),
+    ('>f','aeu0'),
+    ('>i','Nrow'),
+    ('>i','Ngridc'),
+    ('>i','Nspecies'),
+    ('>i','Nseed'),
+    ('>f','Om0'),
+    ('>f','Oml0'),
+    ('>f','hubble'),
+    ('>f','Wp5'),
+    ('>f','Ocurv'),
+    ('>f','Omb0'),
+    ('>%ds'%(396),'extras'),
+    ('>f','unknown'),
+    ('>i','pad')
+]
+
+constants = {
+    "Y_p":0.245,
+    "gamma":5./3.,
+    "T_CMB0":2.726,
+    "T_min":300.,
+    "ng":128,
+    "wmu":4.0/(8.0-5.0*0.245)
+}
+
+seek_extras = 137


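The (format, name) pairs in amr_header_struct and particle_header_struct above
are consumed one record at a time by a small helper (called _read_struct in the
frontend, though its definition is not part of this hunk). The idea, sketched
here for reference, is just a loop of big-endian struct.unpack calls:

    import struct

    def read_struct(f, struct_spec):
        """Unpack one header field per (format, name) pair."""
        vals = {}
        for fmt, name in struct_spec:
            size = struct.calcsize(fmt)
            # single values at a time, so always index 0 of the tuple
            vals[name] = struct.unpack(fmt, f.read(size))[0]
        return vals
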
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -34,8 +34,6 @@
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import \
-    boltzmann_constant_cgs, mass_hydrogen_cgs
 import yt.utilities.lib as amr_utils
 
 KnownARTFields = FieldInfoContainer()
@@ -62,6 +60,7 @@
 #Density
 #Temperature
 #metallicities
+#MetalDensity SNII + SNia
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,7 +68,6 @@
 #Pressure
 #Gamma
 #GasEnergy
-#MetalDensity SNII + SNia
 #Potentials
 #xyzvelocity
 
@@ -170,32 +168,27 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    cd = data.pf.conversion_factors["Density"]
-    cg = data.pf.conversion_factors["GasEnergy"]
-    ct = data.pf.tr
     dg = data["GasEnergy"].astype('float64')
+    dg /= data.pf.conversion_factors["GasEnergy"]
     dd = data["Density"].astype('float64')
-    di = dd==0.0
+    dd /= data.pf.conversion_factors["Density"]
+    tr = dg/dd*data.pf.tr
+    #ghost cells have zero density?
+    tr[np.isnan(tr)] = 0.0
     #dd[di] = -1.0
-    tr = dg/dd
-    #tr[np.isnan(tr)] = 0.0
     #if data.id==460:
-    #    import pdb;pdb.set_trace()
-    tr /= data.pf.conversion_factors["GasEnergy"]
-    tr *= data.pf.conversion_factors["Density"]
-    tr *= data.pf.tr
     #tr[di] = -1.0 #replace the zero-density points with zero temp
     #print tr.min()
     #assert np.all(np.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x = data.pf.conversion_factors["Temperature"]
+    #x = data.pf.conversion_factors["Temperature"]
     x = 1.0
     return x
 add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_converttemperature
+#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
@@ -218,28 +211,27 @@
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(data):
+def _x_velocity(field,data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
 add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(data):
+def _y_velocity(field,data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
 add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(data):
+def _z_velocity(field,data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
 add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
@@ -251,20 +243,63 @@
 
 #Particle fields
 
+def ParticleMass(field,data):
+    return data['particle_mass']
+add_field("ParticleMass",function=ParticleMass,units=r"\rm{g}",particle_type=True)
+
+
 #Derived particle fields
 
+def ParticleMassMsun(field,data):
+    return data['particle_mass']*data.pf['Msun']
+add_field("ParticleMassMsun",function=ParticleMassMsun,units=r"\rm{g}",particle_type=True)
+
+def _creation_time(field,data):
+    pa = data["particle_age"]
+    tr = np.zeros(pa.shape,dtype='float')-1.0
+    tr[pa>0] = pa[pa>0]
+    return tr
+add_field("creation_time",function=_creation_time,units=r"\rm{s}",particle_type=True)
+
 def mass_dm(field, data):
+    tr = np.ones(data.ActiveDimensions, dtype='float32')
     idx = data["particle_type"]<5
     #make a dumb assumption that the mass is evenly spread out in the grid
     #must return an array the shape of the grid cells
-    tr  = data["Ones"] #create a grid in the right size
     if np.sum(idx)>0:
-        tr /= np.prod(tr.shape) #divide by the volume
-        tr *= np.sum(data['particle_mass'][idx]) #Multiply by total contained mass
+        tr /= np.prod(data['CellVolumeCode']*data.pf['mpchcm']**3.0) #divide by the volume
+        tr *= np.sum(data['particle_mass'][idx])*data.pf['Msun'] #Multiply by total contained mass
+        print tr.shape
         return tr
     else:
-        return tr*0.0
+        return tr*1e-9
 
-add_field("particle_cell_mass_dm", function=mass_dm,
-          validators=[ValidateSpatial(0)])
+add_field("particle_cell_mass_dm", function=mass_dm, units = r"\mathrm{M_{sun}}",
+        validators=[ValidateSpatial(0)],        
+        take_log=False,
+        projection_conversion="1")
 
+def _spdensity(field, data):
+    grid_mass = np.zeros(data.ActiveDimensions, dtype='float32')
+    if data.star_mass.shape[0] ==0 : return grid_mass 
+    amr_utils.CICDeposit_3(data.star_position_x,
+                           data.star_position_y,
+                           data.star_position_z,
+                           data.star_mass.astype('float32'),
+                           data.star_mass.shape[0],
+                           grid_mass, 
+                           np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return grid_mass 
+
+#add_field("star_density", function=_spdensity,
+#          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+
+def _simple_density(field,data):
+    mass = np.sum(data.star_mass)
+    volume = data['dx']*data.ActiveDimensions.prod().astype('float64')
+    return mass/volume
+
+add_field("star_density", function=_simple_density,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity)


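For reference, the rewritten _temperature above amounts to dividing the two
fields back into code units and scaling their ratio by the tr factor, zeroing
the NaNs that zero-density ghost cells produce. A minimal sketch of that
arithmetic on plain arrays (the function name is illustrative):

    import numpy as np

    def temperature_K(gas_energy_cgs, density_cgs, cf, tr):
        # undo the CGS conversion factors, i.e. go back to code units
        dg = gas_energy_cgs / cf["GasEnergy"]
        dd = density_cgs / cf["Density"]
        T = dg / dd * tr          # Kelvin
        T[np.isnan(T)] = 0.0      # zero-density (ghost) cells give NaN
        return T
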
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/frontends/art/io.py
--- a/yt/frontends/art/io.py
+++ b/yt/frontends/art/io.py
@@ -36,7 +36,7 @@
     BaseIOHandler
 import yt.utilities.lib as au
 
-from yt.frontends.art.definitions import art_particle_field_names
+from yt.frontends.art.definitions import *
 
 class IOHandlerART(BaseIOHandler):
     _data_style = "art"
@@ -121,45 +121,19 @@
         self.level_data.pop(level, None)
 
     def _read_particle_field(self, grid, field):
-        #This will be cleaned up later
-        idx = np.array(grid.particle_indices)
-        if field == 'particle_index':
-            return np.array(idx)
-        if field == 'particle_type':
-            return grid.pf.particle_type[idx]
-        if field == 'particle_position_x':
-            return grid.pf.particle_position[idx][:,0]
-        if field == 'particle_position_y':
-            return grid.pf.particle_position[idx][:,1]
-        if field == 'particle_position_z':
-            return grid.pf.particle_position[idx][:,2]
-        if field == 'particle_mass':
-            return grid.pf.particle_mass[idx]
-        if field == 'particle_velocity_x':
-            return grid.pf.particle_velocity[idx][:,0]
-        if field == 'particle_velocity_y':
-            return grid.pf.particle_velocity[idx][:,1]
-        if field == 'particle_velocity_z':
-            return grid.pf.particle_velocity[idx][:,2]
-        
-        #stellar fields
-        if field == 'particle_age':
-            return grid.pf.particle_age[idx]
-        if field == 'particle_metallicity':
-            return grid.pf.particle_metallicity1[idx] +\
-                   grid.pf.particle_metallicity2[idx]
-        if field == 'particle_metallicity1':
-            return grid.pf.particle_metallicity1[idx]
-        if field == 'particle_metallicity2':
-            return grid.pf.particle_metallicity2[idx]
-        if field == 'particle_mass_initial':
-            return grid.pf.particle_mass_initial[idx]
-        
-        raise 'Should have matched one of the particle fields...'
-
+        dat = getattr(grid,field,None)
+        if dat is not None: 
+            return dat
+        starfield = field.replace('star','particle')
+        dat = getattr(grid,starfield,None)
+        if dat is not None:
+            psi = grid.pf.particle_star_index
+            idx = grid.particle_type==psi
+            return dat[idx]
+        raise KeyError
         
     def _read_data_set(self, grid, field):
-        if field in art_particle_field_names:
+        if field in particle_fields:
             return self._read_particle_field(grid, field)
         pf = grid.pf
         field_id = grid.pf.h.field_list.index(field)
@@ -198,9 +172,9 @@
     level_child_offsets= [0,]
     f.seek(offset)
     nchild,ntot=8,0
-    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
-    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='i')
+    Level = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iNOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
+    iHOLL = np.zeros(MaxLevelNow+1 - MinLev, dtype='int64')
     for Lev in xrange(MinLev + 1, MaxLevelNow+1):
         level_oct_offsets.append(f.tell())
 
@@ -232,7 +206,7 @@
     f.seek(offset)
     return nhydrovars, iNOLL, level_oct_offsets, level_child_offsets
 
-def _read_art_level_info(f, level_oct_offsets,level,root_level=15):
+def _read_art_level_info(f, level_oct_offsets,level,coarse_grid=128):
     pos = f.tell()
     f.seek(level_oct_offsets[level])
     #Get the info for this level, skip the rest
@@ -283,13 +257,18 @@
     le = le[idx]
     fl = fl[idx]
 
+
     #left edges are expressed as if they were on 
     #level 15, so no matter what level max(le)=2**15 
     #correct to the yt convention
     #le = le/2**(root_level-1-level)-1
 
+    #try to find the root_level first
+    root_level=np.floor(np.log2(le.max()*1.0/coarse_grid))
+    root_level = root_level.astype('int64')
+
     #try without the -1
-    le = le/2**(root_level-2-level)-1
+    le = le/2**(root_level+1-level)-1
 
     #now read the hvars and vars arrays
     #we are looking for iOctCh
@@ -299,13 +278,12 @@
     
     
     f.seek(pos)
-    return le,fl,nLevel
+    return le,fl,nLevel,root_level
 
 
-def read_particles(file,nstars,Nrow):
+def read_particles(file,Nrow):
     words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
     real_size = 4 # for file_particle_data; not always true?
-    np = nstars # number of particles including stars, should come from lspecies[-1]
     np_per_page = Nrow**2 # defined in ART a_setup.h
     num_pages = os.path.getsize(file)/(real_size*words*np_per_page)
 
@@ -314,7 +292,7 @@
     data = np.squeeze(np.dstack(pages)).T # x,y,z,vx,vy,vz
     return data[:,0:3],data[:,3:]
 
-def read_stars(file,nstars,Nrow):
+def read_stars(file):
     fh = open(file,'rb')
     tdum,adum   = _read_frecord(fh,'>d')
     nstars      = _read_frecord(fh,'>i')
@@ -327,7 +305,8 @@
     if fh.tell() < os.path.getsize(file):
         metallicity2 = _read_frecord(fh,'>f')     
     assert fh.tell() == os.path.getsize(file)
-    return nstars, mass, imass, tbirth, metallicity1, metallicity2
+    return  nstars, mass, imass, tbirth, metallicity1, metallicity2,\
+            ws_old,ws_oldi,tdum,adum
 
 def _read_child_mask_level(f, level_child_offsets,level,nLevel,nhydro_vars):
     f.seek(level_child_offsets[level])
@@ -346,7 +325,7 @@
         arr = arr.reshape((width, chunk), order="F")
         assert np.all(arr[0,:]==arr[-1,:]) #pads must be equal
         idc[a:b]    = arr[1,:]-1 #fix fortran indexing
-        ioctch[a:b] = arr[2,:]==0 #if it is above zero, then refined info available
+        ioctch[a:b] = arr[2,:]==0 #if above zero, refinement info is available
         #zero in the mask means there is refinement available
         a=b
         left -= chunk
@@ -476,3 +455,29 @@
     #fb2t = interp1d(tbs,ages)
     return fb2t
 
+def spread_ages(ages,logger=None,spread=1.0e7*365*24*3600):
+    #stars are formed in lumps; spread out the ages linearly
+    da= np.diff(ages)
+    assert np.all(da<=0)
+    #ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da<0)
+    idx+=1 #mark the right edges
+    #spread this age evenly out to the next age
+    lidx=0
+    lage=0
+    for i in idx:
+        n = i-lidx #n stars affected
+        rage = ages[i]
+        lage = max(rage-spread,0.0)
+        agesd[lidx:i]=np.linspace(lage,rage,n)
+        lidx=i
+        #lage=rage
+        if logger: logger(i)
+    #we didn't get the last iter
+    i=ages.shape[0]-1
+    n = i-lidx #n stars affected
+    rage = ages[i]
+    lage = max(rage-spread,0.0)
+    agesd[lidx:i]=np.linspace(lage,rage,n)
+    return agesd


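spread_ages assumes its input is already ordered with non-increasing ages (the
asserted da<=0), which is how stars written in lumps arrive. A small usage
sketch, assuming the import path of this file:

    import numpy as np
    from yt.frontends.art.io import spread_ages

    # three lumps of repeated ages, in seconds, ordered non-increasing
    ages = np.array([5.0e16, 5.0e16, 5.0e16, 3.0e16, 3.0e16, 1.0e16])
    # each run of equal ages becomes a linear ramp `spread` seconds wide
    agesd = spread_ages(ages, spread=1.0e7 * 365 * 24 * 3600)  # 10 Myr
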
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/utilities/initial_conditions.py
--- a/yt/utilities/initial_conditions.py
+++ b/yt/utilities/initial_conditions.py
@@ -67,6 +67,47 @@
             val = ((r[ind] - cr2) / (r2 - cr2))**0.5 * (outer_val - inner_val)
             grid[field][ind] = val + inner_val
 
+class BetaModelSphere(FluidOperator):
+    def __init__(self, beta, core_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.core_radius = core_radius
+        self.beta = beta
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        r2 = self.radius**2
+        cr2 = self.core_radius**2
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)            
+        ind = (r <= r2)
+        if sub_select is not None:
+            ind &= sub_select            
+        for field, core_val in self.fields.iteritems() :
+            val = core_val*(1.+r[ind]/cr2)**(-1.5*self.beta)
+            grid[field][ind] = val
+
+class NFWModelSphere(FluidOperator):
+    def __init__(self, scale_radius, radius, center, fields):
+        self.radius = radius
+        self.center = center
+        self.fields = fields
+        self.scale_radius = scale_radius
+        
+    def __call__(self, grid, sub_select = None):
+        r = np.zeros(grid.ActiveDimensions, dtype="float64")
+        for i, ax in enumerate("xyz"):
+            np.add(r, (grid[ax] - self.center[i])**2.0, r)
+        np.sqrt(r,r)
+        ind = (r <= self.radius)
+        r /= self.scale_radius
+        if sub_select is not None:
+            ind &= sub_select
+        for field, scale_val in self.fields.iteritems() :
+            val = scale_val/(r[ind]*(1.+r[ind])**2)
+            grid[field][ind] = val
+            
 class RandomFluctuation(FluidOperator):
     def __init__(self, fields):
         self.fields = fields


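The two new operators fill fields according to standard radial profiles: the
beta model rho(d) = rho_0 * (1 + (d/r_c)^2)^(-3*beta/2) and the NFW profile
rho(d) = rho_s / ((d/r_s) * (1 + d/r_s)^2). Note that BetaModelSphere keeps r
squared internally (it never takes the square root), so its r[ind]/cr2 is
(d/r_c)**2. The same profiles as plain functions of distance d (names here
are illustrative):

    import numpy as np

    def beta_model(d, core_val, r_c, beta):
        # rho(d) = core_val * (1 + (d/r_c)^2) ** (-1.5 * beta)
        return core_val * (1.0 + (d / r_c)**2) ** (-1.5 * beta)

    def nfw(d, scale_val, r_s):
        # rho(d) = scale_val / ((d/r_s) * (1 + d/r_s)^2)
        x = d / r_s
        return scale_val / (x * (1.0 + x)**2)

    d = np.linspace(0.1, 10.0, 5)
    print(beta_model(d, 1.0, 1.0, 2.0 / 3.0))
    print(nfw(d, 1.0, 1.0))
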
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -117,6 +117,63 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def CICSample_3(np.ndarray[np.float64_t, ndim=1] posx,
+                np.ndarray[np.float64_t, ndim=1] posy,
+                np.ndarray[np.float64_t, ndim=1] posz,
+                np.ndarray[np.float64_t, ndim=1] sample,
+                np.int64_t npositions,
+                np.ndarray[np.float64_t, ndim=3] field,
+                np.ndarray[np.float64_t, ndim=1] leftEdge,
+                np.ndarray[np.int32_t, ndim=1] gridDimension,
+                np.float64_t cellSize):
+    
+    cdef int i1, j1, k1, n
+    cdef np.float64_t xpos, ypos, zpos
+    cdef np.float64_t fact, edge0, edge1, edge2
+    cdef np.float64_t le0, le1, le2
+    cdef np.float64_t dx, dy, dz, dx2, dy2, dz2
+    
+    edge0 = (<np.float64_t> gridDimension[0]) - 0.5001
+    edge1 = (<np.float64_t> gridDimension[1]) - 0.5001
+    edge2 = (<np.float64_t> gridDimension[2]) - 0.5001
+    fact = 1.0 / cellSize
+    
+    le0 = leftEdge[0] 
+    le1 = leftEdge[1] 
+    le2 = leftEdge[2] 
+                                                    
+    for n in range(npositions):
+
+        # Compute the position of the central cell
+        xpos = fclip((posx[n] - le0)*fact, 0.5001, edge0)
+        ypos = fclip((posy[n] - le1)*fact, 0.5001, edge1)
+        zpos = fclip((posz[n] - le2)*fact, 0.5001, edge2)
+
+        i1  = <int> (xpos + 0.5)
+        j1  = <int> (ypos + 0.5)
+        k1  = <int> (zpos + 0.5)
+        
+        # Compute the weights
+        dx = (<float> i1) + 0.5 - xpos
+        dy = (<float> j1) + 0.5 - ypos
+        dz = (<float> k1) + 0.5 - zpos
+        dx2 =  1.0 - dx
+        dy2 =  1.0 - dy
+        dz2 =  1.0 - dz
+
+        # Interpolate from field onto the particle
+        sample[n] = (field[i1-1,j1-1,k1-1] * dx  * dy  * dz +
+                     field[i1  ,j1-1,k1-1] * dx2 * dy  * dz +
+                     field[i1-1,j1  ,k1-1] * dx  * dy2 * dz +
+                     field[i1  ,j1  ,k1-1] * dx2 * dy2 * dz +
+                     field[i1-1,j1-1,k1  ] * dx  * dy  * dz2 +
+                     field[i1  ,j1-1,k1  ] * dx2 * dy  * dz2 +
+                     field[i1-1,j1  ,k1  ] * dx  * dy2 * dz2 +
+                     field[i1  ,j1  ,k1  ] * dx2 * dy2 * dz2)
+                                                                                                                        
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def assign_particles_to_cells(np.ndarray[np.int32_t, ndim=1] levels, #for cells
                               np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
                               np.ndarray[np.float32_t, ndim=2] right_edges,


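CICSample_3 is the inverse-direction cousin of CICDeposit_3: instead of
depositing particle mass onto the grid, it interpolates a cell-centered field
onto particle positions with the same cloud-in-cell (linear) weights. A 1-D
analog, with the boundary clamping omitted for brevity (the function name is
illustrative):

    import numpy as np

    def cic_sample_1d(xp, field, left_edge=0.0, dx=1.0):
        """Interpolate a cell-centered 1-D field onto particle positions."""
        x = (xp - left_edge) / dx             # position in cell units
        i = np.floor(x + 0.5).astype(int)     # cell whose center is above x
        d = (i + 0.5) - x                     # weight toward left neighbor
        return field[i - 1] * d + field[i] * (1.0 - d)

    field = np.arange(8) + 0.5                # field equal to the coordinate
    print(cic_sample_1d(np.array([2.3, 5.9]), field))  # ~ [2.3, 5.9]
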
diff -r bcc9a9de41a1c0be4c8135a4685ac9b56e278615 -r 33bfca118708b7f4a2aab2a1fcea7155ed131403 yt/utilities/lib/tests/test_sample.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_sample.py
@@ -0,0 +1,42 @@
+import numpy as np
+
+from yt.testing import *
+from yt.utilities.lib import CICSample_3
+
+def setup():
+    pass
+
+def test_sample() :
+
+    grid = {}
+
+    dims = np.array([64,64,64], dtype='int32')
+    
+    inds = np.indices(dims)
+    grid["x"] = inds[0] + 0.5
+    grid["y"] = inds[1] + 0.5
+    grid["z"] = inds[2] + 0.5
+
+    num_particles = np.int64(1000)
+    
+    xp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+    yp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+    zp = np.random.uniform(low=1.0, high=63.0, size=num_particles)
+
+    xfield = np.zeros((num_particles))
+    yfield = np.zeros((num_particles))
+    zfield = np.zeros((num_particles))
+
+    dx = 1.
+    le = np.zeros((3))
+    
+    CICSample_3(xp,yp,zp,xfield,num_particles,grid["x"],
+                le,dims,dx)
+    CICSample_3(xp,yp,zp,yfield,num_particles,grid["y"],
+                le,dims,dx)
+    CICSample_3(xp,yp,zp,zfield,num_particles,grid["z"],
+                le,dims,dx)
+
+    assert_allclose(xp,xfield)
+    assert_allclose(yp,yfield)
+    assert_allclose(zp,zfield)

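The closure test above works because cloud-in-cell interpolation is exact for
fields that are linear in the coordinates: each grid value is set to its own
cell-center coordinate, so the weighted sum reconstructs the particle position
to round-off, and the particles are drawn from [1, 63] to stay inside the
clamped interior of the 64^3 grid.
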
Repository URL: https://bitbucket.org/yt_analysis/yt/
