[yt-svn] commit/yt: 6 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Jul 10 17:35:42 PDT 2012


6 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/98e4616f576d/
changeset:   98e4616f576d
branch:      yt
user:        sskory
date:        2012-07-10 00:05:50
summary:     Adding LoadTextHaloes, when all you have is a text file with position and radius.
There's room for expansion to more columns if there is a need for it.
affected #:  2 files

diff -r 5cbdc449cf53f66a1f01b1238b8dec0990577b21 -r 98e4616f576de5d9d6a9bd34db6b9d353eb7fd6d yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -44,4 +44,5 @@
     HOPHaloFinder, \
     FOFHaloFinder, \
     HaloFinder, \
-    LoadHaloes
+    LoadHaloes, \
+    LoadTextHaloes


diff -r 5cbdc449cf53f66a1f01b1238b8dec0990577b21 -r 98e4616f576de5d9d6a9bd34db6b9d353eb7fd6d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -409,7 +409,7 @@
         period = self.pf.domain_right_edge - \
             self.pf.domain_left_edge
         cm = self.pf["cm"]
-        thissize = max(self.size, self.indices.size)
+        thissize = self.get_size()
         rho_crit = rho_crit_now * h ** 2.0 * Om_matter  # g cm^-3
         Msun2g = mass_sun_cgs
         rho_crit = rho_crit * ((1.0 + z) ** 3.0)
@@ -990,6 +990,78 @@
         r = self.maximum_radius()
         return self.pf.h.sphere(cen, r)
 
+class TextHalo(LoadedHalo):
+    def __init__(self, pf, id, size=None, CoM=None,
+
+        max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
+        rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
+        e1_vec=None, tilt=None):
+
+        self.pf = pf
+        self.gridsize = (self.pf.domain_right_edge - \
+            self.pf.domain_left_edge)
+        self.id = id
+        self.size = size
+        self.CoM = CoM
+        self.max_dens_point = max_dens_point
+        self.group_total_mass = group_total_mass
+        self.max_radius = max_radius
+        self.bulk_vel = bulk_vel
+        self.rms_vel = rms_vel
+        self.mag_A = mag_A
+        self.mag_B = mag_B
+        self.mag_C = mag_C
+        self.e1_vec = e1_vec
+        self.tilt = tilt
+        self.bin_count = None
+        self.overdensity = None
+        self.indices = na.array([])  # Never used for a LoadedHalo.
+
+    def __getitem__(self, key):
+        # We'll just pull it from the sphere.
+        return self.get_sphere()[key]
+
+    def get_sphere(self):
+        r"""Returns a sphere source.
+
+        This will generate a new, empty sphere source centered on this halo,
+        with the maximum radius of the halo. This can be used like any other
+        data container in yt.
+
+        Parameters
+        ----------
+        center_of_mass : bool, optional
+            True chooses the center of mass when
+            calculating the maximum radius.
+            False chooses from the maximum density location for HOP halos
+            (it has no effect for FOF halos).
+            Default = True.
+
+        Returns
+        -------
+        sphere : `yt.data_objects.api.AMRSphereBase`
+            The empty data source.
+
+        Examples
+        --------
+        >>> sp = halos[0].get_sphere()
+        """
+        cen = self.center_of_mass()
+        r = self.maximum_radius()
+        return self.pf.h.sphere(cen, r)
+
+    def maximum_density(self):
+        r"""Undefined for text halos."""
+        return -1
+    
+    def maximum_density_location(self):
+        r"""Undefined, default to CoM"""
+        return self.center_of_mass()
+    
+    def get_size(self):
+        # Have to just get it from the sphere.
+        return self["particle_position_x"].size
+
 
 class HaloList(object):
 
@@ -1514,6 +1586,34 @@
         lines.close()
         return locations
 
+class TextHaloList(HaloList):
+    _name = "Text"
+
+    def __init__(self, pf, fname, columns, comment):
+        ParallelAnalysisInterface.__init__(self)
+        self.pf = pf
+        self._groups = []
+        self._retrieve_halos(fname, columns, comment)
+
+    def _retrieve_halos(self, fname, columns, comment):
+        # First get the halo particulars.
+        lines = file(fname)
+        halo = 0
+        for line in lines:
+            # Skip commented lines.
+            if line[0] == comment: continue
+            line = line.split()
+            x = float(line[columns['x']])
+            y = float(line[columns['y']])
+            z = float(line[columns['z']])
+            r = float(line[columns['r']])
+            cen = na.array([x, y, z])
+            self._groups.append(TextHalo(self.pf, halo, size = None,
+                CoM = cen, max_dens_point = cen,
+                group_total_mass = None, max_radius = r, bulk_vel = None,
+                rms_vel = None, fnames = None))
+            halo += 1
+
 
 class parallelHOPHaloList(HaloList, ParallelAnalysisInterface):
     _name = "parallelHOP"
@@ -2497,3 +2597,33 @@
         """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
+
+class LoadTextHaloes(GenericHaloFinder, TextHaloList):
+    def __init__(self, pf, filename, columns, comment = "#"):
+        r"""Load a text file of halos.
+        
+        Like LoadHaloes, but when all that is available is a plain
+        text file. This assumes the text file has the 3-positions of halos
+        along with a radius. The halo objects created are spheres.
+
+        Parameters
+        ----------
+        fname : String
+            The name of the text file to read in.
+        
+        columns : dict
+            A dict listing the column name : column number pairs for data
+            in the text file. It is zero-based (like Python).
+            An example is {'x':0, 'y':1, 'z':2, 'r':3}.
+        
+        comment : String
+            If the first character of a line is equal to this, the line is
+            skipped. Default = "#".
+
+        Examples
+        --------
+        >>> pf = load("data0005")
+        >>> halos = LoadHaloes(pf, "list.txt", {'x':0, 'y':1, 'z':2, 'r':3},
+            comment = ";")
+        """
+        TextHaloList.__init__(self, pf, filename, columns, comment)



https://bitbucket.org/yt_analysis/yt/changeset/d55db8915ed2/
changeset:   d55db8915ed2
branch:      yt
user:        sskory
date:        2012-07-10 00:13:03
summary:     Fixed typo.
affected #:  1 file

diff -r 98e4616f576de5d9d6a9bd34db6b9d353eb7fd6d -r d55db8915ed2caee08bc2dd380a8ffaa09a24d2b yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2623,7 +2623,7 @@
         Examples
         --------
         >>> pf = load("data0005")
-        >>> halos = LoadHaloes(pf, "list.txt", {'x':0, 'y':1, 'z':2, 'r':3},
+        >>> halos = LoadTextHaloes(pf, "list.txt", {'x':0, 'y':1, 'z':2, 'r':3},
             comment = ";")
         """
         TextHaloList.__init__(self, pf, filename, columns, comment)



https://bitbucket.org/yt_analysis/yt/changeset/858ba433d24a/
changeset:   858ba433d24a
branch:      yt
user:        sskory
date:        2012-07-10 00:26:52
summary:     A better default for TextHalos.
affected #:  1 file

diff -r d55db8915ed2caee08bc2dd380a8ffaa09a24d2b -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1609,7 +1609,7 @@
             r = float(line[columns['r']])
             cen = na.array([x, y, z])
             self._groups.append(TextHalo(self.pf, halo, size = None,
-                CoM = cen, max_dens_point = cen,
+                CoM = cen, max_dens_point = None,
                 group_total_mass = None, max_radius = r, bulk_vel = None,
                 rms_vel = None, fnames = None))
             halo += 1



https://bitbucket.org/yt_analysis/yt/changeset/9f4a72c839c5/
changeset:   9f4a72c839c5
branch:      yt
user:        sskory
date:        2012-07-11 00:00:22
summary:     Merge.
affected #:  5 files

diff -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -33,6 +33,8 @@
 
 import time
 import numpy as na
+import numpy.linalg as linalg
+import collections
 
 from yt.funcs import *
 import yt.utilities.lib as amr_utils
@@ -41,7 +43,8 @@
 
 debug = True
 
-def export_to_sunrise(pf, fn, star_particle_type, dle, dre,**kwargs):
+def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
+        debug=False,dd=None,**kwargs):
     r"""Convert the contents of a dataset to a FITS file format that Sunrise
     understands.
 
@@ -54,12 +57,14 @@
 
     Parameters
     ----------
-    pf : `StaticOutput`
-        The parameter file to convert.
-    fn : string
-        The filename of the output FITS file.
-    dle : The domain left edge to extract
-    dre : The domain rght edge to extract
+    pf      : `StaticOutput`
+                The parameter file to convert.
+    fn      : string
+                The filename of the output FITS file.
+    fc      : array
+                The center of the extraction region
+    fwidth  : array  
+                Ensure this radius around the center is enclosed
         Array format is (nx,ny,nz) where each element is floating point
         in unitary position units where 0 is leftmost edge and 1
         the rightmost. 
@@ -72,33 +77,123 @@
     http://sunrise.googlecode.com/ for more information.
 
     """
+
+    fc = na.array(fc)
+    fwidth = na.array(fwidth)
     
     #we must round the dle,dre to the nearest root grid cells
-    ile,ire,super_level= round_nearest_edge(pf,dle,dre)
-    super_level -= 1 #we're off by one (so we don't need a correction if we span 2 cells)
+    ile,ire,super_level,ncells_wide= \
+            round_ncells_wide(pf.domain_dimensions,fc-fwidth,fc+fwidth,nwide=ncells_wide)
+
+    assert na.all((ile-ire)==(ile-ire)[0])
+    mylog.info("rounding specified region:")
+    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fc-fwidth)+tuple(fc+fwidth)))
+    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
     fle,fre = ile*1.0/pf.domain_dimensions, ire*1.0/pf.domain_dimensions
-    mylog.info("rounding specified region:")
-    mylog.info("from [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(dle)+tuple(dre)))
-    mylog.info("to   [%07i %07i %07i]-[%07i %07i %07i]"%(tuple(ile)+tuple(ire)))
     mylog.info("to   [%1.5f %1.5f %1.5f]-[%1.5f %1.5f %1.5f]"%(tuple(fle)+tuple(fre)))
 
+    #Create a list of the star particle properties in PARTICLE_DATA
+    #Include ID, parent-ID, position, velocity, creation_mass, 
+    #formation_time, mass, age_m, age_l, metallicity, L_bol
+    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,
+                                           dd=dd,**kwargs)
 
     #Create the refinement hilbert octree in GRIDSTRUCTURE
     #For every leaf (not-refined) cell we have a column n GRIDDATA
     #Include mass_gas, mass_metals, gas_temp_m, gas_teff_m, cell_volume, SFR
     #since the octree always starts with one cell, an our 0-level mesh
-    #may have many cells, we must #create the octree region sitting 
+    #may have many cells, we must create the octree region sitting 
     #ontop of the first mesh by providing a negative level
-    output, refinement = prepare_octree(pf,ile,start_level=-super_level)
+    output, refinement,dd,nleaf = prepare_octree(pf,ile,start_level=super_level,
+            debug=debug,dd=dd,center=fc)
 
-    #Create a list of the star particle properties in PARTICLE_DATA
-    #Include ID, parent-ID, position, velocity, creation_mass, 
-    #formation_time, mass, age_m, age_l, metallicity, L_bol
-    particle_data = prepare_star_particles(pf,star_particle_type,fle=fle,fre=fre,**kwargs)
+    create_fits_file(pf,fn, refinement,output,particle_data,fle,fre)
 
-    create_fits_file(pf,fn, refinement,output,particle_data,fre,fle)
+    return fle,fre,ile,ire,dd,nleaf
 
-def prepare_octree(pf,ile,start_level=0):
+def export_to_sunrise_from_halolist(pf,fni,star_particle_type,
+                                        halo_list,domains_list=None,**kwargs):
+    """
+    Using the center of mass and the virial radius
+    for a halo, calculate the regions to extract for sunrise.
+    The regions are defined on the root grid, and so individual
+    octs may span a large range encompassing many halos
+    and subhalos. Instead of repeating the oct extraction for each
+    halo, arrange halos such that we only calculate what we need to.
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file to convert. We use the root grid to specify the domain.
+    fni : string
+        The filename of the output FITS file, but depends on the domain. The
+        dle and dre are appended to the name.
+    particle_type : int
+        The particle index for stars
+    halo_list : list of halo objects
+        The halo list objects must have halo.CoM and halo.Rvir,
+        both of which are assumed to be in unitary length units.
+    frvir (optional) : float
+        Ensure that CoM +/- frvir*Rvir is contained within each domain
+    domains_list (optiona): dict of halos
+        Organize halos into a dict of domains. Keys are DLE/DRE tuple
+        values are a list of halos
+    """
+    dn = pf.domain_dimensions
+    if domains_list is None:
+        domains_list = domains_from_halos(pf,halo_list,**kwargs)
+    if fni.endswith('.fits'):
+        fni = fni.replace('.fits','')
+
+    ndomains_finished = 0
+    for (num_halos, domain, halos) in domains_list:
+        dle,dre = domain
+        print 'exporting: '
+        print "[%03i %03i %03i] -"%tuple(dle),
+        print "[%03i %03i %03i] "%tuple(dre),
+        print " with %i halos"%num_halos
+        dle,dre = domain
+        dle, dre = na.array(dle),na.array(dre)
+        fn = fni 
+        fn += "%03i_%03i_%03i-"%tuple(dle)
+        fn += "%03i_%03i_%03i"%tuple(dre)
+        fnf = fn + '.fits'
+        fnt = fn + '.halos'
+        if os.path.exists(fnt):
+            os.remove(fnt)
+        fh = open(fnt,'w')
+        for halo in halos:
+            fh.write("%i "%halo.ID)
+            fh.write("%6.6e "%(halo.CoM[0]*pf['kpc']))
+            fh.write("%6.6e "%(halo.CoM[1]*pf['kpc']))
+            fh.write("%6.6e "%(halo.CoM[2]*pf['kpc']))
+            fh.write("%6.6e "%(halo.Mvir))
+            fh.write("%6.6e \n"%(halo.Rvir*pf['kpc']))
+        fh.close()
+        export_to_sunrise(pf, fnf, star_particle_type, dle*1.0/dn, dre*1.0/dn)
+        ndomains_finished +=1
+
+def domains_from_halos(pf,halo_list,frvir=0.15):
+    domains = {}
+    dn = pf.domain_dimensions
+    for halo in halo_list:
+        fle, fre = halo.CoM-frvir*halo.Rvir,halo.CoM+frvir*halo.Rvir
+        dle,dre = na.floor(fle*dn), na.ceil(fre*dn)
+        dle,dre = tuple(dle.astype('int')),tuple(dre.astype('int'))
+        if (dle,dre) in domains.keys():
+            domains[(dle,dre)] += halo,
+        else:
+            domains[(dle,dre)] = [halo,]
+    #for niceness, let's process the domains in order of 
+    #the one with the most halos
+    domains_list = [(len(v),k,v) for k,v in domains.iteritems()]
+    domains_list.sort() 
+    domains_list.reverse() #we want the most populated domains first
+    domains_limits = [d[1] for d in domains_list]
+    domains_halos  = [d[2] for d in domains_list]
+    return domains_list
+
+def prepare_octree(pf,ile,start_level=0,debug=False,dd=None,center=None):
     add_fields() #add the metal mass field that sunrise wants
     fields = ["CellMassMsun","TemperatureTimesCellMassMsun", 
               "MetalMass","CellVolumeCode"]
@@ -106,7 +201,9 @@
     #gather the field data from octs
     pbar = get_pbar("Retrieving field data",len(fields))
     field_data = [] 
-    dd = pf.h.all_data()
+    if dd is None:
+        #we keep passing dd around to not regenerate the data all the time
+        dd = pf.h.all_data()
     for fi,f in enumerate(fields):
         field_data += dd[f],
         pbar.update(fi)
@@ -146,7 +243,7 @@
     pbar.finish()
     
     #create the octree grid list
-    oct_list =  amr_utils.OctreeGridList(grids)
+    #oct_list =  amr_utils.OctreeGridList(grids)
     
     #initialize arrays to be passed to the recursion algo
     o_length = na.sum(levels_all.values())
@@ -156,115 +253,102 @@
     levels   = na.zeros(r_length, dtype='int32')
     pos = position()
     hs       = hilbert_state()
-    refined[0] = 1 #introduce the first cell as divided
-    levels[0]  = start_level-1 #introduce the first cell as divided
-    pos.refined_pos += 1
+    start_time = time.time()
+    if debug:
+        if center is not None: 
+            c = center*pf['kpc']
+        else:
+            c = ile*1.0/pf.domain_dimensions*pf['kpc']
+        printing = lambda x: print_oct(x,pf['kpc'],c)
+    else:
+        printing = None
+    pbar = get_pbar("Building Hilbert DFO octree",len(refined))
     RecurseOctreeDepthFirstHilbert(
-            ile[0],ile[1],ile[2],
-            pos,0, hs, 
+            ile,
+            pos,
+            grids[0], #we always start on the root grid
+            hs, 
             output,refined,levels,
             grids,
             start_level,
-            #physical_center = (ile)*1.0/pf.domain_dimensions*pf['kpc'],
-            physical_center = ile,
-            #physical_width  = pf['kpc'])
-            physical_width  = pf.domain_dimensions)
+            debug=printing,
+            tracker=pbar)
+    pbar.finish()
     #by time we get it here the 'current' position is actually 
     #for the next spot, so we're off by 1
+    print 'took %1.2e seconds'%(time.time()-start_time)
     print 'refinement tree # of cells %i, # of leaves %i'%(pos.refined_pos,pos.output_pos) 
     output  = output[:pos.output_pos]
     refined = refined[:pos.refined_pos] 
     levels = levels[:pos.refined_pos] 
-    return output,refined
+    return output,refined,dd,pos.refined_pos
 
-def print_row(level,ple,pre,pw,pc,hs):
-    print level, 
-    print '%1.5f %1.5f %1.5f '%tuple(ple*pw-pc),
-    print '%1.5f %1.5f %1.5f '%tuple(pre*pw-pc),
-    print hs.dim, hs.sgn
+def print_oct(data,nd=None,nc=None):
+    ci = data['cell_index']
+    l  = data['level']
+    g  = data['grid']
+    fle = g.left_edges+g.dx*ci
+    fre = g.left_edges+g.dx*(ci+1)
+    if nd is not None:
+        fle *= nd
+        fre *= nd
+        if nc is not None:
+            fle -= nc
+            fre -= nc
+    txt  = '%1i '
+    txt += '%1.3f '*3+'- '
+    txt += '%1.3f '*3
+    print txt%((l,)+tuple(fle)+tuple(fre))
 
-def print_child(level,grid,i,j,k,pw,pc):
-    ple = (grid.left_edges+na.array([i,j,k])*grid.dx)*pw-pc #parent LE 
-    pre = (grid.left_edges+na.array([i+1,j+1,k+1])*grid.dx)*pw-pc #parent RE 
-    print level, 
-    print '%1.5f %1.5f %1.5f '%tuple(ple),
-    print '%1.5f %1.5f %1.5f '%tuple(pre)
+def RecurseOctreeDepthFirstHilbert(cell_index, #integer (rep as a float) on the grids[grid_index]
+                            pos, #the output hydro data position and refinement position
+                            grid,  #grid that this oct lives on (not its children)
+                            hilbert,  #the hilbert state
+                            output, #holds the hydro data
+                            refined, #holds the refinement status  of Octs, 0s and 1s
+                            levels, #For a given Oct, what is the level
+                            grids, #list of all patch grids available to us
+                            level, #starting level of the oct (not the children)
+                            debug=None,tracker=True):
+    if tracker is not None:
+        if pos.refined_pos%1000 == 500 : tracker.update(pos.refined_pos)
+    if debug is not None: 
+        debug(vars())
+    child_grid_index = grid.child_indices[cell_index[0],cell_index[1],cell_index[2]]
+    #record the refinement state
+    refined[pos.refined_pos] = child_grid_index!=-1
+    levels[pos.output_pos]  = level
+    pos.refined_pos+= 1 
+    if child_grid_index == -1 and level>=0: #never subdivide if we are on a superlevel
+        #then we have hit a leaf cell; write it out
+        for field_index in range(grid.fields.shape[0]):
+            output[pos.output_pos,field_index] = \
+                    grid.fields[field_index,cell_index[0],cell_index[1],cell_index[2]]
+        pos.output_pos+= 1 
+    else:
+        #find the grid we descend into
+        #then find the eight cells we break up into
+        subgrid = grids[child_grid_index]
+        #calculate the floating point LE of the children
+        #then translate onto the subgrid integer index 
+        parent_fle  = grid.left_edges + cell_index*grid.dx
+        subgrid_ile = na.floor((parent_fle - subgrid.left_edges)/subgrid.dx)
+        for i, (vertex,hilbert_child) in enumerate(hilbert):
+            #vertex is a combination of three 0s and 1s to 
+            #denote each of the 8 octs
+            if level < 0:
+                subgrid = grid #we don't actually descend if we're a superlevel
+                child_ile = cell_index + vertex*2**(-level)
+            else:
+                child_ile = subgrid_ile+na.array(vertex)
+                child_ile = child_ile.astype('int')
+            RecurseOctreeDepthFirstHilbert(child_ile,pos,
+                    subgrid,hilbert_child,output,refined,levels,grids,level+1,
+                    debug=debug,tracker=tracker)
 
-def RecurseOctreeDepthFirstHilbert(xi,yi,zi,
-                            curpos, gi, 
-                            hs,
-                            output,
-                            refined,
-                            levels,
-                            grids,
-                            level,
-                            physical_center=None,
-                            physical_width=None,
-                            printr=False):
-    grid = grids[gi]
-    m = 2**(-level-1) if level < 0 else 1
-    ple = grid.left_edges+na.array([xi,yi,zi])*grid.dx #parent LE
-    pre = ple+grid.dx*m
-    if printr:
-        print_row(level,ple,pre,physical_width,physical_center,hs)
 
-    #here we go over the 8 octants
-    #in general however, a mesh cell on this level
-    #may have more than 8 children on the next level
-    #so we find the int float center (cxyz) of each child cell
-    # and from that find the child cell indices
-    for iv, (vertex,hs_child) in enumerate(hs):
-        #print ' '*(level+3), level,iv, vertex,curpos.refined_pos,curpos.output_pos,
-        #negative level indicates that we need to build a super-octree
-        if level < 0: 
-            #print ' '
-            #we are not on the root grid yet, but this is 
-            #how many equivalent root grid cells we would have
-            #level -1 means our oct grid's children are the same size
-            #as the root grid (hence the -level-1)
-            dx = 2**(-level-1) #this is the child width 
-            i,j,k = xi+vertex[0]*dx,yi+vertex[1]*dx,zi+vertex[2]*dx
-            #we always refine the negative levels
-            refined[curpos.refined_pos] = 1
-            levels[curpos.refined_pos] = level
-            curpos.refined_pos += 1
-            RecurseOctreeDepthFirstHilbert(i, j, k,
-                                curpos, 0, hs_child, output, refined, levels, grids,
-                                level+1,
-                                physical_center=physical_center,
-                                physical_width=physical_width,)
-        else:
-            i,j,k = xi+vertex[0],yi+vertex[1],zi+vertex[2]
-            ci = grid.child_indices[i,j,k] #is this oct subdivided?
-            if ci == -1:
-                for fi in range(grid.fields.shape[0]):
-                    output[curpos.output_pos,fi] = grid.fields[fi,i,j,k]
-                refined[curpos.refined_pos] = 0
-                levels[curpos.refined_pos] = level
-                curpos.output_pos += 1 #position updated after write
-                curpos.refined_pos += 1
-                if printr:
-                    print_child(level+1,grid,i,j,k,physical_width,physical_center)
-            else:
-                cx = (grid.left_edges[0] + i*grid.dx[0]) #floating le of the child
-                cy = (grid.left_edges[1] + j*grid.dx[0])
-                cz = (grid.left_edges[2] + k*grid.dx[0])
-                refined[curpos.refined_pos] = 1
-                levels[curpos.refined_pos] = level
-                curpos.refined_pos += 1 #position updated after write
-                child_grid = grids[ci]
-                child_dx = child_grid.dx[0]
-                child_leftedges = child_grid.left_edges
-                child_i = int((cx - child_leftedges[0])/child_dx)
-                child_j = int((cy - child_leftedges[1])/child_dx)
-                child_k = int((cz - child_leftedges[2])/child_dx)
-                RecurseOctreeDepthFirstHilbert(child_i, child_j, child_k,
-                                    curpos, ci, hs_child, output, refined, levels, grids,
-                                    level+1,
-                                    physical_center=physical_center,
-                                    physical_width=physical_width)
 
-def create_fits_file(pf,fn, refined,output,particle_data,fre,fle):
+def create_fits_file(pf,fn, refined,output,particle_data,fle,fre):
 
     #first create the grid structure
     structure = pyfits.Column("structure", format="B", array=refined.astype("bool"))
@@ -351,25 +435,66 @@
     x+=1 
     return x
 
-def round_nearest_edge(pf,dle,dre):
+def round_ncells_wide(dds,fle,fre,nwide=None):
+    fc = (fle+fre)/2.0
+    assert na.all(fle < fc)
+    assert na.all(fre > fc)
+    ic = na.rint(fc*dds) #nearest vertex to the center
+    ile,ire = ic.astype('int'),ic.astype('int')
+    cfle,cfre = fc.copy(),fc.copy()
+    idx = na.array([0,0,0]) #just a random non-equal array
+    width = 0.0
+    if nwide is None:
+        #expand until borders are included and
+        #we have an equaly-sized, non-zero box
+        idxq,out=False,True
+        while not out or not idxq:
+            cfle,cfre = fc-width, fc+width
+            ile = na.rint(cfle*dds).astype('int')
+            ire = na.rint(cfre*dds).astype('int')
+            idx = ire-ile
+            width += 0.1/dds
+            #quit if idxq is true:
+            idxq = idx[0]>0 and na.all(idx==idx[0])
+            out  = na.all(fle>cfle) and na.all(fre<cfre) 
+            assert width[0] < 1.1 #can't go larger than the simulation volume
+        nwide = idx[0]
+    else:
+        #expand until we are nwide cells span
+        while not na.all(idx==nwide):
+            assert na.any(idx<=nwide)
+            cfle,cfre = fc-width, fc+width
+            ile = na.rint(cfle*dds).astype('int')
+            ire = na.rint(cfre*dds).astype('int')
+            idx = ire-ile
+            width += 1e-2*1.0/dds
+    assert na.all(idx==nwide)
+    assert idx[0]>0
+    maxlevel = -na.rint(na.log2(nwide)).astype('int')
+    assert abs(na.log2(nwide)-na.rint(na.log2(nwide)))<1e-5 #nwide should be a power of 2
+    return ile,ire,maxlevel,nwide
+
+def round_nearest_edge(pf,fle,fre):
     dds = pf.domain_dimensions
-    ile = na.floor(dle*dds).astype('int')
-    ire = na.ceil(dre*dds).astype('int') 
+    ile = na.floor(fle*dds).astype('int')
+    ire = na.ceil(fre*dds).astype('int') 
     
     #this is the number of cells the super octree needs to expand to
     #must round to the nearest power of 2
     width = na.max(ire-ile)
     width = nearest_power(width)
     
-    maxlevel = na.rint(na.log2(width)).astype('int')
+    maxlevel = -na.rint(na.log2(width)).astype('int')
     return ile,ire,maxlevel
 
 def prepare_star_particles(pf,star_type,pos=None,vel=None, age=None,
                           creation_time=None,initial_mass=None,
                           current_mass=None,metallicity=None,
                           radius = None,
-                          fle=[0.,0.,0.],fre=[1.,1.,1.]):
-    dd = pf.h.all_data()
+                          fle=[0.,0.,0.],fre=[1.,1.,1.],
+                          dd=None):
+    if dd is None:
+        dd = pf.h.all_data()
     idx = dd["particle_type"] == star_type
     if pos is None:
         pos = na.array([dd["particle_position_%s" % ax]
@@ -393,28 +518,29 @@
     if metallicity is None:
         #this should be in dimensionless units, metals mass / particle mass
         metallicity = dd["particle_metallicity"][idx]
+        #metallicity *=0.0198
+        #print 'WARNING: multiplying metallicirt by 0.0198'
     if radius is None:
         radius = initial_mass*0.0+10.0/1000.0 #10pc radius
-
-    formation_time = pf.current_time-age
+    formation_time = pf.current_time*pf['years']-age
     #create every column
     col_list = []
-    col_list.append(pyfits.Column("ID", format="I", array=na.arange(current_mass.size)))
-    col_list.append(pyfits.Column("parent_ID", format="I", array=na.arange(current_mass.size)))
+    col_list.append(pyfits.Column("ID", format="J", array=na.arange(current_mass.size).astype('int32')))
+    col_list.append(pyfits.Column("parent_ID", format="J", array=na.arange(current_mass.size).astype('int32')))
     col_list.append(pyfits.Column("position", format="3D", array=pos, unit="kpc"))
     col_list.append(pyfits.Column("velocity", format="3D", array=vel, unit="kpc/yr"))
     col_list.append(pyfits.Column("creation_mass", format="D", array=initial_mass, unit="Msun"))
     col_list.append(pyfits.Column("formation_time", format="D", array=formation_time, unit="yr"))
     col_list.append(pyfits.Column("radius", format="D", array=radius, unit="kpc"))
     col_list.append(pyfits.Column("mass", format="D", array=current_mass, unit="Msun"))
-    col_list.append(pyfits.Column("age_m", format="D", array=age))
-    col_list.append(pyfits.Column("age_l", format="D", array=age))
+    col_list.append(pyfits.Column("age", format="D", array=age,unit='yr'))
+    #col_list.append(pyfits.Column("age_l", format="D", array=age, unit = 'yr'))
     #For particles, Sunrise takes 
     #the dimensionless metallicity, not the mass of the metals
     col_list.append(pyfits.Column("metallicity", format="D",
         array=metallicity,unit="Msun")) 
-    col_list.append(pyfits.Column("L_bol", format="D",
-        array=na.zeros(current_mass.size)))
+    #col_list.append(pyfits.Column("L_bol", format="D",
+    #    array=na.zeros(current_mass.size)))
     
     #make the table
     cols = pyfits.ColDefs(col_list)
@@ -426,10 +552,10 @@
 def add_fields():
     """Add three Eulerian fields Sunrise uses"""
     def _MetalMass(field, data):
-        return data["Metal_Density"] * data["CellVolume"]
+        return data["Metallicity"] * data["CellMassMsun"]
         
     def _convMetalMass(data):
-        return 1.0/1.989e33
+        return 1.0
     
     add_field("MetalMass", function=_MetalMass,
               convert_function=_convMetalMass)
@@ -542,7 +668,254 @@
         j+=1
         yield vertex, self.descend(j)
 
+def generate_sunrise_cameraset_positions(pf,sim_center,cameraset=None,**kwargs):
+    if cameraset is None:
+        cameraset =cameraset_vertex 
+    campos =[]
+    names = []
+    dd = pf.h.all_data()
+    for name, (scene_pos,scene_up, scene_rot)  in cameraset.iteritems():
+        kwargs['scene_position']=scene_pos
+        kwargs['scene_up']=scene_up
+        kwargs['scene_rot']=scene_rot
+        kwargs['dd']=dd
+        line = generate_sunrise_camera_position(pf,sim_center,**kwargs)
+        campos += line,
+        names += name,
+    return names,campos     
 
+def generate_sunrise_camera_position(pf,sim_center,sim_axis_short=None,sim_axis_long=None,
+                                     sim_sphere_radius=None,sim_halo_radius=None,
+                                     scene_position=[0.0,0.0,1.0],scene_distance=None,
+                                     scene_up=[0.,0.,1.],scene_fov=None,scene_rot=True,
+                                     dd=None):
+    """Translate the simulation to center on sim_center, 
+    then rotate such that sim_up is along the +z direction. Then we are in the 
+    'scene' basis coordinates from which scene_up and scene_offset are defined.
+    Then a position vector, direction vector, up vector and angular field of view
+    are returned. The 3-vectors are in absolute physical kpc, not relative to the center.
+    The angular field of view is in radians. The 10 numbers should match the inputs to
+    camera_positions in Sunrise.
+    """
 
+    sim_center = na.array(sim_center)
+    if sim_sphere_radius is None:
+        sim_sphere_radius = 10.0/pf['kpc']
+    if sim_axis_short is None:
+        if dd is None:
+            dd = pf.h.all_data()
+        pos = na.array([dd["particle_position_%s"%i] for i in "xyz"]).T
+        idx = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))<sim_sphere_radius
+        mas = dd["particle_mass"]
+        pos = pos[idx]
+        mas = mas[idx]
+        mo_inertia = position_moment(pos,mas)
+        eigva, eigvc = linalg.eig(mo_inertia)
+        #order into short, long axes
+        order = eigva.real.argsort()
+        ax_short,ax_med,ax_long = [ eigvc[:,order[i]] for i in (0,1,2)]
+    else:
+        ax_short = sim_axis_short
+        ax_long  = sim_axis_long
+    if sim_halo_radius is None:
+        sim_halo_radius = 200.0/pf['kpc']
+    if scene_distance is  None:
+        scene_distance = 1e4/pf['kpc'] #this is how far the camera is from the target
+    if scene_fov is None:
+        radii = na.sqrt(na.sum((pos-sim_center)**2.0,axis=1))
+        #idx= radii < sim_halo_radius*0.10
+        #radii = radii[idx]
+        #mass  = mas[idx] #copying mass into mas
+        si = na.argsort(radii)
+        radii = radii[si]
+        mass  = mas[si]
+        idx, = na.where(na.cumsum(mass)>mass.sum()/2.0)
+        re = radii[idx[0]]
+        scene_fov = 5*re
+        scene_fov = max(scene_fov,3.0/pf['kpc']) #min size is 3kpc
+        scene_fov = min(scene_fov,20.0/pf['kpc']) #max size is 20kpc
+    #find rotation matrix
+    angles=find_half_euler_angles(ax_short,ax_long)
+    rotation  = euler_matrix(*angles)
+    irotation = numpy.linalg.inv(rotation)
+    axs = (ax_short,ax_med,ax_long)
+    ax_rs,ax_rm,ax_rl = (matmul(rotation,ax) for ax in axs)
+    axs = ([1,0,0],[0,1,0],[0,0,1])
+    ax_is,ax_im,ax_il = (matmul(irotation,ax) for ax in axs)
+    
+    #rotate the camera
+    if scene_rot :
+        irotation = na.eye(3)
+    sunrise_pos = matmul(irotation,na.array(scene_position)*scene_distance) #do NOT include sim center
+    sunrise_up  = matmul(irotation,scene_up)
+    sunrise_direction = -sunrise_pos
+    sunrise_afov = 2.0*na.arctan((scene_fov/2.0)/scene_distance)#convert from distance FOV to angular
 
+    #change to physical kpc
+    sunrise_pos *= pf['kpc']
+    sunrise_direction *= pf['kpc']
+    return sunrise_pos,sunrise_direction,sunrise_up,sunrise_afov,scene_fov
 
+def matmul(m, v):
+    """Multiply a matrix times a set of vectors, or a single vector.
+    My nPart x nDim convention leads to two transpositions, which is
+    why this is hidden away in a function.  Note that if you try to
+    use this to multiply two matrices, it will think that you're
+    trying to multiply by a set of vectors and all hell will break
+    loose."""    
+    assert type(v) is not na.matrix
+    v = na.asarray(v)
+    m, vs = [na.asmatrix(a) for a in (m, v)]
+
+    result = na.asarray(na.transpose(m * na.transpose(vs)))    
+    if len(v.shape) == 1:
+        return result[0]
+    return result
+
+
+def mag(vs):
+    """Compute the norms of a set of vectors or a single vector."""
+    vs = na.asarray(vs)
+    if len(vs.shape) == 1:
+        return na.sqrt( (vs**2).sum() )
+    return na.sqrt( (vs**2).sum(axis=1) )
+
+def mag2(vs):
+    """Compute the squared norms of a set of vectors or a single vector."""
+    vs = na.asarray(vs)
+    if len(vs.shape) == 1:
+        return (vs**2).sum()
+    return (vs**2).sum(axis=1)
+
+
+def position_moment(rs, ms=None, axes=None):
+    """Find second position moment tensor.
+    If axes is specified, weight by the elliptical radius (Allgood 2005)"""
+    rs = na.asarray(rs)
+    Npart, N = rs.shape
+    if ms is None: ms = na.ones(Npart)
+    else: ms = na.asarray(ms)    
+    if axes is not None:
+        axes = na.asarray(axes,dtype=float64)
+        axes = axes/axes.max()
+        norms2 = mag2(rs/axes)
+    else:
+        norms2 = na.ones(Npart)
+    M = ms.sum()
+    result = na.zeros((N,N))
+    # matrix is symmetric, so only compute half of it then fill in the
+    # other half
+    for i in range(N):
+        for j in range(i+1):
+            result[i,j] = ( rs[:,i] * rs[:,j] * ms / norms2).sum() / M
+        
+    result = result + result.transpose() - na.identity(N)*result
+    return result
+    
+
+
+def find_half_euler_angles(v,w,check=True):
+    """Find the passive euler angles that will make v lie along the z
+    axis and w lie along the x axis.  v and w are uncertain up to
+    inversions (ie, eigenvectors) so this routine removes degeneracies
+    associated with that
+
+    (old) Calculate angles to bring a body into alignment with the
+    coordinate system.  If v1 is the SHORTEST axis and v2 is the
+    LONGEST axis, then this will return the angle (Euler angles) to
+    make the long axis line up with the x axis and the short axis line
+    up with the x (z) axis for the 2 (3) dimensional case."""
+    # Make sure the vectors are normalized and orthogonal
+    mag = lambda x: na.sqrt(na.sum(x**2.0))
+    v = v/mag(v)
+    w = w/mag(w)    
+    if check:
+        if abs((v*w).sum()) / (mag(v)*mag(w)) > 1e-5: raise ValueError
+
+    # Break eigenvector scaling degeneracy by forcing it to have a positive
+    # z component
+    if v[2] < 0: v = -v
+    phi,theta = find_euler_phi_theta(v)
+
+    # Rotate w according to phi,theta and then break inversion
+    # degeneracy by requiring that resulting vector has positive
+    # x component
+    w_prime = euler_passive(w,phi,theta,0.)
+    if w_prime[0] < 0: w_prime = -w_prime
+    # Now last Euler angle should just be this:
+    psi = na.arctan2(w_prime[1],w_prime[0])
+    return phi, theta, psi
+
+def find_euler_phi_theta(v):
+    """Find (passive) euler angles that will make v point in the z
+    direction"""
+    # Make sure the vector is normalized
+    v = v/mag(v)
+    theta = na.arccos(v[2])
+    phi = na.arctan2(v[0],-v[1])
+    return phi,theta
+
+def euler_matrix(phi, the, psi):
+    """Make an Euler transformation matrix"""
+    cpsi=na.cos(psi)
+    spsi=na.sin(psi)
+    cphi=na.cos(phi)
+    sphi=na.sin(phi)
+    cthe=na.cos(the)
+    sthe=na.sin(the)
+    m = na.mat(na.zeros((3,3)))
+    m[0,0] = cpsi*cphi - cthe*sphi*spsi
+    m[0,1] = cpsi*sphi + cthe*cphi*spsi
+    m[0,2] = spsi*sthe
+    m[1,0] = -spsi*cphi - cthe*sphi*cpsi
+    m[1,1] = -spsi*sphi + cthe*cphi*cpsi 
+    m[1,2] = cpsi*sthe
+    m[2,0] = sthe*sphi
+    m[2,1] = -sthe*cphi
+    m[2,2] = cthe
+    return m
+
+def euler_passive(v, phi, the, psi):
+    """Passive Euler transform"""
+    m = euler_matrix(phi, the, psi)
+    return matmul(m,v)
+
+
+#the format for these camerasets is name,up vector,camera location, 
+#rotate to the galaxy's up direction?
+cameraset_compass = collections.OrderedDict([
+    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
+    ['bottom',([0.,0.,-1.],[0.,-1.,0.],True)],#up is north=+y
+    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['south',([0.,-1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['east',([1.,0.,0.],[0.,0.,-1.],True)],#up is along z
+    ['west',([-1.,0.,0.],[0.,0.,-1.],True)],#up is along z
+    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
+    ['top-south',([0.,-0.7071,0.7071],[0., 0., -1.],True)],
+    ['top-east',([ 0.7071,0.,0.7071],[0., 0., -1.],True)],
+    ['top-west',([-0.7071,0.,0.7071],[0., 0., -1.],True)]
+    ])
+
+cameraset_vertex = collections.OrderedDict([
+    ['top',([0.,0.,1.],[0.,-1.,0],True)], #up is north=+y
+    ['north',([0.,1.,0.],[0.,0.,-1.],True)],#up is along z
+    ['top-north',([0.,0.7071,0.7071],[0., 0., -1.],True)],
+    ['Z',([0.,0.,1.],[0.,-1.,0],False)], #up is north=+y
+    ['Y',([0.,1.,0.],[0.,0.,-1.],False)],#up is along z
+    ['ZY',([0.,0.7071,0.7071],[0., 0., -1.],False)]
+    ])
+
+#up is 45deg down from z, towards north
+#'bottom-north':([0.,0.7071,-0.7071],[0., 0., -1.])
+#up is -45deg down from z, towards north
+
+cameraset_ring = collections.OrderedDict()
+
+segments = 20
+for angle in na.linspace(0,360,segments):
+    pos = [na.cos(angle),0.,na.sin(angle)]
+    vc  = [na.cos(90-angle),0.,na.sin(90-angle)] 
+    cameraset_ring['02i'%angle]=(pos,vc)
+            
+
+


diff -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -356,7 +356,6 @@
         
 
         if self.pf.file_particle_data:
-            #import pdb; pdb.set_trace()
             lspecies = self.pf.parameters['lspecies']
             wspecies = self.pf.parameters['wspecies']
             Nrow     = self.pf.parameters['Nrow']
@@ -381,6 +380,7 @@
                     npb = clspecies[self.pf.only_particle_type+1]
             np = npb-npa
             self.pf.particle_position   = self.pf.particle_position[npa:npb]
+            #do NOT correct by an offset of 1.0
             #self.pf.particle_position  -= 1.0 #fortran indices start with 0
             pbar.update(2)
             self.pf.particle_position  /= self.pf.domain_dimensions #to unitary units (comoving)
@@ -432,12 +432,6 @@
             for j,np in enumerate(nparticles):
                 mylog.debug('found %i of particle type %i'%(j,np))
             
-            if self.pf.single_particle_mass:
-                #cast all particle masses to the same mass
-                cast_type = self.pf.single_particle_type
-                
-
-            
             self.pf.particle_star_index = i
             
             do_stars = (self.pf.only_particle_type is None) or \
@@ -452,13 +446,15 @@
                     pbar = get_pbar("Stellar Ages        ",n)
                     sages  = \
                         b2t(tbirth,n=n,logger=lambda x: pbar.update(x)).astype('float64')
-                    sages *= 1.0e9
+                    sages *= 1.0e9 #from Gyr to yr
                     sages *= 365*24*3600 #to seconds
                     sages = self.pf.current_time-sages
                     self.pf.particle_age[-nstars:] = sages
                     pbar.finish()
                     self.pf.particle_metallicity1[-nstars:] = metallicity1
                     self.pf.particle_metallicity2[-nstars:] = metallicity2
+                    #self.pf.particle_metallicity1 *= 0.0199 
+                    #self.pf.particle_metallicity2 *= 0.0199 
                     self.pf.particle_mass_initial[-nstars:] = imass*um
                     self.pf.particle_mass[-nstars:] = mass*um
 
@@ -467,25 +463,17 @@
             pos = self.pf.particle_position
             #particle indices travel with the particle positions
             #pos = na.vstack((na.arange(pos.shape[0]),pos.T)).T 
-            #if type(self.pf.grid_particles) == type(5):
-            #    max_level = min(max_level,self.pf.grid_particles)
+            if type(self.pf.grid_particles) == type(5):
+                particle_level = min(self.pf.max_level,self.pf.grid_particles)
+            else:
+                particle_level = 2
             grid_particle_count = na.zeros((len(grids),1),dtype='int64')
-            
-            #grid particles at the finest level, removing them once gridded
-            #pbar = get_pbar("Gridding Particles ",init)
-            #assignment = amr_utils.assign_particles_to_cells(
-            #        self.grid_levels.ravel().astype('int32'),
-            #        self.grid_left_edge.astype('float32'),
-            #        self.grid_right_edge.astype('float32'),
-            #        pos[:,0].astype('float32'),
-            #        pos[:,1].astype('float32'),
-            #        pos[:,2].astype('float32'))
-            #pbar.finish()
 
             pbar = get_pbar("Gridding Particles ",init)
             assignment,ilists = amr_utils.assign_particles_to_cell_lists(
                     self.grid_levels.ravel().astype('int32'),
-                    2, #only bother gridding particles to level 2
+                    na.zeros(len(pos[:,0])).astype('int32')-1,
+                    particle_level, #dont grid particles past this
                     self.grid_left_edge.astype('float32'),
                     self.grid_right_edge.astype('float32'),
                     pos[:,0].astype('float32'),
@@ -493,7 +481,6 @@
                     pos[:,2].astype('float32'))
             pbar.finish()
             
-            
             pbar = get_pbar("Filling grids ",init)
             for gidx,(g,ilist) in enumerate(zip(grids,ilists)):
                 np = len(ilist)
@@ -601,19 +588,19 @@
                  file_particle_header=None, 
                  file_particle_data=None,
                  file_star_data=None,
-                 discover_particles=False,
+                 discover_particles=True,
                  use_particles=True,
                  limit_level=None,
                  only_particle_type = None,
                  grid_particles=False,
                  single_particle_mass=False,
                  single_particle_type=0):
-        import yt.frontends.ramses._ramses_reader as _ramses_reader
         
-        
-        dirn = os.path.dirname(filename)
+        #dirn = os.path.dirname(filename)
         base = os.path.basename(filename)
         aexp = base.split('_')[2].replace('.d','')
+        if not aexp.startswith('a'):
+            aexp = '_'+aexp
         
         self.file_particle_header = file_particle_header
         self.file_particle_data = file_particle_data
@@ -625,22 +612,30 @@
         if limit_level is None:
             self.limit_level = na.inf
         else:
+            limit_level = int(limit_level)
             mylog.info("Using maximum level: %i",limit_level)
             self.limit_level = limit_level
         
+        def repu(x):
+            for i in range(5):
+                x=x.replace('__','_')
+            return x    
         if discover_particles:
             if file_particle_header is None:
                 loc = filename.replace(base,'PMcrd%s.DAT'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_particle_header = loc
                     mylog.info("Discovered particle header: %s",os.path.basename(loc))
             if file_particle_data is None:
                 loc = filename.replace(base,'PMcrs0%s.DAT'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_particle_data = loc
                     mylog.info("Discovered particle data:   %s",os.path.basename(loc))
             if file_star_data is None:
                 loc = filename.replace(base,'stars_%s.dat'%aexp)
+                loc = repu(loc)
                 if os.path.exists(loc):
                     self.file_star_data = loc
                     mylog.info("Discovered stellar data:    %s",os.path.basename(loc))
@@ -716,7 +711,7 @@
         self.conversion_factors["Mass"] = self.parameters["aM0"] * 1.98892e33
         self.conversion_factors["Density"] = self.rho0*(aexpn**-3.0)
         self.conversion_factors["GasEnergy"] = self.rho0*self.v0**2*(aexpn**-5.0)
-        self.conversion_factors["Temperature"] = tr
+        #self.conversion_factors["Temperature"] = tr 
         self.conversion_factors["Potential"] = 1.0
         self.cosmological_simulation = True
         
@@ -951,4 +946,3 @@
     def _is_valid(self, *args, **kwargs):
         return False # We make no effort to auto-detect ART data
 
-


diff -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -36,6 +36,7 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
+import yt.utilities.amr_utils as amr_utils
 
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
@@ -43,6 +44,7 @@
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
+import yt.utilities.amr_utils as amr_utils
 import numpy as na
 
 #these are just the hydro fields
@@ -60,6 +62,7 @@
 #Hydro Fields that are verified to be OK unit-wise:
 #Density
 #Temperature
+#metallicities
 
 #Hydro Fields that need to be tested:
 #TotalEnergy
@@ -69,9 +72,6 @@
 #GasEnergy
 #MetalDensity SNII + SNia
 #Potentials
-
-#Hydro Derived fields that are untested:
-#metallicities
 #xyzvelocity
 
 #Particle fields that are tested:
@@ -87,6 +87,8 @@
 #Particle fields that are untested:
 #NONE
 
+#Other checks:
+#CellMassMsun == Density * CellVolume
 
 def _convertDensity(data):
     return data.convert("Density")
@@ -143,13 +145,13 @@
 KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
 
 def _convertMetalDensitySNII(data):
-    return data.convert("Density")
+    return data.convert('Density')
 KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
 
 def _convertMetalDensitySNIa(data):
-    return data.convert("Density")
+    return data.convert('Density')
 KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
 KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
@@ -169,68 +171,101 @@
 ####### Derived fields
 
 def _temperature(field, data):
-    tr  = data["GasEnergy"].astype('float64') #~1
-    d = data["Density"].astype('float64')
-    d[d==0.0] = -1.0 #replace the zeroes (that cause infs)
-    tr /= d #
-    assert na.all(na.isfinite(tr)) #diagnosing some problem...
+    cd = data.pf.conversion_factors["Density"]
+    cg = data.pf.conversion_factors["GasEnergy"]
+    ct = data.pf.tr
+    dg = data["GasEnergy"].astype('float64')
+    dd = data["Density"].astype('float64')
+    di = dd==0.0
+    #dd[di] = -1.0
+    tr = dg/dd
+    #tr[na.isnan(tr)] = 0.0 
+    #if data.id==460:
+    #    import pdb;pdb.set_trace()
+    tr /= data.pf.conversion_factors["GasEnergy"]
+    tr *= data.pf.conversion_factors["Density"]
+    tr *= data.pf.tr
+    #tr[di] = -1.0 #replace the zero-density points with zero temp
+    #print tr.min()
+    #assert na.all(na.isfinite(tr))
     return tr
 def _converttemperature(data):
-    x  = data.pf.conversion_factors["Density"]
-    x /= data.pf.conversion_factors["GasEnergy"]
-    x *= data.pf.conversion_factors["Temperature"]
+    x = data.pf.conversion_factors["Temperature"]
+    x = 1.0
     return x
-add_art_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Temperature"]._units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._projected_units = r"\mathrm{K}"
-KnownARTFields["Temperature"]._convert_function=_converttemperature
+add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
+ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
     tr  = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_art_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metallicity_SNII"]._units = r""
-KnownARTFields["Metallicity_SNII"]._projected_units = r""
+add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNII"]._units = r""
+ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
     tr  = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_art_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metallicity_SNIa"]._units = r""
-KnownARTFields["Metallicity_SNIa"]._projected_units = r""
+add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity_SNIa"]._units = r""
+ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
+
+def _metallicity(field, data):
+    tr  = data["Metal_Density"] / data["Density"]
+    return tr
+add_field("Metallicity", function=_metallicity, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metallicity"]._units = r""
+ARTFieldInfo["Metallicity"]._projected_units = r""
 
 def _x_velocity(data):
     tr  = data["XMomentumDensity"]/data["Density"]
     return tr
-add_field("x_velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["x_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["x_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _y_velocity(data):
     tr  = data["YMomentumDensity"]/data["Density"]
     return tr
-add_field("y_velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["y_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["y_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _z_velocity(data):
     tr  = data["ZMomentumDensity"]/data["Density"]
     return tr
-add_field("z_velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
-ARTFieldInfo["z_velocity"]._units = r"\rm{cm}/\rm{s}"
-ARTFieldInfo["z_velocity"]._projected_units = r"\rm{cm}/\rm{s}"
+add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
+ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 
 def _metal_density(field, data):
     tr  = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
     return tr
-add_art_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-KnownARTFields["Metal_Density"]._units = r""
-KnownARTFields["Metal_Density"]._projected_units = r""
+add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r""
+ARTFieldInfo["Metal_Density"]._projected_units = r""
 
 
 #Particle fields
 
 #Derived particle fields
 
+def mass_dm(field, data):
+    idx = data["particle_type"]<5
+    #make a dumb assumption that the mass is evenly spread out in the grid
+    #must return an array the shape of the grid cells
+    tr  = data["Ones"] #create a grid in the right size
+    if na.sum(idx)>0:
+        tr /= na.prod(tr.shape) #divide by the volume
+        tr *= na.sum(data['particle_mass'][idx]) #Multiply by total contained mass
+        return tr
+    else:
+        return tr*0.0
+    
+add_field("particle_cell_mass_dm", function=mass_dm,
+          validators=[ValidateSpatial(0)])
+


diff -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 yt/utilities/lib/CICDeposit.pyx
--- a/yt/utilities/lib/CICDeposit.pyx
+++ b/yt/utilities/lib/CICDeposit.pyx
@@ -150,6 +150,7 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 def assign_particles_to_cell_lists(np.ndarray[np.int32_t, ndim=1] levels, #for cells
+                              np.ndarray[np.int32_t,ndim=1] assign,
                               np.int64_t level_max, 
                               np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
                               np.ndarray[np.float32_t, ndim=2] right_edges,
@@ -162,9 +163,10 @@
     cdef long i,j,level
     cdef long npart = pos_x.shape[0]
     cdef long ncells = left_edges.shape[0] 
-    cdef np.ndarray[np.int32_t, ndim=1] assign = np.zeros(npart,dtype='int32')-1
+    #cdef np.ndarray[np.int32_t, ndim=1] assign 
+    #assign = np.zeros(npart,dtype='int32')-1
     index_lists = []
-    for level in range(level_max,0,-1):
+    for level in range(level_max,-1,-1):
         #start with the finest level
         for i in range(ncells):
             #go through every cell on the finest level first
@@ -182,4 +184,39 @@
     return assign,index_lists
 
     
+ at cython.boundscheck(False)
+ at cython.wraparound(False)
+ at cython.cdivision(True)
+def recursive_particle_assignment(grids, grid, 
+                                  np.ndarray[np.float32_t, ndim=2] left_edges, #many cells
+                                  np.ndarray[np.float32_t, ndim=2] right_edges,
+                                  np.ndarray[np.float32_t, ndim=1] pos_x, #particle
+                                  np.ndarray[np.float32_t, ndim=1] pos_y,
+                                  np.ndarray[np.float32_t, ndim=1] pos_z):
+    #start on level zero, grid particles onto every mesh
+    #every particle we are fed, we can assume it exists on our grid
+    #must fill in the grid_particle_count array
+    #and particle_indices for every grid
+    cdef long i,j,level
+    cdef long npart = pos_x.shape[0]
+    cdef long ncells = left_edges.shape[0] 
+    cdef np.ndarray[np.int32_t, ndim=1] assigned       = np.zeros(npart,dtype='int32')
+    cdef np.ndarray[np.int32_t, ndim=1] never_assigned = np.ones(npart,dtype='int32')
+    for i in np.unique(grid.child_index_mask):
+        if i== -1: continue
+        #assigned to this subgrid
+        assigned = np.zeros(npart,dtype='int32') 
+        if (left_edges[i,0] <= pos_x[j] <= right_edges[i,0]):
+            if (left_edges[i,1] <= pos_y[j] <= right_edges[i,1]):
+                if (left_edges[i,2] <= pos_z[j] <= right_edges[i,2]):
+                    assigned[j]=1
+                    never_assigned[j]=0
+        if np.sum(assigned)>0:
+            recursive_particle_assignment(grids,grid,left_edges,right_edges,
+                                           pos_x[assigned],pos_y[assigned],pos_z[assigned])
+    #now we have assigned particles to other subgrids, we are left with particles on our grid
     
+            
+
+
+


diff -r 858ba433d24ab2f8d77f8c9d6f495e05d4c7d9c1 -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -602,7 +602,8 @@
     def get_field_units(self, field, strip_mathml = True):
         ds = self._frb.data_source
         pf = self.pf
-        if ds._type_name == "slice" or "cutting":
+        if ds._type_name in ("slice", "cutting") or \
+           (ds._type_name == "proj" and ds.weight_field is not None):
             units = pf.field_info[field].get_units()
         elif ds._type_name == "proj":
             units = pf.field_info[field].get_projected_units()



https://bitbucket.org/yt_analysis/yt/changeset/e92897d304c3/
changeset:   e92897d304c3
branch:      yt
user:        sskory
date:        2012-07-11 00:19:38
summary:     Adding the ability to auto-sense extra fields for text halos.
affected #:  1 file

diff -r 9f4a72c839c5921e92f78b9ed624a6174f41fb73 -r e92897d304c39ce052dffa90bc654e4c693563d8 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -78,7 +78,7 @@
 
     def __init__(self, halo_list, id, indices=None, size=None, CoM=None,
         max_dens_point=None, group_total_mass=None, max_radius=None,
-        bulk_vel=None, tasks=None, rms_vel=None):
+        bulk_vel=None, tasks=None, rms_vel=None, supp=None):
         self._max_dens = halo_list._max_dens
         self.id = id
         self.data = halo_list._data_source
@@ -101,6 +101,11 @@
         self.rms_vel = rms_vel
         self.bin_count = None
         self.overdensity = None
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
 
     def center_of_mass(self):
         r"""Calculate and return the center of mass.
@@ -801,7 +806,7 @@
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
-        e1_vec=None, tilt=None):
+        e1_vec=None, tilt=None, supp=None):
 
         self.pf = pf
         self.gridsize = (self.pf.domain_right_edge - \
@@ -827,6 +832,11 @@
         self.particle_mask = None
         self.ds_sort = None
         self.indices = na.array([])  # Never used for a LoadedHalo.
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
@@ -995,7 +1005,7 @@
 
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
-        e1_vec=None, tilt=None):
+        e1_vec=None, tilt=None, supp=None):
 
         self.pf = pf
         self.gridsize = (self.pf.domain_right_edge - \
@@ -1016,40 +1026,16 @@
         self.bin_count = None
         self.overdensity = None
         self.indices = na.array([])  # Never used for a LoadedHalo.
+        # A supplementary data dict.
+        if supp is None:
+            self.supp = {}
+        else:
+            self.supp = supp
 
     def __getitem__(self, key):
         # We'll just pull it from the sphere.
         return self.get_sphere()[key]
 
-    def get_sphere(self):
-        r"""Returns a sphere source.
-
-        This will generate a new, empty sphere source centered on this halo,
-        with the maximum radius of the halo. This can be used like any other
-        data container in yt.
-
-        Parameters
-        ----------
-        center_of_mass : bool, optional
-            True chooses the center of mass when
-            calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-
-        Returns
-        -------
-        sphere : `yt.data_objects.api.AMRSphereBase`
-            The empty data source.
-
-        Examples
-        --------
-        >>> sp = halos[0].get_sphere()
-        """
-        cen = self.center_of_mass()
-        r = self.maximum_radius()
-        return self.pf.h.sphere(cen, r)
-
     def maximum_density(self):
         r"""Undefined for text halos."""
         return -1
@@ -1599,6 +1585,9 @@
         # First get the halo particulars.
         lines = file(fname)
         halo = 0
+        base_set = ['x', 'y', 'z', 'r']
+        keys = columns.keys()
+        extra = (len(keys) > 4)
         for line in lines:
             # Skip commented lines.
             if line[0] == comment: continue
@@ -1608,10 +1597,15 @@
             z = float(line[columns['z']])
             r = float(line[columns['r']])
             cen = na.array([x, y, z])
-            self._groups.append(TextHalo(self.pf, halo, size = None,
-                CoM = cen, max_dens_point = None,
-                group_total_mass = None, max_radius = r, bulk_vel = None,
-                rms_vel = None, fnames = None))
+            # Now we see if there's anything else.
+            # temp_dict must exist even when there are no extra columns,
+            # since it is passed unconditionally as 'supp' below.
+            temp_dict = {}
+            if extra:
+                for key in columns:
+                    if key not in base_set:
+                        val = float(line[columns[key]])
+                        temp_dict[key] = val
+            self._groups.append(TextHalo(self.pf, halo,
+                CoM = cen, max_radius = r, supp = temp_dict))
             halo += 1
 
 
@@ -2614,7 +2608,10 @@
         columns : dict
             A dict listing the column name : column number pairs for data
             in the text file. It is zero-based (like Python).
-            An example is {'x':0, 'y':1, 'z':2, 'r':3}.
+            An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
+            Any column name outside of ['x', 'y', 'z', 'r'] will be attached
+            to each halo object in the supplementary dict 'supp'. See
+            example.
         
         comment : String
             If the first character of a line is equal to this, the line is
@@ -2623,7 +2620,10 @@
         Examples
         --------
         >>> pf = load("data0005")
-        >>> halos = LoadTextHaloes(pf, "list.txt", {'x':0, 'y':1, 'z':2, 'r':3},
+        >>> halos = LoadTextHaloes(pf, "list.txt",
+            {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
             comment = ";")
+        >>> halos[0].supp['m']
+            3.28392048e14
         """
         TextHaloList.__init__(self, pf, filename, columns, comment)



https://bitbucket.org/yt_analysis/yt/changeset/b10081b78743/
changeset:   b10081b78743
branch:      yt
user:        sskory
date:        2012-07-11 00:31:10
summary:     Merge.
affected #:  1 file

diff -r e92897d304c39ce052dffa90bc654e4c693563d8 -r b10081b7874364fdd7af1de194adbeb024d6196f yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1137,15 +1137,24 @@
             axes = range(3)
         else:
             axes = [args.axis]
+
+        unit = args.unit
+        if unit is None:
+            unit = '1'
+        width = args.width
+        if width is None:
+            width = 0.5*(pf.domain_right_edge - pf.domain_left_edge)
+        width /= pf[unit]
+
         for ax in axes:
             mylog.info("Adding plot for axis %i", ax)
             if args.projection:
                 plt = ProjectionPlot(pf, ax, args.field, center=center,
-                                     width=args.width,
+                                     width=width,
                                      weight_field=args.weight)
             else:
                 plt = SlicePlot(pf, ax, args.field, center=center,
-                                width=args.width)
+                                width=width)
             if args.grids:
                 plt.draw_grids()
             if args.time:

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list