[yt-svn] commit/yt-3.0: 2 new changesets

commits-noreply at bitbucket.org
Wed Mar 27 06:29:02 PDT 2013


2 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/f49fc8747a99/
Changeset:   f49fc8747a99
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-03-27 14:27:56
Summary:     Fixing RAMSES particle fields.

Previously, RAMSES read the particles completely incorrectly.  This fixes that
by applying validation and splitting the particle IO into a separate routine.

We still need by-type validation and reading, which I think will be eased by
restructuring the IO handler.

Example: http://paste.yt-project.org/show/3310/
Affected #:  2 files

diff -r b7a4dbc55e0c8e835ff314c1fe0e46e5f592ee7e -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -143,6 +143,7 @@
             fpu.skip(f, 1)
             field_offsets[field] = f.tell()
         self.particle_field_offsets = field_offsets
+        self.particle_field_types = dict(particle_fields)
 
     def _read_amr_header(self):
         hvals = {}

diff -r b7a4dbc55e0c8e835ff314c1fe0e46e5f592ee7e -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -60,33 +60,45 @@
     def _read_particle_selection(self, chunks, selector, fields):
         size = 0
         masks = {}
+        chunks = list(chunks)
+        pos_fields = [("all","particle_position_%s" % ax) for ax in "xyz"]
         for chunk in chunks:
             for subset in chunk.objs:
                 # We read the whole thing, then feed it back to the selector
-                offsets = []
-                f = open(subset.domain.part_fn, "rb")
-                foffsets = subset.domain.particle_field_offsets
-                selection = {}
-                for ax in 'xyz':
-                    field = "particle_position_%s" % ax
-                    f.seek(foffsets[field])
-                    selection[ax] = fpu.read_vector(f, 'd')
-                mask = selector.select_points(selection['x'],
-                            selection['y'], selection['z'])
+                selection = self._read_particle_subset(subset, pos_fields)
+                mask = selector.select_points(
+                    selection["all", "particle_position_x"],
+                    selection["all", "particle_position_y"],
+                    selection["all", "particle_position_z"])
                 if mask is None: continue
+                #print "MASK", mask
                 size += mask.sum()
                 masks[id(subset)] = mask
         # Now our second pass
-        tr = dict((f, np.empty(size, dtype="float64")) for f in fields)
+        tr = {}
+        pos = 0
         for chunk in chunks:
             for subset in chunk.objs:
-                f = open(subset.domain.part_fn, "rb")
+                selection = self._read_particle_subset(subset, fields)
                 mask = masks.pop(id(subset), None)
                 if mask is None: continue
-                for ftype, fname in fields:
-                    offsets.append((foffsets[fname], (ftype,fname)))
-                for offset, field in sorted(offsets):
-                    f.seek(offset)
-                    tr[field] = fpu.read_vector(f, 'd')[mask]
+                count = mask.sum()
+                for field in fields:
+                    ti = selection.pop(field)[mask]
+                    if field not in tr:
+                        dt = subset.domain.particle_field_types[field[1]]
+                        tr[field] = np.empty(size, dt)
+                    tr[field][pos:pos+count] = ti
+                pos += count
         return tr
 
+    def _read_particle_subset(self, subset, fields):
+        f = open(subset.domain.part_fn, "rb")
+        foffsets = subset.domain.particle_field_offsets
+        tr = {}
+        #for field in sorted(fields, key=lambda a:foffsets[a]):
+        for field in fields:
+            f.seek(foffsets[field[1]])
+            dt = subset.domain.particle_field_types[field[1]]
+            tr[field] = fpu.read_vector(f, dt)
+        return tr

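For readers skimming the diff, the essence of the fix is a two-pass read: the
first pass loads only the particle positions and asks the selector for a mask
per subset; the second pass allocates each output array once (using the
per-field dtypes recorded in particle_field_types) and fills it from the
masked reads. Below is a minimal sketch of that pattern, where
read_field(subset, field) is a hypothetical stand-in for the fortran-utils
reads done by _read_particle_subset:

import numpy as np

def read_selection(chunks, selector, fields, read_field):
    chunks = list(chunks)  # iterated twice, so materialize the generator
    pos_fields = [("all", "particle_position_%s" % ax) for ax in "xyz"]
    size, masks = 0, {}
    # Pass 1: read positions only and let the selector build a mask.
    for chunk in chunks:
        for subset in chunk.objs:
            x, y, z = [read_field(subset, p) for p in pos_fields]
            mask = selector.select_points(x, y, z)
            if mask is None:
                continue
            size += mask.sum()
            masks[id(subset)] = mask
    # Pass 2: allocate each field once, then fill from the masked reads.
    tr, pos = {}, 0
    for chunk in chunks:
        for subset in chunk.objs:
            mask = masks.pop(id(subset), None)
            if mask is None:
                continue
            count = mask.sum()
            for field in fields:
                data = read_field(subset, field)[mask]
                if field not in tr:
                    tr[field] = np.empty(size, data.dtype)
                tr[field][pos:pos + count] = data
            pos += count
    return tr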

https://bitbucket.org/yt_analysis/yt-3.0/commits/6cf22697b73c/
Changeset:   6cf22697b73c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-03-27 14:28:51
Summary:     Merge
Affected #:  15 files

diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1059,7 +1059,7 @@
 
     _fields = ["particle_position_%s" % ax for ax in 'xyz']
 
-    def __init__(self, data_source, dm_only=True):
+    def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
         *dm_only* is set, only run it on the dark matter particles, otherwise

diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -210,6 +210,8 @@
         """
         Deletes a field
         """
+        if key  not in self.field_data:
+            key = self._determine_fields(key)[0]
         del self.field_data[key]
 
     def _generate_field(self, field):

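The data_containers.py hunk above makes del work with short field names:
cached fields in yt-3.0 are keyed by (ftype, fname) tuples, so a bare string
key is first resolved through _determine_fields. A hedged usage sketch, where
dd stands for any loaded data object:

dd["Density"]      # cached under a tuple key, e.g. ("gas", "Density")
del dd["Density"]  # previously a KeyError; the key is now resolved first
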
diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -678,3 +678,37 @@
     return [np.sum(totals[:,i]) for i in range(n_fields)]
 add_quantity("TotalQuantity", function=_TotalQuantity,
                 combine_function=_combTotalQuantity, n_ret=2)
+
+def _ParticleDensityCenter(data,nbins=3,particle_type="all"):
+    """
+    Find the center of the particle density
+    by histogramming the particles iteratively.
+    """
+    pos = [data[(particle_type,"particle_position_%s"%ax)] for ax in "xyz"]
+    pos = np.array(pos).T
+    mas = data[(particle_type,"particle_mass")]
+    calc_radius= lambda x,y:np.sqrt(np.sum((x-y)**2.0,axis=1))
+    density = 0
+    if pos.shape[0]==0:
+        return -1.0,[-1.,-1.,-1.]
+    while pos.shape[0] > 1:
+        table,bins=np.histogramdd(pos,bins=nbins, weights=mas)
+        bin_size = min((np.max(bins,axis=1)-np.min(bins,axis=1))/nbins)
+        centeridx = np.where(table==table.max())
+        le = np.array([bins[0][centeridx[0][0]],
+                       bins[1][centeridx[1][0]],
+                       bins[2][centeridx[2][0]]])
+        re = np.array([bins[0][centeridx[0][0]+1],
+                       bins[1][centeridx[1][0]+1],
+                       bins[2][centeridx[2][0]+1]])
+        center = 0.5*(le+re)
+        idx = calc_radius(pos,center)<bin_size
+        pos, mas = pos[idx],mas[idx]
+        density = max(density,mas.sum()/bin_size**3.0)
+    return density, center
+def _combParticleDensityCenter(data,densities,centers):
+    i = np.argmax(densities)
+    return densities[i],centers[i]
+
+add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
+             combine_function=_combParticleDensityCenter,n_ret=2)

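The new ParticleDensityCenter derived quantity iteratively histograms the
particles, zooms into the densest bin, and repeats until fewer than two
particles remain, returning the peak density and its center; the combine step
keeps the densest result across chunks. A hedged usage sketch, assuming pf is
a loaded parameter file:

dd = pf.h.all_data()
density, center = dd.quantities["ParticleDensityCenter"](
    nbins=3, particle_type="all")
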
diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -33,6 +33,7 @@
     pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
     pf.current_redshift = 0.0001
     pf.hubble_constant = 0.7
+    pf.omega_matter = 0.27
     for unit in mpc_conversion:
         pf.units[unit+'h'] = pf.units[unit]
         pf.units[unit+'cm'] = pf.units[unit]

diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -30,6 +30,8 @@
 import stat
 import weakref
 import cStringIO
+import difflib
+import glob
 
 from yt.funcs import *
 from yt.geometry.oct_geometry_handler import \
@@ -37,9 +39,9 @@
 from yt.geometry.geometry_handler import \
     GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
-      StaticOutput
+    StaticOutput
 from yt.geometry.oct_container import \
-    RAMSESOctreeContainer
+    ARTOctreeContainer
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, NullFunc
 from .fields import \
@@ -52,20 +54,15 @@
     get_box_grids_level
 import yt.utilities.lib as amr_utils
 
-from .definitions import *
-from .io import _read_frecord
-from .io import _read_record
-from .io import _read_struct
+from yt.frontends.art.definitions import *
+from yt.utilities.fortran_utils import *
 from .io import _read_art_level_info
 from .io import _read_child_mask_level
 from .io import _read_child_level
 from .io import _read_root_level
-from .io import _read_record_size
-from .io import _skip_record
 from .io import _count_art_octs
 from .io import b2t
 
-
 import yt.frontends.ramses._ramses_reader as _ramses_reader
 
 from .fields import ARTFieldInfo, KnownARTFields
@@ -80,13 +77,9 @@
 from yt.utilities.physical_constants import \
     mass_hydrogen_cgs, sec_per_Gyr
 
+
 class ARTGeometryHandler(OctreeGeometryHandler):
-    def __init__(self,pf,data_style="art"):
-        """
-        Life is made simpler because we only have one AMR file
-        and one domain. However, we are matching to the RAMSES
-        multi-domain architecture.
-        """
+    def __init__(self, pf, data_style="art"):
         self.fluid_field_list = fluid_fields
         self.data_style = data_style
         self.parameter_file = weakref.proxy(pf)
@@ -94,7 +87,16 @@
         self.directory = os.path.dirname(self.hierarchy_filename)
         self.max_level = pf.max_level
         self.float_type = np.float64
-        super(ARTGeometryHandler,self).__init__(pf,data_style)
+        super(ARTGeometryHandler, self).__init__(pf, data_style)
+
+    def get_smallest_dx(self):
+        """
+        Returns (in code units) the smallest cell size in the simulation.
+        """
+        # Overloaded
+        pf = self.parameter_file
+        return (1.0/pf.domain_dimensions.astype('f8') /
+                (2**self.max_level)).min()
 
     def _initialize_oct_handler(self):
         """
@@ -102,23 +104,37 @@
         allocate the requisite memory in the oct tree
         """
         nv = len(self.fluid_field_list)
-        self.domains = [ARTDomainFile(self.parameter_file,1,nv)]
+        self.domains = [ARTDomainFile(self.parameter_file, l+1, nv, l)
+                        for l in range(self.pf.max_level)]
         self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
         self.total_octs = sum(self.octs_per_domain)
-        self.oct_handler = RAMSESOctreeContainer(
-            self.parameter_file.domain_dimensions/2, #dd is # of root cells
+        self.oct_handler = ARTOctreeContainer(
+            self.parameter_file.domain_dimensions/2,  # dd is # of root cells
             self.parameter_file.domain_left_edge,
             self.parameter_file.domain_right_edge)
         mylog.debug("Allocating %s octs", self.total_octs)
         self.oct_handler.allocate_domains(self.octs_per_domain)
         for domain in self.domains:
-            domain._read_amr(self.oct_handler)
+            if domain.domain_level == 0:
+                domain._read_amr_root(self.oct_handler)
+            else:
+                domain._read_amr_level(self.oct_handler)
 
     def _detect_fields(self):
         self.particle_field_list = particle_fields
-        self.field_list = set(fluid_fields + particle_fields + particle_star_fields)
+        self.field_list = set(fluid_fields + particle_fields +
+                              particle_star_fields)
         self.field_list = list(self.field_list)
-    
+        # now generate all of the possible particle fields
+        if "wspecies" in self.parameter_file.parameters.keys():
+            wspecies = self.parameter_file.parameters['wspecies']
+            nspecies = len(wspecies)
+            self.parameter_file.particle_types = ["all", "darkmatter", "stars"]
+            for specie in range(nspecies):
+                self.parameter_file.particle_types.append("specie%i" % specie)
+        else:
+            self.parameter_file.particle_types = []
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         super(ARTGeometryHandler, self)._setup_classes(dd)
@@ -127,19 +143,23 @@
     def _identify_base_chunk(self, dobj):
         """
         Take the passed in data source dobj, and use its embedded selector
-        to calculate the domain mask, build the reduced domain 
+        to calculate the domain mask, build the reduced domain
         subsets and oct counts. Attach this information to dobj.
         """
         if getattr(dobj, "_chunk_info", None) is None:
-            #Get all octs within this oct handler
+            # Get all octs within this oct handler
             mask = dobj.selector.select_octs(self.oct_handler)
-            if mask.sum()==0:
+            if mask.sum() == 0:
                 mylog.debug("Warning: selected zero octs")
             counts = self.oct_handler.count_cells(dobj.selector, mask)
-            #For all domains, figure out how many counts we have 
-            #and build a subset=mask of domains 
-            subsets = [ARTDomainSubset(d, mask, c)
-                       for d, c in zip(self.domains, counts) if c > 0]
+            # For all domains, figure out how many counts we have
+            # and build a subset=mask of domains
+            subsets = []
+            for d, c in zip(self.domains, counts):
+                if c < 1:
+                    continue
+                subset = ARTDomainSubset(d, mask, c, d.domain_level)
+                subsets.append(subset)
             dobj._chunk_info = subsets
             dobj.size = sum(counts)
             dobj.shape = (dobj.size,)
@@ -147,8 +167,8 @@
 
     def _chunk_all(self, dobj):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        #We pass the chunk both the current chunk and list of chunks,
-        #as well as the referring data source
+        # We pass the chunk both the current chunk and list of chunks,
+        # as well as the referring data source
         yield YTDataChunk(dobj, "all", oobjs, dobj.size)
 
     def _chunk_spatial(self, dobj, ngz):
@@ -157,7 +177,7 @@
     def _chunk_io(self, dobj):
         """
         Since subsets are calculated per domain,
-        i.e. per file, yield each domain at a time to 
+        i.e. per file, yield each domain at a time to
         organize by IO. We will eventually chunk out NMSU ART
         to be level-by-level.
         """
@@ -165,77 +185,66 @@
         for subset in oobjs:
             yield YTDataChunk(dobj, "io", [subset], subset.cell_count)
 
+
 class ARTStaticOutput(StaticOutput):
     _hierarchy_class = ARTGeometryHandler
     _fieldinfo_fallback = ARTFieldInfo
     _fieldinfo_known = KnownARTFields
 
-    def __init__(self,filename,data_style='art',
-                 fields = None, storage_filename = None,
-                 skip_particles=False,skip_stars=False,
-                 limit_level=None,spread_age=True):
+    def __init__(self, filename, data_style='art',
+                 fields=None, storage_filename=None,
+                 skip_particles=False, skip_stars=False,
+                 limit_level=None, spread_age=True,
+                 force_max_level=None, file_particle_header=None,
+                 file_particle_data=None, file_particle_stars=None):
         if fields is None:
             fields = fluid_fields
         filename = os.path.abspath(filename)
         self._fields_in_file = fields
+        self._file_amr = filename
+        self._file_particle_header = file_particle_header
+        self._file_particle_data = file_particle_data
+        self._file_particle_stars = file_particle_stars
         self._find_files(filename)
-        self.file_amr = filename
         self.parameter_filename = filename
         self.skip_particles = skip_particles
         self.skip_stars = skip_stars
         self.limit_level = limit_level
         self.max_level = limit_level
+        self.force_max_level = force_max_level
         self.spread_age = spread_age
-        self.domain_left_edge = np.zeros(3,dtype='float')
-        self.domain_right_edge = np.zeros(3,dtype='float')+1.0
-        StaticOutput.__init__(self,filename,data_style)
+        self.domain_left_edge = np.zeros(3, dtype='float')
+        self.domain_right_edge = np.zeros(3, dtype='float')+1.0
+        StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-    def _find_files(self,file_amr):
+    def _find_files(self, file_amr):
         """
         Given the AMR base filename, attempt to find the
         particle header, star files, etc.
         """
-        prefix,suffix = filename_pattern['amr'].split('%s')
-        affix = os.path.basename(file_amr).replace(prefix,'')
-        affix = affix.replace(suffix,'')
-        affix = affix.replace('_','')
-        full_affix = affix
-        affix = affix[1:-1]
-        dirname = os.path.dirname(file_amr)
-        for fp in (filename_pattern_hf,filename_pattern):
-            for filetype, pattern in fp.items():
-                #if this attribute is already set skip it
-                if getattr(self,"file_"+filetype,None) is not None:
-                    continue
-                #sometimes the affix is surrounded by an extraneous _
-                #so check for an extra character on either side
-                check_filename = dirname+'/'+pattern%('?%s?'%affix)
-                filenames = glob.glob(check_filename)
-                if len(filenames)>1:
-                    check_filename_strict = \
-                            dirname+'/'+pattern%('?%s'%full_affix[1:])
-                    filenames = glob.glob(check_filename_strict)
-                
-                if len(filenames)==1:
-                    setattr(self,"file_"+filetype,filenames[0])
-                    mylog.info('discovered %s:%s',filetype,filenames[0])
-                elif len(filenames)>1:
-                    setattr(self,"file_"+filetype,None)
-                    mylog.info("Ambiguous number of files found for %s",
-                            check_filename)
-                    for fn in filenames:
-                        faffix = float(affix)
-                else:
-                    setattr(self,"file_"+filetype,None)
+        base_prefix, base_suffix = filename_pattern['amr']
+        possibles = glob.glob(os.path.dirname(file_amr)+"/*")
+        for filetype, (prefix, suffix) in filename_pattern.iteritems():
+            # if this attribute is already set skip it
+            if getattr(self, "_file_"+filetype, None) is not None:
+                continue
+            stripped = file_amr.replace(base_prefix, prefix)
+            stripped = stripped.replace(base_suffix, suffix)
+            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            if match is not None:
+                mylog.info('discovered %s:%s', filetype, match)
+                setattr(self, "_file_"+filetype, match)
+            else:
+                setattr(self, "_file_"+filetype, None)
 
     def __repr__(self):
-        return self.file_amr.rsplit(".",1)[0]
+        return self._file_amr.split('/')[-1]
 
     def _set_units(self):
         """
-        Generates the conversion to various physical units based 
-		on the parameters from the header
+        Generates the conversion to various physical units based
+                on the parameters from the header
         """
         self.units = {}
         self.time_units = {}
@@ -243,9 +252,9 @@
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0
 
-        #spatial units
-        z   = self.current_redshift
-        h   = self.hubble_constant
+        # spatial units
+        z = self.current_redshift
+        h = self.hubble_constant
         boxcm_cal = self.parameters["boxh"]
         boxcm_uncal = boxcm_cal / h
         box_proper = boxcm_uncal/(1+z)
@@ -256,55 +265,59 @@
             self.units[unit+'cm'] = mpc_conversion[unit] * boxcm_uncal
             self.units[unit+'hcm'] = mpc_conversion[unit] * boxcm_cal
 
-        #all other units
+        # all other units
         wmu = self.parameters["wmu"]
         Om0 = self.parameters['Om0']
-        ng  = self.parameters['ng']
+        ng = self.parameters['ng']
         wmu = self.parameters["wmu"]
-        boxh   = self.parameters['boxh'] 
-        aexpn  = self.parameters["aexpn"]
+        boxh = self.parameters['boxh']
+        aexpn = self.parameters["aexpn"]
         hubble = self.parameters['hubble']
 
         cf = defaultdict(lambda: 1.0)
         r0 = boxh/ng
-        P0= 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
-        T_0 = 3.03e5 * r0**2.0 * wmu * Om0 # [K]
+        P0 = 4.697e-16 * Om0**2.0 * r0**2.0 * hubble**2.0
+        T_0 = 3.03e5 * r0**2.0 * wmu * Om0  # [K]
         S_0 = 52.077 * wmu**(5.0/3.0)
         S_0 *= hubble**(-4.0/3.0)*Om0**(1.0/3.0)*r0**2.0
-        #v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
+        # v0 =  r0 * 50.0*1.0e5 * np.sqrt(self.omega_matter)  #cm/s
         v0 = 50.0*r0*np.sqrt(Om0)
         t0 = r0/v0
         rho1 = 1.8791e-29 * hubble**2.0 * self.omega_matter
         rho0 = 2.776e11 * hubble**2.0 * Om0
-        tr = 2./3. *(3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))     
+        tr = 2./3. * (3.03e5*r0**2.0*wmu*self.omega_matter)*(1.0/(aexpn**2))
         aM0 = rho0 * (boxh/hubble)**3.0 / ng**3.0
-        cf['r0']=r0
-        cf['P0']=P0
-        cf['T_0']=T_0
-        cf['S_0']=S_0
-        cf['v0']=v0
-        cf['t0']=t0
-        cf['rho0']=rho0
-        cf['rho1']=rho1
-        cf['tr']=tr
-        cf['aM0']=aM0
+        cf['r0'] = r0
+        cf['P0'] = P0
+        cf['T_0'] = T_0
+        cf['S_0'] = S_0
+        cf['v0'] = v0
+        cf['t0'] = t0
+        cf['rho0'] = rho0
+        cf['rho1'] = rho1
+        cf['tr'] = tr
+        cf['aM0'] = aM0
 
-        #factors to multiply the native code units to CGS
-        cf['Pressure'] = P0 #already cgs
-        cf['Velocity'] = v0/aexpn*1.0e5 #proper cm/s
+        # factors to multiply the native code units to CGS
+        cf['Pressure'] = P0  # already cgs
+        cf['Velocity'] = v0/aexpn*1.0e5  # proper cm/s
         cf["Mass"] = aM0 * 1.98892e33
         cf["Density"] = rho1*(aexpn**-3.0)
         cf["GasEnergy"] = rho0*v0**2*(aexpn**-5.0)
         cf["Potential"] = 1.0
         cf["Entropy"] = S_0
         cf["Temperature"] = tr
+        cf["Time"] = 1.0
+        cf["particle_mass"] = cf['Mass']
+        cf["particle_mass_initial"] = cf['Mass']
         self.cosmological_simulation = True
         self.conversion_factors = cf
-        
-        for particle_field in particle_fields:
-            self.conversion_factors[particle_field] =  1.0
+
         for ax in 'xyz':
             self.conversion_factors["%s-velocity" % ax] = 1.0
+        for pt in particle_fields:
+            if pt not in self.conversion_factors.keys():
+                self.conversion_factors[pt] = 1.0
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
 
@@ -320,72 +333,89 @@
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
         self.parameters.update(constants)
-        #read the amr header
-        with open(self.file_amr,'rb') as f:
-            amr_header_vals = _read_struct(f,amr_header_struct)
-            for to_skip in ['tl','dtl','tlold','dtlold','iSO']:
-                _skip_record(f)
-            (self.ncell,) = struct.unpack('>l', _read_record(f))
+        self.parameters['Time'] = 1.0
+        # read the amr header
+        with open(self._file_amr, 'rb') as f:
+            amr_header_vals = read_attrs(f, amr_header_struct, '>')
+            for to_skip in ['tl', 'dtl', 'tlold', 'dtlold', 'iSO']:
+                skipped = skip(f, endian='>')
+            (self.ncell) = read_vector(f, 'i', '>')[0]
             # Try to figure out the root grid dimensions
             est = int(np.rint(self.ncell**(1.0/3.0)))
             # Note here: this is the number of *cells* on the root grid.
             # This is not the same as the number of Octs.
-            #domain dimensions is the number of root *cells*
+            # domain dimensions is the number of root *cells*
             self.domain_dimensions = np.ones(3, dtype='int64')*est
             self.root_grid_mask_offset = f.tell()
             self.root_nocts = self.domain_dimensions.prod()/8
             self.root_ncells = self.root_nocts*8
-            mylog.debug("Estimating %i cells on a root grid side,"+ \
-                        "%i root octs",est,self.root_nocts)
-            self.root_iOctCh = _read_frecord(f,'>i')[:self.root_ncells]
+            mylog.debug("Estimating %i cells on a root grid side," +
+                        "%i root octs", est, self.root_nocts)
+            self.root_iOctCh = read_vector(f, 'i', '>')[:self.root_ncells]
             self.root_iOctCh = self.root_iOctCh.reshape(self.domain_dimensions,
-                 order='F')
+                                                        order='F')
             self.root_grid_offset = f.tell()
-            #_skip_record(f) # hvar
-            #_skip_record(f) # var
-            self.root_nhvar = _read_frecord(f,'>f',size_only=True)
-            self.root_nvar  = _read_frecord(f,'>f',size_only=True)
-            #make sure that the number of root variables is a multiple of rootcells
-            assert self.root_nhvar%self.root_ncells==0
-            assert self.root_nvar%self.root_ncells==0
-            self.nhydro_variables = ((self.root_nhvar+self.root_nvar)/ 
-                                    self.root_ncells)
-            self.iOctFree, self.nOct = struct.unpack('>ii', _read_record(f))
+            self.root_nhvar = skip(f, endian='>')
+            self.root_nvar = skip(f, endian='>')
+            # make sure that the number of root variables is a multiple of
+            # rootcells
+            assert self.root_nhvar % self.root_ncells == 0
+            assert self.root_nvar % self.root_ncells == 0
+            self.nhydro_variables = ((self.root_nhvar+self.root_nvar) /
+                                     self.root_ncells)
+            self.iOctFree, self.nOct = read_vector(f, 'i', '>')
             self.child_grid_offset = f.tell()
             self.parameters.update(amr_header_vals)
             self.parameters['ncell0'] = self.parameters['ng']**3
-        #read the particle header
-        if not self.skip_particles and self.file_particle_header:
-            with open(self.file_particle_header,"rb") as fh:
-                particle_header_vals = _read_struct(fh,particle_header_struct)
+            # estimate the root level
+            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
+                f,
+                [0, self.child_grid_offset], 1,
+                coarse_grid=self.domain_dimensions[0])
+            del float_center, fl, iocts, nocts
+            self.root_level = root_level
+            mylog.info("Using root level of %02i", self.root_level)
+        # read the particle header
+        if not self.skip_particles and self._file_particle_header:
+            with open(self._file_particle_header, "rb") as fh:
+                particle_header_vals = read_attrs(
+                    fh, particle_header_struct, '>')
                 fh.seek(seek_extras)
                 n = particle_header_vals['Nspecies']
-                wspecies = np.fromfile(fh,dtype='>f',count=10)
-                lspecies = np.fromfile(fh,dtype='>i',count=10)
+                wspecies = np.fromfile(fh, dtype='>f', count=10)
+                lspecies = np.fromfile(fh, dtype='>i', count=10)
             self.parameters['wspecies'] = wspecies[:n]
             self.parameters['lspecies'] = lspecies[:n]
             ls_nonzero = np.diff(lspecies)[:n-1]
-            mylog.info("Discovered %i species of particles",len(ls_nonzero))
+            self.star_type = len(ls_nonzero)
+            mylog.info("Discovered %i species of particles", len(ls_nonzero))
             mylog.info("Particle populations: "+'%1.1e '*len(ls_nonzero),
-                *ls_nonzero)
-            for k,v in particle_header_vals.items():
+                       *ls_nonzero)
+            for k, v in particle_header_vals.items():
                 if k in self.parameters.keys():
                     if not self.parameters[k] == v:
-                        mylog.info("Inconsistent parameter %s %1.1e  %1.1e",k,v,
-                                   self.parameters[k])
+                        mylog.info(
+                            "Inconsistent parameter %s %1.1e  %1.1e", k, v,
+                            self.parameters[k])
                 else:
-                    self.parameters[k]=v
+                    self.parameters[k] = v
             self.parameters_particles = particle_header_vals
-    
-        #setup standard simulation params yt expects to see
+
+        # setup standard simulation params yt expects to see
         self.current_redshift = self.parameters["aexpn"]**-1.0 - 1.0
         self.omega_lambda = amr_header_vals['Oml0']
         self.omega_matter = amr_header_vals['Om0']
         self.hubble_constant = amr_header_vals['hubble']
         self.min_level = amr_header_vals['min_level']
         self.max_level = amr_header_vals['max_level']
-        self.hubble_time  = 1.0/(self.hubble_constant*100/3.08568025e19)
+        if self.limit_level is not None:
+            self.max_level = min(
+                self.limit_level, amr_header_vals['max_level'])
+        if self.force_max_level is not None:
+            self.max_level = self.force_max_level
+        self.hubble_time = 1.0/(self.hubble_constant*100/3.08568025e19)
         self.current_time = b2t(self.parameters['t']) * sec_per_Gyr
+        mylog.info("Max level is %02i", self.max_level)
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
@@ -393,20 +423,24 @@
         Defined for the NMSU file naming scheme.
         This could differ for other formats.
         """
-        fn = ("%s" % (os.path.basename(args[0])))
         f = ("%s" % args[0])
-        prefix, suffix = filename_pattern['amr'].split('%s')
-        if fn.endswith(suffix) and fn.startswith(prefix) and\
-                os.path.exists(f): 
+        prefix, suffix = filename_pattern['amr']
+        with open(f, 'rb') as fh:
+            try:
+                amr_header_vals = read_attrs(fh, amr_header_struct, '>')
                 return True
+            except AssertionError:
+                return False
         return False
 
+
 class ARTDomainSubset(object):
-    def __init__(self, domain, mask, cell_count):
+    def __init__(self, domain, mask, cell_count, domain_level):
         self.mask = mask
         self.domain = domain
         self.oct_handler = domain.pf.h.oct_handler
         self.cell_count = cell_count
+        self.domain_level = domain_level
         level_counts = self.oct_handler.count_levels(
             self.domain.pf.max_level, self.domain.domain_id, mask)
         assert(level_counts.sum() == cell_count)
@@ -432,12 +466,12 @@
     def select_fwidth(self, dobj):
         base_dx = 1.0/self.domain.pf.domain_dimensions
         widths = np.empty((self.cell_count, 3), dtype="float64")
-        dds = (2**self.ires(dobj))
+        dds = (2**self.select_ires(dobj))
         for i in range(3):
-            widths[:,i] = base_dx[i] / dds
+            widths[:, i] = base_dx[i] / dds
         return widths
 
-    def fill(self, content, fields):
+    def fill_root(self, content, ftfields):
         """
         This is called from IOHandler. It takes content
         which is a binary stream, reads the requested field
@@ -446,135 +480,153 @@
         the order they are in in the octhandler.
         """
         oct_handler = self.oct_handler
-        all_fields  = self.domain.pf.h.fluid_field_list
-        fields = [f for ft, f in fields]
-        dest= {}
-        filled = pos = level_offset = 0
+        all_fields = self.domain.pf.h.fluid_field_list
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
         field_idxs = [all_fields.index(f) for f in fields]
+        dest = {}
         for field in fields:
-            dest[field] = np.zeros(self.cell_count, 'float64')
-        for level, offset in enumerate(self.domain.level_offsets):
-            no = self.domain.level_count[level]
-            if level==0:
-                data = _read_root_level(content,self.domain.level_child_offsets,
-                                       self.domain.level_count)
-                data = data[field_idxs,:]
-            else:
-                data = _read_child_level(content,self.domain.level_child_offsets,
-                                         self.domain.level_offsets,
-                                         self.domain.level_count,level,fields,
-                                         self.domain.pf.domain_dimensions,
-                                         self.domain.pf.parameters['ncell0'])
-            source= {}
-            for i,field in enumerate(fields):
-                source[field] = np.empty((no, 8), dtype="float64")
-                source[field][:,:] = np.reshape(data[i,:],(no,8))
-            level_offset += oct_handler.fill_level(self.domain.domain_id, 
-                                   level, dest, source, self.mask, level_offset)
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        source = {}
+        data = _read_root_level(content, self.domain.level_child_offsets,
+                                self.domain.level_count)
+        for field, i in zip(fields, field_idxs):
+            temp = np.reshape(data[i, :], self.domain.pf.domain_dimensions,
+                              order='F').astype('float64').T
+            source[field] = temp
+        level_offset += oct_handler.fill_level_from_grid(
+            self.domain.domain_id,
+            level, dest, source, self.mask, level_offset)
         return dest
 
+    def fill_level(self, content, ftfields):
+        oct_handler = self.oct_handler
+        fields = [f for ft, f in ftfields]
+        level_offset = 0
+        dest = {}
+        for field in fields:
+            dest[field] = np.zeros(self.cell_count, 'float64')-1.
+        level = self.domain_level
+        no = self.domain.level_count[level]
+        noct_range = [0, no]
+        source = _read_child_level(
+            content, self.domain.level_child_offsets,
+            self.domain.level_offsets,
+            self.domain.level_count, level, fields,
+            self.domain.pf.domain_dimensions,
+            self.domain.pf.parameters['ncell0'],
+            noct_range=noct_range)
+        nocts_filling = noct_range[1]-noct_range[0]
+        level_offset += oct_handler.fill_level(self.domain.domain_id,
+                                               level, dest, source,
+                                               self.mask, level_offset,
+                                               noct_range[0],
+                                               nocts_filling)
+        return dest
+
+
 class ARTDomainFile(object):
     """
     Read in the AMR, left/right edges, fill out the octhandler
     """
-    #We already read in the header in static output,
-    #and since these headers are defined in only a single file it's
-    #best to leave them in the static output
+    # We already read in the header in static output,
+    # and since these headers are defined in only a single file it's
+    # best to leave them in the static output
     _last_mask = None
     _last_seletor_id = None
 
-    def __init__(self,pf,domain_id,nvar):
+    def __init__(self, pf, domain_id, nvar, level):
         self.nvar = nvar
         self.pf = pf
         self.domain_id = domain_id
+        self.domain_level = level
         self._level_count = None
         self._level_oct_offsets = None
         self._level_child_offsets = None
 
     @property
     def level_count(self):
-        #this is number of *octs*
-        if self._level_count is not None: return self._level_count
+        # this is number of *octs*
+        if self._level_count is not None:
+            return self._level_count
         self.level_offsets
-        return self._level_count
+        return self._level_count[self.domain_level]
 
     @property
     def level_child_offsets(self):
-        if self._level_count is not None: return self._level_child_offsets
+        if self._level_count is not None:
+            return self._level_child_offsets
         self.level_offsets
         return self._level_child_offsets
 
     @property
-    def level_offsets(self): 
-        #this is used by the IO operations to find the file offset,
-        #and then start reading to fill values
-        #note that this is called hydro_offset in ramses
-        if self._level_oct_offsets is not None: 
+    def level_offsets(self):
+        # this is used by the IO operations to find the file offset,
+        # and then start reading to fill values
+        # note that this is called hydro_offset in ramses
+        if self._level_oct_offsets is not None:
             return self._level_oct_offsets
         # We now have to open the file and calculate it
-        f = open(self.pf.file_amr, "rb")
+        f = open(self.pf._file_amr, "rb")
         nhydrovars, inoll, _level_oct_offsets, _level_child_offsets = \
             _count_art_octs(f,  self.pf.child_grid_offset, self.pf.min_level,
                             self.pf.max_level)
-        #remember that the root grid is by itself; manually add it back in
+        # remember that the root grid is by itself; manually add it back in
         inoll[0] = self.pf.domain_dimensions.prod()/8
         _level_child_offsets[0] = self.pf.root_grid_offset
         self.nhydrovars = nhydrovars
-        self.inoll = inoll #number of octs
+        self.inoll = inoll  # number of octs
         self._level_oct_offsets = _level_oct_offsets
         self._level_child_offsets = _level_child_offsets
         self._level_count = inoll
         return self._level_oct_offsets
-    
-    def _read_amr(self, oct_handler):
+
+    def _read_amr_level(self, oct_handler):
         """Open the oct file, read in octs level-by-level.
-           For each oct, only the position, index, level and domain 
+           For each oct, only the position, index, level and domain
            are needed - its position in the octree is found automatically.
            The most important is finding all the information to feed
            oct_handler.add
         """
-        #on the root level we typically have 64^3 octs
-        #giving rise to 128^3 cells
-        #but on level 1 instead of 128^3 octs, we have 256^3 octs
-        #leave this code here instead of static output - it's memory intensive
         self.level_offsets
-        f = open(self.pf.file_amr, "rb")
-        #add the root *cell* not *oct* mesh
+        f = open(self.pf._file_amr, "rb")
+        level = self.domain_level
+        unitary_center, fl, iocts, nocts, root_level = _read_art_level_info(
+            f,
+            self._level_oct_offsets, level,
+            coarse_grid=self.pf.domain_dimensions[0],
+            root_level=self.pf.root_level)
+        nocts_check = oct_handler.add(self.domain_id, level, nocts,
+                                      unitary_center, self.domain_id)
+        assert(nocts_check == nocts)
+        mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
+                    nocts, level, oct_handler.nocts)
+
+    def _read_amr_root(self, oct_handler):
+        self.level_offsets
+        f = open(self.pf._file_amr, "rb")
+        # add the root *cell* not *oct* mesh
+        level = self.domain_level
         root_octs_side = self.pf.domain_dimensions[0]/2
         NX = np.ones(3)*root_octs_side
+        octs_side = NX*2**level
         LE = np.array([0.0, 0.0, 0.0], dtype='float64')
         RE = np.array([1.0, 1.0, 1.0], dtype='float64')
         root_dx = (RE - LE) / NX
         LL = LE + root_dx/2.0
         RL = RE - root_dx/2.0
-        #compute floating point centers of root octs
-        root_fc= np.mgrid[LL[0]:RL[0]:NX[0]*1j,
-                          LL[1]:RL[1]:NX[1]*1j,
-                          LL[2]:RL[2]:NX[2]*1j ]
-        root_fc= np.vstack([p.ravel() for p in root_fc]).T
-        nocts_check = oct_handler.add(1, 0, root_octs_side**3,
+        # compute floating point centers of root octs
+        root_fc = np.mgrid[LL[0]:RL[0]:NX[0]*1j,
+                           LL[1]:RL[1]:NX[1]*1j,
+                           LL[2]:RL[2]:NX[2]*1j]
+        root_fc = np.vstack([p.ravel() for p in root_fc]).T
+        nocts_check = oct_handler.add(self.domain_id, level,
+                                      root_octs_side**3,
                                       root_fc, self.domain_id)
         assert(oct_handler.nocts == root_fc.shape[0])
-        nocts_added = root_fc.shape[0]
         mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                    root_octs_side**3, 0,nocts_added)
-        for level in xrange(1, self.pf.max_level+1):
-            left_index, fl, iocts, nocts,root_level = _read_art_level_info(f, 
-                self._level_oct_offsets,level,
-                coarse_grid=self.pf.domain_dimensions[0])
-            left_index/=2
-            #at least one of the indices should be odd
-            #assert np.sum(left_index[:,0]%2==1)>0
-            octs_side = NX*2**level
-            float_left_edge = left_index.astype("float64") / octs_side
-            float_center = float_left_edge + 0.5*1.0/octs_side
-            #all floatin unitary positions should fit inside the domain
-            assert np.all(float_center<1.0)
-            nocts_check = oct_handler.add(1,level, nocts, float_left_edge, self.domain_id)
-            nocts_added += nocts
-            assert(oct_handler.nocts == nocts_added)
-            mylog.debug("Added %07i octs on level %02i, cumulative is %07i",
-                        nocts, level,nocts_added)
+                    root_octs_side**3, 0, oct_handler.nocts)
 
     def select(self, selector):
         if id(selector) == self._last_selector_id:
@@ -585,8 +637,8 @@
 
     def count(self, selector):
         if id(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
+            if self._last_mask is None:
+                return 0
             return self._last_mask.sum()
         self.select(selector)
         return self.count(selector)
-

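A large part of the ART data_structures.py rewrite above replaces the brittle
affix-matching in _find_files with fuzzy matching: the expected companion
filename is built by swapping the AMR prefix/suffix for each file type's
(prefix, suffix) pair, then difflib.get_close_matches picks the closest real
file. A minimal sketch of that idea (find_companion is illustrative, not yt
API):

import difflib
import glob
import os

def find_companion(file_amr, prefix, suffix, base=('10MpcBox_', '.d')):
    # Candidate files living next to the AMR file.
    candidates = glob.glob(os.path.dirname(file_amr) + "/*")
    # Build the name we would expect for this file type.
    expected = file_amr.replace(base[0], prefix).replace(base[1], suffix)
    # Closest match above a 0.6 similarity cutoff, if any.
    matches = difflib.get_close_matches(expected, candidates, 1, 0.6)
    return matches[0] if matches else None

Returning None when nothing clears the cutoff is deliberate in this sketch,
since a missing particle or star file is a normal configuration.
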
diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/frontends/art/definitions.py
--- a/yt/frontends/art/definitions.py
+++ b/yt/frontends/art/definitions.py
@@ -25,7 +25,10 @@
 
 """
 
-fluid_fields= [ 
+# If not otherwise specified, we are big endian
+endian = '>'
+
+fluid_fields = [
     'Density',
     'TotalEnergy',
     'XMomentumDensity',
@@ -40,32 +43,29 @@
     'PotentialOld'
 ]
 
-hydro_struct = [('pad1','>i'),('idc','>i'),('iOctCh','>i')]
+hydro_struct = [('pad1', '>i'), ('idc', '>i'), ('iOctCh', '>i')]
 for field in fluid_fields:
-    hydro_struct += (field,'>f'),
-hydro_struct += ('pad2','>i'),
+    hydro_struct += (field, '>f'),
+hydro_struct += ('pad2', '>i'),
 
-particle_fields= [
-    'particle_age',
+particle_fields = [
+    'particle_mass',  # stars have variable mass
     'particle_index',
-    'particle_mass',
-    'particle_mass_initial',
-    'particle_creation_time',
-    'particle_metallicity1',
-    'particle_metallicity2',
-    'particle_metallicity',
+    'particle_type',
     'particle_position_x',
     'particle_position_y',
     'particle_position_z',
     'particle_velocity_x',
     'particle_velocity_y',
     'particle_velocity_z',
-    'particle_type',
-    'particle_index'
+    'particle_mass_initial',
+    'particle_creation_time',
+    'particle_metallicity1',
+    'particle_metallicity2',
+    'particle_metallicity',
 ]
 
 particle_star_fields = [
-    'particle_age',
     'particle_mass',
     'particle_mass_initial',
     'particle_creation_time',
@@ -74,110 +74,65 @@
     'particle_metallicity',
 ]
 
-filename_pattern = {				
-	'amr':'10MpcBox_csf512_%s.d',
-	'particle_header':'PMcrd%s.DAT',
-	'particle_data':'PMcrs0%s.DAT',
-	'particle_stars':'stars_%s.dat'
-}
 
-filename_pattern_hf = {				
-	'particle_header':'PMcrd_%s.DAT',
-	'particle_data':'PMcrs0_%s.DAT',
+filename_pattern = {
+    'amr': ['10MpcBox_', '.d'],
+    'particle_header': ['PMcrd', '.DAT'],
+    'particle_data': ['PMcrs', '.DAT'],
+    'particle_stars': ['stars', '.dat']
 }
 
 amr_header_struct = [
-    ('>i','pad byte'),
-    ('>256s','jname'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','istep'),
-    ('>d','t'),
-    ('>d','dt'),
-    ('>f','aexpn'),
-    ('>f','ainit'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','boxh'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','Omb0'),
-    ('>f','hubble'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>i','nextras'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>f','extra1'),
-    ('>f','extra2'),
-    ('>i','pad byte'),
-    ('>i','pad byte'),
-    ('>256s','lextra'),
-    ('>256s','lextra'),
-    ('>i','pad byte'),
-    ('>i', 'pad byte'),
-    ('>i', 'min_level'),
-    ('>i', 'max_level'),
-    ('>i', 'pad byte'),
+    ('jname', 1, '256s'),
+    (('istep', 't', 'dt', 'aexpn', 'ainit'), 1, 'iddff'),
+    (('boxh', 'Om0', 'Oml0', 'Omb0', 'hubble'), 5, 'f'),
+    ('nextras', 1, 'i'),
+    (('extra1', 'extra2'), 2, 'f'),
+    ('lextra', 1, '512s'),
+    (('min_level', 'max_level'), 2, 'i')
 ]
 
-particle_header_struct =[
-    ('>i','pad'),
-    ('45s','header'), 
-    ('>f','aexpn'),
-    ('>f','aexp0'),
-    ('>f','amplt'),
-    ('>f','astep'),
-    ('>i','istep'),
-    ('>f','partw'),
-    ('>f','tintg'),
-    ('>f','Ekin'),
-    ('>f','Ekin1'),
-    ('>f','Ekin2'),
-    ('>f','au0'),
-    ('>f','aeu0'),
-    ('>i','Nrow'),
-    ('>i','Ngridc'),
-    ('>i','Nspecies'),
-    ('>i','Nseed'),
-    ('>f','Om0'),
-    ('>f','Oml0'),
-    ('>f','hubble'),
-    ('>f','Wp5'),
-    ('>f','Ocurv'),
-    ('>f','Omb0'),
-    ('>%ds'%(396),'extras'),
-    ('>f','unknown'),
-    ('>i','pad')
+particle_header_struct = [
+    (('header',
+     'aexpn', 'aexp0', 'amplt', 'astep',
+     'istep',
+     'partw', 'tintg',
+     'Ekin', 'Ekin1', 'Ekin2',
+     'au0', 'aeu0',
+     'Nrow', 'Ngridc', 'Nspecies', 'Nseed',
+     'Om0', 'Oml0', 'hubble', 'Wp5', 'Ocurv', 'Omb0',
+     'extras', 'unknown'),
+     1,
+     '45sffffi'+'fffffff'+'iiii'+'ffffff'+'396s'+'f')
 ]
 
 star_struct = [
-        ('>d',('tdum','adum')),
-        ('>i','nstars'),
-        ('>d',('ws_old','ws_oldi')),
-        ('>f','mass'),
-        ('>f','imass'),
-        ('>f','tbirth'),
-        ('>f','metallicity1'),
-        ('>f','metallicity2')
-        ]
+    ('>d', ('tdum', 'adum')),
+    ('>i', 'nstars'),
+    ('>d', ('ws_old', 'ws_oldi')),
+    ('>f', 'particle_mass'),
+    ('>f', 'particle_mass_initial'),
+    ('>f', 'particle_creation_time'),
+    ('>f', 'particle_metallicity1'),
+    ('>f', 'particle_metallicity2')
+]
 
 star_name_map = {
-        'particle_mass':'mass',
-        'particle_mass_initial':'imass',
-        'particle_age':'tbirth',
-        'particle_metallicity1':'metallicity1',
-        'particle_metallicity2':'metallicity2',
-        'particle_metallicity':'metallicity',
-        }
+    'particle_mass': 'mass',
+    'particle_mass_initial': 'imass',
+    'particle_creation_time': 'tbirth',
+    'particle_metallicity1': 'metallicity1',
+    'particle_metallicity2': 'metallicity2',
+    'particle_metallicity': 'metallicity',
+}
 
 constants = {
-    "Y_p":0.245,
-    "gamma":5./3.,
-    "T_CMB0":2.726,
-    "T_min":300.,
-    "ng":128,
-    "wmu":4.0/(8.0-5.0*0.245)
+    "Y_p": 0.245,
+    "gamma": 5./3.,
+    "T_CMB0": 2.726,
+    "T_min": 300.,
+    "ng": 128,
+    "wmu": 4.0/(8.0-5.0*0.245)
 }
 
 seek_extras = 137

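The new header structs above use compact (names, count, format) tuples
consumed by read_attrs from yt.utilities.fortran_utils. As an illustration of
how such a spec can be decoded - this sketch assumes 4-byte Fortran record
markers around each record (consistent with the pad bytes in the old structs)
and is not the actual read_attrs implementation:

import struct

def read_record_attrs(f, attrs, endian='>'):
    # attrs is a list of (names, count, fmt) tuples as in
    # amr_header_struct above; names may be a string or a tuple.
    vals = {}
    for names, count, fmt in attrs:
        full_fmt = endian + fmt * count
        f.read(4)  # leading record marker (assumed layout)
        items = struct.unpack(full_fmt, f.read(struct.calcsize(full_fmt)))
        f.read(4)  # trailing record marker (assumed layout)
        if not isinstance(names, tuple):
            names = (names,)
        vals.update(zip(names, items))
    return vals
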
diff -r f49fc8747a994d9f8c7326d7ba2a25f05430e56a -r 6cf22697b73c1490164684de693252380b7c2d18 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -3,6 +3,8 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: UCSD
+Author: Chris Moody <matthewturk at gmail.com>
+Affiliation: UCSC
 Homepage: http://yt-project.org/
 License:
   Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
@@ -22,7 +24,7 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
-
+import numpy as np
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
@@ -35,210 +37,221 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 import yt.utilities.lib as amr_utils
+from yt.utilities.physical_constants import mass_sun_cgs
+from yt.frontends.art.definitions import *
 
 KnownARTFields = FieldInfoContainer()
 add_art_field = KnownARTFields.add_field
-
 ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = ARTFieldInfo.add_field
 
-import numpy as np
+for f in fluid_fields:
+    add_art_field(f, function=NullFunc, take_log=True,
+                  validators=[ValidateDataField(f)])
 
-#these are just the hydro fields
-known_art_fields = [ 'Density','TotalEnergy',
-                     'XMomentumDensity','YMomentumDensity','ZMomentumDensity',
-                     'Pressure','Gamma','GasEnergy',
-                     'MetalDensitySNII', 'MetalDensitySNIa',
-                     'PotentialNew','PotentialOld']
-
-#Add the fields, then later we'll individually defined units and names
-for f in known_art_fields:
+for f in particle_fields:
     add_art_field(f, function=NullFunc, take_log=True,
-              validators = [ValidateDataField(f)])
-
-#Hydro Fields that are verified to be OK unit-wise:
-#Density
-#Temperature
-#metallicities
-#MetalDensity SNII + SNia
-
-#Hydro Fields that need to be tested:
-#TotalEnergy
-#XYZMomentum
-#Pressure
-#Gamma
-#GasEnergy
-#Potentials
-#xyzvelocity
-
-#Particle fields that are tested:
-#particle_position_xyz
-#particle_type
-#particle_index
-#particle_mass
-#particle_mass_initial
-#particle_age
-#particle_velocity
-#particle_metallicity12
-
-#Particle fields that are untested:
-#NONE
-
-#Other checks:
-#CellMassMsun == Density * CellVolume
+                  validators=[ValidateDataField(f)],
+                  particle_type=True)
+add_art_field("particle_mass", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
+add_art_field("particle_mass_initial", function=NullFunc, take_log=True,
+              validators=[ValidateDataField(f)],
+              particle_type=True,
+              convert_function=lambda x: x.convert("particle_mass"))
 
 def _convertDensity(data):
     return data.convert("Density")
 KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._convert_function = _convertDensity
 
 def _convertTotalEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["TotalEnergy"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["TotalEnergy"]._projected_units = r"\rm{K}"
-KnownARTFields["TotalEnergy"]._convert_function=_convertTotalEnergy
+KnownARTFields["TotalEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["TotalEnergy"]._convert_function = _convertTotalEnergy
 
 def _convertXMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["XMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["XMomentumDensity"]._convert_function=_convertXMomentumDensity
+KnownARTFields["XMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["XMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["XMomentumDensity"]._convert_function = _convertXMomentumDensity
 
 def _convertYMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["YMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["YMomentumDensity"]._convert_function=_convertYMomentumDensity
+KnownARTFields["YMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["YMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["YMomentumDensity"]._convert_function = _convertYMomentumDensity
 
 def _convertZMomentumDensity(data):
-    tr  = data.convert("Mass")*data.convert("Velocity")
+    tr = data.convert("Mass")*data.convert("Velocity")
     tr *= (data.convert("Density")/data.convert("Mass"))
     return tr
-KnownARTFields["ZMomentumDensity"]._units = r"\rm{mg}/\rm{s}/\rm{cm}^3"
-KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{K}"
-KnownARTFields["ZMomentumDensity"]._convert_function=_convertZMomentumDensity
+KnownARTFields["ZMomentumDensity"]._units = r"\rm{g}/\rm{s}/\rm{cm}^3"
+KnownARTFields["ZMomentumDensity"]._projected_units = r"\rm{g}/\rm{s}/\rm{cm}^2"
+KnownARTFields["ZMomentumDensity"]._convert_function = _convertZMomentumDensity
 
 def _convertPressure(data):
     return data.convert("Pressure")
-KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{cm}/\rm{s}^2"
+KnownARTFields["Pressure"]._units = r"\rm{g}/\rm{s}^2/\rm{cm}^1"
 KnownARTFields["Pressure"]._projected_units = r"\rm{g}/\rm{s}^2"
-KnownARTFields["Pressure"]._convert_function=_convertPressure
+KnownARTFields["Pressure"]._convert_function = _convertPressure
 
 def _convertGamma(data):
     return 1.0
 KnownARTFields["Gamma"]._units = r""
 KnownARTFields["Gamma"]._projected_units = r""
-KnownARTFields["Gamma"]._convert_function=_convertGamma
+KnownARTFields["Gamma"]._convert_function = _convertGamma
 
 def _convertGasEnergy(data):
     return data.convert("GasEnergy")
-KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-KnownARTFields["GasEnergy"]._projected_units = r""
-KnownARTFields["GasEnergy"]._convert_function=_convertGasEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["GasEnergy"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["GasEnergy"]._convert_function = _convertGasEnergy
 
 def _convertMetalDensitySNII(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNII"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNII"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNII"]._convert_function=_convertMetalDensitySNII
+KnownARTFields["MetalDensitySNII"]._convert_function = _convertMetalDensitySNII
 
 def _convertMetalDensitySNIa(data):
     return data.convert('Density')
 KnownARTFields["MetalDensitySNIa"]._units = r"\rm{g}/\rm{cm}^3"
 KnownARTFields["MetalDensitySNIa"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["MetalDensitySNIa"]._convert_function=_convertMetalDensitySNIa
+KnownARTFields["MetalDensitySNIa"]._convert_function = _convertMetalDensitySNIa
 
 def _convertPotentialNew(data):
     return data.convert("Potential")
-KnownARTFields["PotentialNew"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialNew"]._convert_function=_convertPotentialNew
+KnownARTFields["PotentialNew"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialNew"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialNew"]._convert_function = _convertPotentialNew
 
 def _convertPotentialOld(data):
     return data.convert("Potential")
-KnownARTFields["PotentialOld"]._units = r"\rm{g}/\rm{cm}^3"
-KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}/\rm{cm}^2"
-KnownARTFields["PotentialOld"]._convert_function=_convertPotentialOld
+KnownARTFields["PotentialOld"]._units = r"\rm{g}\rm{cm}^2/\rm{s}^2"
+KnownARTFields["PotentialOld"]._projected_units = r"\rm{g}\rm{cm}^3/\rm{s}^2"
+KnownARTFields["PotentialOld"]._convert_function = _convertPotentialOld
 
 ####### Derived fields
+def _temperature(field, data):
+    tr = data["GasEnergy"]/data["Density"]
+    tr /= data.pf.conversion_factors["GasEnergy"]
+    tr *= data.pf.conversion_factors["Density"]
+    tr *= data.pf.conversion_factors['tr']
+    return tr
 
-def _temperature(field, data):
-    dg = data["GasEnergy"] #.astype('float64')
-    dg /= data.pf.conversion_factors["GasEnergy"]
-    dd = data["Density"] #.astype('float64')
-    dd /= data.pf.conversion_factors["Density"]
-    tr = dg/dd*data.pf.conversion_factors['tr']
-    #ghost cells have zero density?
-    tr[np.isnan(tr)] = 0.0
-    #dd[di] = -1.0
-    #if data.id==460:
-    #tr[di] = -1.0 #replace the zero-density points with zero temp
-    #print tr.min()
-    #assert np.all(np.isfinite(tr))
-    return tr
 def _converttemperature(data):
-    #x = data.pf.conversion_factors["Temperature"]
-    x = 1.0
-    return x
-add_field("Temperature", function=_temperature, units = r"\mathrm{K}",take_log=True)
+    return 1.0
+add_field("Temperature", function=_temperature,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
 ARTFieldInfo["Temperature"]._projected_units = r"\mathrm{K}"
-#ARTFieldInfo["Temperature"]._convert_function=_converttemperature
 
 def _metallicity_snII(field, data):
-    tr  = data["MetalDensitySNII"] / data["Density"]
+    tr = data["MetalDensitySNII"] / data["Density"]
     return tr
-add_field("Metallicity_SNII", function=_metallicity_snII, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNII", function=_metallicity_snII,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNII"]._units = r""
 ARTFieldInfo["Metallicity_SNII"]._projected_units = r""
 
 def _metallicity_snIa(field, data):
-    tr  = data["MetalDensitySNIa"] / data["Density"]
+    tr = data["MetalDensitySNIa"] / data["Density"]
     return tr
-add_field("Metallicity_SNIa", function=_metallicity_snIa, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity_SNIa", function=_metallicity_snIa,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity_SNIa"]._units = r""
 ARTFieldInfo["Metallicity_SNIa"]._projected_units = r""
 
 def _metallicity(field, data):
-    tr  = data["Metal_Density"] / data["Density"]
+    tr = data["Metal_Density"] / data["Density"]
     return tr
-add_field("Metallicity", function=_metallicity, units = r"\mathrm{K}",take_log=True)
+add_field("Metallicity", function=_metallicity,
+          units=r"\mathrm{K}", take_log=True)
 ARTFieldInfo["Metallicity"]._units = r""
 ARTFieldInfo["Metallicity"]._projected_units = r""
 
-def _x_velocity(field,data):
-    tr  = data["XMomentumDensity"]/data["Density"]
+def _x_velocity(field, data):
+    tr = data["XMomentumDensity"]/data["Density"]
     return tr
-add_field("x-velocity", function=_x_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("x-velocity", function=_x_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["x-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["x-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _y_velocity(field,data):
-    tr  = data["YMomentumDensity"]/data["Density"]
+def _y_velocity(field, data):
+    tr = data["YMomentumDensity"]/data["Density"]
     return tr
-add_field("y-velocity", function=_y_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("y-velocity", function=_y_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["y-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["y-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
-def _z_velocity(field,data):
-    tr  = data["ZMomentumDensity"]/data["Density"]
+def _z_velocity(field, data):
+    tr = data["ZMomentumDensity"]/data["Density"]
     return tr
-add_field("z-velocity", function=_z_velocity, units = r"\mathrm{cm/s}",take_log=False)
+add_field("z-velocity", function=_z_velocity,
+          units=r"\mathrm{cm/s}", take_log=False)
 ARTFieldInfo["z-velocity"]._units = r"\rm{cm}/\rm{s}"
 ARTFieldInfo["z-velocity"]._projected_units = r"\rm{cm}/\rm{s}"
 
 def _metal_density(field, data):
-    tr  = data["MetalDensitySNIa"]
+    tr = data["MetalDensitySNIa"]
     tr += data["MetalDensitySNII"]
     return tr
-add_field("Metal_Density", function=_metal_density, units = r"\mathrm{K}",take_log=True)
-ARTFieldInfo["Metal_Density"]._units = r""
-ARTFieldInfo["Metal_Density"]._projected_units = r""
+add_field("Metal_Density", function=_metal_density,
+          units=r"\mathrm{K}", take_log=True)
+ARTFieldInfo["Metal_Density"]._units = r"\rm{g}/\rm{cm}^3"
+ARTFieldInfo["Metal_Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
+# Particle fields
+def _particle_age(field, data):
+    tr = data["particle_creation_time"]
+    return data.pf.current_time - tr
+add_field("particle_age", function=_particle_age, units=r"\mathrm{s}",
+          take_log=True, particle_type=True)
 
-#Particle fields
+def spread_ages(ages, spread=1.0e7*365*24*3600):
+    # stars are formed in lumps; spread out the ages linearly
+    da = np.diff(ages)
+    assert np.all(da <= 0)
+    # ages should always be decreasing, and ordered so
+    agesd = np.zeros(ages.shape)
+    idx, = np.where(da < 0)
+    idx += 1  # mark the right edges
+    # spread this age evenly out to the next age
+    lidx = 0
+    lage = 0
+    for i in idx:
+        n = i-lidx  # n stars affected
+        rage = ages[i]
+        lage = max(rage-spread, 0.0)
+        agesd[lidx:i] = np.linspace(lage, rage, n)
+        lidx = i
+        # lage=rage
+    # we didn't get the last iter
+    n = agesd.shape[0]-lidx
+    rage = ages[-1]
+    lage = max(rage-spread, 0.0)
+    agesd[lidx:] = np.linspace(lage, rage, n)
+    return agesd
+
+def _particle_age_spread(field, data):
+    tr = data["particle_creation_time"]
+    return spread_ages(data.pf.current_time - tr)
+
+add_field("particle_age_spread", function=_particle_age_spread,
+          particle_type=True, take_log=True, units=r"\rm{s}")
+
+def _ParticleMassMsun(field, data):
+    return data["particle_mass"]/mass_sun_cgs
+add_field("ParticleMassMsun", function=_ParticleMassMsun, particle_type=True,
+          take_log=True, units=r"\rm{Msun}")

This diff is so big that we needed to truncate the remainder.
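
One item worth calling out from the fields.py diff above: spread_ages takes a
monotonically decreasing array of stellar ages, in which stars formed in the
same timestep share one value, and spreads each such lump linearly over a
window (10 Myr by default) ending at the recorded age. A quick sanity check on
a toy array:

import numpy as np
ages = np.array([5.0e14, 5.0e14, 5.0e14])  # one lump, in seconds
spread_ages(ages)
# -> array([1.8464e+14, 3.4232e+14, 5.0000e+14]); the default spread of
#    1.0e7*365*24*3600 s (~3.15e14 s) ends at the recorded age.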

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled and are the addressed
recipient of this email.


